Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

Update to kube v1.17

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>

Committed by mergify[bot]
parent 327fcd1b1b
commit 3af1e26d7c
1 vendor/go.etcd.io/etcd/Documentation/README.md generated vendored Symbolic link
@@ -0,0 +1 @@
docs.md
202 vendor/go.etcd.io/etcd/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
The full standard text of the Apache License, Version 2.0, January 2004 (http://www.apache.org/licenses/): definitions, copyright and patent grants, redistribution conditions, contribution, trademark, warranty-disclaimer and liability terms, and the appendix on applying the license to a work.
5 vendor/go.etcd.io/etcd/NOTICE generated vendored Normal file
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2014 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
977 vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go generated vendored Normal file
@@ -0,0 +1,977 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: auth.proto

/*
Package authpb is a generated protocol buffer package.

It is generated from these files:
	auth.proto

It has these top-level messages:
	UserAddOptions
	User
	Permission
	Role
*/
package authpb

import (
	"fmt"

	proto "github.com/golang/protobuf/proto"

	math "math"

	_ "github.com/gogo/protobuf/gogoproto"

	io "io"
)

type Permission_Type int32

const (
	READ      Permission_Type = 0
	WRITE     Permission_Type = 1
	READWRITE Permission_Type = 2
)

type UserAddOptions struct {
	NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"`
}

// User is a single entry in the bucket authUsers
type User struct {
	Name     []byte          `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Password []byte          `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
	Roles    []string        `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
	Options  *UserAddOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
}

// Permission is a single entity
type Permission struct {
	PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
	Key      []byte          `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	RangeEnd []byte          `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
}

// Role is a single entry in the bucket authRoles
type Role struct {
	Name          []byte        `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
}

(The rest of the generated file supplies the Permission_Type name/value maps and String/EnumDescriptor methods, the proto type registration in init, and, for each message, Reset/String/Descriptor plus hand-rolled Marshal/MarshalTo, Size, and Unmarshal implementations, along with the varint helpers encodeVarintAuth/sovAuth/sozAuth, the skipAuth field skipper, the ErrInvalidLengthAuth and ErrIntOverflowAuth errors, and the gzipped fileDescriptorAuth bytes.)
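For orientation, here is a minimal round-trip sketch through the generated authpb API above. It is not part of the diff; the key range and permission values are made up. The gogo-generated Marshal/Unmarshal methods walk bytes directly rather than using reflection, which is why the file carries its own varint helpers.

package main

import (
	"fmt"

	"go.etcd.io/etcd/auth/authpb"
)

func main() {
	// Build a Permission covering the key range ["foo", "fop") with read-write access.
	perm := &authpb.Permission{
		PermType: authpb.READWRITE,
		Key:      []byte("foo"),
		RangeEnd: []byte("fop"),
	}

	// Marshal encodes the message to the protobuf wire format via the generated MarshalTo.
	data, err := perm.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal decodes the tagged fields back into a Permission value.
	var decoded authpb.Permission
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Printf("%s %q..%q\n", decoded.PermType, decoded.Key, decoded.RangeEnd)
}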
242 vendor/go.etcd.io/etcd/clientv3/auth.go generated vendored Normal file
@@ -0,0 +1,242 @@
// Copyright 2016 The etcd Authors
// Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).

package clientv3

import (
	"context"
	"fmt"
	"strings"

	"go.etcd.io/etcd/auth/authpb"
	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
)

type (
	AuthEnableResponse               pb.AuthEnableResponse
	AuthDisableResponse              pb.AuthDisableResponse
	AuthenticateResponse             pb.AuthenticateResponse
	AuthUserAddResponse              pb.AuthUserAddResponse
	AuthUserDeleteResponse           pb.AuthUserDeleteResponse
	AuthUserChangePasswordResponse   pb.AuthUserChangePasswordResponse
	AuthUserGrantRoleResponse        pb.AuthUserGrantRoleResponse
	AuthUserGetResponse              pb.AuthUserGetResponse
	AuthUserRevokeRoleResponse       pb.AuthUserRevokeRoleResponse
	AuthRoleAddResponse              pb.AuthRoleAddResponse
	AuthRoleGrantPermissionResponse  pb.AuthRoleGrantPermissionResponse
	AuthRoleGetResponse              pb.AuthRoleGetResponse
	AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
	AuthRoleDeleteResponse           pb.AuthRoleDeleteResponse
	AuthUserListResponse             pb.AuthUserListResponse
	AuthRoleListResponse             pb.AuthRoleListResponse

	PermissionType authpb.Permission_Type
	Permission     authpb.Permission
)

const (
	PermRead      = authpb.READ
	PermWrite     = authpb.WRITE
	PermReadWrite = authpb.READWRITE
)

type UserAddOptions authpb.UserAddOptions

type Auth interface {
	// AuthEnable enables auth of an etcd cluster.
	AuthEnable(ctx context.Context) (*AuthEnableResponse, error)

	// AuthDisable disables auth of an etcd cluster.
	AuthDisable(ctx context.Context) (*AuthDisableResponse, error)

	// UserAdd adds a new user to an etcd cluster.
	UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)

	// UserAddWithOptions adds a new user to an etcd cluster with some options.
	UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error)

	// UserDelete deletes a user from an etcd cluster.
	UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)

	// UserChangePassword changes a password of a user.
	UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)

	// UserGrantRole grants a role to a user.
	UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)

	// UserGet gets a detailed information of a user.
	UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)

	// UserList gets a list of all users.
	UserList(ctx context.Context) (*AuthUserListResponse, error)

	// UserRevokeRole revokes a role of a user.
	UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)

	// RoleAdd adds a new role to an etcd cluster.
	RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)

	// RoleGrantPermission grants a permission to a role.
	RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)

	// RoleGet gets a detailed information of a role.
	RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)

	// RoleList gets a list of all roles.
	RoleList(ctx context.Context) (*AuthRoleListResponse, error)

	// RoleRevokePermission revokes a permission from a role.
	RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)

	// RoleDelete deletes a role.
	RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
}

type authClient struct {
	remote   pb.AuthClient
	callOpts []grpc.CallOption
}

func NewAuth(c *Client) Auth {
	api := &authClient{remote: RetryAuthClient(c)}
	if c != nil {
		api.callOpts = c.callOpts
	}
	return api
}

func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
	resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...)
	return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}

func StrToPermissionType(s string) (PermissionType, error) {
	val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
	if ok {
		return PermissionType(val), nil
	}
	return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
}

(The remaining code in the file follows the same thin-wrapper pattern as UserAdd: every other authClient method forwards to the corresponding pb.Auth* RPC on auth.remote and converts the response with toErr, and an unexported authenticator type wraps a dedicated grpc.ClientConn, created in newAuthenticator via grpc.DialContext, to issue pb.AuthenticateRequest calls and close the connection.)
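As a usage sketch of the Auth interface vendored above (not part of this commit): clientv3.New, clientv3.Config and Close are assumed from the wider clientv3 package rather than from this excerpt, and the endpoint, user, role and key range are placeholders.

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Endpoint, names and passwords below are placeholders.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	auth := clientv3.NewAuth(cli)

	// Create a role with read-only access to the key range ["foo", "fop").
	if _, err := auth.RoleAdd(ctx, "reader"); err != nil {
		log.Fatal(err)
	}
	if _, err := auth.RoleGrantPermission(ctx, "reader", "foo", "fop", clientv3.PermRead); err != nil {
		log.Fatal(err)
	}

	// Create a user, attach the role, then switch authentication on.
	if _, err := auth.UserAdd(ctx, "alice", "secret"); err != nil {
		log.Fatal(err)
	}
	if _, err := auth.UserGrantRole(ctx, "alice", "reader"); err != nil {
		log.Fatal(err)
	}
	if _, err := auth.AuthEnable(ctx); err != nil {
		log.Fatal(err)
	}
}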
293 vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go generated vendored Normal file
@@ -0,0 +1,293 @@
// Copyright 2018 The etcd Authors
// Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).

// Package balancer implements client balancer.
package balancer

import (
	"strconv"
	"sync"
	"time"

	"go.etcd.io/etcd/clientv3/balancer/connectivity"
	"go.etcd.io/etcd/clientv3/balancer/picker"

	"go.uber.org/zap"
	"google.golang.org/grpc/balancer"
	grpcconnectivity "google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
	_ "google.golang.org/grpc/resolver/dns"         // register DNS resolver
	_ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
)

// Config defines balancer configurations.
type Config struct {
	// Policy configures balancer policy.
	Policy picker.Policy

	// Picker implements gRPC picker.
	// Leave empty if "Policy" field is not custom.
	// TODO: currently custom policy is not supported.
	// Picker picker.Picker

	// Name defines an additional name for balancer.
	// Useful for balancer testing to avoid register conflicts.
	// If empty, defaults to policy name.
	Name string

	// Logger configures balancer logging.
	// If nil, logs are discarded.
	Logger *zap.Logger
}

// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
// must be invoked at initialization time.
func RegisterBuilder(cfg Config) {
	bb := &builder{cfg}
	balancer.Register(bb)

	bb.cfg.Logger.Debug(
		"registered balancer",
		zap.String("policy", bb.cfg.Policy.String()),
		zap.String("name", bb.cfg.Name),
	)
}

type builder struct {
	cfg Config
}

// Build is called initially when creating "ccBalancerWrapper".
// "grpc.Dial" is called to this client connection.
// Then, resolved addresses will be handled via "HandleResolvedAddrs".
func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
	bb := &baseBalancer{
		id:     strconv.FormatInt(time.Now().UnixNano(), 36),
		policy: b.cfg.Policy,
		name:   b.cfg.Name,
		lg:     b.cfg.Logger,

		addrToSc: make(map[resolver.Address]balancer.SubConn),
		scToAddr: make(map[balancer.SubConn]resolver.Address),
		scToSt:   make(map[balancer.SubConn]grpcconnectivity.State),

		currentConn:          nil,
		connectivityRecorder: connectivity.New(b.cfg.Logger),

		// initialize picker always returns "ErrNoSubConnAvailable"
		picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
	}

	// TODO: support multiple connections
	bb.mu.Lock()
	bb.currentConn = cc
	bb.mu.Unlock()

	bb.lg.Info(
		"built balancer",
		zap.String("balancer-id", bb.id),
		zap.String("policy", bb.policy.String()),
		zap.String("resolver-target", cc.Target()),
	)
	return bb
}

// Name implements "grpc/balancer.Builder" interface.
func (b *builder) Name() string { return b.cfg.Name }

// Balancer defines client balancer interface.
type Balancer interface {
	// Balancer is called on specified client connection. Client initiates gRPC
	// connection with "grpc.Dial(addr, grpc.WithBalancerName)", and then those resolved
	// addresses are passed to "grpc/balancer.Balancer.HandleResolvedAddrs".
	// For each resolved address, balancer calls "balancer.ClientConn.NewSubConn".
	// "grpc/balancer.Balancer.HandleSubConnStateChange" is called when connectivity state
	// changes, thus requires failover logic in this method.
	balancer.Balancer

	// Picker calls "Pick" for every client request.
	picker.Picker
}

type baseBalancer struct {
	id     string
	policy picker.Policy
	name   string
	lg     *zap.Logger

	mu sync.RWMutex

	addrToSc map[resolver.Address]balancer.SubConn
	scToAddr map[balancer.SubConn]resolver.Address
	scToSt   map[balancer.SubConn]grpcconnectivity.State

	currentConn          balancer.ClientConn
	connectivityRecorder connectivity.Recorder

	picker picker.Picker
}

(The remainder of the captured diff shows HandleResolvedAddrs, which creates a SubConn for each newly resolved address and removes SubConns whose addresses the resolver dropped while deferring their map cleanup until shutdown, and HandleSubConnStateChange, which logs and records per-SubConn connectivity state, reconnects SubConns that go Idle, and deletes state for SubConns that reach Shutdown.)
oldAggrState := bb.connectivityRecorder.GetCurrentState()
|
||||
bb.connectivityRecorder.RecordTransition(old, s)
|
||||
|
||||
// Update balancer picker when one of the following happens:
|
||||
// - this sc became ready from not-ready
|
||||
// - this sc became not-ready from ready
|
||||
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
|
||||
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
|
||||
if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) ||
|
||||
(bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) {
|
||||
bb.updatePicker()
|
||||
}
|
||||
|
||||
bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker)
|
||||
}
|
||||
|
||||
func (bb *baseBalancer) updatePicker() {
|
||||
if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure {
|
||||
bb.picker = picker.NewErr(balancer.ErrTransientFailure)
|
||||
bb.lg.Info(
|
||||
"updated picker to transient error picker",
|
||||
zap.String("picker", bb.picker.String()),
|
||||
zap.String("balancer-id", bb.id),
|
||||
zap.String("policy", bb.policy.String()),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// only pass ready subconns to picker
|
||||
scToAddr := make(map[balancer.SubConn]resolver.Address)
|
||||
for addr, sc := range bb.addrToSc {
|
||||
if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready {
|
||||
scToAddr[sc] = addr
|
||||
}
|
||||
}
|
||||
|
||||
bb.picker = picker.New(picker.Config{
|
||||
Policy: bb.policy,
|
||||
Logger: bb.lg,
|
||||
SubConnToResolverAddress: scToAddr,
|
||||
})
|
||||
bb.lg.Info(
|
||||
"updated picker",
|
||||
zap.String("picker", bb.picker.String()),
|
||||
zap.String("balancer-id", bb.id),
|
||||
zap.String("policy", bb.policy.String()),
|
||||
zap.Strings("subconn-ready", scsToStrings(scToAddr)),
|
||||
zap.Int("subconn-size", len(scToAddr)),
|
||||
)
|
||||
}
|
||||
|
||||
// Close implements "grpc/balancer.Balancer" interface.
|
||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||
// and it doesn't need to call RemoveSubConn for the SubConns.
|
||||
func (bb *baseBalancer) Close() {
|
||||
// TODO
|
||||
}
|
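The builder above is what clientv3 itself registers at init time (see client.go later in this diff). A minimal usage sketch, not part of the vendored files: the balancer name, endpoint, and logger below are placeholders, and the gRPC API used is the pre-1.27 balancer interface that this vendored tree targets.

package main

import (
    "google.golang.org/grpc"

    "go.etcd.io/etcd/clientv3/balancer"
    "go.etcd.io/etcd/clientv3/balancer/picker"
    "go.uber.org/zap"
)

func main() {
    // Register the round-robin policy under a process-unique name, once,
    // before any dialing happens (RegisterBuilder calls balancer.Register).
    name := "example-roundrobin" // hypothetical name
    balancer.RegisterBuilder(balancer.Config{
        Policy: picker.RoundrobinBalanced,
        Name:   name,
        Logger: zap.NewExample(),
    })

    // Ask gRPC to use the registered balancer by name; target is a placeholder.
    conn, err := grpc.Dial(
        "passthrough:///127.0.0.1:2379",
        grpc.WithInsecure(),
        grpc.WithBalancerName(name),
    )
    if err != nil {
        panic(err)
    }
    defer conn.Close()
}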
93  vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go  generated  vendored  Normal file
@@ -0,0 +1,93 @@
// Copyright 2019 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package connectivity implements client connectivity operations.
package connectivity

import (
    "sync"

    "go.uber.org/zap"
    "google.golang.org/grpc/connectivity"
)

// Recorder records gRPC connectivity.
type Recorder interface {
    GetCurrentState() connectivity.State
    RecordTransition(oldState, newState connectivity.State)
}

// New returns a new Recorder.
func New(lg *zap.Logger) Recorder {
    return &recorder{lg: lg}
}

// recorder takes the connectivity states of multiple SubConns
// and returns one aggregated connectivity state.
// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
type recorder struct {
    lg *zap.Logger

    mu sync.RWMutex

    cur connectivity.State

    numReady            uint64 // Number of addrConns in ready state.
    numConnecting       uint64 // Number of addrConns in connecting state.
    numTransientFailure uint64 // Number of addrConns in transientFailure.
}

func (rc *recorder) GetCurrentState() (state connectivity.State) {
    rc.mu.RLock()
    defer rc.mu.RUnlock()
    return rc.cur
}

// RecordTransition records state change happening in subConn and based on that
// it evaluates what aggregated state should be.
//
//  - If at least one SubConn in Ready, the aggregated state is Ready;
//  - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
//  - Else the aggregated state is TransientFailure.
//
// Idle and Shutdown are not considered.
//
// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
func (rc *recorder) RecordTransition(oldState, newState connectivity.State) {
    rc.mu.Lock()
    defer rc.mu.Unlock()

    for idx, state := range []connectivity.State{oldState, newState} {
        updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
        switch state {
        case connectivity.Ready:
            rc.numReady += updateVal
        case connectivity.Connecting:
            rc.numConnecting += updateVal
        case connectivity.TransientFailure:
            rc.numTransientFailure += updateVal
        default:
            rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String()))
        }
    }

    switch { // must be exclusive, no overlap
    case rc.numReady > 0:
        rc.cur = connectivity.Ready
    case rc.numConnecting > 0:
        rc.cur = connectivity.Connecting
    default:
        rc.cur = connectivity.TransientFailure
    }
}
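The aggregation rule in RecordTransition can be exercised directly through the Recorder interface above. A short sketch, not part of the vendored files; the transition order mirrors how baseBalancer feeds the recorder (new SubConns start at Idle, which the recorder does not count toward the aggregate).

package main

import (
    "fmt"

    "go.etcd.io/etcd/clientv3/balancer/connectivity"
    "go.uber.org/zap"
    grpcconnectivity "google.golang.org/grpc/connectivity"
)

func main() {
    rec := connectivity.New(zap.NewExample())

    // One SubConn goes Idle -> Connecting -> Ready.
    rec.RecordTransition(grpcconnectivity.Idle, grpcconnectivity.Connecting)
    fmt.Println(rec.GetCurrentState()) // CONNECTING

    rec.RecordTransition(grpcconnectivity.Connecting, grpcconnectivity.Ready)
    fmt.Println(rec.GetCurrentState()) // READY

    // The only Ready SubConn fails; with nothing Ready or Connecting left,
    // the aggregate falls back to TRANSIENT_FAILURE.
    rec.RecordTransition(grpcconnectivity.Ready, grpcconnectivity.TransientFailure)
    fmt.Println(rec.GetCurrentState()) // TRANSIENT_FAILURE
}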
16  vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go  generated  vendored  Normal file
@@ -0,0 +1,16 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package picker defines/implements client balancer picker policy.
package picker
39  vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go  generated  vendored  Normal file
@@ -0,0 +1,39 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package picker

import (
    "context"

    "google.golang.org/grpc/balancer"
)

// NewErr returns a picker that always returns err on "Pick".
func NewErr(err error) Picker {
    return &errPicker{p: Error, err: err}
}

type errPicker struct {
    p   Policy
    err error
}

func (ep *errPicker) String() string {
    return ep.p.String()
}

func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
    return nil, nil, ep.err
}
91  vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go  generated  vendored  Normal file
@@ -0,0 +1,91 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package picker

import (
    "fmt"

    "go.uber.org/zap"
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/resolver"
)

// Picker defines balancer Picker methods.
type Picker interface {
    balancer.Picker
    String() string
}

// Config defines picker configuration.
type Config struct {
    // Policy specifies etcd clientv3's built in balancer policy.
    Policy Policy

    // Logger defines picker logging object.
    Logger *zap.Logger

    // SubConnToResolverAddress maps each gRPC sub-connection to an address.
    // Basically, it is a list of addresses that the Picker can pick from.
    SubConnToResolverAddress map[balancer.SubConn]resolver.Address
}

// Policy defines balancer picker policy.
type Policy uint8

const (
    // Error is error picker policy.
    Error Policy = iota

    // RoundrobinBalanced balances loads over multiple endpoints
    // and implements failover in roundrobin fashion.
    RoundrobinBalanced

    // Custom defines custom balancer picker.
    // TODO: custom picker is not supported yet.
    Custom
)

func (p Policy) String() string {
    switch p {
    case Error:
        return "picker-error"

    case RoundrobinBalanced:
        return "picker-roundrobin-balanced"

    case Custom:
        panic("'custom' picker policy is not supported yet")

    default:
        panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
    }
}

// New creates a new Picker.
func New(cfg Config) Picker {
    switch cfg.Policy {
    case Error:
        panic("'error' picker policy is not supported here; use 'picker.NewErr'")

    case RoundrobinBalanced:
        return newRoundrobinBalanced(cfg)

    case Custom:
        panic("'custom' picker policy is not supported yet")

    default:
        panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy))
    }
}
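A sketch of the two constructors above, not part of the vendored files: real SubConns are only created by gRPC through balancer.ClientConn.NewSubConn, so a throwaway stub stands in here, and the addresses are placeholders. It targets the same pre-1.27 gRPC balancer API this vendored tree uses.

package main

import (
    "context"
    "fmt"

    "go.etcd.io/etcd/clientv3/balancer/picker"
    "go.uber.org/zap"
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/resolver"
)

// fakeSubConn is an illustration-only stand-in for a gRPC SubConn.
type fakeSubConn struct{ id int }

func (f *fakeSubConn) UpdateAddresses([]resolver.Address) {}
func (f *fakeSubConn) Connect()                           {}

func main() {
    // Error picker: every Pick fails with the configured error.
    ep := picker.NewErr(balancer.ErrNoSubConnAvailable)
    if _, _, err := ep.Pick(context.TODO(), balancer.PickOptions{}); err != nil {
        fmt.Println("error picker:", err)
    }

    // Round-robin picker over two "ready" sub-connections.
    p := picker.New(picker.Config{
        Policy: picker.RoundrobinBalanced,
        Logger: zap.NewExample(),
        SubConnToResolverAddress: map[balancer.SubConn]resolver.Address{
            &fakeSubConn{1}: {Addr: "10.0.0.1:2379"},
            &fakeSubConn{2}: {Addr: "10.0.0.2:2379"},
        },
    })
    for i := 0; i < 4; i++ {
        sc, done, _ := p.Pick(context.TODO(), balancer.PickOptions{})
        done(balancer.DoneInfo{}) // report RPC completion back to the picker
        fmt.Printf("pick %d -> subconn %d\n", i, sc.(*fakeSubConn).id)
    }
}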
95  vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go  generated  vendored  Normal file
@@ -0,0 +1,95 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package picker

import (
    "context"
    "sync"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/resolver"
)

// newRoundrobinBalanced returns a new roundrobin balanced picker.
func newRoundrobinBalanced(cfg Config) Picker {
    scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))
    for sc := range cfg.SubConnToResolverAddress {
        scs = append(scs, sc)
    }
    return &rrBalanced{
        p:        RoundrobinBalanced,
        lg:       cfg.Logger,
        scs:      scs,
        scToAddr: cfg.SubConnToResolverAddress,
    }
}

type rrBalanced struct {
    p Policy

    lg *zap.Logger

    mu       sync.RWMutex
    next     int
    scs      []balancer.SubConn
    scToAddr map[balancer.SubConn]resolver.Address
}

func (rb *rrBalanced) String() string { return rb.p.String() }

// Pick is called for every client request.
func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
    rb.mu.RLock()
    n := len(rb.scs)
    rb.mu.RUnlock()
    if n == 0 {
        return nil, nil, balancer.ErrNoSubConnAvailable
    }

    rb.mu.Lock()
    cur := rb.next
    sc := rb.scs[cur]
    picked := rb.scToAddr[sc].Addr
    rb.next = (rb.next + 1) % len(rb.scs)
    rb.mu.Unlock()

    rb.lg.Debug(
        "picked",
        zap.String("picker", rb.p.String()),
        zap.String("address", picked),
        zap.Int("subconn-index", cur),
        zap.Int("subconn-size", n),
    )

    doneFunc := func(info balancer.DoneInfo) {
        // TODO: error handling?
        fss := []zapcore.Field{
            zap.Error(info.Err),
            zap.String("picker", rb.p.String()),
            zap.String("address", picked),
            zap.Bool("success", info.Err == nil),
            zap.Bool("bytes-sent", info.BytesSent),
            zap.Bool("bytes-received", info.BytesReceived),
        }
        if info.Err == nil {
            rb.lg.Debug("balancer done", fss...)
        } else {
            rb.lg.Warn("balancer failed", fss...)
        }
    }
    return sc, doneFunc, nil
}
247  vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go  generated  vendored  Normal file
@@ -0,0 +1,247 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package endpoint resolves etcd endpoints using grpc targets of the form 'endpoint://<id>/<endpoint>'.
package endpoint

import (
    "context"
    "fmt"
    "net"
    "net/url"
    "strings"
    "sync"

    "google.golang.org/grpc/resolver"
)

const scheme = "endpoint"

var (
    targetPrefix = fmt.Sprintf("%s://", scheme)

    bldr *builder
)

func init() {
    bldr = &builder{
        resolverGroups: make(map[string]*ResolverGroup),
    }
    resolver.Register(bldr)
}

type builder struct {
    mu             sync.RWMutex
    resolverGroups map[string]*ResolverGroup
}

// NewResolverGroup creates a new ResolverGroup with the given id.
func NewResolverGroup(id string) (*ResolverGroup, error) {
    return bldr.newResolverGroup(id)
}

// ResolverGroup keeps all endpoints of resolvers using a common endpoint://<id>/ target
// up-to-date.
type ResolverGroup struct {
    mu        sync.RWMutex
    id        string
    endpoints []string
    resolvers []*Resolver
}

func (e *ResolverGroup) addResolver(r *Resolver) {
    e.mu.Lock()
    addrs := epsToAddrs(e.endpoints...)
    e.resolvers = append(e.resolvers, r)
    e.mu.Unlock()
    r.cc.NewAddress(addrs)
}

func (e *ResolverGroup) removeResolver(r *Resolver) {
    e.mu.Lock()
    for i, er := range e.resolvers {
        if er == r {
            e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...)
            break
        }
    }
    e.mu.Unlock()
}

// SetEndpoints updates the endpoints for ResolverGroup. All registered resolvers are updated
// immediately with the new endpoints.
func (e *ResolverGroup) SetEndpoints(endpoints []string) {
    addrs := epsToAddrs(endpoints...)
    e.mu.Lock()
    e.endpoints = endpoints
    for _, r := range e.resolvers {
        r.cc.NewAddress(addrs)
    }
    e.mu.Unlock()
}

// Target constructs an endpoint target using the endpoint id of the ResolverGroup.
func (e *ResolverGroup) Target(endpoint string) string {
    return Target(e.id, endpoint)
}

// Target constructs an endpoint resolver target.
func Target(id, endpoint string) string {
    return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint)
}

// IsTarget checks if a given target string is an endpoint resolver target.
func IsTarget(target string) bool {
    return strings.HasPrefix(target, "endpoint://")
}

func (e *ResolverGroup) Close() {
    bldr.close(e.id)
}

// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
    if len(target.Authority) < 1 {
        return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
    }
    id := target.Authority
    es, err := b.getResolverGroup(id)
    if err != nil {
        return nil, fmt.Errorf("failed to build resolver: %v", err)
    }
    r := &Resolver{
        endpointID: id,
        cc:         cc,
    }
    es.addResolver(r)
    return r, nil
}

func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) {
    b.mu.RLock()
    _, ok := b.resolverGroups[id]
    b.mu.RUnlock()
    if ok {
        return nil, fmt.Errorf("Endpoint already exists for id: %s", id)
    }

    es := &ResolverGroup{id: id}
    b.mu.Lock()
    b.resolverGroups[id] = es
    b.mu.Unlock()
    return es, nil
}

func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) {
    b.mu.RLock()
    es, ok := b.resolverGroups[id]
    b.mu.RUnlock()
    if !ok {
        return nil, fmt.Errorf("ResolverGroup not found for id: %s", id)
    }
    return es, nil
}

func (b *builder) close(id string) {
    b.mu.Lock()
    delete(b.resolverGroups, id)
    b.mu.Unlock()
}

func (b *builder) Scheme() string {
    return scheme
}

// Resolver provides a resolver for a single etcd cluster, identified by name.
type Resolver struct {
    endpointID string
    cc         resolver.ClientConn
    sync.RWMutex
}

// TODO: use balancer.epsToAddrs
func epsToAddrs(eps ...string) (addrs []resolver.Address) {
    addrs = make([]resolver.Address, 0, len(eps))
    for _, ep := range eps {
        addrs = append(addrs, resolver.Address{Addr: ep})
    }
    return addrs
}

func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}

func (r *Resolver) Close() {
    es, err := bldr.getResolverGroup(r.endpointID)
    if err != nil {
        return
    }
    es.removeResolver(r)
}

// ParseEndpoint parses an endpoint of the form
// (http|https)://<host>*|(unix|unixs)://<path>)
// and returns a protocol ('tcp' or 'unix'),
// host (or filepath if a unix socket),
// scheme (http, https, unix, unixs).
func ParseEndpoint(endpoint string) (proto string, host string, scheme string) {
    proto = "tcp"
    host = endpoint
    url, uerr := url.Parse(endpoint)
    if uerr != nil || !strings.Contains(endpoint, "://") {
        return proto, host, scheme
    }
    scheme = url.Scheme

    // strip scheme:// prefix since grpc dials by host
    host = url.Host
    switch url.Scheme {
    case "http", "https":
    case "unix", "unixs":
        proto = "unix"
        host = url.Host + url.Path
    default:
        proto, host = "", ""
    }
    return proto, host, scheme
}

// ParseTarget parses an endpoint://<id>/<endpoint> string and returns the parsed id and endpoint.
// If the target is malformed, an error is returned.
func ParseTarget(target string) (string, string, error) {
    noPrefix := strings.TrimPrefix(target, targetPrefix)
    if noPrefix == target {
        return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target)
    }
    parts := strings.SplitN(noPrefix, "/", 2)
    if len(parts) != 2 {
        return "", "", fmt.Errorf("malformed target, expected %s://<id>/<endpoint>, but got %s", scheme, target)
    }
    return parts[0], parts[1], nil
}

// Dialer dials an endpoint using net.Dialer.
// Context cancelation and timeout are supported.
func Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
    proto, host, _ := ParseEndpoint(dialEp)
    select {
    case <-ctx.Done():
        return nil, ctx.Err()
    default:
    }
    dialer := &net.Dialer{}
    if deadline, ok := ctx.Deadline(); ok {
        dialer.Deadline = deadline
    }
    return dialer.DialContext(ctx, proto, host)
}
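The exported helpers in this resolver package can be exercised on their own. A short illustrative sketch, not part of the vendored files; the ids and endpoints are placeholders.

package main

import (
    "fmt"

    "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
)

func main() {
    // Build an "endpoint://<id>/<endpoint>" resolver target for a hypothetical client id.
    target := endpoint.Target("client-1", "https://10.0.0.1:2379")
    fmt.Println(target) // endpoint://client-1/https://10.0.0.1:2379

    // Split it back into id and endpoint.
    id, ep, err := endpoint.ParseTarget(target)
    fmt.Println(id, ep, err)

    // ParseEndpoint maps URL schemes onto dialable (proto, host) pairs.
    for _, e := range []string{"https://10.0.0.1:2379", "unix:///tmp/etcd.sock", "10.0.0.1:2379"} {
        proto, host, scheme := endpoint.ParseEndpoint(e)
        fmt.Printf("%-26s -> proto=%q host=%q scheme=%q\n", e, proto, host, scheme)
    }
}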
68  vendor/go.etcd.io/etcd/clientv3/balancer/utils.go  generated  vendored  Normal file
@@ -0,0 +1,68 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package balancer

import (
    "fmt"
    "net/url"
    "sort"
    "sync/atomic"
    "time"

    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/resolver"
)

func scToString(sc balancer.SubConn) string {
    return fmt.Sprintf("%p", sc)
}

func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) {
    ss = make([]string, 0, len(scs))
    for sc, a := range scs {
        ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
    }
    sort.Strings(ss)
    return ss
}

func addrsToStrings(addrs []resolver.Address) (ss []string) {
    ss = make([]string, len(addrs))
    for i := range addrs {
        ss[i] = addrs[i].Addr
    }
    sort.Strings(ss)
    return ss
}

func epsToAddrs(eps ...string) (addrs []resolver.Address) {
    addrs = make([]resolver.Address, 0, len(eps))
    for _, ep := range eps {
        u, err := url.Parse(ep)
        if err != nil {
            addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend})
            continue
        }
        addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend})
    }
    return addrs
}

var genN = new(uint32)

func genName() string {
    now := time.Now().UnixNano()
    return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1))
}
672  vendor/go.etcd.io/etcd/clientv3/client.go  generated  vendored  Normal file
@@ -0,0 +1,672 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.etcd.io/etcd/clientv3/balancer"
|
||||
"go.etcd.io/etcd/clientv3/balancer/picker"
|
||||
"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
|
||||
"go.etcd.io/etcd/clientv3/credentials"
|
||||
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"go.etcd.io/etcd/pkg/logutil"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
grpccredentials "google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
|
||||
ErrOldCluster = errors.New("etcdclient: old cluster version")
|
||||
|
||||
roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
|
||||
)
|
||||
|
||||
func init() {
|
||||
lg := zap.NewNop()
|
||||
if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
|
||||
lcfg := logutil.DefaultZapLoggerConfig
|
||||
lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
|
||||
|
||||
var err error
|
||||
lg, err = lcfg.Build() // info level logging
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: support custom balancer
|
||||
balancer.RegisterBuilder(balancer.Config{
|
||||
Policy: picker.RoundrobinBalanced,
|
||||
Name: roundRobinBalancerName,
|
||||
Logger: lg,
|
||||
})
|
||||
}
|
||||
|
||||
// Client provides and manages an etcd v3 client session.
|
||||
type Client struct {
|
||||
Cluster
|
||||
KV
|
||||
Lease
|
||||
Watcher
|
||||
Auth
|
||||
Maintenance
|
||||
|
||||
conn *grpc.ClientConn
|
||||
|
||||
cfg Config
|
||||
creds grpccredentials.TransportCredentials
|
||||
resolverGroup *endpoint.ResolverGroup
|
||||
mu *sync.RWMutex
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// Username is a user name for authentication.
|
||||
Username string
|
||||
// Password is a password for authentication.
|
||||
Password string
|
||||
authTokenBundle credentials.Bundle
|
||||
|
||||
callOpts []grpc.CallOption
|
||||
|
||||
lg *zap.Logger
|
||||
}
|
||||
|
||||
// New creates a new etcdv3 client from a given configuration.
|
||||
func New(cfg Config) (*Client, error) {
|
||||
if len(cfg.Endpoints) == 0 {
|
||||
return nil, ErrNoAvailableEndpoints
|
||||
}
|
||||
|
||||
return newClient(&cfg)
|
||||
}
|
||||
|
||||
// NewCtxClient creates a client with a context but no underlying grpc
|
||||
// connection. This is useful for embedded cases that override the
|
||||
// service interface implementations and do not need connection management.
|
||||
func NewCtxClient(ctx context.Context) *Client {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
return &Client{ctx: cctx, cancel: cancel}
|
||||
}
|
||||
|
||||
// NewFromURL creates a new etcdv3 client from a URL.
|
||||
func NewFromURL(url string) (*Client, error) {
|
||||
return New(Config{Endpoints: []string{url}})
|
||||
}
|
||||
|
||||
// NewFromURLs creates a new etcdv3 client from URLs.
|
||||
func NewFromURLs(urls []string) (*Client, error) {
|
||||
return New(Config{Endpoints: urls})
|
||||
}
|
||||
|
||||
// Close shuts down the client's etcd connections.
|
||||
func (c *Client) Close() error {
|
||||
c.cancel()
|
||||
if c.Watcher != nil {
|
||||
c.Watcher.Close()
|
||||
}
|
||||
if c.Lease != nil {
|
||||
c.Lease.Close()
|
||||
}
|
||||
if c.resolverGroup != nil {
|
||||
c.resolverGroup.Close()
|
||||
}
|
||||
if c.conn != nil {
|
||||
return toErr(c.ctx, c.conn.Close())
|
||||
}
|
||||
return c.ctx.Err()
|
||||
}
|
||||
|
||||
// Ctx is a context for "out of band" messages (e.g., for sending
|
||||
// "clean up" message when another context is canceled). It is
|
||||
// canceled on client Close().
|
||||
func (c *Client) Ctx() context.Context { return c.ctx }
|
||||
|
||||
// Endpoints lists the registered endpoints for the client.
|
||||
func (c *Client) Endpoints() []string {
|
||||
// copy the slice; protect original endpoints from being changed
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
eps := make([]string, len(c.cfg.Endpoints))
|
||||
copy(eps, c.cfg.Endpoints)
|
||||
return eps
|
||||
}
|
||||
|
||||
// SetEndpoints updates client's endpoints.
|
||||
func (c *Client) SetEndpoints(eps ...string) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.cfg.Endpoints = eps
|
||||
c.resolverGroup.SetEndpoints(eps)
|
||||
}
|
||||
|
||||
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
|
||||
func (c *Client) Sync(ctx context.Context) error {
|
||||
mresp, err := c.MemberList(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var eps []string
|
||||
for _, m := range mresp.Members {
|
||||
eps = append(eps, m.ClientURLs...)
|
||||
}
|
||||
c.SetEndpoints(eps...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) autoSync() {
|
||||
if c.cfg.AutoSyncInterval == time.Duration(0) {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
case <-time.After(c.cfg.AutoSyncInterval):
|
||||
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
|
||||
err := c.Sync(ctx)
|
||||
cancel()
|
||||
if err != nil && err != c.ctx.Err() {
|
||||
lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) {
|
||||
creds = c.creds
|
||||
switch scheme {
|
||||
case "unix":
|
||||
case "http":
|
||||
creds = nil
|
||||
case "https", "unixs":
|
||||
if creds != nil {
|
||||
break
|
||||
}
|
||||
creds = credentials.NewBundle(credentials.Config{}).TransportCredentials()
|
||||
default:
|
||||
creds = nil
|
||||
}
|
||||
return creds
|
||||
}
|
||||
|
||||
// dialSetupOpts gives the dial opts prior to any authentication.
|
||||
func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
|
||||
if c.cfg.DialKeepAliveTime > 0 {
|
||||
params := keepalive.ClientParameters{
|
||||
Time: c.cfg.DialKeepAliveTime,
|
||||
Timeout: c.cfg.DialKeepAliveTimeout,
|
||||
PermitWithoutStream: c.cfg.PermitWithoutStream,
|
||||
}
|
||||
opts = append(opts, grpc.WithKeepaliveParams(params))
|
||||
}
|
||||
opts = append(opts, dopts...)
|
||||
|
||||
dialer := endpoint.Dialer
|
||||
if creds != nil {
|
||||
opts = append(opts, grpc.WithTransportCredentials(creds))
|
||||
// gRPC load balancer workaround. See credentials.transportCredential for details.
|
||||
if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok {
|
||||
dialer = credsDialer.Dialer
|
||||
}
|
||||
} else {
|
||||
opts = append(opts, grpc.WithInsecure())
|
||||
}
|
||||
opts = append(opts, grpc.WithContextDialer(dialer))
|
||||
|
||||
// Interceptor retry and backoff.
|
||||
// TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
|
||||
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
|
||||
// once it is available.
|
||||
rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
|
||||
opts = append(opts,
|
||||
// Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
|
||||
// Streams that are safe to retry are enabled individually.
|
||||
grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
|
||||
grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
|
||||
)
|
||||
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// Dial connects to a single endpoint using the client's config.
|
||||
func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
|
||||
creds, err := c.directDialCreds(ep)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Use the grpc passthrough resolver to directly dial a single endpoint.
|
||||
// This resolver passes through the 'unix' and 'unixs' endpoints schemes used
|
||||
// by etcd without modification, allowing us to directly dial endpoints and
|
||||
// using the same dial functions that we use for load balancer dialing.
|
||||
return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
|
||||
}
|
||||
|
||||
func (c *Client) getToken(ctx context.Context) error {
|
||||
var err error // return last error in a case of fail
|
||||
var auth *authenticator
|
||||
|
||||
eps := c.Endpoints()
|
||||
for _, ep := range eps {
|
||||
// use dial options without dopts to avoid reusing the client balancer
|
||||
var dOpts []grpc.DialOption
|
||||
_, host, _ := endpoint.ParseEndpoint(ep)
|
||||
target := c.resolverGroup.Target(host)
|
||||
creds := c.dialWithBalancerCreds(ep)
|
||||
dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to configure auth dialer: %v", err)
|
||||
continue
|
||||
}
|
||||
dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
|
||||
auth, err = newAuthenticator(ctx, target, dOpts, c)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer auth.close()
|
||||
|
||||
var resp *AuthenticateResponse
|
||||
resp, err = auth.authenticate(ctx, c.Username, c.Password)
|
||||
if err != nil {
|
||||
// return err without retrying other endpoints
|
||||
if err == rpctypes.ErrAuthNotEnabled {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
c.authTokenBundle.UpdateAuthToken(resp.Token)
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
|
||||
// of the provided endpoint determines the scheme used for all endpoints of the client connection.
|
||||
func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||
_, host, _ := endpoint.ParseEndpoint(ep)
|
||||
target := c.resolverGroup.Target(host)
|
||||
creds := c.dialWithBalancerCreds(ep)
|
||||
return c.dial(target, creds, dopts...)
|
||||
}
|
||||
|
||||
// dial configures and dials any grpc balancer target.
|
||||
func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||
opts, err := c.dialSetupOpts(creds, dopts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure dialer: %v", err)
|
||||
}
|
||||
|
||||
if c.Username != "" && c.Password != "" {
|
||||
c.authTokenBundle = credentials.NewBundle(credentials.Config{})
|
||||
|
||||
ctx, cancel := c.ctx, func() {}
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||
}
|
||||
|
||||
err = c.getToken(ctx)
|
||||
if err != nil {
|
||||
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
|
||||
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
|
||||
err = context.DeadlineExceeded
|
||||
}
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
|
||||
opts = append(opts, c.cfg.DialOptions...)
|
||||
|
||||
dctx := c.ctx
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
|
||||
defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
|
||||
}
|
||||
|
||||
conn, err := grpc.DialContext(dctx, target, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) {
|
||||
_, host, scheme := endpoint.ParseEndpoint(ep)
|
||||
creds := c.creds
|
||||
if len(scheme) != 0 {
|
||||
creds = c.processCreds(scheme)
|
||||
if creds != nil {
|
||||
clone := creds.Clone()
|
||||
// Set the server name must to the endpoint hostname without port since grpc
|
||||
// otherwise attempts to check if x509 cert is valid for the full endpoint
|
||||
// including the scheme and port, which fails.
|
||||
overrideServerName, _, err := net.SplitHostPort(host)
|
||||
if err != nil {
|
||||
// Either the host didn't have a port or the host could not be parsed. Either way, continue with the
|
||||
// original host string.
|
||||
overrideServerName = host
|
||||
}
|
||||
clone.OverrideServerName(overrideServerName)
|
||||
creds = clone
|
||||
}
|
||||
}
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
|
||||
_, _, scheme := endpoint.ParseEndpoint(ep)
|
||||
creds := c.creds
|
||||
if len(scheme) != 0 {
|
||||
creds = c.processCreds(scheme)
|
||||
}
|
||||
return creds
|
||||
}
|
||||
|
||||
// WithRequireLeader requires client requests to only succeed
|
||||
// when the cluster has a leader.
|
||||
func WithRequireLeader(ctx context.Context) context.Context {
|
||||
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
|
||||
return metadata.NewOutgoingContext(ctx, md)
|
||||
}
|
||||
|
||||
func newClient(cfg *Config) (*Client, error) {
|
||||
if cfg == nil {
|
||||
cfg = &Config{}
|
||||
}
|
||||
var creds grpccredentials.TransportCredentials
|
||||
if cfg.TLS != nil {
|
||||
creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
|
||||
}
|
||||
|
||||
// use a temporary skeleton client to bootstrap first connection
|
||||
baseCtx := context.TODO()
|
||||
if cfg.Context != nil {
|
||||
baseCtx = cfg.Context
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(baseCtx)
|
||||
client := &Client{
|
||||
conn: nil,
|
||||
cfg: *cfg,
|
||||
creds: creds,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
mu: new(sync.RWMutex),
|
||||
callOpts: defaultCallOpts,
|
||||
}
|
||||
|
||||
lcfg := logutil.DefaultZapLoggerConfig
|
||||
if cfg.LogConfig != nil {
|
||||
lcfg = *cfg.LogConfig
|
||||
}
|
||||
var err error
|
||||
client.lg, err = lcfg.Build()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.Username != "" && cfg.Password != "" {
|
||||
client.Username = cfg.Username
|
||||
client.Password = cfg.Password
|
||||
}
|
||||
if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
|
||||
if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
|
||||
return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
|
||||
}
|
||||
callOpts := []grpc.CallOption{
|
||||
defaultFailFast,
|
||||
defaultMaxCallSendMsgSize,
|
||||
defaultMaxCallRecvMsgSize,
|
||||
}
|
||||
if cfg.MaxCallSendMsgSize > 0 {
|
||||
callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
|
||||
}
|
||||
if cfg.MaxCallRecvMsgSize > 0 {
|
||||
callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
|
||||
}
|
||||
client.callOpts = callOpts
|
||||
}
|
||||
|
||||
// Prepare a 'endpoint://<unique-client-id>/' resolver for the client and create a endpoint target to pass
|
||||
// to dial so the client knows to use this resolver.
|
||||
client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
|
||||
if err != nil {
|
||||
client.cancel()
|
||||
return nil, err
|
||||
}
|
||||
client.resolverGroup.SetEndpoints(cfg.Endpoints)
|
||||
|
||||
if len(cfg.Endpoints) < 1 {
|
||||
return nil, fmt.Errorf("at least one Endpoint must is required in client config")
|
||||
}
|
||||
dialEndpoint := cfg.Endpoints[0]
|
||||
|
||||
// Use a provided endpoint target so that for https:// without any tls config given, then
|
||||
// grpc will assume the certificate server name is the endpoint host.
|
||||
conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
|
||||
if err != nil {
|
||||
client.cancel()
|
||||
client.resolverGroup.Close()
|
||||
return nil, err
|
||||
}
|
||||
// TODO: With the old grpc balancer interface, we waited until the dial timeout
|
||||
// for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
|
||||
client.conn = conn
|
||||
|
||||
client.Cluster = NewCluster(client)
|
||||
client.KV = NewKV(client)
|
||||
client.Lease = NewLease(client)
|
||||
client.Watcher = NewWatcher(client)
|
||||
client.Auth = NewAuth(client)
|
||||
client.Maintenance = NewMaintenance(client)
|
||||
|
||||
if cfg.RejectOldCluster {
|
||||
if err := client.checkVersion(); err != nil {
|
||||
client.Close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
go client.autoSync()
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// roundRobinQuorumBackoff retries against quorum between each backoff.
|
||||
// This is intended for use with a round robin load balancer.
|
||||
func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
|
||||
return func(attempt uint) time.Duration {
|
||||
// after each round robin across quorum, backoff for our wait between duration
|
||||
n := uint(len(c.Endpoints()))
|
||||
quorum := (n/2 + 1)
|
||||
if attempt%quorum == 0 {
|
||||
c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
|
||||
return jitterUp(waitBetween, jitterFraction)
|
||||
}
|
||||
c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) checkVersion() (err error) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
eps := c.Endpoints()
|
||||
errc := make(chan error, len(eps))
|
||||
ctx, cancel := context.WithCancel(c.ctx)
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
cancel()
|
||||
ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
|
||||
}
|
||||
|
||||
wg.Add(len(eps))
|
||||
for _, ep := range eps {
|
||||
// if cluster is current, any endpoint gives a recent version
|
||||
go func(e string) {
|
||||
defer wg.Done()
|
||||
resp, rerr := c.Status(ctx, e)
|
||||
if rerr != nil {
|
||||
errc <- rerr
|
||||
return
|
||||
}
|
||||
vs := strings.Split(resp.Version, ".")
|
||||
maj, min := 0, 0
|
||||
if len(vs) >= 2 {
|
||||
var serr error
|
||||
if maj, serr = strconv.Atoi(vs[0]); serr != nil {
|
||||
errc <- serr
|
||||
return
|
||||
}
|
||||
if min, serr = strconv.Atoi(vs[1]); serr != nil {
|
||||
errc <- serr
|
||||
return
|
||||
}
|
||||
}
|
||||
if maj < 3 || (maj == 3 && min < 2) {
|
||||
rerr = ErrOldCluster
|
||||
}
|
||||
errc <- rerr
|
||||
}(ep)
|
||||
}
|
||||
// wait for success
|
||||
for range eps {
|
||||
if err = <-errc; err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
wg.Wait()
|
||||
return err
|
||||
}
|
||||
|
||||
// ActiveConnection returns the current in-use connection
|
||||
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
|
||||
|
||||
// isHaltErr returns true if the given error and context indicate no forward
|
||||
// progress can be made, even after reconnecting.
|
||||
func isHaltErr(ctx context.Context, err error) bool {
|
||||
if ctx != nil && ctx.Err() != nil {
|
||||
return true
|
||||
}
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
ev, _ := status.FromError(err)
|
||||
// Unavailable codes mean the system will be right back.
|
||||
// (e.g., can't connect, lost leader)
|
||||
// Treat Internal codes as if something failed, leaving the
|
||||
// system in an inconsistent state, but retrying could make progress.
|
||||
// (e.g., failed in middle of send, corrupted frame)
|
||||
// TODO: are permanent Internal errors possible from grpc?
|
||||
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
|
||||
}
|
||||
|
||||
// isUnavailableErr returns true if the given error is an unavailable error
|
||||
func isUnavailableErr(ctx context.Context, err error) bool {
|
||||
if ctx != nil && ctx.Err() != nil {
|
||||
return false
|
||||
}
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
ev, ok := status.FromError(err)
|
||||
if ok {
|
||||
// Unavailable codes mean the system will be right back.
|
||||
// (e.g., can't connect, lost leader)
|
||||
return ev.Code() == codes.Unavailable
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func toErr(ctx context.Context, err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
err = rpctypes.Error(err)
|
||||
if _, ok := err.(rpctypes.EtcdError); ok {
|
||||
return err
|
||||
}
|
||||
if ev, ok := status.FromError(err); ok {
|
||||
code := ev.Code()
|
||||
switch code {
|
||||
case codes.DeadlineExceeded:
|
||||
fallthrough
|
||||
case codes.Canceled:
|
||||
if ctx.Err() != nil {
|
||||
err = ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func canceledByCaller(stopCtx context.Context, err error) bool {
|
||||
if stopCtx.Err() == nil || err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return err == context.Canceled || err == context.DeadlineExceeded
|
||||
}
|
||||
|
||||
// IsConnCanceled returns true, if error is from a closed gRPC connection.
|
||||
// ref. https://github.com/grpc/grpc-go/pull/1854
|
||||
func IsConnCanceled(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// >= gRPC v1.23.x
|
||||
s, ok := status.FromError(err)
|
||||
if ok {
|
||||
// connection is canceled or server has already closed the connection
|
||||
return s.Code() == codes.Canceled || s.Message() == "transport is closing"
|
||||
}
|
||||
|
||||
// >= gRPC v1.10.x
|
||||
if err == context.Canceled {
|
||||
return true
|
||||
}
|
||||
|
||||
// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
|
||||
return strings.Contains(err.Error(), "grpc: the client connection is closing")
|
||||
}
|
||||
|
||||
// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details.
|
||||
type TransportCredentialsWithDialer interface {
|
||||
grpccredentials.TransportCredentials
|
||||
Dialer(ctx context.Context, dialEp string) (net.Conn, error)
|
||||
}
|
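client.go above is the entry point applications use; the balancer, resolver, and picker packages earlier in this diff are wired together inside clientv3.New. A minimal usage sketch, not part of the vendored files; the endpoint, key space, and timeouts are placeholders.

package main

import (
    "context"
    "fmt"
    "time"

    "go.etcd.io/etcd/clientv3"
)

func main() {
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        panic(err)
    }
    defer cli.Close()

    // Require a leader for this request and bound it with a timeout.
    ctx, cancel := context.WithTimeout(clientv3.WithRequireLeader(cli.Ctx()), 3*time.Second)
    resp, err := cli.MemberList(ctx)
    cancel()
    if err != nil {
        panic(err)
    }
    fmt.Println("cluster size:", len(resp.Members))
}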
141  vendor/go.etcd.io/etcd/clientv3/cluster.go  generated  vendored  Normal file
@@ -0,0 +1,141 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
    "context"

    pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
    "go.etcd.io/etcd/pkg/types"

    "google.golang.org/grpc"
)

type (
    Member                pb.Member
    MemberListResponse    pb.MemberListResponse
    MemberAddResponse     pb.MemberAddResponse
    MemberRemoveResponse  pb.MemberRemoveResponse
    MemberUpdateResponse  pb.MemberUpdateResponse
    MemberPromoteResponse pb.MemberPromoteResponse
)

type Cluster interface {
    // MemberList lists the current cluster membership.
    MemberList(ctx context.Context) (*MemberListResponse, error)

    // MemberAdd adds a new member into the cluster.
    MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)

    // MemberAddAsLearner adds a new learner member into the cluster.
    MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)

    // MemberRemove removes an existing member from the cluster.
    MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)

    // MemberUpdate updates the peer addresses of the member.
    MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)

    // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
    MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error)
}

type cluster struct {
    remote   pb.ClusterClient
    callOpts []grpc.CallOption
}

func NewCluster(c *Client) Cluster {
    api := &cluster{remote: RetryClusterClient(c)}
    if c != nil {
        api.callOpts = c.callOpts
    }
    return api
}

func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
    api := &cluster{remote: remote}
    if c != nil {
        api.callOpts = c.callOpts
    }
    return api
}

func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
    return c.memberAdd(ctx, peerAddrs, false)
}

func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
    return c.memberAdd(ctx, peerAddrs, true)
}

func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) {
    // fail-fast before panic in rafthttp
    if _, err := types.NewURLs(peerAddrs); err != nil {
        return nil, err
    }

    r := &pb.MemberAddRequest{
        PeerURLs:  peerAddrs,
        IsLearner: isLearner,
    }
    resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
    if err != nil {
        return nil, toErr(ctx, err)
    }
    return (*MemberAddResponse)(resp), nil
}

func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
    r := &pb.MemberRemoveRequest{ID: id}
    resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
    if err != nil {
        return nil, toErr(ctx, err)
    }
    return (*MemberRemoveResponse)(resp), nil
}

func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
    // fail-fast before panic in rafthttp
    if _, err := types.NewURLs(peerAddrs); err != nil {
        return nil, err
    }

    // it is safe to retry on update.
    r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
    resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
    if err == nil {
        return (*MemberUpdateResponse)(resp), nil
    }
    return nil, toErr(ctx, err)
}

func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
    // it is safe to retry on list.
    resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
    if err == nil {
        return (*MemberListResponse)(resp), nil
    }
    return nil, toErr(ctx, err)
}

func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
    r := &pb.MemberPromoteRequest{ID: id}
    resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...)
    if err != nil {
        return nil, toErr(ctx, err)
    }
    return (*MemberPromoteResponse)(resp), nil
}
51
vendor/go.etcd.io/etcd/clientv3/compact_op.go
generated
vendored
Normal file
51
vendor/go.etcd.io/etcd/clientv3/compact_op.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
)
|
||||
|
||||
// CompactOp represents a compact operation.
|
||||
type CompactOp struct {
|
||||
revision int64
|
||||
physical bool
|
||||
}
|
||||
|
||||
// CompactOption configures compact operation.
|
||||
type CompactOption func(*CompactOp)
|
||||
|
||||
func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
|
||||
for _, opt := range opts {
|
||||
opt(op)
|
||||
}
|
||||
}
|
||||
|
||||
// OpCompact wraps slice CompactOption to create a CompactOp.
|
||||
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
|
||||
ret := CompactOp{revision: rev}
|
||||
ret.applyCompactOpts(opts)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (op CompactOp) toRequest() *pb.CompactionRequest {
|
||||
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
|
||||
}
|
||||
|
||||
// WithCompactPhysical makes Compact wait until all compacted entries are
|
||||
// removed from the etcd server's storage.
|
||||
func WithCompactPhysical() CompactOption {
|
||||
return func(op *CompactOp) { op.physical = true }
|
||||
}
|
140
vendor/go.etcd.io/etcd/clientv3/compare.go
generated
vendored
Normal file
140
vendor/go.etcd.io/etcd/clientv3/compare.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
)
|
||||
|
||||
type CompareTarget int
|
||||
type CompareResult int
|
||||
|
||||
const (
|
||||
CompareVersion CompareTarget = iota
|
||||
CompareCreated
|
||||
CompareModified
|
||||
CompareValue
|
||||
)
|
||||
|
||||
type Cmp pb.Compare
|
||||
|
||||
func Compare(cmp Cmp, result string, v interface{}) Cmp {
|
||||
var r pb.Compare_CompareResult
|
||||
|
||||
switch result {
|
||||
case "=":
|
||||
r = pb.Compare_EQUAL
|
||||
case "!=":
|
||||
r = pb.Compare_NOT_EQUAL
|
||||
case ">":
|
||||
r = pb.Compare_GREATER
|
||||
case "<":
|
||||
r = pb.Compare_LESS
|
||||
default:
|
||||
panic("Unknown result op")
|
||||
}
|
||||
|
||||
cmp.Result = r
|
||||
switch cmp.Target {
|
||||
case pb.Compare_VALUE:
|
||||
val, ok := v.(string)
|
||||
if !ok {
|
||||
panic("bad compare value")
|
||||
}
|
||||
cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
|
||||
case pb.Compare_VERSION:
|
||||
cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
|
||||
case pb.Compare_CREATE:
|
||||
cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
|
||||
case pb.Compare_MOD:
|
||||
cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
|
||||
case pb.Compare_LEASE:
|
||||
cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
|
||||
default:
|
||||
panic("Unknown compare type")
|
||||
}
|
||||
return cmp
|
||||
}
|
||||
|
||||
func Value(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
|
||||
}
|
||||
|
||||
func Version(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
|
||||
}
|
||||
|
||||
func CreateRevision(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
|
||||
}
|
||||
|
||||
func ModRevision(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
|
||||
}
|
||||
|
||||
// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
|
||||
// LeaseID is 0, otherwise known as `NoLease`.
|
||||
func LeaseValue(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
|
||||
}
|
||||
|
||||
// KeyBytes returns the byte slice holding with the comparison key.
|
||||
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
|
||||
|
||||
// WithKeyBytes sets the byte slice for the comparison key.
|
||||
func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
|
||||
|
||||
// ValueBytes returns the byte slice holding the comparison value, if any.
|
||||
func (cmp *Cmp) ValueBytes() []byte {
|
||||
if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
|
||||
return tu.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithValueBytes sets the byte slice for the comparison's value.
|
||||
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
|
||||
|
||||
// WithRange sets the comparison to scan the range [key, end).
|
||||
func (cmp Cmp) WithRange(end string) Cmp {
|
||||
cmp.RangeEnd = []byte(end)
|
||||
return cmp
|
||||
}
|
||||
|
||||
// WithPrefix sets the comparison to scan all keys prefixed by the key.
|
||||
func (cmp Cmp) WithPrefix() Cmp {
|
||||
cmp.RangeEnd = getPrefix(cmp.Key)
|
||||
return cmp
|
||||
}
|
||||
|
||||
// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
|
||||
func mustInt64(val interface{}) int64 {
|
||||
if v, ok := val.(int64); ok {
|
||||
return v
|
||||
}
|
||||
if v, ok := val.(int); ok {
|
||||
return int64(v)
|
||||
}
|
||||
panic("bad value")
|
||||
}
|
||||
|
||||
// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
|
||||
// int64 otherwise.
|
||||
func mustInt64orLeaseID(val interface{}) int64 {
|
||||
if v, ok := val.(LeaseID); ok {
|
||||
return int64(v)
|
||||
}
|
||||
return mustInt64(val)
|
||||
}
|
88
vendor/go.etcd.io/etcd/clientv3/config.go
generated
vendored
Normal file
88
vendor/go.etcd.io/etcd/clientv3/config.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
// Endpoints is a list of URLs.
|
||||
Endpoints []string `json:"endpoints"`
|
||||
|
||||
// AutoSyncInterval is the interval to update endpoints with its latest members.
|
||||
// 0 disables auto-sync. By default auto-sync is disabled.
|
||||
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
|
||||
|
||||
// DialTimeout is the timeout for failing to establish a connection.
|
||||
DialTimeout time.Duration `json:"dial-timeout"`
|
||||
|
||||
// DialKeepAliveTime is the time after which client pings the server to see if
|
||||
// transport is alive.
|
||||
DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
|
||||
|
||||
// DialKeepAliveTimeout is the time that the client waits for a response for the
|
||||
// keep-alive probe. If the response is not received in this time, the connection is closed.
|
||||
DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
|
||||
|
||||
// MaxCallSendMsgSize is the client-side request send limit in bytes.
|
||||
// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
|
||||
// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
|
||||
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
|
||||
MaxCallSendMsgSize int
|
||||
|
||||
// MaxCallRecvMsgSize is the client-side response receive limit.
|
||||
// If 0, it defaults to "math.MaxInt32", because range response can
|
||||
// easily exceed request send limits.
|
||||
// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
|
||||
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
|
||||
MaxCallRecvMsgSize int
|
||||
|
||||
// TLS holds the client secure credentials, if any.
|
||||
TLS *tls.Config
|
||||
|
||||
// Username is a user name for authentication.
|
||||
Username string `json:"username"`
|
||||
|
||||
// Password is a password for authentication.
|
||||
Password string `json:"password"`
|
||||
|
||||
// RejectOldCluster when set will refuse to create a client against an outdated cluster.
|
||||
RejectOldCluster bool `json:"reject-old-cluster"`
|
||||
|
||||
// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
|
||||
// For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
|
||||
// Without this, Dial returns immediately and connecting the server happens in background.
|
||||
DialOptions []grpc.DialOption
|
||||
|
||||
// Context is the default client context; it can be used to cancel grpc dial out and
|
||||
// other operations that do not have an explicit context.
|
||||
Context context.Context
|
||||
|
||||
// LogConfig configures client-side logger.
|
||||
// If nil, use the default logger.
|
||||
// TODO: configure gRPC logger
|
||||
LogConfig *zap.Config
|
||||
|
||||
// PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs).
|
||||
PermitWithoutStream bool `json:"permit-without-stream"`
|
||||
|
||||
// TODO: support custom balancer picker
|
||||
}
|
173
vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go
generated
vendored
Normal file
173
vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package credentials implements gRPC credential interface with etcd specific logic.
|
||||
// e.g., client handshake with custom authority parameter
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
|
||||
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
grpccredentials "google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// Config defines gRPC credential configuration.
|
||||
type Config struct {
|
||||
TLSConfig *tls.Config
|
||||
}
|
||||
|
||||
// Bundle defines gRPC credential interface.
|
||||
type Bundle interface {
|
||||
grpccredentials.Bundle
|
||||
UpdateAuthToken(token string)
|
||||
}
|
||||
|
||||
// NewBundle constructs a new gRPC credential bundle.
|
||||
func NewBundle(cfg Config) Bundle {
|
||||
return &bundle{
|
||||
tc: newTransportCredential(cfg.TLSConfig),
|
||||
rc: newPerRPCCredential(),
|
||||
}
|
||||
}
|
||||
|
||||
// bundle implements "grpccredentials.Bundle" interface.
|
||||
type bundle struct {
|
||||
tc *transportCredential
|
||||
rc *perRPCCredential
|
||||
}
|
||||
|
||||
func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
|
||||
return b.tc
|
||||
}
|
||||
|
||||
func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
|
||||
return b.rc
|
||||
}
|
||||
|
||||
func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
|
||||
// no-op
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// transportCredential implements "grpccredentials.TransportCredentials" interface.
|
||||
// transportCredential wraps TransportCredentials to track which
|
||||
// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the
|
||||
// hostname or IP of the dialed endpoint.
|
||||
// This is a workaround of a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when
|
||||
// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name
|
||||
// in their TLS certs.
|
||||
// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer)
|
||||
// when dialing.
|
||||
type transportCredential struct {
|
||||
gtc grpccredentials.TransportCredentials
|
||||
mu sync.Mutex
|
||||
// addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the
|
||||
// endpoint provided to the dialer when dialing
|
||||
addrToEndpoint map[string]string
|
||||
}
|
||||
|
||||
func newTransportCredential(cfg *tls.Config) *transportCredential {
|
||||
return &transportCredential{
|
||||
gtc: grpccredentials.NewTLS(cfg),
|
||||
addrToEndpoint: map[string]string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
|
||||
// Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint
|
||||
tc.mu.Lock()
|
||||
dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()]
|
||||
tc.mu.Unlock()
|
||||
if ok {
|
||||
_, host, _ := endpoint.ParseEndpoint(dialEp)
|
||||
authority = host
|
||||
}
|
||||
return tc.gtc.ClientHandshake(ctx, authority, rawConn)
|
||||
}
|
||||
|
||||
// return true if given string is an IP.
|
||||
func isIP(ep string) bool {
|
||||
return net.ParseIP(ep) != nil
|
||||
}
|
||||
|
||||
func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
|
||||
return tc.gtc.ServerHandshake(rawConn)
|
||||
}
|
||||
|
||||
func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
|
||||
return tc.gtc.Info()
|
||||
}
|
||||
|
||||
func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
|
||||
copy := map[string]string{}
|
||||
tc.mu.Lock()
|
||||
for k, v := range tc.addrToEndpoint {
|
||||
copy[k] = v
|
||||
}
|
||||
tc.mu.Unlock()
|
||||
return &transportCredential{
|
||||
gtc: tc.gtc.Clone(),
|
||||
addrToEndpoint: copy,
|
||||
}
|
||||
}
|
||||
|
||||
func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
|
||||
return tc.gtc.OverrideServerName(serverNameOverride)
|
||||
}
|
||||
|
||||
func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
|
||||
// Keep track of which addresses are dialed for which endpoints
|
||||
conn, err := endpoint.Dialer(ctx, dialEp)
|
||||
if conn != nil {
|
||||
tc.mu.Lock()
|
||||
tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp
|
||||
tc.mu.Unlock()
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
|
||||
// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
|
||||
type perRPCCredential struct {
|
||||
authToken string
|
||||
authTokenMu sync.RWMutex
|
||||
}
|
||||
|
||||
func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
|
||||
|
||||
func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
|
||||
|
||||
func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
|
||||
rc.authTokenMu.RLock()
|
||||
authToken := rc.authToken
|
||||
rc.authTokenMu.RUnlock()
|
||||
return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
|
||||
}
|
||||
|
||||
func (b *bundle) UpdateAuthToken(token string) {
|
||||
if b.rc == nil {
|
||||
return
|
||||
}
|
||||
b.rc.UpdateAuthToken(token)
|
||||
}
|
||||
|
||||
func (rc *perRPCCredential) UpdateAuthToken(token string) {
|
||||
rc.authTokenMu.Lock()
|
||||
rc.authToken = token
|
||||
rc.authTokenMu.Unlock()
|
||||
}
|
106
vendor/go.etcd.io/etcd/clientv3/doc.go
generated
vendored
Normal file
106
vendor/go.etcd.io/etcd/clientv3/doc.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package clientv3 implements the official Go etcd client for v3.
|
||||
//
|
||||
// Create client using `clientv3.New`:
|
||||
//
|
||||
// // expect dial time-out on ipv4 blackhole
|
||||
// _, err := clientv3.New(clientv3.Config{
|
||||
// Endpoints: []string{"http://254.0.0.1:12345"},
|
||||
// DialTimeout: 2 * time.Second,
|
||||
// })
|
||||
//
|
||||
// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
|
||||
// if err == context.DeadlineExceeded {
|
||||
// // handle errors
|
||||
// }
|
||||
//
|
||||
// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
|
||||
// if err == grpc.ErrClientConnTimeout {
|
||||
// // handle errors
|
||||
// }
|
||||
//
|
||||
// cli, err := clientv3.New(clientv3.Config{
|
||||
// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
|
||||
// DialTimeout: 5 * time.Second,
|
||||
// })
|
||||
// if err != nil {
|
||||
// // handle error!
|
||||
// }
|
||||
// defer cli.Close()
|
||||
//
|
||||
// Make sure to close the client after using it. If the client is not closed, the
|
||||
// connection will have leaky goroutines.
|
||||
//
|
||||
// To specify a client request timeout, wrap the context with context.WithTimeout:
|
||||
//
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
|
||||
// cancel()
|
||||
// if err != nil {
|
||||
// // handle error!
|
||||
// }
|
||||
// // use the response
|
||||
//
|
||||
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
|
||||
// Clients are safe for concurrent use by multiple goroutines.
|
||||
//
|
||||
// etcd client returns 3 types of errors:
|
||||
//
|
||||
// 1. context error: canceled or deadline exceeded.
|
||||
// 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded.
|
||||
// 3. gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
|
||||
//
|
||||
// Here is the example code to handle client errors:
|
||||
//
|
||||
// resp, err := kvc.Put(ctx, "", "")
|
||||
// if err != nil {
|
||||
// if err == context.Canceled {
|
||||
// // ctx is canceled by another routine
|
||||
// } else if err == context.DeadlineExceeded {
|
||||
// // ctx is attached with a deadline and it exceeded
|
||||
// } else if err == rpctypes.ErrEmptyKey {
|
||||
// // client-side error: key is not provided
|
||||
// } else if ev, ok := status.FromError(err); ok {
|
||||
// code := ev.Code()
|
||||
// if code == codes.DeadlineExceeded {
|
||||
// // server-side context might have timed-out first (due to clock skew)
|
||||
// // while original client-side context is not timed-out yet
|
||||
// }
|
||||
// } else {
|
||||
// // bad cluster endpoints, which are not etcd servers
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// go func() { cli.Close() }()
|
||||
// _, err := kvc.Get(ctx, "a")
|
||||
// if err != nil {
|
||||
// // with etcd clientv3 <= v3.3
|
||||
// if err == context.Canceled {
|
||||
// // grpc balancer calls 'Get' with an inflight client.Close
|
||||
// } else if err == grpc.ErrClientConnClosing { // <= gRCP v1.7.x
|
||||
// // grpc balancer calls 'Get' after client.Close.
|
||||
// }
|
||||
// // with etcd clientv3 >= v3.4
|
||||
// if clientv3.IsConnCanceled(err) {
|
||||
// // gRPC client connection is closed
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// The grpc load balancer is registered statically and is shared across etcd clients.
|
||||
// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
|
||||
// variable. E.g. "ETCD_CLIENT_DEBUG=1".
|
||||
//
|
||||
package clientv3
|
177
vendor/go.etcd.io/etcd/clientv3/kv.go
generated
vendored
Normal file
177
vendor/go.etcd.io/etcd/clientv3/kv.go
generated
vendored
Normal file
@ -0,0 +1,177 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type (
|
||||
CompactResponse pb.CompactionResponse
|
||||
PutResponse pb.PutResponse
|
||||
GetResponse pb.RangeResponse
|
||||
DeleteResponse pb.DeleteRangeResponse
|
||||
TxnResponse pb.TxnResponse
|
||||
)
|
||||
|
||||
type KV interface {
|
||||
// Put puts a key-value pair into etcd.
|
||||
// Note that key,value can be plain bytes array and string is
|
||||
// an immutable representation of that bytes array.
|
||||
// To get a string of bytes, do string([]byte{0x10, 0x20}).
|
||||
Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
|
||||
|
||||
// Get retrieves keys.
|
||||
// By default, Get will return the value for "key", if any.
|
||||
// When passed WithRange(end), Get will return the keys in the range [key, end).
|
||||
// When passed WithFromKey(), Get returns keys greater than or equal to key.
|
||||
// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
|
||||
// if the required revision is compacted, the request will fail with ErrCompacted .
|
||||
// When passed WithLimit(limit), the number of returned keys is bounded by limit.
|
||||
// When passed WithSort(), the keys will be sorted.
|
||||
Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
|
||||
|
||||
// Delete deletes a key, or optionally using WithRange(end), [key, end).
|
||||
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
|
||||
|
||||
// Compact compacts etcd KV history before the given rev.
|
||||
Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
|
||||
|
||||
// Do applies a single Op on KV without a transaction.
|
||||
// Do is useful when creating arbitrary operations to be issued at a
|
||||
// later time; the user can range over the operations, calling Do to
|
||||
// execute them. Get/Put/Delete, on the other hand, are best suited
|
||||
// for when the operation should be issued at the time of declaration.
|
||||
Do(ctx context.Context, op Op) (OpResponse, error)
|
||||
|
||||
// Txn creates a transaction.
|
||||
Txn(ctx context.Context) Txn
|
||||
}
|
||||
|
||||
type OpResponse struct {
|
||||
put *PutResponse
|
||||
get *GetResponse
|
||||
del *DeleteResponse
|
||||
txn *TxnResponse
|
||||
}
|
||||
|
||||
func (op OpResponse) Put() *PutResponse { return op.put }
|
||||
func (op OpResponse) Get() *GetResponse { return op.get }
|
||||
func (op OpResponse) Del() *DeleteResponse { return op.del }
|
||||
func (op OpResponse) Txn() *TxnResponse { return op.txn }
|
||||
|
||||
func (resp *PutResponse) OpResponse() OpResponse {
|
||||
return OpResponse{put: resp}
|
||||
}
|
||||
func (resp *GetResponse) OpResponse() OpResponse {
|
||||
return OpResponse{get: resp}
|
||||
}
|
||||
func (resp *DeleteResponse) OpResponse() OpResponse {
|
||||
return OpResponse{del: resp}
|
||||
}
|
||||
func (resp *TxnResponse) OpResponse() OpResponse {
|
||||
return OpResponse{txn: resp}
|
||||
}
|
||||
|
||||
type kv struct {
|
||||
remote pb.KVClient
|
||||
callOpts []grpc.CallOption
|
||||
}
|
||||
|
||||
func NewKV(c *Client) KV {
|
||||
api := &kv{remote: RetryKVClient(c)}
|
||||
if c != nil {
|
||||
api.callOpts = c.callOpts
|
||||
}
|
||||
return api
|
||||
}
|
||||
|
||||
func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
|
||||
api := &kv{remote: remote}
|
||||
if c != nil {
|
||||
api.callOpts = c.callOpts
|
||||
}
|
||||
return api
|
||||
}
|
||||
|
||||
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
|
||||
r, err := kv.Do(ctx, OpPut(key, val, opts...))
|
||||
return r.put, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
|
||||
r, err := kv.Do(ctx, OpGet(key, opts...))
|
||||
return r.get, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
|
||||
r, err := kv.Do(ctx, OpDelete(key, opts...))
|
||||
return r.del, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
|
||||
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
return (*CompactResponse)(resp), err
|
||||
}
|
||||
|
||||
func (kv *kv) Txn(ctx context.Context) Txn {
|
||||
return &txn{
|
||||
kv: kv,
|
||||
ctx: ctx,
|
||||
callOpts: kv.callOpts,
|
||||
}
|
||||
}
|
||||
|
||||
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
|
||||
var err error
|
||||
switch op.t {
|
||||
case tRange:
|
||||
var resp *pb.RangeResponse
|
||||
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
|
||||
if err == nil {
|
||||
return OpResponse{get: (*GetResponse)(resp)}, nil
|
||||
}
|
||||
case tPut:
|
||||
var resp *pb.PutResponse
|
||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
|
||||
resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
|
||||
if err == nil {
|
||||
return OpResponse{put: (*PutResponse)(resp)}, nil
|
||||
}
|
||||
case tDeleteRange:
|
||||
var resp *pb.DeleteRangeResponse
|
||||
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
|
||||
resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
|
||||
if err == nil {
|
||||
return OpResponse{del: (*DeleteResponse)(resp)}, nil
|
||||
}
|
||||
case tTxn:
|
||||
var resp *pb.TxnResponse
|
||||
resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
|
||||
if err == nil {
|
||||
return OpResponse{txn: (*TxnResponse)(resp)}, nil
|
||||
}
|
||||
default:
|
||||
panic("Unknown op")
|
||||
}
|
||||
return OpResponse{}, toErr(ctx, err)
|
||||
}
|
596
vendor/go.etcd.io/etcd/clientv3/lease.go
generated
vendored
Normal file
596
vendor/go.etcd.io/etcd/clientv3/lease.go
generated
vendored
Normal file
@ -0,0 +1,596 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
type (
|
||||
LeaseRevokeResponse pb.LeaseRevokeResponse
|
||||
LeaseID int64
|
||||
)
|
||||
|
||||
// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
|
||||
type LeaseGrantResponse struct {
|
||||
*pb.ResponseHeader
|
||||
ID LeaseID
|
||||
TTL int64
|
||||
Error string
|
||||
}
|
||||
|
||||
// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
|
||||
type LeaseKeepAliveResponse struct {
|
||||
*pb.ResponseHeader
|
||||
ID LeaseID
|
||||
TTL int64
|
||||
}
|
||||
|
||||
// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
|
||||
type LeaseTimeToLiveResponse struct {
|
||||
*pb.ResponseHeader
|
||||
ID LeaseID `json:"id"`
|
||||
|
||||
// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1.
|
||||
TTL int64 `json:"ttl"`
|
||||
|
||||
// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
|
||||
GrantedTTL int64 `json:"granted-ttl"`
|
||||
|
||||
// Keys is the list of keys attached to this lease.
|
||||
Keys [][]byte `json:"keys"`
|
||||
}
|
||||
|
||||
// LeaseStatus represents a lease status.
|
||||
type LeaseStatus struct {
|
||||
ID LeaseID `json:"id"`
|
||||
// TODO: TTL int64
|
||||
}
|
||||
|
||||
// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
|
||||
type LeaseLeasesResponse struct {
|
||||
*pb.ResponseHeader
|
||||
Leases []LeaseStatus `json:"leases"`
|
||||
}
|
||||
|
||||
const (
|
||||
// defaultTTL is the assumed lease TTL used for the first keepalive
|
||||
// deadline before the actual TTL is known to the client.
|
||||
defaultTTL = 5 * time.Second
|
||||
// NoLease is a lease ID for the absence of a lease.
|
||||
NoLease LeaseID = 0
|
||||
|
||||
// retryConnWait is how long to wait before retrying request due to an error
|
||||
retryConnWait = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
// LeaseResponseChSize is the size of buffer to store unsent lease responses.
|
||||
// WARNING: DO NOT UPDATE.
|
||||
// Only for testing purposes.
|
||||
var LeaseResponseChSize = 16
|
||||
|
||||
// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
|
||||
//
|
||||
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
|
||||
type ErrKeepAliveHalted struct {
|
||||
Reason error
|
||||
}
|
||||
|
||||
func (e ErrKeepAliveHalted) Error() string {
|
||||
s := "etcdclient: leases keep alive halted"
|
||||
if e.Reason != nil {
|
||||
s += ": " + e.Reason.Error()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
type Lease interface {
|
||||
// Grant creates a new lease.
|
||||
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
|
||||
|
||||
// Revoke revokes the given lease.
|
||||
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
|
||||
|
||||
// TimeToLive retrieves the lease information of the given lease ID.
|
||||
TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
|
||||
|
||||
// Leases retrieves all leases.
|
||||
Leases(ctx context.Context) (*LeaseLeasesResponse, error)
|
||||
|
||||
// KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted
|
||||
// to the channel are not consumed promptly the channel may become full. When full, the lease
|
||||
// client will continue sending keep alive requests to the etcd server, but will drop responses
|
||||
// until there is capacity on the channel to send more responses.
|
||||
//
|
||||
// If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or
|
||||
// canceled by the caller (e.g. context.Canceled), KeepAlive returns a ErrKeepAliveHalted error
|
||||
// containing the error reason.
|
||||
//
|
||||
// The returned "LeaseKeepAliveResponse" channel closes if underlying keep
|
||||
// alive stream is interrupted in some way the client cannot handle itself;
|
||||
// given context "ctx" is canceled or timed out.
|
||||
//
|
||||
// TODO(v4.0): post errors to last keep alive message before closing
|
||||
// (see https://github.com/etcd-io/etcd/pull/7866)
|
||||
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
|
||||
|
||||
// KeepAliveOnce renews the lease once. The response corresponds to the
|
||||
// first message from calling KeepAlive. If the response has a recoverable
|
||||
// error, KeepAliveOnce will retry the RPC with a new keep alive message.
|
||||
//
|
||||
// In most of the cases, Keepalive should be used instead of KeepAliveOnce.
|
||||
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
|
||||
|
||||
// Close releases all resources Lease keeps for efficient communication
|
||||
// with the etcd server.
|
||||
Close() error
|
||||
}
|
||||
|
||||
type lessor struct {
|
||||
mu sync.Mutex // guards all fields
|
||||
|
||||
// donec is closed and loopErr is set when recvKeepAliveLoop stops
|
||||
donec chan struct{}
|
||||
loopErr error
|
||||
|
||||
remote pb.LeaseClient
|
||||
|
||||
stream pb.Lease_LeaseKeepAliveClient
|
||||
streamCancel context.CancelFunc
|
||||
|
||||
stopCtx context.Context
|
||||
stopCancel context.CancelFunc
|
||||
|
||||
keepAlives map[LeaseID]*keepAlive
|
||||
|
||||
// firstKeepAliveTimeout is the timeout for the first keepalive request
|
||||
// before the actual TTL is known to the lease client
|
||||
firstKeepAliveTimeout time.Duration
|
||||
|
||||
// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
|
||||
firstKeepAliveOnce sync.Once
|
||||
|
||||
callOpts []grpc.CallOption
|
||||
|
||||
lg *zap.Logger
|
||||
}
|
||||
|
||||
// keepAlive multiplexes a keepalive for a lease over multiple channels
|
||||
type keepAlive struct {
|
||||
chs []chan<- *LeaseKeepAliveResponse
|
||||
ctxs []context.Context
|
||||
// deadline is the time the keep alive channels close if no response
|
||||
deadline time.Time
|
||||
// nextKeepAlive is when to send the next keep alive message
|
||||
nextKeepAlive time.Time
|
||||
// donec is closed on lease revoke, expiration, or cancel.
|
||||
donec chan struct{}
|
||||
}
|
||||
|
||||
func NewLease(c *Client) Lease {
|
||||
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
|
||||
}
|
||||
|
||||
func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
|
||||
l := &lessor{
|
||||
donec: make(chan struct{}),
|
||||
keepAlives: make(map[LeaseID]*keepAlive),
|
||||
remote: remote,
|
||||
firstKeepAliveTimeout: keepAliveTimeout,
|
||||
lg: c.lg,
|
||||
}
|
||||
if l.firstKeepAliveTimeout == time.Second {
|
||||
l.firstKeepAliveTimeout = defaultTTL
|
||||
}
|
||||
if c != nil {
|
||||
l.callOpts = c.callOpts
|
||||
}
|
||||
reqLeaderCtx := WithRequireLeader(context.Background())
|
||||
l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
|
||||
r := &pb.LeaseGrantRequest{TTL: ttl}
|
||||
resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
|
||||
if err == nil {
|
||||
gresp := &LeaseGrantResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
Error: resp.Error,
|
||||
}
|
||||
return gresp, nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
|
||||
r := &pb.LeaseRevokeRequest{ID: int64(id)}
|
||||
resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
|
||||
if err == nil {
|
||||
return (*LeaseRevokeResponse)(resp), nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
|
||||
r := toLeaseTimeToLiveRequest(id, opts...)
|
||||
resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
|
||||
if err == nil {
|
||||
gresp := &LeaseTimeToLiveResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
GrantedTTL: resp.GrantedTTL,
|
||||
Keys: resp.Keys,
|
||||
}
|
||||
return gresp, nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
|
||||
resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
|
||||
if err == nil {
|
||||
leases := make([]LeaseStatus, len(resp.Leases))
|
||||
for i := range resp.Leases {
|
||||
leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
|
||||
}
|
||||
return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
|
||||
ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
|
||||
|
||||
l.mu.Lock()
|
||||
// ensure that recvKeepAliveLoop is still running
|
||||
select {
|
||||
case <-l.donec:
|
||||
err := l.loopErr
|
||||
l.mu.Unlock()
|
||||
close(ch)
|
||||
return ch, ErrKeepAliveHalted{Reason: err}
|
||||
default:
|
||||
}
|
||||
ka, ok := l.keepAlives[id]
|
||||
if !ok {
|
||||
// create fresh keep alive
|
||||
ka = &keepAlive{
|
||||
chs: []chan<- *LeaseKeepAliveResponse{ch},
|
||||
ctxs: []context.Context{ctx},
|
||||
deadline: time.Now().Add(l.firstKeepAliveTimeout),
|
||||
nextKeepAlive: time.Now(),
|
||||
donec: make(chan struct{}),
|
||||
}
|
||||
l.keepAlives[id] = ka
|
||||
} else {
|
||||
// add channel and context to existing keep alive
|
||||
ka.ctxs = append(ka.ctxs, ctx)
|
||||
ka.chs = append(ka.chs, ch)
|
||||
}
|
||||
l.mu.Unlock()
|
||||
|
||||
go l.keepAliveCtxCloser(ctx, id, ka.donec)
|
||||
l.firstKeepAliveOnce.Do(func() {
|
||||
go l.recvKeepAliveLoop()
|
||||
go l.deadlineLoop()
|
||||
})
|
||||
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
|
||||
for {
|
||||
resp, err := l.keepAliveOnce(ctx, id)
|
||||
if err == nil {
|
||||
if resp.TTL <= 0 {
|
||||
err = rpctypes.ErrLeaseNotFound
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lessor) Close() error {
|
||||
l.stopCancel()
|
||||
// close for synchronous teardown if stream goroutines never launched
|
||||
l.firstKeepAliveOnce.Do(func() { close(l.donec) })
|
||||
<-l.donec
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
|
||||
select {
|
||||
case <-donec:
|
||||
return
|
||||
case <-l.donec:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
|
||||
ka, ok := l.keepAlives[id]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// close channel and remove context if still associated with keep alive
|
||||
for i, c := range ka.ctxs {
|
||||
if c == ctx {
|
||||
close(ka.chs[i])
|
||||
ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
|
||||
ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
// remove if no one more listeners
|
||||
if len(ka.chs) == 0 {
|
||||
delete(l.keepAlives, id)
|
||||
}
|
||||
}
|
||||
|
||||
// closeRequireLeader scans keepAlives for ctxs that have require leader
|
||||
// and closes the associated channels.
|
||||
func (l *lessor) closeRequireLeader() {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
for _, ka := range l.keepAlives {
|
||||
reqIdxs := 0
|
||||
// find all required leader channels, close, mark as nil
|
||||
for i, ctx := range ka.ctxs {
|
||||
md, ok := metadata.FromOutgoingContext(ctx)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
ks := md[rpctypes.MetadataRequireLeaderKey]
|
||||
if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
|
||||
continue
|
||||
}
|
||||
close(ka.chs[i])
|
||||
ka.chs[i] = nil
|
||||
reqIdxs++
|
||||
}
|
||||
if reqIdxs == 0 {
|
||||
continue
|
||||
}
|
||||
// remove all channels that required a leader from keepalive
|
||||
newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
|
||||
newCtxs := make([]context.Context, len(newChs))
|
||||
newIdx := 0
|
||||
for i := range ka.chs {
|
||||
if ka.chs[i] == nil {
|
||||
continue
|
||||
}
|
||||
newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx]
|
||||
newIdx++
|
||||
}
|
||||
ka.chs, ka.ctxs = newChs, newCtxs
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
resp, rerr := stream.Recv()
|
||||
if rerr != nil {
|
||||
return nil, toErr(ctx, rerr)
|
||||
}
|
||||
|
||||
karesp := &LeaseKeepAliveResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
}
|
||||
return karesp, nil
|
||||
}
|
||||
|
||||
func (l *lessor) recvKeepAliveLoop() (gerr error) {
|
||||
defer func() {
|
||||
l.mu.Lock()
|
||||
close(l.donec)
|
||||
l.loopErr = gerr
|
||||
for _, ka := range l.keepAlives {
|
||||
ka.close()
|
||||
}
|
||||
l.keepAlives = make(map[LeaseID]*keepAlive)
|
||||
l.mu.Unlock()
|
||||
}()
|
||||
|
||||
for {
|
||||
stream, err := l.resetRecv()
|
||||
if err != nil {
|
||||
if canceledByCaller(l.stopCtx, err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
if err != nil {
|
||||
if canceledByCaller(l.stopCtx, err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
|
||||
l.closeRequireLeader()
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
l.recvKeepAlive(resp)
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(retryConnWait):
|
||||
case <-l.stopCtx.Done():
|
||||
return l.stopCtx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// resetRecv opens a new lease stream and starts sending keep alive requests.
|
||||
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
|
||||
sctx, cancel := context.WithCancel(l.stopCtx)
|
||||
stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
if l.stream != nil && l.streamCancel != nil {
|
||||
l.streamCancel()
|
||||
}
|
||||
|
||||
l.streamCancel = cancel
|
||||
l.stream = stream
|
||||
|
||||
go l.sendKeepAliveLoop(stream)
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
|
||||
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
|
||||
karesp := &LeaseKeepAliveResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
}
|
||||
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
|
||||
ka, ok := l.keepAlives[karesp.ID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if karesp.TTL <= 0 {
|
||||
// lease expired; close all keep alive channels
|
||||
delete(l.keepAlives, karesp.ID)
|
||||
ka.close()
|
||||
return
|
||||
}
|
||||
|
||||
// send update to all channels
|
||||
nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
|
||||
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
|
||||
for _, ch := range ka.chs {
|
||||
select {
|
||||
case ch <- karesp:
|
||||
default:
|
||||
if l.lg != nil {
|
||||
l.lg.Warn("lease keepalive response queue is full; dropping response send",
|
||||
zap.Int("queue-size", len(ch)),
|
||||
zap.Int("queue-capacity", cap(ch)),
|
||||
)
|
||||
}
|
||||
}
|
||||
// still advance in order to rate-limit keep-alive sends
|
||||
ka.nextKeepAlive = nextKeepAlive
|
||||
}
|
||||
}
|
||||
|
||||
// deadlineLoop reaps any keep alive channels that have not received a response
|
||||
// within the lease TTL
|
||||
func (l *lessor) deadlineLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
case <-l.donec:
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
l.mu.Lock()
|
||||
for id, ka := range l.keepAlives {
|
||||
if ka.deadline.Before(now) {
|
||||
// waited too long for response; lease may be expired
|
||||
ka.close()
|
||||
delete(l.keepAlives, id)
|
||||
}
|
||||
}
|
||||
l.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
|
||||
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
|
||||
for {
|
||||
var tosend []LeaseID
|
||||
|
||||
now := time.Now()
|
||||
l.mu.Lock()
|
||||
for id, ka := range l.keepAlives {
|
||||
if ka.nextKeepAlive.Before(now) {
|
||||
tosend = append(tosend, id)
|
||||
}
|
||||
}
|
||||
l.mu.Unlock()
|
||||
|
||||
for _, id := range tosend {
|
||||
r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
|
||||
if err := stream.Send(r); err != nil {
|
||||
// TODO do something with this error?
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(retryConnWait):
|
||||
case <-stream.Context().Done():
|
||||
return
|
||||
case <-l.donec:
|
||||
return
|
||||
case <-l.stopCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ka *keepAlive) close() {
|
||||
close(ka.donec)
|
||||
for _, ch := range ka.chs {
|
||||
close(ch)
|
||||
}
|
||||
}
|
101
vendor/go.etcd.io/etcd/clientv3/logger.go
generated
vendored
Normal file
101
vendor/go.etcd.io/etcd/clientv3/logger.go
generated
vendored
Normal file
@ -0,0 +1,101 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
|
||||
"go.etcd.io/etcd/pkg/logutil"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var (
|
||||
lgMu sync.RWMutex
|
||||
lg logutil.Logger
|
||||
)
|
||||
|
||||
type settableLogger struct {
|
||||
l grpclog.LoggerV2
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func init() {
|
||||
// disable client side logs by default
|
||||
lg = &settableLogger{}
|
||||
SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
|
||||
}
|
||||
|
||||
// SetLogger sets client-side Logger.
|
||||
func SetLogger(l grpclog.LoggerV2) {
|
||||
lgMu.Lock()
|
||||
lg = logutil.NewLogger(l)
|
||||
// override grpclog so that any changes happen with locking
|
||||
grpclog.SetLoggerV2(lg)
|
||||
lgMu.Unlock()
|
||||
}
|
||||
|
||||
// GetLogger returns the current logutil.Logger.
|
||||
func GetLogger() logutil.Logger {
|
||||
lgMu.RLock()
|
||||
l := lg
|
||||
lgMu.RUnlock()
|
||||
return l
|
||||
}
|
||||
|
||||
// NewLogger returns a new Logger with logutil.Logger.
|
||||
func NewLogger(gl grpclog.LoggerV2) logutil.Logger {
|
||||
return &settableLogger{l: gl}
|
||||
}
|
||||
|
||||
func (s *settableLogger) get() grpclog.LoggerV2 {
|
||||
s.mu.RLock()
|
||||
l := s.l
|
||||
s.mu.RUnlock()
|
||||
return l
|
||||
}
|
||||
|
||||
// implement the grpclog.LoggerV2 interface
|
||||
|
||||
func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
|
||||
func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
|
||||
func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
|
||||
func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
|
||||
func (s *settableLogger) Warningf(format string, args ...interface{}) {
|
||||
s.get().Warningf(format, args...)
|
||||
}
|
||||
func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
|
||||
func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
|
||||
func (s *settableLogger) Errorf(format string, args ...interface{}) {
|
||||
s.get().Errorf(format, args...)
|
||||
}
|
||||
func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
|
||||
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
|
||||
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
|
||||
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
|
||||
func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
|
||||
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
|
||||
func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
|
||||
func (s *settableLogger) V(l int) bool { return s.get().V(l) }
|
||||
func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 {
|
||||
s.mu.RLock()
|
||||
l := s.l
|
||||
s.mu.RUnlock()
|
||||
if l.V(lvl) {
|
||||
return s
|
||||
}
|
||||
return logutil.NewDiscardLogger()
|
||||
}
|
230
vendor/go.etcd.io/etcd/clientv3/maintenance.go
generated
vendored
Normal file
230
vendor/go.etcd.io/etcd/clientv3/maintenance.go
generated
vendored
Normal file
@ -0,0 +1,230 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type (
|
||||
DefragmentResponse pb.DefragmentResponse
|
||||
AlarmResponse pb.AlarmResponse
|
||||
AlarmMember pb.AlarmMember
|
||||
StatusResponse pb.StatusResponse
|
||||
HashKVResponse pb.HashKVResponse
|
||||
MoveLeaderResponse pb.MoveLeaderResponse
|
||||
)
|
||||
|
||||
type Maintenance interface {
|
||||
// AlarmList gets all active alarms.
|
||||
AlarmList(ctx context.Context) (*AlarmResponse, error)
|
||||
|
||||
// AlarmDisarm disarms a given alarm.
|
||||
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
|
||||
|
||||
// Defragment releases wasted space from internal fragmentation on a given etcd member.
|
||||
// Defragment is only needed when deleting a large number of keys and want to reclaim
|
||||
// the resources.
|
||||
// Defragment is an expensive operation. User should avoid defragmenting multiple members
|
||||
// at the same time.
|
||||
// To defragment multiple members in the cluster, user need to call defragment multiple
|
||||
// times with different endpoints.
|
||||
Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
|
||||
|
||||
// Status gets the status of the endpoint.
|
||||
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
|
||||
|
||||
// HashKV returns a hash of the KV state at the time of the RPC.
|
||||
// If revision is zero, the hash is computed on all keys. If the revision
|
||||
// is non-zero, the hash is computed on all keys at or below the given revision.
|
||||
HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
|
||||
|
||||
// Snapshot provides a reader for a point-in-time snapshot of etcd.
|
||||
// If the context "ctx" is canceled or timed out, reading from returned
|
||||
// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
|
||||
Snapshot(ctx context.Context) (io.ReadCloser, error)
|
||||
|
||||
// MoveLeader requests current leader to transfer its leadership to the transferee.
|
||||
// Request must be made to the leader.
|
||||
MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
|
||||
}
|
||||
|
||||
type maintenance struct {
|
||||
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
|
||||
remote pb.MaintenanceClient
|
||||
callOpts []grpc.CallOption
|
||||
}
|
||||
|
||||
func NewMaintenance(c *Client) Maintenance {
|
||||
api := &maintenance{
|
||||
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
|
||||
conn, err := c.Dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
|
||||
}
|
||||
cancel := func() { conn.Close() }
|
||||
return RetryMaintenanceClient(c, conn), cancel, nil
|
||||
},
|
||||
remote: RetryMaintenanceClient(c, c.conn),
|
||||
}
|
||||
if c != nil {
|
||||
api.callOpts = c.callOpts
|
||||
}
|
||||
return api
|
||||
}
|
||||
|
||||
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
|
||||
api := &maintenance{
|
||||
dial: func(string) (pb.MaintenanceClient, func(), error) {
|
||||
return remote, func() {}, nil
|
||||
},
|
||||
remote: remote,
|
||||
}
|
||||
if c != nil {
|
||||
api.callOpts = c.callOpts
|
||||
}
|
||||
return api
|
||||
}
|
||||
|
||||
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
|
||||
req := &pb.AlarmRequest{
|
||||
Action: pb.AlarmRequest_GET,
|
||||
MemberID: 0, // all
|
||||
Alarm: pb.AlarmType_NONE, // all
|
||||
}
|
||||
resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
|
||||
if err == nil {
|
||||
return (*AlarmResponse)(resp), nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
|
||||
req := &pb.AlarmRequest{
|
||||
Action: pb.AlarmRequest_DEACTIVATE,
|
||||
MemberID: am.MemberID,
|
||||
Alarm: am.Alarm,
|
||||
}
|
||||
|
||||
if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
|
||||
ar, err := m.AlarmList(ctx)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
ret := AlarmResponse{}
|
||||
for _, am := range ar.Alarms {
|
||||
dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
|
||||
if derr != nil {
|
||||
return nil, toErr(ctx, derr)
|
||||
}
|
||||
ret.Alarms = append(ret.Alarms, dresp.Alarms...)
|
||||
}
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
|
||||
if err == nil {
|
||||
return (*AlarmResponse)(resp), nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
|
||||
remote, cancel, err := m.dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
defer cancel()
|
||||
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
return (*DefragmentResponse)(resp), nil
|
||||
}
|
||||
|
||||
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
|
||||
remote, cancel, err := m.dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
defer cancel()
|
||||
resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
return (*StatusResponse)(resp), nil
|
||||
}
|
||||
|
||||
func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
|
||||
remote, cancel, err := m.dial(endpoint)
|
||||
if err != nil {
|
||||
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
defer cancel()
|
||||
resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
return (*HashKVResponse)(resp), nil
|
||||
}
|
||||
|
||||
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
|
||||
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
for {
|
||||
resp, err := ss.Recv()
|
||||
if err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if resp == nil && err == nil {
|
||||
break
|
||||
}
|
||||
if _, werr := pw.Write(resp.Blob); werr != nil {
|
||||
pw.CloseWithError(werr)
|
||||
return
|
||||
}
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
|
||||
}
|
||||
|
||||
type snapshotReadCloser struct {
|
||||
ctx context.Context
|
||||
io.ReadCloser
|
||||
}
|
||||
|
||||
func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
|
||||
n, err = rc.ReadCloser.Read(p)
|
||||
return n, toErr(rc.ctx, err)
|
||||
}
|
||||
|
||||
func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
|
||||
resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
|
||||
return (*MoveLeaderResponse)(resp), toErr(ctx, err)
|
||||
}
|
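The Maintenance interface above routes Status, HashKV, and Defragment to a single endpoint, while AlarmList/AlarmDisarm, Snapshot, and MoveLeader go through the client's active connection. Below is a minimal usage sketch; it is not part of the vendored sources, the endpoints are placeholders, and it assumes a reachable cluster.

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Placeholder endpoints; replace with real cluster members.
	endpoints := []string{"127.0.0.1:2379", "127.0.0.1:22379", "127.0.0.1:32379"}

	cli, err := clientv3.New(clientv3.Config{Endpoints: endpoints, DialTimeout: 5 * time.Second})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Status and Defragment are per-endpoint calls, so walk every member explicitly.
	for _, ep := range endpoints {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		if st, serr := cli.Status(ctx, ep); serr == nil {
			fmt.Printf("%s: db size %d bytes, leader %x\n", ep, st.DbSize, st.Leader)
		}
		// Defragment one member at a time, as the interface comment advises.
		if _, derr := cli.Defragment(ctx, ep); derr != nil {
			fmt.Printf("defragment %s failed: %v\n", ep, derr)
		}
		cancel()
	}
}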
560 vendor/go.etcd.io/etcd/clientv3/op.go generated vendored Normal file
@ -0,0 +1,560 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
type opType int
|
||||
|
||||
const (
|
||||
// A default Op has opType 0, which is invalid.
|
||||
tRange opType = iota + 1
|
||||
tPut
|
||||
tDeleteRange
|
||||
tTxn
|
||||
)
|
||||
|
||||
var noPrefixEnd = []byte{0}
|
||||
|
||||
// Op represents an Operation that kv can execute.
|
||||
type Op struct {
|
||||
t opType
|
||||
key []byte
|
||||
end []byte
|
||||
|
||||
// for range
|
||||
limit int64
|
||||
sort *SortOption
|
||||
serializable bool
|
||||
keysOnly bool
|
||||
countOnly bool
|
||||
minModRev int64
|
||||
maxModRev int64
|
||||
minCreateRev int64
|
||||
maxCreateRev int64
|
||||
|
||||
// for range, watch
|
||||
rev int64
|
||||
|
||||
// for watch, put, delete
|
||||
prevKV bool
|
||||
|
||||
// for watch
|
||||
// fragmentation should be disabled by default
|
||||
// if true, split watch events when total exceeds
|
||||
// "--max-request-bytes" flag value + 512-byte
|
||||
fragment bool
|
||||
|
||||
// for put
|
||||
ignoreValue bool
|
||||
ignoreLease bool
|
||||
|
||||
// progressNotify is for progress updates.
|
||||
progressNotify bool
|
||||
// createdNotify is for created event
|
||||
createdNotify bool
|
||||
// filters for watchers
|
||||
filterPut bool
|
||||
filterDelete bool
|
||||
|
||||
// for put
|
||||
val []byte
|
||||
leaseID LeaseID
|
||||
|
||||
// txn
|
||||
cmps []Cmp
|
||||
thenOps []Op
|
||||
elseOps []Op
|
||||
}
|
||||
|
||||
// accessors / mutators
|
||||
|
||||
// IsTxn returns true if the "Op" type is transaction.
|
||||
func (op Op) IsTxn() bool {
|
||||
return op.t == tTxn
|
||||
}
|
||||
|
||||
// Txn returns the comparison(if) operations, "then" operations, and "else" operations.
|
||||
func (op Op) Txn() ([]Cmp, []Op, []Op) {
|
||||
return op.cmps, op.thenOps, op.elseOps
|
||||
}
|
||||
|
||||
// KeyBytes returns the byte slice holding the Op's key.
|
||||
func (op Op) KeyBytes() []byte { return op.key }
|
||||
|
||||
// WithKeyBytes sets the byte slice for the Op's key.
|
||||
func (op *Op) WithKeyBytes(key []byte) { op.key = key }
|
||||
|
||||
// RangeBytes returns the byte slice holding with the Op's range end, if any.
|
||||
func (op Op) RangeBytes() []byte { return op.end }
|
||||
|
||||
// Rev returns the requested revision, if any.
|
||||
func (op Op) Rev() int64 { return op.rev }
|
||||
|
||||
// IsPut returns true iff the operation is a Put.
|
||||
func (op Op) IsPut() bool { return op.t == tPut }
|
||||
|
||||
// IsGet returns true iff the operation is a Get.
|
||||
func (op Op) IsGet() bool { return op.t == tRange }
|
||||
|
||||
// IsDelete returns true iff the operation is a Delete.
|
||||
func (op Op) IsDelete() bool { return op.t == tDeleteRange }
|
||||
|
||||
// IsSerializable returns true if the serializable field is true.
|
||||
func (op Op) IsSerializable() bool { return op.serializable }
|
||||
|
||||
// IsKeysOnly returns whether keysOnly is set.
|
||||
func (op Op) IsKeysOnly() bool { return op.keysOnly }
|
||||
|
||||
// IsCountOnly returns whether countOnly is set.
|
||||
func (op Op) IsCountOnly() bool { return op.countOnly }
|
||||
|
||||
// MinModRev returns the operation's minimum modify revision.
|
||||
func (op Op) MinModRev() int64 { return op.minModRev }
|
||||
|
||||
// MaxModRev returns the operation's maximum modify revision.
|
||||
func (op Op) MaxModRev() int64 { return op.maxModRev }
|
||||
|
||||
// MinCreateRev returns the operation's minimum create revision.
|
||||
func (op Op) MinCreateRev() int64 { return op.minCreateRev }
|
||||
|
||||
// MaxCreateRev returns the operation's maximum create revision.
|
||||
func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
|
||||
|
||||
// WithRangeBytes sets the byte slice for the Op's range end.
|
||||
func (op *Op) WithRangeBytes(end []byte) { op.end = end }
|
||||
|
||||
// ValueBytes returns the byte slice holding the Op's value, if any.
|
||||
func (op Op) ValueBytes() []byte { return op.val }
|
||||
|
||||
// WithValueBytes sets the byte slice for the Op's value.
|
||||
func (op *Op) WithValueBytes(v []byte) { op.val = v }
|
||||
|
||||
func (op Op) toRangeRequest() *pb.RangeRequest {
|
||||
if op.t != tRange {
|
||||
panic("op.t != tRange")
|
||||
}
|
||||
r := &pb.RangeRequest{
|
||||
Key: op.key,
|
||||
RangeEnd: op.end,
|
||||
Limit: op.limit,
|
||||
Revision: op.rev,
|
||||
Serializable: op.serializable,
|
||||
KeysOnly: op.keysOnly,
|
||||
CountOnly: op.countOnly,
|
||||
MinModRevision: op.minModRev,
|
||||
MaxModRevision: op.maxModRev,
|
||||
MinCreateRevision: op.minCreateRev,
|
||||
MaxCreateRevision: op.maxCreateRev,
|
||||
}
|
||||
if op.sort != nil {
|
||||
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
|
||||
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (op Op) toTxnRequest() *pb.TxnRequest {
|
||||
thenOps := make([]*pb.RequestOp, len(op.thenOps))
|
||||
for i, tOp := range op.thenOps {
|
||||
thenOps[i] = tOp.toRequestOp()
|
||||
}
|
||||
elseOps := make([]*pb.RequestOp, len(op.elseOps))
|
||||
for i, eOp := range op.elseOps {
|
||||
elseOps[i] = eOp.toRequestOp()
|
||||
}
|
||||
cmps := make([]*pb.Compare, len(op.cmps))
|
||||
for i := range op.cmps {
|
||||
cmps[i] = (*pb.Compare)(&op.cmps[i])
|
||||
}
|
||||
return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
|
||||
}
|
||||
|
||||
func (op Op) toRequestOp() *pb.RequestOp {
|
||||
switch op.t {
|
||||
case tRange:
|
||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
|
||||
case tPut:
|
||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
|
||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
|
||||
case tDeleteRange:
|
||||
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
|
||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
|
||||
case tTxn:
|
||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
|
||||
default:
|
||||
panic("Unknown Op")
|
||||
}
|
||||
}
|
||||
|
||||
func (op Op) isWrite() bool {
|
||||
if op.t == tTxn {
|
||||
for _, tOp := range op.thenOps {
|
||||
if tOp.isWrite() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, tOp := range op.elseOps {
|
||||
if tOp.isWrite() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
return op.t != tRange
|
||||
}
|
||||
|
||||
// OpGet returns "get" operation based on given key and operation options.
|
||||
func OpGet(key string, opts ...OpOption) Op {
|
||||
// WithPrefix and WithFromKey are not supported together
|
||||
if isWithPrefix(opts) && isWithFromKey(opts) {
|
||||
panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
|
||||
}
|
||||
ret := Op{t: tRange, key: []byte(key)}
|
||||
ret.applyOpts(opts)
|
||||
return ret
|
||||
}
|
||||
|
||||
// OpDelete returns "delete" operation based on given key and operation options.
|
||||
func OpDelete(key string, opts ...OpOption) Op {
|
||||
// WithPrefix and WithFromKey are not supported together
|
||||
if isWithPrefix(opts) && isWithFromKey(opts) {
|
||||
panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
|
||||
}
|
||||
ret := Op{t: tDeleteRange, key: []byte(key)}
|
||||
ret.applyOpts(opts)
|
||||
switch {
|
||||
case ret.leaseID != 0:
|
||||
panic("unexpected lease in delete")
|
||||
case ret.limit != 0:
|
||||
panic("unexpected limit in delete")
|
||||
case ret.rev != 0:
|
||||
panic("unexpected revision in delete")
|
||||
case ret.sort != nil:
|
||||
panic("unexpected sort in delete")
|
||||
case ret.serializable:
|
||||
panic("unexpected serializable in delete")
|
||||
case ret.countOnly:
|
||||
panic("unexpected countOnly in delete")
|
||||
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||
panic("unexpected mod revision filter in delete")
|
||||
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||
panic("unexpected create revision filter in delete")
|
||||
case ret.filterDelete, ret.filterPut:
|
||||
panic("unexpected filter in delete")
|
||||
case ret.createdNotify:
|
||||
panic("unexpected createdNotify in delete")
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// OpPut returns "put" operation based on given key-value and operation options.
|
||||
func OpPut(key, val string, opts ...OpOption) Op {
|
||||
ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
|
||||
ret.applyOpts(opts)
|
||||
switch {
|
||||
case ret.end != nil:
|
||||
panic("unexpected range in put")
|
||||
case ret.limit != 0:
|
||||
panic("unexpected limit in put")
|
||||
case ret.rev != 0:
|
||||
panic("unexpected revision in put")
|
||||
case ret.sort != nil:
|
||||
panic("unexpected sort in put")
|
||||
case ret.serializable:
|
||||
panic("unexpected serializable in put")
|
||||
case ret.countOnly:
|
||||
panic("unexpected countOnly in put")
|
||||
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||
panic("unexpected mod revision filter in put")
|
||||
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||
panic("unexpected create revision filter in put")
|
||||
case ret.filterDelete, ret.filterPut:
|
||||
panic("unexpected filter in put")
|
||||
case ret.createdNotify:
|
||||
panic("unexpected createdNotify in put")
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// OpTxn returns "txn" operation based on given transaction conditions.
|
||||
func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
|
||||
return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
|
||||
}
|
||||
|
||||
func opWatch(key string, opts ...OpOption) Op {
|
||||
ret := Op{t: tRange, key: []byte(key)}
|
||||
ret.applyOpts(opts)
|
||||
switch {
|
||||
case ret.leaseID != 0:
|
||||
panic("unexpected lease in watch")
|
||||
case ret.limit != 0:
|
||||
panic("unexpected limit in watch")
|
||||
case ret.sort != nil:
|
||||
panic("unexpected sort in watch")
|
||||
case ret.serializable:
|
||||
panic("unexpected serializable in watch")
|
||||
case ret.countOnly:
|
||||
panic("unexpected countOnly in watch")
|
||||
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||
panic("unexpected mod revision filter in watch")
|
||||
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||
panic("unexpected create revision filter in watch")
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (op *Op) applyOpts(opts []OpOption) {
|
||||
for _, opt := range opts {
|
||||
opt(op)
|
||||
}
|
||||
}
|
||||
|
||||
// OpOption configures Operations like Get, Put, Delete.
|
||||
type OpOption func(*Op)
|
||||
|
||||
// WithLease attaches a lease ID to a key in 'Put' request.
|
||||
func WithLease(leaseID LeaseID) OpOption {
|
||||
return func(op *Op) { op.leaseID = leaseID }
|
||||
}
|
||||
|
||||
// WithLimit limits the number of results to return from 'Get' request.
|
||||
// If WithLimit is given a 0 limit, it is treated as no limit.
|
||||
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
|
||||
|
||||
// WithRev specifies the store revision for 'Get' request.
|
||||
// Or the start revision of 'Watch' request.
|
||||
func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
|
||||
|
||||
// WithSort specifies the ordering in 'Get' request. It requires
|
||||
// 'WithRange' and/or 'WithPrefix' to be specified too.
|
||||
// 'target' specifies the target to sort by: key, version, revision, value.
|
||||
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
|
||||
func WithSort(target SortTarget, order SortOrder) OpOption {
|
||||
return func(op *Op) {
|
||||
if target == SortByKey && order == SortAscend {
|
||||
// If order != SortNone, server fetches the entire key-space,
|
||||
// and then applies the sort and limit, if provided.
|
||||
// Since by default the server returns results sorted by keys
|
||||
// in lexicographically ascending order, the client should ignore
|
||||
// SortOrder if the target is SortByKey.
|
||||
order = SortNone
|
||||
}
|
||||
op.sort = &SortOption{target, order}
|
||||
}
|
||||
}
|
||||
|
||||
// GetPrefixRangeEnd gets the range end of the prefix.
|
||||
// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
|
||||
func GetPrefixRangeEnd(prefix string) string {
|
||||
return string(getPrefix([]byte(prefix)))
|
||||
}
|
||||
|
||||
func getPrefix(key []byte) []byte {
|
||||
end := make([]byte, len(key))
|
||||
copy(end, key)
|
||||
for i := len(end) - 1; i >= 0; i-- {
|
||||
if end[i] < 0xff {
|
||||
end[i] = end[i] + 1
|
||||
end = end[:i+1]
|
||||
return end
|
||||
}
|
||||
}
|
||||
// next prefix does not exist (e.g., 0xffff);
|
||||
// default to WithFromKey policy
|
||||
return noPrefixEnd
|
||||
}
|
||||
|
||||
// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
|
||||
// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
|
||||
// can return 'foo1', 'foo2', and so on.
|
||||
func WithPrefix() OpOption {
|
||||
return func(op *Op) {
|
||||
if len(op.key) == 0 {
|
||||
op.key, op.end = []byte{0}, []byte{0}
|
||||
return
|
||||
}
|
||||
op.end = getPrefix(op.key)
|
||||
}
|
||||
}
|
||||
|
||||
// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
|
||||
// For example, 'Get' requests with 'WithRange(end)' return
|
||||
// the keys in the range [key, end).
|
||||
// endKey must be lexicographically greater than start key.
|
||||
func WithRange(endKey string) OpOption {
|
||||
return func(op *Op) { op.end = []byte(endKey) }
|
||||
}
|
||||
|
||||
// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
|
||||
// to be equal to or greater than the key in the argument.
|
||||
func WithFromKey() OpOption {
|
||||
return func(op *Op) {
|
||||
if len(op.key) == 0 {
|
||||
op.key = []byte{0}
|
||||
}
|
||||
op.end = []byte("\x00")
|
||||
}
|
||||
}
|
||||
|
||||
// WithSerializable makes 'Get' request serializable. By default,
|
||||
// it's linearizable. Serializable requests are better for lower latency
|
||||
// requirements.
|
||||
func WithSerializable() OpOption {
|
||||
return func(op *Op) { op.serializable = true }
|
||||
}
|
||||
|
||||
// WithKeysOnly makes the 'Get' request return only the keys; the corresponding
// values are omitted.
|
||||
func WithKeysOnly() OpOption {
|
||||
return func(op *Op) { op.keysOnly = true }
|
||||
}
|
||||
|
||||
// WithCountOnly makes the 'Get' request return only the count of keys.
|
||||
func WithCountOnly() OpOption {
|
||||
return func(op *Op) { op.countOnly = true }
|
||||
}
|
||||
|
||||
// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
|
||||
func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
|
||||
|
||||
// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
|
||||
func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
|
||||
|
||||
// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
|
||||
func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
|
||||
|
||||
// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
|
||||
func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
|
||||
|
||||
// WithFirstCreate gets the key with the oldest creation revision in the request range.
|
||||
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
|
||||
|
||||
// WithLastCreate gets the key with the latest creation revision in the request range.
|
||||
func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
|
||||
|
||||
// WithFirstKey gets the lexically first key in the request range.
|
||||
func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
|
||||
|
||||
// WithLastKey gets the lexically last key in the request range.
|
||||
func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
|
||||
|
||||
// WithFirstRev gets the key with the oldest modification revision in the request range.
|
||||
func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
|
||||
|
||||
// WithLastRev gets the key with the latest modification revision in the request range.
|
||||
func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
|
||||
|
||||
// withTop gets the first key over the get's prefix given a sort order
|
||||
func withTop(target SortTarget, order SortOrder) []OpOption {
|
||||
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
|
||||
}
|
||||
|
||||
// WithProgressNotify makes the watch server send periodic progress updates
// every 10 minutes when there are no incoming events.
|
||||
// Progress updates have zero events in WatchResponse.
|
||||
func WithProgressNotify() OpOption {
|
||||
return func(op *Op) {
|
||||
op.progressNotify = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithCreatedNotify makes the watch server send the created event.
|
||||
func WithCreatedNotify() OpOption {
|
||||
return func(op *Op) {
|
||||
op.createdNotify = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithFilterPut discards PUT events from the watcher.
|
||||
func WithFilterPut() OpOption {
|
||||
return func(op *Op) { op.filterPut = true }
|
||||
}
|
||||
|
||||
// WithFilterDelete discards DELETE events from the watcher.
|
||||
func WithFilterDelete() OpOption {
|
||||
return func(op *Op) { op.filterDelete = true }
|
||||
}
|
||||
|
||||
// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
|
||||
// nothing will be returned.
|
||||
func WithPrevKV() OpOption {
|
||||
return func(op *Op) {
|
||||
op.prevKV = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithFragment enables receiving raw watch responses with fragmentation.
|
||||
// Fragmentation is disabled by default. If fragmentation is enabled,
|
||||
// etcd watch server will split watch response before sending to clients
|
||||
// when the total size of watch events exceeds the server-side request limit.
|
||||
// The default server-side request limit is 1.5 MiB, which can be configured
|
||||
// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes.
|
||||
// See "etcdserver/api/v3rpc/watch.go" for more details.
|
||||
func WithFragment() OpOption {
|
||||
return func(op *Op) { op.fragment = true }
|
||||
}
|
||||
|
||||
// WithIgnoreValue updates the key using its current value.
|
||||
// This option can not be combined with non-empty values.
|
||||
// Returns an error if the key does not exist.
|
||||
func WithIgnoreValue() OpOption {
|
||||
return func(op *Op) {
|
||||
op.ignoreValue = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithIgnoreLease updates the key using its current lease.
|
||||
// This option can not be combined with WithLease.
|
||||
// Returns an error if the key does not exist.
|
||||
func WithIgnoreLease() OpOption {
|
||||
return func(op *Op) {
|
||||
op.ignoreLease = true
|
||||
}
|
||||
}
|
||||
|
||||
// LeaseOp represents an Operation that lease can execute.
|
||||
type LeaseOp struct {
|
||||
id LeaseID
|
||||
|
||||
// for TimeToLive
|
||||
attachedKeys bool
|
||||
}
|
||||
|
||||
// LeaseOption configures lease operations.
|
||||
type LeaseOption func(*LeaseOp)
|
||||
|
||||
func (op *LeaseOp) applyOpts(opts []LeaseOption) {
|
||||
for _, opt := range opts {
|
||||
opt(op)
|
||||
}
|
||||
}
|
||||
|
||||
// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
|
||||
func WithAttachedKeys() LeaseOption {
|
||||
return func(op *LeaseOp) { op.attachedKeys = true }
|
||||
}
|
||||
|
||||
func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
|
||||
ret := &LeaseOp{id: id}
|
||||
ret.applyOpts(opts)
|
||||
return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
|
||||
}
|
||||
|
||||
// isWithPrefix returns true if WithPrefix is being called in the op
|
||||
func isWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) }
|
||||
|
||||
// isWithFromKey returns true if WithFromKey is being called in the op
|
||||
func isWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) }
|
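The Op constructors and OpOption helpers in op.go are normally reached through the client's KV methods. A brief sketch follows; it is not part of the vendored sources and assumes a connected *clientv3.Client named cli plus the same imports ("context", "fmt", clientv3) as the earlier example.

// opExample composes OpPut, OpGet, and OpTxn with the options defined above.
func opExample(ctx context.Context, cli *clientv3.Client) error {
	// A plain put, then a prefix read that is sorted and limited server-side.
	if _, err := cli.Do(ctx, clientv3.OpPut("jobs/1", "pending")); err != nil {
		return err
	}
	getOp := clientv3.OpGet("jobs/",
		clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend),
		clientv3.WithLimit(10),
	)
	resp, err := cli.Do(ctx, getOp)
	if err != nil {
		return err
	}
	for _, kv := range resp.Get().Kvs {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value)
	}

	// OpTxn bundles comparisons plus then/else branches into one request.
	txnOp := clientv3.OpTxn(
		[]clientv3.Cmp{clientv3.Compare(clientv3.Value("jobs/1"), "=", "pending")},
		[]clientv3.Op{clientv3.OpPut("jobs/1", "running")},
		[]clientv3.Op{clientv3.OpGet("jobs/1")},
	)
	_, err = cli.Do(ctx, txnOp)
	return err
}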
65 vendor/go.etcd.io/etcd/clientv3/options.go generated vendored Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
// client-side retry handling of request failures where data was not written to the wire or
// where the server indicates it did not process the data. The gRPC default is "FailFast(true)",
|
||||
// but for etcd we default to "FailFast(false)" to minimize client request error responses due to
|
||||
// transient failures.
|
||||
defaultFailFast = grpc.FailFast(false)
|
||||
|
||||
// client-side request send limit, gRPC default is math.MaxInt32
|
||||
// Make sure that "client-side send limit < server-side default send/recv limit"
|
||||
// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
|
||||
defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
|
||||
|
||||
// client-side response receive limit, gRPC default is 4MB
|
||||
// Make sure that "client-side receive limit >= server-side default send/recv limit"
|
||||
// because range response can easily exceed request send limits
|
||||
// Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway
|
||||
defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
|
||||
|
||||
// client-side non-streaming retry limit, only applied to requests where server responds with
|
||||
// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
|
||||
// If set to 0, retry is disabled.
|
||||
defaultUnaryMaxRetries uint = 100
|
||||
|
||||
// client-side streaming retry limit, only applied to requests where server responds with
|
||||
// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
|
||||
// If set to 0, retry is disabled.
|
||||
defaultStreamMaxRetries = ^uint(0) // max uint
|
||||
|
||||
// client-side retry backoff wait between requests.
|
||||
defaultBackoffWaitBetween = 25 * time.Millisecond
|
||||
|
||||
// client-side retry backoff default jitter fraction.
|
||||
defaultBackoffJitterFraction = 0.10
|
||||
)
|
||||
|
||||
// defaultCallOpts defines a list of default "gRPC.CallOption".
|
||||
// Some options are exposed to "clientv3.Config".
|
||||
// Defaults will be overridden by the settings in "clientv3.Config".
|
||||
var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
|
||||
|
||||
// MaxLeaseTTL is the maximum lease TTL value
|
||||
const MaxLeaseTTL = 9000000000
|
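The defaults above (a 2 MiB send limit, a MaxInt32 receive limit, FailFast(false)) are fallbacks; per the comment on defaultCallOpts, clientv3.Config can override the message-size options. A short sketch with made-up sizes, not part of the vendored sources:

// newClientWithLimits overrides the message-size defaults from options.go via Config.
func newClientWithLimits(endpoints []string) (*clientv3.Client, error) {
	return clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: 5 * time.Second,
		// Replaces defaultMaxCallSendMsgSize (2 MiB) and defaultMaxCallRecvMsgSize (MaxInt32).
		MaxCallSendMsgSize: 4 * 1024 * 1024,
		MaxCallRecvMsgSize: 16 * 1024 * 1024,
	})
}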
298 vendor/go.etcd.io/etcd/clientv3/retry.go generated vendored Normal file
@ -0,0 +1,298 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type retryPolicy uint8
|
||||
|
||||
const (
|
||||
repeatable retryPolicy = iota
|
||||
nonRepeatable
|
||||
)
|
||||
|
||||
func (rp retryPolicy) String() string {
|
||||
switch rp {
|
||||
case repeatable:
|
||||
return "repeatable"
|
||||
case nonRepeatable:
|
||||
return "nonRepeatable"
|
||||
default:
|
||||
return "UNKNOWN"
|
||||
}
|
||||
}
|
||||
|
||||
// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
|
||||
//
|
||||
// immutable requests (e.g. Get) should be retried unless it's
|
||||
// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
|
||||
//
|
||||
// Returning "false" means retry should stop, since client cannot
|
||||
// handle itself even with retries.
|
||||
func isSafeRetryImmutableRPC(err error) bool {
|
||||
eErr := rpctypes.Error(err)
|
||||
if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
|
||||
// interrupted by non-transient server-side or gRPC-side error
|
||||
// client cannot handle itself (e.g. rpctypes.ErrCompacted)
|
||||
return false
|
||||
}
|
||||
// only retry if unavailable
|
||||
ev, ok := status.FromError(err)
|
||||
if !ok {
|
||||
// all errors from RPCs are typed "grpc/status.(*statusError)"
|
||||
// (ref. https://github.com/grpc/grpc-go/pull/1782)
|
||||
//
|
||||
// if the error type is not "grpc/status.(*statusError)",
|
||||
// it could be from "Dial"
|
||||
// TODO: do not retry for now
|
||||
// ref. https://github.com/grpc/grpc-go/issues/1581
|
||||
return false
|
||||
}
|
||||
return ev.Code() == codes.Unavailable
|
||||
}
|
||||
|
||||
// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
|
||||
//
|
||||
// mutable requests (e.g. Put, Delete, Txn) should only be retried
|
||||
// when the status code is codes.Unavailable and the initial connection
// has not yet been established (no endpoint is up).
|
||||
//
|
||||
// Returning "false" means retry should stop, otherwise it violates
|
||||
// write-at-most-once semantics.
|
||||
func isSafeRetryMutableRPC(err error) bool {
|
||||
if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
|
||||
// not safe for mutable RPCs
|
||||
// e.g. interrupted by non-transient error that client cannot handle itself,
|
||||
// or transient error while the connection has already been established
|
||||
return false
|
||||
}
|
||||
desc := rpctypes.ErrorDesc(err)
|
||||
return desc == "there is no address available" || desc == "there is no connection available"
|
||||
}
|
||||
|
||||
type retryKVClient struct {
|
||||
kc pb.KVClient
|
||||
}
|
||||
|
||||
// RetryKVClient implements a KVClient.
|
||||
func RetryKVClient(c *Client) pb.KVClient {
|
||||
return &retryKVClient{
|
||||
kc: pb.NewKVClient(c.conn),
|
||||
}
|
||||
}
|
||||
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
|
||||
return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||
return rkv.kc.Put(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||
return rkv.kc.DeleteRange(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||
return rkv.kc.Txn(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||
return rkv.kc.Compact(ctx, in, opts...)
|
||||
}
|
||||
|
||||
type retryLeaseClient struct {
|
||||
lc pb.LeaseClient
|
||||
}
|
||||
|
||||
// RetryLeaseClient implements a LeaseClient.
|
||||
func RetryLeaseClient(c *Client) pb.LeaseClient {
|
||||
return &retryLeaseClient{
|
||||
lc: pb.NewLeaseClient(c.conn),
|
||||
}
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
|
||||
return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
|
||||
return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
|
||||
return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
|
||||
return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
|
||||
return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
type retryClusterClient struct {
|
||||
cc pb.ClusterClient
|
||||
}
|
||||
|
||||
// RetryClusterClient implements a ClusterClient.
|
||||
func RetryClusterClient(c *Client) pb.ClusterClient {
|
||||
return &retryClusterClient{
|
||||
cc: pb.NewClusterClient(c.conn),
|
||||
}
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
|
||||
return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
|
||||
return rcc.cc.MemberAdd(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
|
||||
return rcc.cc.MemberRemove(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
|
||||
return rcc.cc.MemberUpdate(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) {
|
||||
return rcc.cc.MemberPromote(ctx, in, opts...)
|
||||
}
|
||||
|
||||
type retryMaintenanceClient struct {
|
||||
mc pb.MaintenanceClient
|
||||
}
|
||||
|
||||
// RetryMaintenanceClient implements a MaintenanceClient.
|
||||
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
|
||||
return &retryMaintenanceClient{
|
||||
mc: pb.NewMaintenanceClient(conn),
|
||||
}
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
|
||||
return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
|
||||
return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
|
||||
return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
|
||||
return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
|
||||
return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
|
||||
return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
|
||||
return rmc.mc.Defragment(ctx, in, opts...)
|
||||
}
|
||||
|
||||
type retryAuthClient struct {
|
||||
ac pb.AuthClient
|
||||
}
|
||||
|
||||
// RetryAuthClient implements an AuthClient.
|
||||
func RetryAuthClient(c *Client) pb.AuthClient {
|
||||
return &retryAuthClient{
|
||||
ac: pb.NewAuthClient(c.conn),
|
||||
}
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
|
||||
return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
|
||||
return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
|
||||
return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
|
||||
return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
|
||||
return rac.ac.AuthEnable(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
|
||||
return rac.ac.AuthDisable(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
|
||||
return rac.ac.UserAdd(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
|
||||
return rac.ac.UserDelete(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
|
||||
return rac.ac.UserChangePassword(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
|
||||
return rac.ac.UserGrantRole(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
|
||||
return rac.ac.UserRevokeRole(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
|
||||
return rac.ac.RoleAdd(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
|
||||
return rac.ac.RoleDelete(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
|
||||
return rac.ac.RoleGrantPermission(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
|
||||
return rac.ac.RoleRevokePermission(ctx, in, opts...)
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
|
||||
return rac.ac.Authenticate(ctx, in, opts...)
|
||||
}
|
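The wrappers above attach the repeatable policy only to read-style RPCs (Range, lease/member/auth reads, maintenance reads); mutations keep their write-at-most-once semantics. The standalone sketch below mirrors the codes.Unavailable check in isSafeRetryImmutableRPC using the public google.golang.org/grpc/status and codes packages; it is an illustration, not the vendored function.

// retriableForRead reports whether err looks like the transient, server-unavailable
// condition that the repeatable policy is willing to retry.
func retriableForRead(err error) bool {
	if err == nil {
		return false
	}
	s, ok := status.FromError(err)
	if !ok {
		// Not a gRPC status error (e.g. a dial error): do not retry, matching the
		// conservative behavior of isSafeRetryImmutableRPC.
		return false
	}
	return s.Code() == codes.Unavailable
}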
389 vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go generated vendored Normal file
@ -0,0 +1,389 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
|
||||
// fine grained error checking required by write-at-most-once retry semantics of etcd.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// unaryClientInterceptor returns a new retrying unary client interceptor.
|
||||
//
|
||||
// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
|
||||
// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
|
||||
func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
|
||||
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
|
||||
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
||||
grpcOpts, retryOpts := filterCallOptions(opts)
|
||||
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
|
||||
// short circuit for simplicity, and avoiding allocations.
|
||||
if callOpts.max == 0 {
|
||||
return invoker(ctx, method, req, reply, cc, grpcOpts...)
|
||||
}
|
||||
var lastErr error
|
||||
for attempt := uint(0); attempt < callOpts.max; attempt++ {
|
||||
if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Debug(
|
||||
"retrying of unary invoker",
|
||||
zap.String("target", cc.Target()),
|
||||
zap.Uint("attempt", attempt),
|
||||
)
|
||||
lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
|
||||
if lastErr == nil {
|
||||
return nil
|
||||
}
|
||||
logger.Warn(
|
||||
"retrying of unary invoker failed",
|
||||
zap.String("target", cc.Target()),
|
||||
zap.Uint("attempt", attempt),
|
||||
zap.Error(lastErr),
|
||||
)
|
||||
if isContextError(lastErr) {
|
||||
if ctx.Err() != nil {
|
||||
// it's the context deadline or cancellation.
|
||||
return lastErr
|
||||
}
|
||||
// it's the callCtx deadline or cancellation, in which case try again.
|
||||
continue
|
||||
}
|
||||
if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
|
||||
gterr := c.getToken(ctx)
|
||||
if gterr != nil {
|
||||
logger.Warn(
|
||||
"retrying of unary invoker failed to fetch new auth token",
|
||||
zap.String("target", cc.Target()),
|
||||
zap.Error(gterr),
|
||||
)
|
||||
return gterr // lastErr must be invalid auth token
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !isSafeRetry(c.lg, lastErr, callOpts) {
|
||||
return lastErr
|
||||
}
|
||||
}
|
||||
return lastErr
|
||||
}
|
||||
}
|
||||
|
||||
// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
|
||||
//
|
||||
// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
|
||||
// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
|
||||
//
|
||||
// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
|
||||
// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
|
||||
// BidiStreams), the retry interceptor will fail the call.
|
||||
func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
|
||||
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
|
||||
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
grpcOpts, retryOpts := filterCallOptions(opts)
|
||||
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
|
||||
// short circuit for simplicity, and avoiding allocations.
|
||||
if callOpts.max == 0 {
|
||||
return streamer(ctx, desc, cc, method, grpcOpts...)
|
||||
}
|
||||
if desc.ClientStreams {
|
||||
return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
|
||||
}
|
||||
newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
|
||||
logger.Warn("retry stream intercept", zap.Error(err))
|
||||
if err != nil {
|
||||
// TODO(mwitkow): Maybe dial and transport errors should be retriable?
|
||||
return nil, err
|
||||
}
|
||||
retryingStreamer := &serverStreamingRetryingStream{
|
||||
client: c,
|
||||
ClientStream: newStreamer,
|
||||
callOpts: callOpts,
|
||||
ctx: ctx,
|
||||
streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
|
||||
return streamer(ctx, desc, cc, method, grpcOpts...)
|
||||
},
|
||||
}
|
||||
return retryingStreamer, nil
|
||||
}
|
||||
}
|
||||
|
||||
// serverStreamingRetryingStream is an implementation of grpc.ClientStream that acts as a
|
||||
// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
|
||||
// a new ClientStream according to the retry policy.
|
||||
type serverStreamingRetryingStream struct {
|
||||
grpc.ClientStream
|
||||
client *Client
|
||||
bufferedSends []interface{} // messages buffered from SendMsg so they can be resent after a retry
|
||||
receivedGood bool // indicates whether any prior receives were successful
|
||||
wasClosedSend bool // indicates that CloseSend was called
|
||||
ctx context.Context
|
||||
callOpts *options
|
||||
streamerCall func(ctx context.Context) (grpc.ClientStream, error)
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
|
||||
s.mu.Lock()
|
||||
s.ClientStream = clientStream
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.ClientStream
|
||||
}
|
||||
|
||||
func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
|
||||
s.mu.Lock()
|
||||
s.bufferedSends = append(s.bufferedSends, m)
|
||||
s.mu.Unlock()
|
||||
return s.getStream().SendMsg(m)
|
||||
}
|
||||
|
||||
func (s *serverStreamingRetryingStream) CloseSend() error {
|
||||
s.mu.Lock()
|
||||
s.wasClosedSend = true
|
||||
s.mu.Unlock()
|
||||
return s.getStream().CloseSend()
|
||||
}
|
||||
|
||||
func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
|
||||
return s.getStream().Header()
|
||||
}
|
||||
|
||||
func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
|
||||
return s.getStream().Trailer()
|
||||
}
|
||||
|
||||
func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
|
||||
attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
|
||||
if !attemptRetry {
|
||||
return lastErr // success or hard failure
|
||||
}
|
||||
// We start off from attempt 1, because zeroth was already made on normal SendMsg().
|
||||
for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
|
||||
if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
|
||||
if err != nil {
|
||||
// TODO(mwitkow): Maybe dial and transport errors should be retriable?
|
||||
return err
|
||||
}
|
||||
s.setStream(newStream)
|
||||
attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
|
||||
//fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
|
||||
if !attemptRetry {
|
||||
return lastErr
|
||||
}
|
||||
}
|
||||
return lastErr
|
||||
}
|
||||
|
||||
func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
|
||||
s.mu.RLock()
|
||||
wasGood := s.receivedGood
|
||||
s.mu.RUnlock()
|
||||
err := s.getStream().RecvMsg(m)
|
||||
if err == nil || err == io.EOF {
|
||||
s.mu.Lock()
|
||||
s.receivedGood = true
|
||||
s.mu.Unlock()
|
||||
return false, err
|
||||
} else if wasGood {
|
||||
// previous RecvMsg in the stream succeeded, no retry logic should interfere
|
||||
return false, err
|
||||
}
|
||||
if isContextError(err) {
|
||||
if s.ctx.Err() != nil {
|
||||
return false, err
|
||||
}
|
||||
// it's the callCtx deadline or cancellation, in which case try again.
|
||||
return true, err
|
||||
}
|
||||
if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
|
||||
gterr := s.client.getToken(s.ctx)
|
||||
if gterr != nil {
|
||||
s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
|
||||
return false, err // return the original error for simplicity
|
||||
}
|
||||
return true, err
|
||||
|
||||
}
|
||||
return isSafeRetry(s.client.lg, err, s.callOpts), err
|
||||
}
|
||||
|
||||
func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
|
||||
s.mu.RLock()
|
||||
bufferedSends := s.bufferedSends
|
||||
s.mu.RUnlock()
|
||||
newStream, err := s.streamerCall(callCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, msg := range bufferedSends {
|
||||
if err := newStream.SendMsg(msg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := newStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newStream, nil
|
||||
}
|
||||
|
||||
func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
|
||||
waitTime := time.Duration(0)
|
||||
if attempt > 0 {
|
||||
waitTime = callOpts.backoffFunc(attempt)
|
||||
}
|
||||
if waitTime > 0 {
|
||||
timer := time.NewTimer(waitTime)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
timer.Stop()
|
||||
return contextErrToGrpcErr(ctx.Err())
|
||||
case <-timer.C:
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// isSafeRetry returns "true", if request is safe for retry with the given error.
|
||||
func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
|
||||
if isContextError(err) {
|
||||
return false
|
||||
}
|
||||
switch callOpts.retryPolicy {
|
||||
case repeatable:
|
||||
return isSafeRetryImmutableRPC(err)
|
||||
case nonRepeatable:
|
||||
return isSafeRetryMutableRPC(err)
|
||||
default:
|
||||
lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func isContextError(err error) bool {
|
||||
return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled
|
||||
}
|
||||
|
||||
func contextErrToGrpcErr(err error) error {
|
||||
switch err {
|
||||
case context.DeadlineExceeded:
|
||||
return status.Errorf(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled:
|
||||
return status.Errorf(codes.Canceled, err.Error())
|
||||
default:
|
||||
return status.Errorf(codes.Unknown, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
defaultOptions = &options{
|
||||
retryPolicy: nonRepeatable,
|
||||
max: 0, // disable
|
||||
backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
|
||||
retryAuth: true,
|
||||
}
|
||||
)
|
||||
|
||||
// backoffFunc denotes a family of functions that control the backoff duration between call retries.
|
||||
//
|
||||
// They are called with an identifier of the attempt, and should return the duration the client should
// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request,
|
||||
// the deadline of the request takes precedence and the wait will be interrupted before proceeding
|
||||
// with the next iteration.
|
||||
type backoffFunc func(attempt uint) time.Duration
|
||||
|
||||
// withRetryPolicy sets the retry policy of this call.
|
||||
func withRetryPolicy(rp retryPolicy) retryOption {
|
||||
return retryOption{applyFunc: func(o *options) {
|
||||
o.retryPolicy = rp
|
||||
}}
|
||||
}
|
||||
|
||||
// withMax sets the maximum number of retries on this call, or this interceptor.
|
||||
func withMax(maxRetries uint) retryOption {
|
||||
return retryOption{applyFunc: func(o *options) {
|
||||
o.max = maxRetries
|
||||
}}
|
||||
}
|
||||
|
||||
// withBackoff sets the backoffFunc used to control the time between retries.
|
||||
func withBackoff(bf backoffFunc) retryOption {
|
||||
return retryOption{applyFunc: func(o *options) {
|
||||
o.backoffFunc = bf
|
||||
}}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
retryPolicy retryPolicy
|
||||
max uint
|
||||
backoffFunc backoffFunc
|
||||
retryAuth bool
|
||||
}
|
||||
|
||||
// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
|
||||
type retryOption struct {
|
||||
grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
|
||||
applyFunc func(opt *options)
|
||||
}
|
||||
|
||||
func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
|
||||
if len(retryOptions) == 0 {
|
||||
return opt
|
||||
}
|
||||
optCopy := &options{}
|
||||
*optCopy = *opt
|
||||
for _, f := range retryOptions {
|
||||
f.applyFunc(optCopy)
|
||||
}
|
||||
return optCopy
|
||||
}
|
||||
|
||||
func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
|
||||
for _, opt := range callOptions {
|
||||
if co, ok := opt.(retryOption); ok {
|
||||
retryOptions = append(retryOptions, co)
|
||||
} else {
|
||||
grpcOptions = append(grpcOptions, opt)
|
||||
}
|
||||
}
|
||||
return grpcOptions, retryOptions
|
||||
}
|
||||
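filterCallOptions works because retryOption embeds grpc.EmptyCallOption, which lets a package-private option ride along in a plain []grpc.CallOption and be picked back out with a type assertion. A minimal standalone sketch of that embedding trick follows, with hypothetical names (myOption, splitOptions); it is not part of the vendored file.

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// myOption piggybacks on grpc.CallOption by embedding grpc.EmptyCallOption,
// so it can travel through a variadic ...grpc.CallOption parameter.
type myOption struct {
	grpc.EmptyCallOption
	tag string
}

// splitOptions separates custom options from plain gRPC ones, the same way
// filterCallOptions does for retryOption.
func splitOptions(opts []grpc.CallOption) (grpcOpts []grpc.CallOption, mine []myOption) {
	for _, o := range opts {
		if m, ok := o.(myOption); ok {
			mine = append(mine, m)
		} else {
			grpcOpts = append(grpcOpts, o)
		}
	}
	return grpcOpts, mine
}

func main() {
	all := []grpc.CallOption{grpc.WaitForReady(true), myOption{tag: "retry"}}
	g, m := splitOptions(all)
	fmt.Println(len(g), len(m)) // 1 1
}
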
|
||||
// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
//
// For example, waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
|
||||
func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
|
||||
return func(attempt uint) time.Duration {
|
||||
return jitterUp(waitBetween, jitterFraction)
|
||||
}
|
||||
}
|
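As a rough illustration of the jitter math used here (the same formula as the jitterUp helper defined later in utils.go), the following standalone sketch uses made-up names and is not part of the vendored file.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jittered mirrors jitterUp: scale d by a random factor in [1-jitter, 1+jitter].
func jittered(d time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1)
	return time.Duration(float64(d) * (1 + multiplier))
}

func main() {
	// With waitBetween=50ms and jitter=0.10 each wait lands in [45ms, 55ms];
	// exact values differ from run to run.
	for i := 0; i < 3; i++ {
		fmt.Println(jittered(50*time.Millisecond, 0.10))
	}
}
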
37
vendor/go.etcd.io/etcd/clientv3/sort.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

type SortTarget int
type SortOrder int

const (
	SortNone SortOrder = iota
	SortAscend
	SortDescend
)

const (
	SortByKey SortTarget = iota
	SortByVersion
	SortByCreateRevision
	SortByModRevision
	SortByValue
)

type SortOption struct {
	Target SortTarget
	Order  SortOrder
}
151
vendor/go.etcd.io/etcd/clientv3/txn.go
generated
vendored
Normal file
151
vendor/go.etcd.io/etcd/clientv3/txn.go
generated
vendored
Normal file
@ -0,0 +1,151 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Txn is the interface that wraps mini-transactions.
|
||||
//
|
||||
// Txn(context.TODO()).If(
|
||||
// Compare(Value(k1), ">", v1),
|
||||
// Compare(Version(k1), "=", 2)
|
||||
// ).Then(
|
||||
// OpPut(k2,v2), OpPut(k3,v3)
|
||||
// ).Else(
|
||||
// OpPut(k4,v4), OpPut(k5,v5)
|
||||
// ).Commit()
|
||||
//
|
||||
type Txn interface {
|
||||
// If takes a list of comparisons. If all of the comparisons succeed,
// the operations passed into Then() will be executed; otherwise the
// operations passed into Else() will be executed.
|
||||
If(cs ...Cmp) Txn
|
||||
|
||||
// Then takes a list of operations. The Ops list will be executed if the
// comparisons passed in If() succeed.
|
||||
Then(ops ...Op) Txn
|
||||
|
||||
// Else takes a list of operations. The Ops list will be executed if the
// comparisons passed in If() fail.
|
||||
Else(ops ...Op) Txn
|
||||
|
||||
// Commit tries to commit the transaction.
|
||||
Commit() (*TxnResponse, error)
|
||||
}
|
||||
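For context, this is how the Txn interface above is typically driven through the public clientv3 API. The endpoint, keys, and values are placeholders; the snippet is an illustration, not part of the vendored file.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Illustrative only; the endpoint is a placeholder.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Put k1=v2 only if k1 currently equals v1; otherwise read it back.
	resp, err := cli.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.Value("k1"), "=", "v1")).
		Then(clientv3.OpPut("k1", "v2")).
		Else(clientv3.OpGet("k1")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("txn succeeded:", resp.Succeeded)
}
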
|
||||
type txn struct {
|
||||
kv *kv
|
||||
ctx context.Context
|
||||
|
||||
mu sync.Mutex
|
||||
cif bool
|
||||
cthen bool
|
||||
celse bool
|
||||
|
||||
isWrite bool
|
||||
|
||||
cmps []*pb.Compare
|
||||
|
||||
sus []*pb.RequestOp
|
||||
fas []*pb.RequestOp
|
||||
|
||||
callOpts []grpc.CallOption
|
||||
}
|
||||
|
||||
func (txn *txn) If(cs ...Cmp) Txn {
|
||||
txn.mu.Lock()
|
||||
defer txn.mu.Unlock()
|
||||
|
||||
if txn.cif {
|
||||
panic("cannot call If twice!")
|
||||
}
|
||||
|
||||
if txn.cthen {
|
||||
panic("cannot call If after Then!")
|
||||
}
|
||||
|
||||
if txn.celse {
|
||||
panic("cannot call If after Else!")
|
||||
}
|
||||
|
||||
txn.cif = true
|
||||
|
||||
for i := range cs {
|
||||
txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
|
||||
}
|
||||
|
||||
return txn
|
||||
}
|
||||
|
||||
func (txn *txn) Then(ops ...Op) Txn {
|
||||
txn.mu.Lock()
|
||||
defer txn.mu.Unlock()
|
||||
|
||||
if txn.cthen {
|
||||
panic("cannot call Then twice!")
|
||||
}
|
||||
if txn.celse {
|
||||
panic("cannot call Then after Else!")
|
||||
}
|
||||
|
||||
txn.cthen = true
|
||||
|
||||
for _, op := range ops {
|
||||
txn.isWrite = txn.isWrite || op.isWrite()
|
||||
txn.sus = append(txn.sus, op.toRequestOp())
|
||||
}
|
||||
|
||||
return txn
|
||||
}
|
||||
|
||||
func (txn *txn) Else(ops ...Op) Txn {
|
||||
txn.mu.Lock()
|
||||
defer txn.mu.Unlock()
|
||||
|
||||
if txn.celse {
|
||||
panic("cannot call Else twice!")
|
||||
}
|
||||
|
||||
txn.celse = true
|
||||
|
||||
for _, op := range ops {
|
||||
txn.isWrite = txn.isWrite || op.isWrite()
|
||||
txn.fas = append(txn.fas, op.toRequestOp())
|
||||
}
|
||||
|
||||
return txn
|
||||
}
|
||||
|
||||
func (txn *txn) Commit() (*TxnResponse, error) {
|
||||
txn.mu.Lock()
|
||||
defer txn.mu.Unlock()
|
||||
|
||||
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
|
||||
|
||||
var resp *pb.TxnResponse
|
||||
var err error
|
||||
resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
|
||||
if err != nil {
|
||||
return nil, toErr(txn.ctx, err)
|
||||
}
|
||||
return (*TxnResponse)(resp), nil
|
||||
}
|
49
vendor/go.etcd.io/etcd/clientv3/utils.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"math/rand"
	"reflect"
	"runtime"
	"strings"
	"time"
)

// jitterUp adds random jitter to the duration.
//
// It adds or subtracts time from the duration within a given jitter fraction.
// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
//
// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
func jitterUp(duration time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1)
	return time.Duration(float64(duration) * (1 + multiplier))
}

// isOpFuncCalled checks whether the provided function is being called in the op options.
func isOpFuncCalled(op string, opts []OpOption) bool {
	for _, opt := range opts {
		v := reflect.ValueOf(opt)
		if v.Kind() == reflect.Func {
			if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
				if strings.Contains(opFunc.Name(), op) {
					return true
				}
			}
		}
	}
	return false
}
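The runtime.FuncForPC trick in isOpFuncCalled (matching a functional option by its function name) can be reproduced in isolation. The sketch below uses made-up types and is not part of the vendored file.

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
)

type opOption func(*int)

func withLease(o *int) { *o = 1 }

// hasOption reports whether any option's function name contains the given substring,
// the same check isOpFuncCalled performs on OpOption values.
func hasOption(name string, opts []opOption) bool {
	for _, opt := range opts {
		v := reflect.ValueOf(opt)
		if v.Kind() == reflect.Func {
			if f := runtime.FuncForPC(v.Pointer()); f != nil && strings.Contains(f.Name(), name) {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(hasOption("withLease", []opOption{withLease})) // true
	fmt.Println(hasOption("withTTL", []opOption{withLease}))   // false
}
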
987
vendor/go.etcd.io/etcd/clientv3/watch.go
generated
vendored
Normal file
987
vendor/go.etcd.io/etcd/clientv3/watch.go
generated
vendored
Normal file
@ -0,0 +1,987 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
EventTypeDelete = mvccpb.DELETE
|
||||
EventTypePut = mvccpb.PUT
|
||||
|
||||
closeSendErrTimeout = 250 * time.Millisecond
|
||||
)
|
||||
|
||||
type Event mvccpb.Event
|
||||
|
||||
type WatchChan <-chan WatchResponse
|
||||
|
||||
type Watcher interface {
|
||||
// Watch watches on a key or prefix. The watched events will be returned
|
||||
// through the returned channel. If revisions waiting to be sent over the
|
||||
// watch are compacted, then the watch will be canceled by the server, the
|
||||
// client will post a compacted error watch response, and the channel will close.
|
||||
// If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
|
||||
// and "WatchResponse" from this closed channel has zero events and nil "Err()".
|
||||
// The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
|
||||
// to release the associated resources.
|
||||
//
|
||||
// If the context is "context.Background/TODO", returned "WatchChan" will
|
||||
// not be closed and block until event is triggered, except when server
|
||||
// returns a non-recoverable error (e.g. ErrCompacted).
|
||||
// For example, when context passed with "WithRequireLeader" and the
|
||||
// connected server has no leader (e.g. due to network partition),
|
||||
// error "etcdserver: no leader" (ErrNoLeader) will be returned,
|
||||
// and then "WatchChan" is closed with non-nil "Err()".
|
||||
// In order to prevent a watch stream being stuck in a partitioned node,
|
||||
// make sure to wrap context with "WithRequireLeader".
|
||||
//
|
||||
// Otherwise, as long as the context has not been canceled or timed out,
|
||||
// watch will retry on other recoverable errors forever until reconnected.
|
||||
//
|
||||
// TODO: explicitly set context error in the last "WatchResponse" message and close channel?
|
||||
// Currently, client contexts are overwritten with "valCtx" that never closes.
|
||||
// TODO(v3.4): configure watch retry policy, limit maximum retry number
|
||||
// (see https://github.com/etcd-io/etcd/issues/8980)
|
||||
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
|
||||
|
||||
// RequestProgress requests a progress notify response be sent in all watch channels.
|
||||
RequestProgress(ctx context.Context) error
|
||||
|
||||
// Close closes the watcher and cancels all watch requests.
|
||||
Close() error
|
||||
}
|
||||
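For context, a typical consumer of the Watcher interface above looks roughly like this. The endpoint and key prefix are placeholders, and the snippet is illustrative rather than part of the vendored file.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Illustrative only; the endpoint is a placeholder.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// WithRequireLeader keeps the watch from hanging on a partitioned member;
	// canceling ctx is what releases the watcher's resources.
	ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
	defer cancel()

	for wresp := range cli.Watch(ctx, "foo/", clientv3.WithPrefix()) {
		if err := wresp.Err(); err != nil {
			log.Println("watch error:", err) // e.g. ErrCompacted, ErrNoLeader
			return
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
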
|
||||
type WatchResponse struct {
|
||||
Header pb.ResponseHeader
|
||||
Events []*Event
|
||||
|
||||
// CompactRevision is the minimum revision the watcher may receive.
|
||||
CompactRevision int64
|
||||
|
||||
// Canceled is used to indicate watch failure.
|
||||
// If the watch failed and the stream was about to close, before the channel is closed,
|
||||
// the channel sends a final response that has Canceled set to true with a non-nil Err().
|
||||
Canceled bool
|
||||
|
||||
// Created is used to indicate the creation of the watcher.
|
||||
Created bool
|
||||
|
||||
closeErr error
|
||||
|
||||
// cancelReason is the reason for canceling the watch
|
||||
cancelReason string
|
||||
}
|
||||
|
||||
// IsCreate returns true if the event indicates that the key is newly created.
|
||||
func (e *Event) IsCreate() bool {
|
||||
return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
|
||||
}
|
||||
|
||||
// IsModify returns true if the event indicates that a new value is put on an existing key.
|
||||
func (e *Event) IsModify() bool {
|
||||
return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
|
||||
}
|
||||
|
||||
// Err is the error value if this WatchResponse holds an error.
|
||||
func (wr *WatchResponse) Err() error {
|
||||
switch {
|
||||
case wr.closeErr != nil:
|
||||
return v3rpc.Error(wr.closeErr)
|
||||
case wr.CompactRevision != 0:
|
||||
return v3rpc.ErrCompacted
|
||||
case wr.Canceled:
|
||||
if len(wr.cancelReason) != 0 {
|
||||
return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
|
||||
}
|
||||
return v3rpc.ErrFutureRev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsProgressNotify returns true if the WatchResponse is a progress notification.
|
||||
func (wr *WatchResponse) IsProgressNotify() bool {
|
||||
return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
|
||||
}
|
||||
|
||||
// watcher implements the Watcher interface
|
||||
type watcher struct {
|
||||
remote pb.WatchClient
|
||||
callOpts []grpc.CallOption
|
||||
|
||||
// mu protects the grpc streams map
|
||||
mu sync.RWMutex
|
||||
|
||||
// streams holds all the active grpc streams keyed by ctx value.
|
||||
streams map[string]*watchGrpcStream
|
||||
}
|
||||
|
||||
// watchGrpcStream tracks all watch resources attached to a single grpc stream.
|
||||
type watchGrpcStream struct {
|
||||
owner *watcher
|
||||
remote pb.WatchClient
|
||||
callOpts []grpc.CallOption
|
||||
|
||||
// ctx controls internal remote.Watch requests
|
||||
ctx context.Context
|
||||
// ctxKey is the key used when looking up this stream's context
|
||||
ctxKey string
|
||||
cancel context.CancelFunc
|
||||
|
||||
// substreams holds all active watchers on this grpc stream
|
||||
substreams map[int64]*watcherStream
|
||||
// resuming holds all resuming watchers on this grpc stream
|
||||
resuming []*watcherStream
|
||||
|
||||
// reqc sends a watch request from Watch() to the main goroutine
|
||||
reqc chan watchStreamRequest
|
||||
// respc receives data from the watch client
|
||||
respc chan *pb.WatchResponse
|
||||
// donec closes to broadcast shutdown
|
||||
donec chan struct{}
|
||||
// errc transmits errors from grpc Recv to the watch stream reconnect logic
|
||||
errc chan error
|
||||
// closingc gets the watcherStream of closing watchers
|
||||
closingc chan *watcherStream
|
||||
// wg is Done when all substream goroutines have exited
|
||||
wg sync.WaitGroup
|
||||
|
||||
// resumec closes to signal that all substreams should begin resuming
|
||||
resumec chan struct{}
|
||||
// closeErr is the error that closed the watch stream
|
||||
closeErr error
|
||||
}
|
||||
|
||||
// watchStreamRequest is a union of the supported watch request operation types
|
||||
type watchStreamRequest interface {
|
||||
toPB() *pb.WatchRequest
|
||||
}
|
||||
|
||||
// watchRequest is issued by the subscriber to start a new watcher
|
||||
type watchRequest struct {
|
||||
ctx context.Context
|
||||
key string
|
||||
end string
|
||||
rev int64
|
||||
|
||||
// send created notification event if this field is true
|
||||
createdNotify bool
|
||||
// progressNotify is for progress updates
|
||||
progressNotify bool
|
||||
// fragmentation should be disabled by default
|
||||
// if true, split watch events when total exceeds
|
||||
// "--max-request-bytes" flag value + 512-byte
|
||||
fragment bool
|
||||
|
||||
// filters is the list of events to filter out
|
||||
filters []pb.WatchCreateRequest_FilterType
|
||||
// get the previous key-value pair before the event happens
|
||||
prevKV bool
|
||||
// retc receives a chan WatchResponse once the watcher is established
|
||||
retc chan chan WatchResponse
|
||||
}
|
||||
|
||||
// progressRequest is issued by the subscriber to request watch progress
|
||||
type progressRequest struct {
|
||||
}
|
||||
|
||||
// watcherStream represents a registered watcher
|
||||
type watcherStream struct {
|
||||
// initReq is the request that initiated this watcher stream
|
||||
initReq watchRequest
|
||||
|
||||
// outc publishes watch responses to subscriber
|
||||
outc chan WatchResponse
|
||||
// recvc buffers watch responses before publishing
|
||||
recvc chan *WatchResponse
|
||||
// donec closes when the watcherStream goroutine stops.
|
||||
donec chan struct{}
|
||||
// closing is set to true when stream should be scheduled to shutdown.
|
||||
closing bool
|
||||
// id is the registered watch id on the grpc stream
|
||||
id int64
|
||||
|
||||
// buf holds all events received from etcd but not yet consumed by the client
|
||||
buf []*WatchResponse
|
||||
}
|
||||
|
||||
func NewWatcher(c *Client) Watcher {
|
||||
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
|
||||
}
|
||||
|
||||
func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
|
||||
w := &watcher{
|
||||
remote: wc,
|
||||
streams: make(map[string]*watchGrpcStream),
|
||||
}
|
||||
if c != nil {
|
||||
w.callOpts = c.callOpts
|
||||
}
|
||||
return w
|
||||
}
|
||||
|
||||
// never closes
|
||||
var valCtxCh = make(chan struct{})
|
||||
var zeroTime = time.Unix(0, 0)
|
||||
|
||||
// ctx with only the values; never Done
|
||||
type valCtx struct{ context.Context }
|
||||
|
||||
func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
|
||||
func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
|
||||
func (vc *valCtx) Err() error { return nil }
|
||||
|
||||
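valCtx is the standard "detached context" trick: keep the parent's values (for example gRPC metadata) while ignoring its deadline and cancellation. A standalone sketch of the same idea follows; unlike valCtx it returns a nil Done channel, which the context contract also permits, and it is not part of the vendored file.

package main

import (
	"context"
	"fmt"
	"time"
)

// detachCancel keeps the parent's values but drops its deadline and cancellation.
type detachCancel struct{ context.Context }

func (d detachCancel) Deadline() (time.Time, bool) { return time.Time{}, false }
func (d detachCancel) Done() <-chan struct{}       { return nil }
func (d detachCancel) Err() error                  { return nil }

type ctxKey struct{}

func main() {
	parent, cancel := context.WithCancel(context.WithValue(context.Background(), ctxKey{}, "kept"))
	cancel() // parent is canceled...

	detached := detachCancel{parent}
	fmt.Println(detached.Err())           // <nil> ...but the detached view is not
	fmt.Println(detached.Value(ctxKey{})) // kept: values still flow through
}
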
func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
|
||||
ctx, cancel := context.WithCancel(&valCtx{inctx})
|
||||
wgs := &watchGrpcStream{
|
||||
owner: w,
|
||||
remote: w.remote,
|
||||
callOpts: w.callOpts,
|
||||
ctx: ctx,
|
||||
ctxKey: streamKeyFromCtx(inctx),
|
||||
cancel: cancel,
|
||||
substreams: make(map[int64]*watcherStream),
|
||||
respc: make(chan *pb.WatchResponse),
|
||||
reqc: make(chan watchStreamRequest),
|
||||
donec: make(chan struct{}),
|
||||
errc: make(chan error, 1),
|
||||
closingc: make(chan *watcherStream),
|
||||
resumec: make(chan struct{}),
|
||||
}
|
||||
go wgs.run()
|
||||
return wgs
|
||||
}
|
||||
|
||||
// Watch posts a watch request to run() and waits for a new watcher channel
|
||||
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
|
||||
ow := opWatch(key, opts...)
|
||||
|
||||
var filters []pb.WatchCreateRequest_FilterType
|
||||
if ow.filterPut {
|
||||
filters = append(filters, pb.WatchCreateRequest_NOPUT)
|
||||
}
|
||||
if ow.filterDelete {
|
||||
filters = append(filters, pb.WatchCreateRequest_NODELETE)
|
||||
}
|
||||
|
||||
wr := &watchRequest{
|
||||
ctx: ctx,
|
||||
createdNotify: ow.createdNotify,
|
||||
key: string(ow.key),
|
||||
end: string(ow.end),
|
||||
rev: ow.rev,
|
||||
progressNotify: ow.progressNotify,
|
||||
fragment: ow.fragment,
|
||||
filters: filters,
|
||||
prevKV: ow.prevKV,
|
||||
retc: make(chan chan WatchResponse, 1),
|
||||
}
|
||||
|
||||
ok := false
|
||||
ctxKey := streamKeyFromCtx(ctx)
|
||||
|
||||
// find or allocate appropriate grpc watch stream
|
||||
w.mu.Lock()
|
||||
if w.streams == nil {
|
||||
// closed
|
||||
w.mu.Unlock()
|
||||
ch := make(chan WatchResponse)
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
wgs := w.streams[ctxKey]
|
||||
if wgs == nil {
|
||||
wgs = w.newWatcherGrpcStream(ctx)
|
||||
w.streams[ctxKey] = wgs
|
||||
}
|
||||
donec := wgs.donec
|
||||
reqc := wgs.reqc
|
||||
w.mu.Unlock()
|
||||
|
||||
// couldn't create channel; return closed channel
|
||||
closeCh := make(chan WatchResponse, 1)
|
||||
|
||||
// submit request
|
||||
select {
|
||||
case reqc <- wr:
|
||||
ok = true
|
||||
case <-wr.ctx.Done():
|
||||
case <-donec:
|
||||
if wgs.closeErr != nil {
|
||||
closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
|
||||
break
|
||||
}
|
||||
// retry; may have dropped stream from no ctxs
|
||||
return w.Watch(ctx, key, opts...)
|
||||
}
|
||||
|
||||
// receive channel
|
||||
if ok {
|
||||
select {
|
||||
case ret := <-wr.retc:
|
||||
return ret
|
||||
case <-ctx.Done():
|
||||
case <-donec:
|
||||
if wgs.closeErr != nil {
|
||||
closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
|
||||
break
|
||||
}
|
||||
// retry; may have dropped stream from no ctxs
|
||||
return w.Watch(ctx, key, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
close(closeCh)
|
||||
return closeCh
|
||||
}
|
||||
|
||||
func (w *watcher) Close() (err error) {
|
||||
w.mu.Lock()
|
||||
streams := w.streams
|
||||
w.streams = nil
|
||||
w.mu.Unlock()
|
||||
for _, wgs := range streams {
|
||||
if werr := wgs.close(); werr != nil {
|
||||
err = werr
|
||||
}
|
||||
}
|
||||
// Consider context.Canceled as a successful close
|
||||
if err == context.Canceled {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// RequestProgress requests a progress notify response be sent in all watch channels.
|
||||
func (w *watcher) RequestProgress(ctx context.Context) (err error) {
|
||||
ctxKey := streamKeyFromCtx(ctx)
|
||||
|
||||
w.mu.Lock()
|
||||
if w.streams == nil {
|
||||
w.mu.Unlock()
|
||||
return fmt.Errorf("no stream found for context")
|
||||
}
|
||||
wgs := w.streams[ctxKey]
|
||||
if wgs == nil {
|
||||
wgs = w.newWatcherGrpcStream(ctx)
|
||||
w.streams[ctxKey] = wgs
|
||||
}
|
||||
donec := wgs.donec
|
||||
reqc := wgs.reqc
|
||||
w.mu.Unlock()
|
||||
|
||||
pr := &progressRequest{}
|
||||
|
||||
select {
|
||||
case reqc <- pr:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
if err == nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
return err
|
||||
case <-donec:
|
||||
if wgs.closeErr != nil {
|
||||
return wgs.closeErr
|
||||
}
|
||||
// retry; may have dropped stream from no ctxs
|
||||
return w.RequestProgress(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) close() (err error) {
|
||||
w.cancel()
|
||||
<-w.donec
|
||||
select {
|
||||
case err = <-w.errc:
|
||||
default:
|
||||
}
|
||||
return toErr(w.ctx, err)
|
||||
}
|
||||
|
||||
func (w *watcher) closeStream(wgs *watchGrpcStream) {
|
||||
w.mu.Lock()
|
||||
close(wgs.donec)
|
||||
wgs.cancel()
|
||||
if w.streams != nil {
|
||||
delete(w.streams, wgs.ctxKey)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
|
||||
// check watch ID for backward compatibility (<= v3.3)
|
||||
if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
|
||||
w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
|
||||
// failed; no channel
|
||||
close(ws.recvc)
|
||||
return
|
||||
}
|
||||
ws.id = resp.WatchId
|
||||
w.substreams[ws.id] = ws
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
|
||||
select {
|
||||
case ws.outc <- *resp:
|
||||
case <-ws.initReq.ctx.Done():
|
||||
case <-time.After(closeSendErrTimeout):
|
||||
}
|
||||
close(ws.outc)
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
|
||||
// send channel response in case stream was never established
|
||||
select {
|
||||
case ws.initReq.retc <- ws.outc:
|
||||
default:
|
||||
}
|
||||
// close subscriber's channel
|
||||
if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
|
||||
go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
|
||||
} else if ws.outc != nil {
|
||||
close(ws.outc)
|
||||
}
|
||||
if ws.id != -1 {
|
||||
delete(w.substreams, ws.id)
|
||||
return
|
||||
}
|
||||
for i := range w.resuming {
|
||||
if w.resuming[i] == ws {
|
||||
w.resuming[i] = nil
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// run is the root of the goroutines for managing a watcher client
|
||||
func (w *watchGrpcStream) run() {
|
||||
var wc pb.Watch_WatchClient
|
||||
var closeErr error
|
||||
|
||||
// substreams marked to close but goroutine still running; needed for
|
||||
// avoiding double-closing recvc on grpc stream teardown
|
||||
closing := make(map[*watcherStream]struct{})
|
||||
|
||||
defer func() {
|
||||
w.closeErr = closeErr
|
||||
// shutdown substreams and resuming substreams
|
||||
for _, ws := range w.substreams {
|
||||
if _, ok := closing[ws]; !ok {
|
||||
close(ws.recvc)
|
||||
closing[ws] = struct{}{}
|
||||
}
|
||||
}
|
||||
for _, ws := range w.resuming {
|
||||
if _, ok := closing[ws]; ws != nil && !ok {
|
||||
close(ws.recvc)
|
||||
closing[ws] = struct{}{}
|
||||
}
|
||||
}
|
||||
w.joinSubstreams()
|
||||
for range closing {
|
||||
w.closeSubstream(<-w.closingc)
|
||||
}
|
||||
w.wg.Wait()
|
||||
w.owner.closeStream(w)
|
||||
}()
|
||||
|
||||
// start a stream with the etcd grpc server
|
||||
if wc, closeErr = w.newWatchClient(); closeErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
cancelSet := make(map[int64]struct{})
|
||||
|
||||
var cur *pb.WatchResponse
|
||||
for {
|
||||
select {
|
||||
// Watch() requested
|
||||
case req := <-w.reqc:
|
||||
switch wreq := req.(type) {
|
||||
case *watchRequest:
|
||||
outc := make(chan WatchResponse, 1)
|
||||
// TODO: pass custom watch ID?
|
||||
ws := &watcherStream{
|
||||
initReq: *wreq,
|
||||
id: -1,
|
||||
outc: outc,
|
||||
// unbuffered so resumes won't cause repeat events
|
||||
recvc: make(chan *WatchResponse),
|
||||
}
|
||||
|
||||
ws.donec = make(chan struct{})
|
||||
w.wg.Add(1)
|
||||
go w.serveSubstream(ws, w.resumec)
|
||||
|
||||
// queue up for watcher creation/resume
|
||||
w.resuming = append(w.resuming, ws)
|
||||
if len(w.resuming) == 1 {
|
||||
// head of resume queue, can register a new watcher
|
||||
wc.Send(ws.initReq.toPB())
|
||||
}
|
||||
case *progressRequest:
|
||||
wc.Send(wreq.toPB())
|
||||
}
|
||||
|
||||
// new events from the watch client
|
||||
case pbresp := <-w.respc:
|
||||
if cur == nil || pbresp.Created || pbresp.Canceled {
|
||||
cur = pbresp
|
||||
} else if cur != nil && cur.WatchId == pbresp.WatchId {
|
||||
// merge new events
|
||||
cur.Events = append(cur.Events, pbresp.Events...)
|
||||
// update "Fragment" field; last response with "Fragment" == false
|
||||
cur.Fragment = pbresp.Fragment
|
||||
}
|
||||
|
||||
switch {
|
||||
case pbresp.Created:
|
||||
// response to head of queue creation
|
||||
if ws := w.resuming[0]; ws != nil {
|
||||
w.addSubstream(pbresp, ws)
|
||||
w.dispatchEvent(pbresp)
|
||||
w.resuming[0] = nil
|
||||
}
|
||||
|
||||
if ws := w.nextResume(); ws != nil {
|
||||
wc.Send(ws.initReq.toPB())
|
||||
}
|
||||
|
||||
// reset for next iteration
|
||||
cur = nil
|
||||
|
||||
case pbresp.Canceled && pbresp.CompactRevision == 0:
|
||||
delete(cancelSet, pbresp.WatchId)
|
||||
if ws, ok := w.substreams[pbresp.WatchId]; ok {
|
||||
// signal to stream goroutine to update closingc
|
||||
close(ws.recvc)
|
||||
closing[ws] = struct{}{}
|
||||
}
|
||||
|
||||
// reset for next iteration
|
||||
cur = nil
|
||||
|
||||
case cur.Fragment:
|
||||
// watch response events are still fragmented
|
||||
// continue to fetch next fragmented event arrival
|
||||
continue
|
||||
|
||||
default:
|
||||
// dispatch to appropriate watch stream
|
||||
ok := w.dispatchEvent(cur)
|
||||
|
||||
// reset for next iteration
|
||||
cur = nil
|
||||
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
|
||||
// watch response on unexpected watch id; cancel id
|
||||
if _, ok := cancelSet[pbresp.WatchId]; ok {
|
||||
break
|
||||
}
|
||||
|
||||
cancelSet[pbresp.WatchId] = struct{}{}
|
||||
cr := &pb.WatchRequest_CancelRequest{
|
||||
CancelRequest: &pb.WatchCancelRequest{
|
||||
WatchId: pbresp.WatchId,
|
||||
},
|
||||
}
|
||||
req := &pb.WatchRequest{RequestUnion: cr}
|
||||
wc.Send(req)
|
||||
}
|
||||
|
||||
// watch client failed on Recv; spawn another if possible
|
||||
case err := <-w.errc:
|
||||
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
|
||||
closeErr = err
|
||||
return
|
||||
}
|
||||
if wc, closeErr = w.newWatchClient(); closeErr != nil {
|
||||
return
|
||||
}
|
||||
if ws := w.nextResume(); ws != nil {
|
||||
wc.Send(ws.initReq.toPB())
|
||||
}
|
||||
cancelSet = make(map[int64]struct{})
|
||||
|
||||
case <-w.ctx.Done():
|
||||
return
|
||||
|
||||
case ws := <-w.closingc:
|
||||
w.closeSubstream(ws)
|
||||
delete(closing, ws)
|
||||
// no more watchers on this stream, shutdown
|
||||
if len(w.substreams)+len(w.resuming) == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// nextResume chooses the next resuming watcher to register with the grpc stream. Abandoned
// streams are marked as nil in the queue since the head must wait for its inflight registration.
|
||||
func (w *watchGrpcStream) nextResume() *watcherStream {
|
||||
for len(w.resuming) != 0 {
|
||||
if w.resuming[0] != nil {
|
||||
return w.resuming[0]
|
||||
}
|
||||
w.resuming = w.resuming[1:len(w.resuming)]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
|
||||
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
|
||||
events := make([]*Event, len(pbresp.Events))
|
||||
for i, ev := range pbresp.Events {
|
||||
events[i] = (*Event)(ev)
|
||||
}
|
||||
// TODO: return watch ID?
|
||||
wr := &WatchResponse{
|
||||
Header: *pbresp.Header,
|
||||
Events: events,
|
||||
CompactRevision: pbresp.CompactRevision,
|
||||
Created: pbresp.Created,
|
||||
Canceled: pbresp.Canceled,
|
||||
cancelReason: pbresp.CancelReason,
|
||||
}
|
||||
|
||||
// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
|
||||
// indicate they should be broadcast.
|
||||
if wr.IsProgressNotify() && pbresp.WatchId == -1 {
|
||||
return w.broadcastResponse(wr)
|
||||
}
|
||||
|
||||
return w.unicastResponse(wr, pbresp.WatchId)
|
||||
|
||||
}
|
||||
|
||||
// broadcastResponse sends a watch response to all watch substreams.
|
||||
func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
|
||||
for _, ws := range w.substreams {
|
||||
select {
|
||||
case ws.recvc <- wr:
|
||||
case <-ws.donec:
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// unicastResponse sends a watch response to a specific watch substream.
|
||||
func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
|
||||
ws, ok := w.substreams[watchId]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
select {
|
||||
case ws.recvc <- wr:
|
||||
case <-ws.donec:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// serveWatchClient forwards messages from the grpc stream to run()
|
||||
func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
|
||||
for {
|
||||
resp, err := wc.Recv()
|
||||
if err != nil {
|
||||
select {
|
||||
case w.errc <- err:
|
||||
case <-w.donec:
|
||||
}
|
||||
return
|
||||
}
|
||||
select {
|
||||
case w.respc <- resp:
|
||||
case <-w.donec:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// serveSubstream forwards watch responses from run() to the subscriber
|
||||
func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
|
||||
if ws.closing {
|
||||
panic("created substream goroutine but substream is closing")
|
||||
}
|
||||
|
||||
// nextRev is the minimum expected next revision
|
||||
nextRev := ws.initReq.rev
|
||||
resuming := false
|
||||
defer func() {
|
||||
if !resuming {
|
||||
ws.closing = true
|
||||
}
|
||||
close(ws.donec)
|
||||
if !resuming {
|
||||
w.closingc <- ws
|
||||
}
|
||||
w.wg.Done()
|
||||
}()
|
||||
|
||||
emptyWr := &WatchResponse{}
|
||||
for {
|
||||
curWr := emptyWr
|
||||
outc := ws.outc
|
||||
|
||||
if len(ws.buf) > 0 {
|
||||
curWr = ws.buf[0]
|
||||
} else {
|
||||
outc = nil
|
||||
}
|
||||
select {
|
||||
case outc <- *curWr:
|
||||
if ws.buf[0].Err() != nil {
|
||||
return
|
||||
}
|
||||
ws.buf[0] = nil
|
||||
ws.buf = ws.buf[1:]
|
||||
case wr, ok := <-ws.recvc:
|
||||
if !ok {
|
||||
// shutdown from closeSubstream
|
||||
return
|
||||
}
|
||||
|
||||
if wr.Created {
|
||||
if ws.initReq.retc != nil {
|
||||
ws.initReq.retc <- ws.outc
|
||||
// to prevent next write from taking the slot in buffered channel
|
||||
// and posting duplicate create events
|
||||
ws.initReq.retc = nil
|
||||
|
||||
// send first creation event only if requested
|
||||
if ws.initReq.createdNotify {
|
||||
ws.outc <- *wr
|
||||
}
|
||||
// once the watch channel is returned, a current revision
|
||||
// watch must resume at the store revision. This is necessary
|
||||
// for the following case to work as expected:
|
||||
// wch := m1.Watch("a")
|
||||
// m2.Put("a", "b")
|
||||
// <-wch
|
||||
// If the revision is only bound on the first observed event,
|
||||
// if wch is disconnected before the Put is issued, then reconnects
|
||||
// after it is committed, it'll miss the Put.
|
||||
if ws.initReq.rev == 0 {
|
||||
nextRev = wr.Header.Revision
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// current progress of watch; <= store revision
|
||||
nextRev = wr.Header.Revision
|
||||
}
|
||||
|
||||
if len(wr.Events) > 0 {
|
||||
nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
|
||||
}
|
||||
ws.initReq.rev = nextRev
|
||||
|
||||
// created event is already sent above,
|
||||
// watcher should not post duplicate events
|
||||
if wr.Created {
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO pause channel if buffer gets too large
|
||||
ws.buf = append(ws.buf, wr)
|
||||
case <-w.ctx.Done():
|
||||
return
|
||||
case <-ws.initReq.ctx.Done():
|
||||
return
|
||||
case <-resumec:
|
||||
resuming = true
|
||||
return
|
||||
}
|
||||
}
|
||||
// lazily send cancel message if events on missing id
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
|
||||
// mark all substreams as resuming
|
||||
close(w.resumec)
|
||||
w.resumec = make(chan struct{})
|
||||
w.joinSubstreams()
|
||||
for _, ws := range w.substreams {
|
||||
ws.id = -1
|
||||
w.resuming = append(w.resuming, ws)
|
||||
}
|
||||
// strip out nils, if any
|
||||
var resuming []*watcherStream
|
||||
for _, ws := range w.resuming {
|
||||
if ws != nil {
|
||||
resuming = append(resuming, ws)
|
||||
}
|
||||
}
|
||||
w.resuming = resuming
|
||||
w.substreams = make(map[int64]*watcherStream)
|
||||
|
||||
// connect to grpc stream while accepting watcher cancelation
|
||||
stopc := make(chan struct{})
|
||||
donec := w.waitCancelSubstreams(stopc)
|
||||
wc, err := w.openWatchClient()
|
||||
close(stopc)
|
||||
<-donec
|
||||
|
||||
// serve all non-closing streams, even if there's a client error
|
||||
// so that the teardown path can shutdown the streams as expected.
|
||||
for _, ws := range w.resuming {
|
||||
if ws.closing {
|
||||
continue
|
||||
}
|
||||
ws.donec = make(chan struct{})
|
||||
w.wg.Add(1)
|
||||
go w.serveSubstream(ws, w.resumec)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, v3rpc.Error(err)
|
||||
}
|
||||
|
||||
// receive data from new grpc stream
|
||||
go w.serveWatchClient(wc)
|
||||
return wc, nil
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(w.resuming))
|
||||
donec := make(chan struct{})
|
||||
for i := range w.resuming {
|
||||
go func(ws *watcherStream) {
|
||||
defer wg.Done()
|
||||
if ws.closing {
|
||||
if ws.initReq.ctx.Err() != nil && ws.outc != nil {
|
||||
close(ws.outc)
|
||||
ws.outc = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ws.initReq.ctx.Done():
|
||||
// closed ws will be removed from resuming
|
||||
ws.closing = true
|
||||
close(ws.outc)
|
||||
ws.outc = nil
|
||||
w.wg.Add(1)
|
||||
go func() {
|
||||
defer w.wg.Done()
|
||||
w.closingc <- ws
|
||||
}()
|
||||
case <-stopc:
|
||||
}
|
||||
}(w.resuming[i])
|
||||
}
|
||||
go func() {
|
||||
defer close(donec)
|
||||
wg.Wait()
|
||||
}()
|
||||
return donec
|
||||
}
|
||||
|
||||
// joinSubstreams waits for all substream goroutines to complete.
|
||||
func (w *watchGrpcStream) joinSubstreams() {
|
||||
for _, ws := range w.substreams {
|
||||
<-ws.donec
|
||||
}
|
||||
for _, ws := range w.resuming {
|
||||
if ws != nil {
|
||||
<-ws.donec
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var maxBackoff = 100 * time.Millisecond
|
||||
|
||||
// openWatchClient retries opening a watch client until success or halt.
|
||||
// manually retry in case "ws==nil && err==nil"
|
||||
// TODO: remove FailFast=false
|
||||
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
|
||||
backoff := time.Millisecond
|
||||
for {
|
||||
select {
|
||||
case <-w.ctx.Done():
|
||||
if err == nil {
|
||||
return nil, w.ctx.Err()
|
||||
}
|
||||
return nil, err
|
||||
default:
|
||||
}
|
||||
if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
|
||||
break
|
||||
}
|
||||
if isHaltErr(w.ctx, err) {
|
||||
return nil, v3rpc.Error(err)
|
||||
}
|
||||
if isUnavailableErr(w.ctx, err) {
|
||||
// retry, but backoff
|
||||
if backoff < maxBackoff {
|
||||
// 25% backoff factor
|
||||
backoff = backoff + backoff/4
|
||||
if backoff > maxBackoff {
|
||||
backoff = maxBackoff
|
||||
}
|
||||
}
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
}
|
||||
return ws, nil
|
||||
}
|
||||
|
||||
// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
|
||||
func (wr *watchRequest) toPB() *pb.WatchRequest {
|
||||
req := &pb.WatchCreateRequest{
|
||||
StartRevision: wr.rev,
|
||||
Key: []byte(wr.key),
|
||||
RangeEnd: []byte(wr.end),
|
||||
ProgressNotify: wr.progressNotify,
|
||||
Filters: wr.filters,
|
||||
PrevKv: wr.prevKV,
|
||||
Fragment: wr.fragment,
|
||||
}
|
||||
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
|
||||
return &pb.WatchRequest{RequestUnion: cr}
|
||||
}
|
||||
|
||||
// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
|
||||
func (pr *progressRequest) toPB() *pb.WatchRequest {
|
||||
req := &pb.WatchProgressRequest{}
|
||||
cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
|
||||
return &pb.WatchRequest{RequestUnion: cr}
|
||||
}
|
||||
|
||||
func streamKeyFromCtx(ctx context.Context) string {
|
||||
if md, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||
return fmt.Sprintf("%+v", md)
|
||||
}
|
||||
return ""
|
||||
}
|
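streamKeyFromCtx means that watches created with contexts carrying identical outgoing metadata share one gRPC stream, while different metadata yields a new stream. A small illustration of that keying, not part of the vendored file:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	a := metadata.AppendToOutgoingContext(context.Background(), "token", "abc")
	b := metadata.AppendToOutgoingContext(context.Background(), "token", "abc")
	c := metadata.AppendToOutgoingContext(context.Background(), "token", "xyz")

	// key mirrors streamKeyFromCtx: the outgoing metadata, rendered as a string.
	key := func(ctx context.Context) string {
		if md, ok := metadata.FromOutgoingContext(ctx); ok {
			return fmt.Sprintf("%+v", md)
		}
		return ""
	}

	fmt.Println(key(a) == key(b)) // true: same metadata, shared stream
	fmt.Println(key(a) == key(c)) // false: different metadata, separate stream
}
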
16
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
generated
vendored
Normal file
16
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
|
||||
package rpctypes
|
235
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go
generated
vendored
Normal file
235
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go
generated
vendored
Normal file
@ -0,0 +1,235 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rpctypes
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// server-side error
|
||||
var (
|
||||
ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
|
||||
ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
|
||||
ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
|
||||
ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
|
||||
ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
|
||||
ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
|
||||
ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
|
||||
ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
|
||||
ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
|
||||
|
||||
ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
|
||||
ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
|
||||
ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
|
||||
|
||||
ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
|
||||
ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
|
||||
ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
|
||||
ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
|
||||
ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err()
|
||||
ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err()
|
||||
ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err()
|
||||
ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err()
|
||||
|
||||
ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
|
||||
ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
|
||||
|
||||
ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
|
||||
ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
|
||||
ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
|
||||
ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
|
||||
ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
|
||||
ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
|
||||
ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
|
||||
ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err()
|
||||
ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
|
||||
ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
|
||||
ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
|
||||
ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
|
||||
ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
|
||||
ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
|
||||
ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
|
||||
|
||||
ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
|
||||
ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
|
||||
ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
|
||||
ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err()
|
||||
ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
|
||||
ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
|
||||
ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
|
||||
ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
|
||||
ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
|
||||
ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
|
||||
ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err()
|
||||
ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err()
|
||||
|
||||
errStringToError = map[string]error{
|
||||
ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
|
||||
ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound,
|
||||
ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
|
||||
ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
|
||||
|
||||
ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
|
||||
ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
|
||||
ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
|
||||
ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
|
||||
ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
|
||||
|
||||
ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
|
||||
ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
|
||||
ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
|
||||
|
||||
ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
|
||||
ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
|
||||
ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
|
||||
ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
|
||||
ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
|
||||
ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner,
|
||||
ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady,
|
||||
ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners,
|
||||
|
||||
ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
|
||||
ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
|
||||
|
||||
ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
|
||||
ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
|
||||
ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
|
||||
ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty,
|
||||
ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
|
||||
ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
|
||||
ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
|
||||
ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty,
|
||||
ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
|
||||
ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
|
||||
ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
|
||||
ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
|
||||
ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
|
||||
ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
|
||||
ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
|
||||
|
||||
ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
|
||||
ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader,
|
||||
ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged,
|
||||
ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
|
||||
ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
|
||||
ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
|
||||
ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
|
||||
ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
|
||||
ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
|
||||
ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt,
|
||||
ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner,
|
||||
ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee,
|
||||
}
|
||||
)
|
||||
|
||||
// client-side error
|
||||
var (
|
||||
ErrEmptyKey = Error(ErrGRPCEmptyKey)
|
||||
ErrKeyNotFound = Error(ErrGRPCKeyNotFound)
|
||||
ErrValueProvided = Error(ErrGRPCValueProvided)
|
||||
ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
|
||||
ErrTooManyOps = Error(ErrGRPCTooManyOps)
|
||||
ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
|
||||
ErrCompacted = Error(ErrGRPCCompacted)
|
||||
ErrFutureRev = Error(ErrGRPCFutureRev)
|
||||
ErrNoSpace = Error(ErrGRPCNoSpace)
|
||||
|
||||
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
|
||||
ErrLeaseExist = Error(ErrGRPCLeaseExist)
|
||||
ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
|
||||
|
||||
ErrMemberExist = Error(ErrGRPCMemberExist)
|
||||
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
|
||||
ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
|
||||
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
|
||||
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
|
||||
ErrMemberNotLearner = Error(ErrGRPCMemberNotLearner)
|
||||
ErrMemberLearnerNotReady = Error(ErrGRPCLearnerNotReady)
|
||||
ErrTooManyLearners = Error(ErrGRPCTooManyLearners)
|
||||
|
||||
ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
|
||||
ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
|
||||
|
||||
ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
|
||||
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
|
||||
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
|
||||
ErrUserEmpty = Error(ErrGRPCUserEmpty)
|
||||
ErrUserNotFound = Error(ErrGRPCUserNotFound)
|
||||
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
|
||||
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
|
||||
ErrRoleEmpty = Error(ErrGRPCRoleEmpty)
|
||||
ErrAuthFailed = Error(ErrGRPCAuthFailed)
|
||||
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
|
||||
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
|
||||
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
|
||||
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
|
||||
ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
|
||||
ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
|
||||
|
||||
ErrNoLeader = Error(ErrGRPCNoLeader)
|
||||
ErrNotLeader = Error(ErrGRPCNotLeader)
|
||||
ErrLeaderChanged = Error(ErrGRPCLeaderChanged)
|
||||
ErrNotCapable = Error(ErrGRPCNotCapable)
|
||||
ErrStopped = Error(ErrGRPCStopped)
|
||||
ErrTimeout = Error(ErrGRPCTimeout)
|
||||
ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
|
||||
ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
|
||||
ErrUnhealthy = Error(ErrGRPCUnhealthy)
|
||||
ErrCorrupt = Error(ErrGRPCCorrupt)
|
||||
ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee)
|
||||
)
|
||||
|
||||
// EtcdError defines gRPC server errors.
|
||||
// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
|
||||
type EtcdError struct {
|
||||
code codes.Code
|
||||
desc string
|
||||
}
|
||||
|
||||
// Code returns grpc/codes.Code.
|
||||
// TODO: define clientv3/codes.Code.
|
||||
func (e EtcdError) Code() codes.Code {
|
||||
return e.code
|
||||
}
|
||||
|
||||
func (e EtcdError) Error() string {
|
||||
return e.desc
|
||||
}
|
||||
|
||||
func Error(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
verr, ok := errStringToError[ErrorDesc(err)]
|
||||
if !ok { // not gRPC error
|
||||
return err
|
||||
}
|
||||
ev, ok := status.FromError(verr)
|
||||
var desc string
|
||||
if ok {
|
||||
desc = ev.Message()
|
||||
} else {
|
||||
desc = verr.Error()
|
||||
}
|
||||
return EtcdError{code: ev.Code(), desc: desc}
|
||||
}
|
||||
|
||||
func ErrorDesc(err error) string {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Message()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
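For context, this is how callers typically use Error and ErrorDesc to map a wire-level gRPC status back to a comparable sentinel. The status error below is constructed by hand purely for illustration and is not part of the vendored file.

package main

import (
	"fmt"

	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Pretend this status error arrived over gRPC from the server.
	wire := status.New(codes.OutOfRange,
		"etcdserver: mvcc: required revision has been compacted").Err()

	// Error() maps the wire form back to the client-side sentinel,
	// so callers can compare against rpctypes.ErrCompacted directly.
	err := rpctypes.Error(wire)
	fmt.Println(err == rpctypes.ErrCompacted) // true
	fmt.Println(rpctypes.ErrorDesc(wire))     // etcdserver: mvcc: required revision has been compacted
	if ev, ok := err.(rpctypes.EtcdError); ok {
		fmt.Println(ev.Code() == codes.OutOfRange) // true
	}
}
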
20
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go
generated
vendored
Normal file
20
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rpctypes
|
||||
|
||||
var (
|
||||
MetadataRequireLeaderKey = "hasleader"
|
||||
MetadataHasLeader = "true"
|
||||
)
|
20
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
generated
vendored
Normal file
20
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rpctypes

var (
	TokenFieldNameGRPC    = "token"
	TokenFieldNameSwagger = "authorization"
)
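TokenFieldNameGRPC is the metadata key an authenticated client uses to carry its auth token on gRPC calls; TokenFieldNameSwagger is the HTTP gateway counterpart. A small sketch, assuming the token has already been obtained elsewhere:

package example

import (
	"context"

	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
	"google.golang.org/grpc/metadata"
)

// withAuthToken attaches an already-obtained auth token (hypothetical value)
// to an outgoing etcd RPC under the "token" metadata key.
func withAuthToken(ctx context.Context, token string) context.Context {
	return metadata.AppendToOutgoingContext(ctx, rpctypes.TokenFieldNameGRPC, token)
}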
1041
vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
2127
vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
183
vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
generated
vendored
Normal file
@ -0,0 +1,183 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserverpb

import (
	"fmt"
	"strings"

	proto "github.com/golang/protobuf/proto"
)

// InternalRaftStringer implements custom proto Stringer:
// redact password, replace value fields with value_size fields.
type InternalRaftStringer struct {
	Request *InternalRaftRequest
}

func (as *InternalRaftStringer) String() string {
	switch {
	case as.Request.LeaseGrant != nil:
		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
			as.Request.Header.String(),
			as.Request.LeaseGrant.TTL,
			as.Request.LeaseGrant.ID,
		)
	case as.Request.LeaseRevoke != nil:
		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
			as.Request.Header.String(),
			as.Request.LeaseRevoke.ID,
		)
	case as.Request.Authenticate != nil:
		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
			as.Request.Header.String(),
			as.Request.Authenticate.Name,
			as.Request.Authenticate.SimpleToken,
		)
	case as.Request.AuthUserAdd != nil:
		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
			as.Request.Header.String(),
			as.Request.AuthUserAdd.Name,
		)
	case as.Request.AuthUserChangePassword != nil:
		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
			as.Request.Header.String(),
			as.Request.AuthUserChangePassword.Name,
		)
	case as.Request.Put != nil:
		return fmt.Sprintf("header:<%s> put:<%s>",
			as.Request.Header.String(),
			NewLoggablePutRequest(as.Request.Put).String(),
		)
	case as.Request.Txn != nil:
		return fmt.Sprintf("header:<%s> txn:<%s>",
			as.Request.Header.String(),
			NewLoggableTxnRequest(as.Request.Txn).String(),
		)
	default:
		// nothing to redact
	}
	return as.Request.String()
}

// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
// fields in any nested txn and put operations.
type txnRequestStringer struct {
	Request *TxnRequest
}

func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
	return &txnRequestStringer{request}
}

func (as *txnRequestStringer) String() string {
	var compare []string
	for _, c := range as.Request.Compare {
		switch cv := c.TargetUnion.(type) {
		case *Compare_Value:
			compare = append(compare, newLoggableValueCompare(c, cv).String())
		default:
			// nothing to redact
			compare = append(compare, c.String())
		}
	}
	var success []string
	for _, s := range as.Request.Success {
		success = append(success, newLoggableRequestOp(s).String())
	}
	var failure []string
	for _, f := range as.Request.Failure {
		failure = append(failure, newLoggableRequestOp(f).String())
	}
	return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
		strings.Join(compare, " "),
		strings.Join(success, " "),
		strings.Join(failure, " "),
	)
}

// requestOpStringer implements a custom proto String to replace value bytes fields with value
// size fields in any nested txn and put operations.
type requestOpStringer struct {
	Op *RequestOp
}

func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
	return &requestOpStringer{op}
}

func (as *requestOpStringer) String() string {
	switch op := as.Op.Request.(type) {
	case *RequestOp_RequestPut:
		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
	case *RequestOp_RequestTxn:
		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
	default:
		// nothing to redact
	}
	return as.Op.String()
}

// loggableValueCompare implements a custom proto String for Compare.Value union member types to
// replace the value bytes field with a value size field.
// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
type loggableValueCompare struct {
	Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
	Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
	Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
	ValueSize int                   `protobuf:"bytes,7,opt,name=value_size,proto3"`
	RangeEnd  []byte                `protobuf:"bytes,64,opt,name=range_end,proto3"`
}

func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
	return &loggableValueCompare{
		c.Result,
		c.Target,
		c.Key,
		len(cv.Value),
		c.RangeEnd,
	}
}

func (m *loggableValueCompare) Reset()         { *m = loggableValueCompare{} }
func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
func (*loggableValueCompare) ProtoMessage()    {}

// loggablePutRequest implements a custom proto String to replace value bytes field with a value
// size field.
// To preserve proto encoding of the key bytes, a faked out proto type is used here.
type loggablePutRequest struct {
	Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
	ValueSize   int    `protobuf:"varint,2,opt,name=value_size,proto3"`
	Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
	PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
	IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`
	IgnoreLease bool   `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
}

func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
	return &loggablePutRequest{
		request.Key,
		len(request.Value),
		request.Lease,
		request.PrevKv,
		request.IgnoreValue,
		request.IgnoreLease,
	}
}

func (m *loggablePutRequest) Reset()         { *m = loggablePutRequest{} }
func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
func (*loggablePutRequest) ProtoMessage()    {}
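The point of these loggable wrappers is that request logging reports value sizes instead of raw, possibly sensitive, value bytes. A sketch of the visible effect on a PutRequest; the key and value literals are made up:

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)

func main() {
	req := &pb.PutRequest{Key: []byte("secret-key"), Value: []byte("very-long-secret-value")}
	// req.String() would print the value bytes; the loggable wrapper prints
	// only value_size, which is what the server's request logging uses.
	fmt.Println(pb.NewLoggablePutRequest(req).String())
}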
20086
vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
718
vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go
generated
vendored
Normal file
@ -0,0 +1,718 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: kv.proto
|
||||
|
||||
/*
|
||||
Package mvccpb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
kv.proto
|
||||
|
||||
It has these top-level messages:
|
||||
KeyValue
|
||||
Event
|
||||
*/
|
||||
package mvccpb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
|
||||
math "math"
|
||||
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
|
||||
io "io"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type Event_EventType int32
|
||||
|
||||
const (
|
||||
PUT Event_EventType = 0
|
||||
DELETE Event_EventType = 1
|
||||
)
|
||||
|
||||
var Event_EventType_name = map[int32]string{
|
||||
0: "PUT",
|
||||
1: "DELETE",
|
||||
}
|
||||
var Event_EventType_value = map[string]int32{
|
||||
"PUT": 0,
|
||||
"DELETE": 1,
|
||||
}
|
||||
|
||||
func (x Event_EventType) String() string {
|
||||
return proto.EnumName(Event_EventType_name, int32(x))
|
||||
}
|
||||
func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} }
|
||||
|
||||
type KeyValue struct {
|
||||
// key is the key in bytes. An empty key is not allowed.
|
||||
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
// create_revision is the revision of last creation on this key.
|
||||
CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
|
||||
// mod_revision is the revision of last modification on this key.
|
||||
ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
|
||||
// version is the version of the key. A deletion resets
|
||||
// the version to zero and any modification of the key
|
||||
// increases its version.
|
||||
Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
|
||||
// value is the value held by the key, in bytes.
|
||||
Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
|
||||
// lease is the ID of the lease that attached to key.
|
||||
// When the attached lease expires, the key will be deleted.
|
||||
// If lease is 0, then no lease is attached to the key.
|
||||
Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
|
||||
}
|
||||
|
||||
func (m *KeyValue) Reset() { *m = KeyValue{} }
|
||||
func (m *KeyValue) String() string { return proto.CompactTextString(m) }
|
||||
func (*KeyValue) ProtoMessage() {}
|
||||
func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} }
|
||||
|
||||
type Event struct {
|
||||
// type is the kind of event. If type is a PUT, it indicates
|
||||
// new data has been stored to the key. If type is a DELETE,
|
||||
// it indicates the key was deleted.
|
||||
Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
|
||||
// kv holds the KeyValue for the event.
|
||||
// A PUT event contains current kv pair.
|
||||
// A PUT event with kv.Version=1 indicates the creation of a key.
|
||||
// A DELETE/EXPIRE event contains the deleted key with
|
||||
// its modification revision set to the revision of deletion.
|
||||
Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"`
|
||||
// prev_kv holds the key-value pair before the event happens.
|
||||
PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Event) Reset() { *m = Event{} }
|
||||
func (m *Event) String() string { return proto.CompactTextString(m) }
|
||||
func (*Event) ProtoMessage() {}
|
||||
func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
|
||||
proto.RegisterType((*Event)(nil), "mvccpb.Event")
|
||||
proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
|
||||
}
|
||||
func (m *KeyValue) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Key) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
|
||||
i += copy(dAtA[i:], m.Key)
|
||||
}
|
||||
if m.CreateRevision != 0 {
|
||||
dAtA[i] = 0x10
|
||||
i++
|
||||
i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
|
||||
}
|
||||
if m.ModRevision != 0 {
|
||||
dAtA[i] = 0x18
|
||||
i++
|
||||
i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
|
||||
}
|
||||
if m.Version != 0 {
|
||||
dAtA[i] = 0x20
|
||||
i++
|
||||
i = encodeVarintKv(dAtA, i, uint64(m.Version))
|
||||
}
|
||||
if len(m.Value) > 0 {
|
||||
dAtA[i] = 0x2a
|
||||
i++
|
||||
i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
|
||||
i += copy(dAtA[i:], m.Value)
|
||||
}
|
||||
if m.Lease != 0 {
|
||||
dAtA[i] = 0x30
|
||||
i++
|
||||
i = encodeVarintKv(dAtA, i, uint64(m.Lease))
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *Event) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Event) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Type != 0 {
|
||||
dAtA[i] = 0x8
|
||||
i++
|
||||
i = encodeVarintKv(dAtA, i, uint64(m.Type))
|
||||
}
|
||||
if m.Kv != nil {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size()))
|
||||
n1, err := m.Kv.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n1
|
||||
}
|
||||
if m.PrevKv != nil {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size()))
|
||||
n2, err := m.PrevKv.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n2
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *KeyValue) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Key)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovKv(uint64(l))
|
||||
}
|
||||
if m.CreateRevision != 0 {
|
||||
n += 1 + sovKv(uint64(m.CreateRevision))
|
||||
}
|
||||
if m.ModRevision != 0 {
|
||||
n += 1 + sovKv(uint64(m.ModRevision))
|
||||
}
|
||||
if m.Version != 0 {
|
||||
n += 1 + sovKv(uint64(m.Version))
|
||||
}
|
||||
l = len(m.Value)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovKv(uint64(l))
|
||||
}
|
||||
if m.Lease != 0 {
|
||||
n += 1 + sovKv(uint64(m.Lease))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Event) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if m.Type != 0 {
|
||||
n += 1 + sovKv(uint64(m.Type))
|
||||
}
|
||||
if m.Kv != nil {
|
||||
l = m.Kv.Size()
|
||||
n += 1 + l + sovKv(uint64(l))
|
||||
}
|
||||
if m.PrevKv != nil {
|
||||
l = m.PrevKv.Size()
|
||||
n += 1 + l + sovKv(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovKv(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozKv(x uint64) (n int) {
|
||||
return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *KeyValue) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthKv
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Key == nil {
|
||||
m.Key = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
|
||||
}
|
||||
m.CreateRevision = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.CreateRevision |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
|
||||
}
|
||||
m.ModRevision = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ModRevision |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
|
||||
}
|
||||
m.Version = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Version |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 5:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthKv
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Value == nil {
|
||||
m.Value = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 6:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
|
||||
}
|
||||
m.Lease = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Lease |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipKv(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthKv
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *Event) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Event: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
|
||||
}
|
||||
m.Type = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Type |= (Event_EventType(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthKv
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Kv == nil {
|
||||
m.Kv = &KeyValue{}
|
||||
}
|
||||
if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthKv
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.PrevKv == nil {
|
||||
m.PrevKv = &KeyValue{}
|
||||
}
|
||||
if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipKv(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthKv
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipKv(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthKv
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowKv
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipKv(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) }
|
||||
|
||||
var fileDescriptorKv = []byte{
|
||||
// 303 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
|
||||
0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
|
||||
0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
|
||||
0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
|
||||
0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
|
||||
0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
|
||||
0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
|
||||
0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
|
||||
0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
|
||||
0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
|
||||
0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
|
||||
0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
|
||||
0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
|
||||
0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
|
||||
0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
|
||||
0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
|
||||
0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
|
||||
0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
|
||||
0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
|
||||
}
|
46
vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logutil

import (
	"log"

	"google.golang.org/grpc/grpclog"
)

// assert that "discardLogger" satisfy "Logger" interface
var _ Logger = &discardLogger{}

// NewDiscardLogger returns a new Logger that discards everything except "fatal".
func NewDiscardLogger() Logger { return &discardLogger{} }

type discardLogger struct{}

func (l *discardLogger) Info(args ...interface{})                    {}
func (l *discardLogger) Infoln(args ...interface{})                  {}
func (l *discardLogger) Infof(format string, args ...interface{})    {}
func (l *discardLogger) Warning(args ...interface{})                 {}
func (l *discardLogger) Warningln(args ...interface{})               {}
func (l *discardLogger) Warningf(format string, args ...interface{}) {}
func (l *discardLogger) Error(args ...interface{})                   {}
func (l *discardLogger) Errorln(args ...interface{})                 {}
func (l *discardLogger) Errorf(format string, args ...interface{})   {}
func (l *discardLogger) Fatal(args ...interface{})                   { log.Fatal(args...) }
func (l *discardLogger) Fatalln(args ...interface{})                 { log.Fatalln(args...) }
func (l *discardLogger) Fatalf(format string, args ...interface{})   { log.Fatalf(format, args...) }
func (l *discardLogger) V(lvl int) bool {
	return false
}
func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l }
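NewDiscardLogger is typically handed to grpclog so that gRPC's own Info/Warning/Error output is silenced while Fatal still terminates the process. A minimal sketch of that wiring:

package main

import (
	"go.etcd.io/etcd/pkg/logutil"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Drop gRPC Info/Warning/Error output; Fatal still exits as usual.
	grpclog.SetLoggerV2(logutil.NewDiscardLogger())
}

func main() {}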
16
vendor/go.etcd.io/etcd/pkg/logutil/doc.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package logutil includes utilities to facilitate logging.
package logutil
70
vendor/go.etcd.io/etcd/pkg/logutil/log_level.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
// Copyright 2019 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logutil

import (
	"fmt"

	"github.com/coreos/pkg/capnslog"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

var DefaultLogLevel = "info"

// ConvertToZapLevel converts log level string to zapcore.Level.
func ConvertToZapLevel(lvl string) zapcore.Level {
	switch lvl {
	case "debug":
		return zap.DebugLevel
	case "info":
		return zap.InfoLevel
	case "warn":
		return zap.WarnLevel
	case "error":
		return zap.ErrorLevel
	case "dpanic":
		return zap.DPanicLevel
	case "panic":
		return zap.PanicLevel
	case "fatal":
		return zap.FatalLevel
	default:
		panic(fmt.Sprintf("unknown level %q", lvl))
	}
}

// ConvertToCapnslogLogLevel convert log level string to capnslog.LogLevel.
// TODO: deprecate this in 3.5
func ConvertToCapnslogLogLevel(lvl string) capnslog.LogLevel {
	switch lvl {
	case "debug":
		return capnslog.DEBUG
	case "info":
		return capnslog.INFO
	case "warn":
		return capnslog.WARNING
	case "error":
		return capnslog.ERROR
	case "dpanic":
		return capnslog.CRITICAL
	case "panic":
		return capnslog.CRITICAL
	case "fatal":
		return capnslog.CRITICAL
	default:
		panic(fmt.Sprintf("unknown level %q", lvl))
	}
}
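ConvertToZapLevel panics on an unrecognized string, so callers are expected to pass one of the levels handled above. A sketch of plugging a configured level into a zap.Config; the "warn" literal is only an example value:

package main

import (
	"go.uber.org/zap"

	"go.etcd.io/etcd/pkg/logutil"
)

func main() {
	cfg := zap.NewProductionConfig()
	// Panics if the string is not one of the levels handled above.
	cfg.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel("warn"))
	lg, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer lg.Sync()
	lg.Info("dropped: below the configured warn level")
	lg.Warn("printed: at the configured level")
}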
64
vendor/go.etcd.io/etcd/pkg/logutil/logger.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logutil

import "google.golang.org/grpc/grpclog"

// Logger defines logging interface.
// TODO: deprecate in v3.5.
type Logger interface {
	grpclog.LoggerV2

	// Lvl returns logger if logger's verbosity level >= "lvl".
	// Otherwise, logger that discards everything.
	Lvl(lvl int) grpclog.LoggerV2
}

// assert that "defaultLogger" satisfy "Logger" interface
var _ Logger = &defaultLogger{}

// NewLogger wraps "grpclog.LoggerV2" that implements "Logger" interface.
//
// For example:
//
//  var defaultLogger Logger
//  g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
//  defaultLogger = NewLogger(g)
//
func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} }

type defaultLogger struct {
	g grpclog.LoggerV2
}

func (l *defaultLogger) Info(args ...interface{})                    { l.g.Info(args...) }
func (l *defaultLogger) Infoln(args ...interface{})                  { l.g.Info(args...) }
func (l *defaultLogger) Infof(format string, args ...interface{})    { l.g.Infof(format, args...) }
func (l *defaultLogger) Warning(args ...interface{})                 { l.g.Warning(args...) }
func (l *defaultLogger) Warningln(args ...interface{})               { l.g.Warning(args...) }
func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) }
func (l *defaultLogger) Error(args ...interface{})                   { l.g.Error(args...) }
func (l *defaultLogger) Errorln(args ...interface{})                 { l.g.Error(args...) }
func (l *defaultLogger) Errorf(format string, args ...interface{})   { l.g.Errorf(format, args...) }
func (l *defaultLogger) Fatal(args ...interface{})                   { l.g.Fatal(args...) }
func (l *defaultLogger) Fatalln(args ...interface{})                 { l.g.Fatal(args...) }
func (l *defaultLogger) Fatalf(format string, args ...interface{})   { l.g.Fatalf(format, args...) }
func (l *defaultLogger) V(lvl int) bool                              { return l.g.V(lvl) }
func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 {
	if l.g.V(lvl) {
		return l
	}
	return &discardLogger{}
}
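Lvl is the piece that lets call sites downgrade to a discarding logger whenever the wrapped grpclog verbosity is too low, along the lines of the doc comment on NewLogger. A usage sketch with an assumed verbosity of 4:

package main

import (
	"os"

	"go.etcd.io/etcd/pkg/logutil"

	"google.golang.org/grpc/grpclog"
)

func main() {
	g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
	l := logutil.NewLogger(g)

	l.Lvl(2).Info("printed: verbosity 4 covers level 2")
	l.Lvl(5).Info("silently dropped: level 5 exceeds verbosity 4")
}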
194
vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go
generated
vendored
Normal file
@ -0,0 +1,194 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logutil

import (
	"fmt"
	"sync"
	"time"

	"github.com/coreos/pkg/capnslog"
)

var (
	defaultMergePeriod     = time.Second
	defaultTimeOutputScale = 10 * time.Millisecond

	outputInterval = time.Second
)

// line represents a log line that can be printed out
// through capnslog.PackageLogger.
type line struct {
	level capnslog.LogLevel
	str   string
}

func (l line) append(s string) line {
	return line{
		level: l.level,
		str:   l.str + " " + s,
	}
}

// status represents the merge status of a line.
type status struct {
	period time.Duration

	start time.Time // start time of latest merge period
	count int       // number of merged lines from starting
}

func (s *status) isInMergePeriod(now time.Time) bool {
	return s.period == 0 || s.start.Add(s.period).After(now)
}

func (s *status) isEmpty() bool { return s.count == 0 }

func (s *status) summary(now time.Time) string {
	ts := s.start.Round(defaultTimeOutputScale)
	took := now.Round(defaultTimeOutputScale).Sub(ts)
	return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
}

func (s *status) reset(now time.Time) {
	s.start = now
	s.count = 0
}

// MergeLogger supports merge logging, which merges repeated log lines
// and prints summary log lines instead.
//
// For merge logging, MergeLogger prints out the line when the line appears
// at the first time. MergeLogger holds the same log line printed within
// defaultMergePeriod, and prints out summary log line at the end of defaultMergePeriod.
// It stops merging when the line doesn't appear within the
// defaultMergePeriod.
type MergeLogger struct {
	*capnslog.PackageLogger

	mu      sync.Mutex // protect statusm
	statusm map[line]*status
}

func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger {
	l := &MergeLogger{
		PackageLogger: logger,
		statusm:       make(map[line]*status),
	}
	go l.outputLoop()
	return l
}

func (l *MergeLogger) MergeInfo(entries ...interface{}) {
	l.merge(line{
		level: capnslog.INFO,
		str:   fmt.Sprint(entries...),
	})
}

func (l *MergeLogger) MergeInfof(format string, args ...interface{}) {
	l.merge(line{
		level: capnslog.INFO,
		str:   fmt.Sprintf(format, args...),
	})
}

func (l *MergeLogger) MergeNotice(entries ...interface{}) {
	l.merge(line{
		level: capnslog.NOTICE,
		str:   fmt.Sprint(entries...),
	})
}

func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) {
	l.merge(line{
		level: capnslog.NOTICE,
		str:   fmt.Sprintf(format, args...),
	})
}

func (l *MergeLogger) MergeWarning(entries ...interface{}) {
	l.merge(line{
		level: capnslog.WARNING,
		str:   fmt.Sprint(entries...),
	})
}

func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) {
	l.merge(line{
		level: capnslog.WARNING,
		str:   fmt.Sprintf(format, args...),
	})
}

func (l *MergeLogger) MergeError(entries ...interface{}) {
	l.merge(line{
		level: capnslog.ERROR,
		str:   fmt.Sprint(entries...),
	})
}

func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) {
	l.merge(line{
		level: capnslog.ERROR,
		str:   fmt.Sprintf(format, args...),
	})
}

func (l *MergeLogger) merge(ln line) {
	l.mu.Lock()

	// increase count if the logger is merging the line
	if status, ok := l.statusm[ln]; ok {
		status.count++
		l.mu.Unlock()
		return
	}

	// initialize status of the line
	l.statusm[ln] = &status{
		period: defaultMergePeriod,
		start:  time.Now(),
	}
	// release the lock before IO operation
	l.mu.Unlock()
	// print out the line at its first time
	l.PackageLogger.Logf(ln.level, ln.str)
}

func (l *MergeLogger) outputLoop() {
	for now := range time.Tick(outputInterval) {
		var outputs []line

		l.mu.Lock()
		for ln, status := range l.statusm {
			if status.isInMergePeriod(now) {
				continue
			}
			if status.isEmpty() {
				delete(l.statusm, ln)
				continue
			}
			outputs = append(outputs, ln.append(status.summary(now)))
			status.reset(now)
		}
		l.mu.Unlock()

		for _, o := range outputs {
			l.PackageLogger.Logf(o.level, o.str)
		}
	}
}
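MergeLogger prints a repeated line once, counts further repeats for defaultMergePeriod, and then emits a "[merged N repeated lines in ...]" summary from outputLoop. A hedged sketch of the pattern; the repository and package names passed to capnslog are arbitrary:

package main

import (
	"time"

	"github.com/coreos/pkg/capnslog"

	"go.etcd.io/etcd/pkg/logutil"
)

func main() {
	plog := capnslog.NewPackageLogger("example.com/demo", "netprobe")
	ml := logutil.NewMergeLogger(plog)

	for i := 0; i < 50; i++ {
		// Identical lines: the first is printed, later ones are merged.
		ml.MergeWarning("peer 1234 is unreachable")
		time.Sleep(10 * time.Millisecond)
	}
	time.Sleep(2 * time.Second) // give outputLoop a chance to print the summary
}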
60
vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logutil
|
||||
|
||||
import (
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// assert that "packageLogger" satisfy "Logger" interface
|
||||
var _ Logger = &packageLogger{}
|
||||
|
||||
// NewPackageLogger wraps "*capnslog.PackageLogger" that implements "Logger" interface.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// var defaultLogger Logger
|
||||
// defaultLogger = NewPackageLogger("go.etcd.io/etcd", "snapshot")
|
||||
//
|
||||
func NewPackageLogger(repo, pkg string) Logger {
|
||||
return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
|
||||
}
|
||||
|
||||
type packageLogger struct {
|
||||
p *capnslog.PackageLogger
|
||||
}
|
||||
|
||||
func (l *packageLogger) Info(args ...interface{}) { l.p.Info(args...) }
|
||||
func (l *packageLogger) Infoln(args ...interface{}) { l.p.Info(args...) }
|
||||
func (l *packageLogger) Infof(format string, args ...interface{}) { l.p.Infof(format, args...) }
|
||||
func (l *packageLogger) Warning(args ...interface{}) { l.p.Warning(args...) }
|
||||
func (l *packageLogger) Warningln(args ...interface{}) { l.p.Warning(args...) }
|
||||
func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) }
|
||||
func (l *packageLogger) Error(args ...interface{}) { l.p.Error(args...) }
|
||||
func (l *packageLogger) Errorln(args ...interface{}) { l.p.Error(args...) }
|
||||
func (l *packageLogger) Errorf(format string, args ...interface{}) { l.p.Errorf(format, args...) }
|
||||
func (l *packageLogger) Fatal(args ...interface{}) { l.p.Fatal(args...) }
|
||||
func (l *packageLogger) Fatalln(args ...interface{}) { l.p.Fatal(args...) }
|
||||
func (l *packageLogger) Fatalf(format string, args ...interface{}) { l.p.Fatalf(format, args...) }
|
||||
func (l *packageLogger) V(lvl int) bool {
|
||||
return l.p.LevelAt(capnslog.LogLevel(lvl))
|
||||
}
|
||||
func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 {
|
||||
if l.p.LevelAt(capnslog.LogLevel(lvl)) {
|
||||
return l
|
||||
}
|
||||
return &discardLogger{}
|
||||
}
|
91
vendor/go.etcd.io/etcd/pkg/logutil/zap.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logutil
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
// DefaultZapLoggerConfig defines default zap logger configuration.
|
||||
var DefaultZapLoggerConfig = zap.Config{
|
||||
Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
|
||||
|
||||
Development: false,
|
||||
Sampling: &zap.SamplingConfig{
|
||||
Initial: 100,
|
||||
Thereafter: 100,
|
||||
},
|
||||
|
||||
Encoding: "json",
|
||||
|
||||
// copied from "zap.NewProductionEncoderConfig" with some updates
|
||||
EncoderConfig: zapcore.EncoderConfig{
|
||||
TimeKey: "ts",
|
||||
LevelKey: "level",
|
||||
NameKey: "logger",
|
||||
CallerKey: "caller",
|
||||
MessageKey: "msg",
|
||||
StacktraceKey: "stacktrace",
|
||||
LineEnding: zapcore.DefaultLineEnding,
|
||||
EncodeLevel: zapcore.LowercaseLevelEncoder,
|
||||
EncodeTime: zapcore.ISO8601TimeEncoder,
|
||||
EncodeDuration: zapcore.StringDurationEncoder,
|
||||
EncodeCaller: zapcore.ShortCallerEncoder,
|
||||
},
|
||||
|
||||
// Use "/dev/null" to discard all
|
||||
OutputPaths: []string{"stderr"},
|
||||
ErrorOutputPaths: []string{"stderr"},
|
||||
}
|
||||
|
||||
// MergeOutputPaths merges logging output paths, resolving conflicts.
|
||||
func MergeOutputPaths(cfg zap.Config) zap.Config {
|
||||
outputs := make(map[string]struct{})
|
||||
for _, v := range cfg.OutputPaths {
|
||||
outputs[v] = struct{}{}
|
||||
}
|
||||
outputSlice := make([]string, 0)
|
||||
if _, ok := outputs["/dev/null"]; ok {
|
||||
// "/dev/null" to discard all
|
||||
outputSlice = []string{"/dev/null"}
|
||||
} else {
|
||||
for k := range outputs {
|
||||
outputSlice = append(outputSlice, k)
|
||||
}
|
||||
}
|
||||
cfg.OutputPaths = outputSlice
|
||||
sort.Strings(cfg.OutputPaths)
|
||||
|
||||
errOutputs := make(map[string]struct{})
|
||||
for _, v := range cfg.ErrorOutputPaths {
|
||||
errOutputs[v] = struct{}{}
|
||||
}
|
||||
errOutputSlice := make([]string, 0)
|
||||
if _, ok := errOutputs["/dev/null"]; ok {
|
||||
// "/dev/null" to discard all
|
||||
errOutputSlice = []string{"/dev/null"}
|
||||
} else {
|
||||
for k := range errOutputs {
|
||||
errOutputSlice = append(errOutputSlice, k)
|
||||
}
|
||||
}
|
||||
cfg.ErrorOutputPaths = errOutputSlice
|
||||
sort.Strings(cfg.ErrorOutputPaths)
|
||||
|
||||
return cfg
|
||||
}
|
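MergeOutputPaths, defined at the end of zap.go above, deduplicates and sorts the configured sinks and lets "/dev/null" short-circuit everything else. A sketch under the assumption that an extra log file path is wanted; the path itself is made up:

package main

import (
	"go.etcd.io/etcd/pkg/logutil"
)

func main() {
	cfg := logutil.DefaultZapLoggerConfig
	// Duplicates collapse to a sorted, unique list; adding "/dev/null"
	// instead would discard all output.
	cfg.OutputPaths = append(cfg.OutputPaths, "stderr", "/tmp/etcd-example.log")
	cfg = logutil.MergeOutputPaths(cfg)

	lg, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer lg.Sync()
	lg.Info("logging to merged output paths")
}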
111
vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go
generated
vendored
Normal file
@ -0,0 +1,111 @@
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logutil
|
||||
|
||||
import (
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
|
||||
// It discards all INFO level logging in gRPC, if debug level
|
||||
// is not enabled in "*zap.Logger".
|
||||
func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
|
||||
lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
|
||||
}
|
||||
|
||||
// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core"
|
||||
// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC,
|
||||
// if debug level is not enabled in "*zap.Logger".
|
||||
func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 {
|
||||
// "AddCallerSkip" to annotate caller outside of "logutil"
|
||||
lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
|
||||
return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
|
||||
}
|
||||
|
||||
type zapGRPCLogger struct {
|
||||
lg *zap.Logger
|
||||
sugar *zap.SugaredLogger
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Info(args ...interface{}) {
|
||||
if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
|
||||
return
|
||||
}
|
||||
zl.sugar.Info(args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
|
||||
if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
|
||||
return
|
||||
}
|
||||
zl.sugar.Info(args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
|
||||
if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
|
||||
return
|
||||
}
|
||||
zl.sugar.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Warning(args ...interface{}) {
|
||||
zl.sugar.Warn(args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
|
||||
zl.sugar.Warn(args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
|
||||
zl.sugar.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Error(args ...interface{}) {
|
||||
zl.sugar.Error(args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
|
||||
zl.sugar.Error(args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
|
||||
zl.sugar.Errorf(format, args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
|
||||
zl.sugar.Fatal(args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
|
||||
zl.sugar.Fatal(args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
|
||||
zl.sugar.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func (zl *zapGRPCLogger) V(l int) bool {
|
||||
// infoLog == 0
|
||||
if l <= 0 { // debug level, then we ignore info level in gRPC
|
||||
return !zl.lg.Core().Enabled(zapcore.DebugLevel)
|
||||
}
|
||||
return true
|
||||
}
|
92
vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package logutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"go.etcd.io/etcd/pkg/systemd"
|
||||
|
||||
"github.com/coreos/go-systemd/journal"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
// NewJournalWriter wraps "io.Writer" to redirect log output
|
||||
// to the local systemd journal. If journald send fails, it fails
|
||||
// back to writing to the original writer.
|
||||
// The decode overhead is only <30µs per write.
|
||||
// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
|
||||
func NewJournalWriter(wr io.Writer) (io.Writer, error) {
|
||||
return &journalWriter{Writer: wr}, systemd.DialJournal()
|
||||
}
|
||||
|
||||
type journalWriter struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
// WARN: assume that etcd uses default field names in zap encoder config
|
||||
// make sure to keep this up-to-date!
|
||||
type logLine struct {
|
||||
Level string `json:"level"`
|
||||
Caller string `json:"caller"`
|
||||
}
|
||||
|
||||
func (w *journalWriter) Write(p []byte) (int, error) {
|
||||
line := &logLine{}
|
||||
if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var pri journal.Priority
|
||||
switch line.Level {
|
||||
case zapcore.DebugLevel.String():
|
||||
pri = journal.PriDebug
|
||||
case zapcore.InfoLevel.String():
|
||||
pri = journal.PriInfo
|
||||
|
||||
case zapcore.WarnLevel.String():
|
||||
pri = journal.PriWarning
|
||||
case zapcore.ErrorLevel.String():
|
||||
pri = journal.PriErr
|
||||
|
||||
case zapcore.DPanicLevel.String():
|
||||
pri = journal.PriCrit
|
||||
case zapcore.PanicLevel.String():
|
||||
pri = journal.PriCrit
|
||||
case zapcore.FatalLevel.String():
|
||||
pri = journal.PriCrit
|
||||
|
||||
default:
|
||||
panic(fmt.Errorf("unknown log level: %q", line.Level))
|
||||
}
|
||||
|
||||
err := journal.Send(string(p), pri, map[string]string{
|
||||
"PACKAGE": filepath.Dir(line.Caller),
|
||||
"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
|
||||
})
|
||||
if err != nil {
|
||||
// "journal" also falls back to stderr
|
||||
// "fmt.Fprintln(os.Stderr, s)"
|
||||
return w.Writer.Write(p)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
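Editor-added usage sketch (not part of the vendored file): the journal writer is meant to sit underneath a zap core that encodes JSON with the default "level" and "caller" field names the decoder above expects; each line is re-sent to journald with a matching priority, and the wrapped writer is used when the send fails.

package main

import (
	"os"

	"go.etcd.io/etcd/pkg/logutil"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	w, err := logutil.NewJournalWriter(os.Stderr)
	if err != nil {
		// journald is unreachable (e.g. inside a container); keep plain stderr.
		w = os.Stderr
	}
	// The production encoder config uses the "level" and "caller" keys that
	// journalWriter.Write decodes.
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	core := zapcore.NewCore(enc, zapcore.AddSync(w), zap.InfoLevel)
	zap.New(core, zap.AddCaller()).Info("hello from the journal-backed logger")
}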
102
vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go
generated
vendored
Normal file
102
vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"go.etcd.io/etcd/raft"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
// NewRaftLogger builds "raft.Logger" from "*zap.Config".
|
||||
func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
|
||||
if lcfg == nil {
|
||||
return nil, errors.New("nil zap.Config")
|
||||
}
|
||||
lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
|
||||
}
|
||||
|
||||
// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
|
||||
func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
|
||||
return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
|
||||
}
|
||||
|
||||
// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core"
|
||||
// and "zapcore.WriteSyncer".
|
||||
func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
|
||||
// "AddCallerSkip" to annotate caller outside of "logutil"
|
||||
lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
|
||||
return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
|
||||
}
|
||||
|
||||
type zapRaftLogger struct {
|
||||
lg *zap.Logger
|
||||
sugar *zap.SugaredLogger
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Debug(args ...interface{}) {
|
||||
zl.sugar.Debug(args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
|
||||
zl.sugar.Debugf(format, args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Error(args ...interface{}) {
|
||||
zl.sugar.Error(args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
|
||||
zl.sugar.Errorf(format, args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Info(args ...interface{}) {
|
||||
zl.sugar.Info(args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
|
||||
zl.sugar.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Warning(args ...interface{}) {
|
||||
zl.sugar.Warn(args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
|
||||
zl.sugar.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Fatal(args ...interface{}) {
|
||||
zl.sugar.Fatal(args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
|
||||
zl.sugar.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Panic(args ...interface{}) {
|
||||
zl.sugar.Panic(args...)
|
||||
}
|
||||
|
||||
func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
|
||||
zl.sugar.Panicf(format, args...)
|
||||
}
|
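Editor-added sketch showing the two ways this file is typically used: converting an existing *zap.Logger into a raft.Logger, or building one from a zap.Config and handing it to a raft.Config. raft.SetLogger and the Config.Logger field belong to the raft package; everything else here is illustrative.

package main

import (
	"go.etcd.io/etcd/pkg/logutil"
	"go.etcd.io/etcd/raft"
	"go.uber.org/zap"
)

func main() {
	// Route the package-level raft logger through zap.
	lg, _ := zap.NewProduction()
	raft.SetLogger(logutil.NewRaftLoggerZap(lg.Named("raft")))

	// Or keep a logger scoped to a single raft.Config.
	lcfg := zap.NewDevelopmentConfig()
	rlg, err := logutil.NewRaftLogger(&lcfg)
	if err != nil {
		panic(err)
	}
	_ = raft.Config{Logger: rlg}
}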
16
vendor/go.etcd.io/etcd/pkg/systemd/doc.go
generated
vendored
Normal file
16
vendor/go.etcd.io/etcd/pkg/systemd/doc.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package systemd provides utility functions for systemd.
|
||||
package systemd
|
29
vendor/go.etcd.io/etcd/pkg/systemd/journal.go
generated
vendored
Normal file
29
vendor/go.etcd.io/etcd/pkg/systemd/journal.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package systemd
|
||||
|
||||
import "net"
|
||||
|
||||
// DialJournal returns no error if the process can dial journal socket.
|
||||
// Returns an error if the dial failed, which indicates journald is not available
|
||||
// (e.g. when running embedded etcd inside a container).
|
||||
// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
|
||||
func DialJournal() error {
|
||||
conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
|
||||
if conn != nil {
|
||||
defer conn.Close()
|
||||
}
|
||||
return err
|
||||
}
|
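Editor-added usage sketch: DialJournal is the probe NewJournalWriter relies on to decide whether journald output is viable.

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/systemd"
)

func main() {
	// A nil error means the journald socket is reachable from this process.
	if err := systemd.DialJournal(); err != nil {
		fmt.Println("journald not available, keeping plain stderr logging:", err)
		return
	}
	fmt.Println("journald socket reachable; journal output can be enabled")
}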
17
vendor/go.etcd.io/etcd/pkg/types/doc.go
generated
vendored
Normal file
17
vendor/go.etcd.io/etcd/pkg/types/doc.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package types declares various data types and implements type-checking
|
||||
// functions.
|
||||
package types
|
39
vendor/go.etcd.io/etcd/pkg/types/id.go
generated
vendored
Normal file
39
vendor/go.etcd.io/etcd/pkg/types/id.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import "strconv"
|
||||
|
||||
// ID represents a generic identifier which is canonically
|
||||
// stored as a uint64 but is typically represented as a
|
||||
// base-16 string for input/output
|
||||
type ID uint64
|
||||
|
||||
func (i ID) String() string {
|
||||
return strconv.FormatUint(uint64(i), 16)
|
||||
}
|
||||
|
||||
// IDFromString attempts to create an ID from a base-16 string.
|
||||
func IDFromString(s string) (ID, error) {
|
||||
i, err := strconv.ParseUint(s, 16, 64)
|
||||
return ID(i), err
|
||||
}
|
||||
|
||||
// IDSlice implements the sort interface
|
||||
type IDSlice []ID
|
||||
|
||||
func (p IDSlice) Len() int { return len(p) }
|
||||
func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
|
||||
func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
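Editor-added sketch of the base-16 round trip and of the numeric ordering that IDSlice provides.

package main

import (
	"fmt"
	"sort"

	"go.etcd.io/etcd/pkg/types"
)

func main() {
	id := types.ID(0x8e9e05c52164694d)
	s := id.String() // "8e9e05c52164694d": lower-case hex, no 0x prefix
	back, err := types.IDFromString(s)
	if err != nil || back != id {
		panic("ID round trip failed")
	}

	// IDSlice sorts numerically, not lexically.
	ids := types.IDSlice{3, 0x10, 2}
	sort.Sort(ids)
	fmt.Println(ids) // [2 3 10], each element rendered as hex by ID.String
}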
195
vendor/go.etcd.io/etcd/pkg/types/set.go
generated
vendored
Normal file
195
vendor/go.etcd.io/etcd/pkg/types/set.go
generated
vendored
Normal file
@ -0,0 +1,195 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Set interface {
|
||||
Add(string)
|
||||
Remove(string)
|
||||
Contains(string) bool
|
||||
Equals(Set) bool
|
||||
Length() int
|
||||
Values() []string
|
||||
Copy() Set
|
||||
Sub(Set) Set
|
||||
}
|
||||
|
||||
func NewUnsafeSet(values ...string) *unsafeSet {
|
||||
set := &unsafeSet{make(map[string]struct{})}
|
||||
for _, v := range values {
|
||||
set.Add(v)
|
||||
}
|
||||
return set
|
||||
}
|
||||
|
||||
func NewThreadsafeSet(values ...string) *tsafeSet {
|
||||
us := NewUnsafeSet(values...)
|
||||
return &tsafeSet{us, sync.RWMutex{}}
|
||||
}
|
||||
|
||||
type unsafeSet struct {
|
||||
d map[string]struct{}
|
||||
}
|
||||
|
||||
// Add adds a new value to the set (no-op if the value is already present)
|
||||
func (us *unsafeSet) Add(value string) {
|
||||
us.d[value] = struct{}{}
|
||||
}
|
||||
|
||||
// Remove removes the given value from the set
|
||||
func (us *unsafeSet) Remove(value string) {
|
||||
delete(us.d, value)
|
||||
}
|
||||
|
||||
// Contains returns whether the set contains the given value
|
||||
func (us *unsafeSet) Contains(value string) (exists bool) {
|
||||
_, exists = us.d[value]
|
||||
return exists
|
||||
}
|
||||
|
||||
// ContainsAll returns whether the set contains all given values
|
||||
func (us *unsafeSet) ContainsAll(values []string) bool {
|
||||
for _, s := range values {
|
||||
if !us.Contains(s) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equals returns whether the contents of two sets are identical
|
||||
func (us *unsafeSet) Equals(other Set) bool {
|
||||
v1 := sort.StringSlice(us.Values())
|
||||
v2 := sort.StringSlice(other.Values())
|
||||
v1.Sort()
|
||||
v2.Sort()
|
||||
return reflect.DeepEqual(v1, v2)
|
||||
}
|
||||
|
||||
// Length returns the number of elements in the set
|
||||
func (us *unsafeSet) Length() int {
|
||||
return len(us.d)
|
||||
}
|
||||
|
||||
// Values returns the values of the Set in an unspecified order.
|
||||
func (us *unsafeSet) Values() (values []string) {
|
||||
values = make([]string, 0)
|
||||
for val := range us.d {
|
||||
values = append(values, val)
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// Copy creates a new Set containing the values of the first
|
||||
func (us *unsafeSet) Copy() Set {
|
||||
cp := NewUnsafeSet()
|
||||
for val := range us.d {
|
||||
cp.Add(val)
|
||||
}
|
||||
|
||||
return cp
|
||||
}
|
||||
|
||||
// Sub removes all elements in other from the set
|
||||
func (us *unsafeSet) Sub(other Set) Set {
|
||||
oValues := other.Values()
|
||||
result := us.Copy().(*unsafeSet)
|
||||
|
||||
for _, val := range oValues {
|
||||
if _, ok := result.d[val]; !ok {
|
||||
continue
|
||||
}
|
||||
delete(result.d, val)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
type tsafeSet struct {
|
||||
us *unsafeSet
|
||||
m sync.RWMutex
|
||||
}
|
||||
|
||||
func (ts *tsafeSet) Add(value string) {
|
||||
ts.m.Lock()
|
||||
defer ts.m.Unlock()
|
||||
ts.us.Add(value)
|
||||
}
|
||||
|
||||
func (ts *tsafeSet) Remove(value string) {
|
||||
ts.m.Lock()
|
||||
defer ts.m.Unlock()
|
||||
ts.us.Remove(value)
|
||||
}
|
||||
|
||||
func (ts *tsafeSet) Contains(value string) (exists bool) {
|
||||
ts.m.RLock()
|
||||
defer ts.m.RUnlock()
|
||||
return ts.us.Contains(value)
|
||||
}
|
||||
|
||||
func (ts *tsafeSet) Equals(other Set) bool {
|
||||
ts.m.RLock()
|
||||
defer ts.m.RUnlock()
|
||||
|
||||
// If ts and other represent the same variable, avoid calling
|
||||
// ts.us.Equals(other), to avoid double RLock bug
|
||||
if _other, ok := other.(*tsafeSet); ok {
|
||||
if _other == ts {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return ts.us.Equals(other)
|
||||
}
|
||||
|
||||
func (ts *tsafeSet) Length() int {
|
||||
ts.m.RLock()
|
||||
defer ts.m.RUnlock()
|
||||
return ts.us.Length()
|
||||
}
|
||||
|
||||
func (ts *tsafeSet) Values() (values []string) {
|
||||
ts.m.RLock()
|
||||
defer ts.m.RUnlock()
|
||||
return ts.us.Values()
|
||||
}
|
||||
|
||||
func (ts *tsafeSet) Copy() Set {
|
||||
ts.m.RLock()
|
||||
defer ts.m.RUnlock()
|
||||
usResult := ts.us.Copy().(*unsafeSet)
|
||||
return &tsafeSet{usResult, sync.RWMutex{}}
|
||||
}
|
||||
|
||||
func (ts *tsafeSet) Sub(other Set) Set {
|
||||
ts.m.RLock()
|
||||
defer ts.m.RUnlock()
|
||||
|
||||
// If ts and other represent the same variable, avoid calling
|
||||
// ts.us.Sub(other), to avoid double RLock bug
|
||||
if _other, ok := other.(*tsafeSet); ok {
|
||||
if _other == ts {
|
||||
usResult := NewUnsafeSet()
|
||||
return &tsafeSet{usResult, sync.RWMutex{}}
|
||||
}
|
||||
}
|
||||
usResult := ts.us.Sub(other).(*unsafeSet)
|
||||
return &tsafeSet{usResult, sync.RWMutex{}}
|
||||
}
|
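Editor-added sketch contrasting the two constructors: NewUnsafeSet is for single-goroutine use, while NewThreadsafeSet guards every operation with the RWMutex above and can be shared across goroutines.

package main

import (
	"fmt"
	"sync"

	"go.etcd.io/etcd/pkg/types"
)

func main() {
	peers := types.NewThreadsafeSet("node1", "node2")

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			peers.Add("node3") // Add is idempotent; concurrent calls are safe here
		}()
	}
	wg.Wait()

	fmt.Println(peers.Length())          // 3
	fmt.Println(peers.Contains("node2")) // true
	// Sub returns the elements of peers that are not in the other set.
	fmt.Println(peers.Sub(types.NewThreadsafeSet("node1")).Values())
}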
22
vendor/go.etcd.io/etcd/pkg/types/slice.go
generated
vendored
Normal file
22
vendor/go.etcd.io/etcd/pkg/types/slice.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
// Uint64Slice implements sort interface
|
||||
type Uint64Slice []uint64
|
||||
|
||||
func (p Uint64Slice) Len() int { return len(p) }
|
||||
func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
82
vendor/go.etcd.io/etcd/pkg/types/urls.go
generated
vendored
Normal file
82
vendor/go.etcd.io/etcd/pkg/types/urls.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type URLs []url.URL
|
||||
|
||||
func NewURLs(strs []string) (URLs, error) {
|
||||
all := make([]url.URL, len(strs))
|
||||
if len(all) == 0 {
|
||||
return nil, errors.New("no valid URLs given")
|
||||
}
|
||||
for i, in := range strs {
|
||||
in = strings.TrimSpace(in)
|
||||
u, err := url.Parse(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
|
||||
return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
|
||||
}
|
||||
if _, _, err := net.SplitHostPort(u.Host); err != nil {
|
||||
return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
|
||||
}
|
||||
if u.Path != "" {
|
||||
return nil, fmt.Errorf("URL must not contain a path: %s", in)
|
||||
}
|
||||
all[i] = *u
|
||||
}
|
||||
us := URLs(all)
|
||||
us.Sort()
|
||||
|
||||
return us, nil
|
||||
}
|
||||
|
||||
func MustNewURLs(strs []string) URLs {
|
||||
urls, err := NewURLs(strs)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return urls
|
||||
}
|
||||
|
||||
func (us URLs) String() string {
|
||||
return strings.Join(us.StringSlice(), ",")
|
||||
}
|
||||
|
||||
func (us *URLs) Sort() {
|
||||
sort.Sort(us)
|
||||
}
|
||||
func (us URLs) Len() int { return len(us) }
|
||||
func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
|
||||
func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] }
|
||||
|
||||
func (us URLs) StringSlice() []string {
|
||||
out := make([]string, len(us))
|
||||
for i := range us {
|
||||
out[i] = us[i].String()
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
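Editor-added sketch of the validation NewURLs performs: only http/https/unix/unixs schemes, a mandatory host:port, and an empty path; the result comes back sorted.

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/types"
)

func main() {
	us, err := types.NewURLs([]string{
		"https://10.0.0.1:2380",
		"http://10.0.0.2:2380",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(us.String()) // sorted: "http://10.0.0.2:2380,https://10.0.0.1:2380"

	// Rejected: no port (the host:port check fires before the path check).
	if _, err := types.NewURLs([]string{"http://10.0.0.3/member"}); err != nil {
		fmt.Println("rejected:", err)
	}
}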
107
vendor/go.etcd.io/etcd/pkg/types/urlsmap.go
generated
vendored
Normal file
107
vendor/go.etcd.io/etcd/pkg/types/urlsmap.go
generated
vendored
Normal file
@ -0,0 +1,107 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// URLsMap is a map from a name to its URLs.
|
||||
type URLsMap map[string]URLs
|
||||
|
||||
// NewURLsMap returns a URLsMap instantiated from the given string,
|
||||
// which consists of discovery-formatted names-to-URLs, like:
|
||||
// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
|
||||
func NewURLsMap(s string) (URLsMap, error) {
|
||||
m := parse(s)
|
||||
|
||||
cl := URLsMap{}
|
||||
for name, urls := range m {
|
||||
us, err := NewURLs(urls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cl[name] = us
|
||||
}
|
||||
return cl, nil
|
||||
}
|
||||
|
||||
// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The
|
||||
// string values in the map can be multiple values separated by the sep string.
|
||||
func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
|
||||
var err error
|
||||
um := URLsMap{}
|
||||
for k, v := range m {
|
||||
um[k], err = NewURLs(strings.Split(v, sep))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return um, nil
|
||||
}
|
||||
|
||||
// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
|
||||
func (c URLsMap) String() string {
|
||||
var pairs []string
|
||||
for name, urls := range c {
|
||||
for _, url := range urls {
|
||||
pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
|
||||
}
|
||||
}
|
||||
sort.Strings(pairs)
|
||||
return strings.Join(pairs, ",")
|
||||
}
|
||||
|
||||
// URLs returns a list of all URLs.
|
||||
// The returned list is sorted in ascending lexicographical order.
|
||||
func (c URLsMap) URLs() []string {
|
||||
var urls []string
|
||||
for _, us := range c {
|
||||
for _, u := range us {
|
||||
urls = append(urls, u.String())
|
||||
}
|
||||
}
|
||||
sort.Strings(urls)
|
||||
return urls
|
||||
}
|
||||
|
||||
// Len returns the size of URLsMap.
|
||||
func (c URLsMap) Len() int {
|
||||
return len(c)
|
||||
}
|
||||
|
||||
// parse parses the given string and returns a map listing the values specified for each key.
|
||||
func parse(s string) map[string][]string {
|
||||
m := make(map[string][]string)
|
||||
for s != "" {
|
||||
key := s
|
||||
if i := strings.IndexAny(key, ","); i >= 0 {
|
||||
key, s = key[:i], key[i+1:]
|
||||
} else {
|
||||
s = ""
|
||||
}
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
value := ""
|
||||
if i := strings.Index(key, "="); i >= 0 {
|
||||
key, value = key[:i], key[i+1:]
|
||||
}
|
||||
m[key] = append(m[key], value)
|
||||
}
|
||||
return m
|
||||
}
|
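Editor-added sketch of parsing a discovery-formatted cluster string, including a name that appears twice and therefore maps to two URLs.

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/types"
)

func main() {
	m, err := types.NewURLsMap(
		"infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380,infra1=http://10.0.0.3:2380")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Len())                   // 2 distinct names
	fmt.Println(m["infra1"].StringSlice()) // both URLs registered under infra1
	fmt.Println(m.String())                // canonical, sorted name=URL pairs
}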
80
vendor/go.etcd.io/etcd/raft/bootstrap.go
generated
vendored
Normal file
80
vendor/go.etcd.io/etcd/raft/bootstrap.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raft
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
pb "go.etcd.io/etcd/raft/raftpb"
|
||||
)
|
||||
|
||||
// Bootstrap initializes the RawNode for first use by appending configuration
|
||||
// changes for the supplied peers. This method returns an error if the Storage
|
||||
// is nonempty.
|
||||
//
|
||||
// It is recommended that instead of calling this method, applications bootstrap
|
||||
// their state manually by setting up a Storage that has a first index > 1 and
|
||||
// which stores the desired ConfState as its InitialState.
|
||||
func (rn *RawNode) Bootstrap(peers []Peer) error {
|
||||
if len(peers) == 0 {
|
||||
return errors.New("must provide at least one peer to Bootstrap")
|
||||
}
|
||||
lastIndex, err := rn.raft.raftLog.storage.LastIndex()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if lastIndex != 0 {
|
||||
return errors.New("can't bootstrap a nonempty Storage")
|
||||
}
|
||||
|
||||
// We've faked out initial entries above, but nothing has been
|
||||
// persisted. Start with an empty HardState (thus the first Ready will
|
||||
// emit a HardState update for the app to persist).
|
||||
rn.prevHardSt = emptyState
|
||||
|
||||
// TODO(tbg): remove StartNode and give the application the right tools to
|
||||
// bootstrap the initial membership in a cleaner way.
|
||||
rn.raft.becomeFollower(1, None)
|
||||
ents := make([]pb.Entry, len(peers))
|
||||
for i, peer := range peers {
|
||||
cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
|
||||
data, err := cc.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
|
||||
}
|
||||
rn.raft.raftLog.append(ents...)
|
||||
|
||||
// Now apply them, mainly so that the application can call Campaign
|
||||
// immediately after StartNode in tests. Note that these nodes will
|
||||
// be added to raft twice: here and when the application's Ready
|
||||
// loop calls ApplyConfChange. The calls to addNode must come after
|
||||
// all calls to raftLog.append so progress.next is set after these
|
||||
// bootstrapping entries (it is an error if we try to append these
|
||||
// entries since they have already been committed).
|
||||
// We do not set raftLog.applied so the application will be able
|
||||
// to observe all conf changes via Ready.CommittedEntries.
|
||||
//
|
||||
// TODO(bdarnell): These entries are still unstable; do we need to preserve
|
||||
// the invariant that committed < unstable?
|
||||
rn.raft.raftLog.committed = uint64(len(ents))
|
||||
for _, peer := range peers {
|
||||
rn.raft.applyConfChange(pb.ConfChange{NodeID: peer.ID, Type: pb.ConfChangeAddNode}.AsV2())
|
||||
}
|
||||
return nil
|
||||
}
|
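As the comment above recommends, applications can skip Bootstrap entirely by seeding Storage so that its first index is greater than one and its InitialState already reports the desired ConfState. An editor-added sketch of that path, reusing the Config fields shown in the package documentation; the member IDs and tuning values are illustrative only.

package main

import (
	"go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

func main() {
	storage := raft.NewMemoryStorage()
	// The snapshot puts the first index at 2 (> 1) and makes InitialState report
	// the three voters, so no in-band ConfChange entries are needed.
	if err := storage.ApplySnapshot(pb.Snapshot{
		Metadata: pb.SnapshotMetadata{
			Index:     1,
			Term:      1,
			ConfState: pb.ConfState{Voters: []uint64{1, 2, 3}},
		},
	}); err != nil {
		panic(err)
	}

	c := &raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   4096,
		MaxInflightMsgs: 256,
	}
	n := raft.RestartNode(c)
	defer n.Stop()
}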
425
vendor/go.etcd.io/etcd/raft/confchange/confchange.go
generated
vendored
Normal file
425
vendor/go.etcd.io/etcd/raft/confchange/confchange.go
generated
vendored
Normal file
@ -0,0 +1,425 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package confchange
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"go.etcd.io/etcd/raft/quorum"
|
||||
pb "go.etcd.io/etcd/raft/raftpb"
|
||||
"go.etcd.io/etcd/raft/tracker"
|
||||
)
|
||||
|
||||
// Changer facilitates configuration changes. It exposes methods to handle
|
||||
// simple and joint consensus while performing the proper validation that allows
|
||||
// refusing invalid configuration changes before they affect the active
|
||||
// configuration.
|
||||
type Changer struct {
|
||||
Tracker tracker.ProgressTracker
|
||||
LastIndex uint64
|
||||
}
|
||||
|
||||
// EnterJoint verifies that the outgoing (=right) majority config of the joint
|
||||
// config is empty and initializes it with a copy of the incoming (=left)
|
||||
// majority config. That is, it transitions from
|
||||
//
|
||||
// (1 2 3)&&()
|
||||
// to
|
||||
// (1 2 3)&&(1 2 3).
|
||||
//
|
||||
// The supplied changes are then applied to the incoming majority config,
|
||||
// resulting in a joint configuration that in terms of the Raft thesis[1]
|
||||
// (Section 4.3) corresponds to `C_{new,old}`.
|
||||
//
|
||||
// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
|
||||
func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) {
|
||||
cfg, prs, err := c.checkAndCopy()
|
||||
if err != nil {
|
||||
return c.err(err)
|
||||
}
|
||||
if joint(cfg) {
|
||||
err := errors.New("config is already joint")
|
||||
return c.err(err)
|
||||
}
|
||||
if len(incoming(cfg.Voters)) == 0 {
|
||||
// We allow adding nodes to an empty config for convenience (testing and
|
||||
// bootstrap), but you can't enter a joint state.
|
||||
err := errors.New("can't make a zero-voter config joint")
|
||||
return c.err(err)
|
||||
}
|
||||
// Clear the outgoing config.
|
||||
*outgoingPtr(&cfg.Voters) = quorum.MajorityConfig{}
|
||||
// Copy incoming to outgoing.
|
||||
for id := range incoming(cfg.Voters) {
|
||||
outgoing(cfg.Voters)[id] = struct{}{}
|
||||
}
|
||||
|
||||
if err := c.apply(&cfg, prs, ccs...); err != nil {
|
||||
return c.err(err)
|
||||
}
|
||||
cfg.AutoLeave = autoLeave
|
||||
return checkAndReturn(cfg, prs)
|
||||
}
|
||||
|
||||
// LeaveJoint transitions out of a joint configuration. It is an error to call
|
||||
// this method if the configuration is not joint, i.e. if the outgoing majority
|
||||
// config Voters[1] is empty.
|
||||
//
|
||||
// The outgoing majority config of the joint configuration will be removed,
|
||||
// that is, the incoming config is promoted as the sole decision maker. In the
|
||||
// notation of the Raft thesis[1] (Section 4.3), this method transitions from
|
||||
// `C_{new,old}` into `C_new`.
|
||||
//
|
||||
// At the same time, any staged learners (LearnersNext) the addition of which
|
||||
// was held back by an overlapping voter in the former outgoing config will be
|
||||
// inserted into Learners.
|
||||
//
|
||||
// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
|
||||
func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) {
|
||||
cfg, prs, err := c.checkAndCopy()
|
||||
if err != nil {
|
||||
return c.err(err)
|
||||
}
|
||||
if !joint(cfg) {
|
||||
err := errors.New("can't leave a non-joint config")
|
||||
return c.err(err)
|
||||
}
|
||||
if len(outgoing(cfg.Voters)) == 0 {
|
||||
err := fmt.Errorf("configuration is not joint: %v", cfg)
|
||||
return c.err(err)
|
||||
}
|
||||
for id := range cfg.LearnersNext {
|
||||
nilAwareAdd(&cfg.Learners, id)
|
||||
prs[id].IsLearner = true
|
||||
}
|
||||
cfg.LearnersNext = nil
|
||||
|
||||
for id := range outgoing(cfg.Voters) {
|
||||
_, isVoter := incoming(cfg.Voters)[id]
|
||||
_, isLearner := cfg.Learners[id]
|
||||
|
||||
if !isVoter && !isLearner {
|
||||
delete(prs, id)
|
||||
}
|
||||
}
|
||||
*outgoingPtr(&cfg.Voters) = nil
|
||||
cfg.AutoLeave = false
|
||||
|
||||
return checkAndReturn(cfg, prs)
|
||||
}
|
||||
|
||||
// Simple carries out a series of configuration changes that (in aggregate)
|
||||
// mutates the incoming majority config Voters[0] by at most one. This method
|
||||
// will return an error if that is not the case, if the resulting quorum is
|
||||
// zero, or if the configuration is in a joint state (i.e. if there is an
|
||||
// outgoing configuration).
|
||||
func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) {
|
||||
cfg, prs, err := c.checkAndCopy()
|
||||
if err != nil {
|
||||
return c.err(err)
|
||||
}
|
||||
if joint(cfg) {
|
||||
err := errors.New("can't apply simple config change in joint config")
|
||||
return c.err(err)
|
||||
}
|
||||
if err := c.apply(&cfg, prs, ccs...); err != nil {
|
||||
return c.err(err)
|
||||
}
|
||||
if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 {
|
||||
return tracker.Config{}, nil, errors.New("more than one voter changed without entering joint config")
|
||||
}
|
||||
if err := checkInvariants(cfg, prs); err != nil {
|
||||
return tracker.Config{}, tracker.ProgressMap{}, err // surface the invariant violation instead of dropping it
|
||||
}
|
||||
|
||||
return checkAndReturn(cfg, prs)
|
||||
}
|
||||
|
||||
// apply a change to the configuration. By convention, changes to voters are
|
||||
// always made to the incoming majority config Voters[0]. Voters[1] is either
|
||||
// empty or preserves the outgoing majority configuration while in a joint state.
|
||||
func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error {
|
||||
for _, cc := range ccs {
|
||||
if cc.NodeID == 0 {
|
||||
// etcd replaces the NodeID with zero if it decides (downstream of
|
||||
// raft) to not apply a change, so we have to have explicit code
|
||||
// here to ignore these.
|
||||
continue
|
||||
}
|
||||
switch cc.Type {
|
||||
case pb.ConfChangeAddNode:
|
||||
c.makeVoter(cfg, prs, cc.NodeID)
|
||||
case pb.ConfChangeAddLearnerNode:
|
||||
c.makeLearner(cfg, prs, cc.NodeID)
|
||||
case pb.ConfChangeRemoveNode:
|
||||
c.remove(cfg, prs, cc.NodeID)
|
||||
case pb.ConfChangeUpdateNode:
|
||||
default:
|
||||
return fmt.Errorf("unexpected conf type %d", cc.Type)
|
||||
}
|
||||
}
|
||||
if len(incoming(cfg.Voters)) == 0 {
|
||||
return errors.New("removed all voters")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeVoter adds or promotes the given ID to be a voter in the incoming
|
||||
// majority config.
|
||||
func (c Changer) makeVoter(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) {
|
||||
pr := prs[id]
|
||||
if pr == nil {
|
||||
c.initProgress(cfg, prs, id, false /* isLearner */)
|
||||
return
|
||||
}
|
||||
|
||||
pr.IsLearner = false
|
||||
nilAwareDelete(&cfg.Learners, id)
|
||||
nilAwareDelete(&cfg.LearnersNext, id)
|
||||
incoming(cfg.Voters)[id] = struct{}{}
|
||||
return
|
||||
}
|
||||
|
||||
// makeLearner makes the given ID a learner or stages it to be a learner once
|
||||
// an active joint configuration is exited.
|
||||
//
|
||||
// The former happens when the peer is not a part of the outgoing config, in
|
||||
// which case we either add a new learner or demote a voter in the incoming
|
||||
// config.
|
||||
//
|
||||
// The latter case occurs when the configuration is joint and the peer is a
|
||||
// voter in the outgoing config. In that case, we do not want to add the peer
|
||||
// as a learner because then we'd have to track a peer as a voter and learner
|
||||
// simultaneously. Instead, we add the learner to LearnersNext, so that it will
|
||||
// be added to Learners the moment the outgoing config is removed by
|
||||
// LeaveJoint().
|
||||
func (c Changer) makeLearner(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) {
|
||||
pr := prs[id]
|
||||
if pr == nil {
|
||||
c.initProgress(cfg, prs, id, true /* isLearner */)
|
||||
return
|
||||
}
|
||||
if pr.IsLearner {
|
||||
return
|
||||
}
|
||||
// Remove any existing voter in the incoming config...
|
||||
c.remove(cfg, prs, id)
|
||||
// ... but save the Progress.
|
||||
prs[id] = pr
|
||||
// Use LearnersNext if we can't add the learner to Learners directly, i.e.
|
||||
// if the peer is still tracked as a voter in the outgoing config. It will
|
||||
// be turned into a learner in LeaveJoint().
|
||||
//
|
||||
// Otherwise, add a regular learner right away.
|
||||
if _, onRight := outgoing(cfg.Voters)[id]; onRight {
|
||||
nilAwareAdd(&cfg.LearnersNext, id)
|
||||
} else {
|
||||
pr.IsLearner = true
|
||||
nilAwareAdd(&cfg.Learners, id)
|
||||
}
|
||||
}
|
||||
|
||||
// remove this peer as a voter or learner from the incoming config.
|
||||
func (c Changer) remove(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) {
|
||||
if _, ok := prs[id]; !ok {
|
||||
return
|
||||
}
|
||||
|
||||
delete(incoming(cfg.Voters), id)
|
||||
nilAwareDelete(&cfg.Learners, id)
|
||||
nilAwareDelete(&cfg.LearnersNext, id)
|
||||
|
||||
// If the peer is still a voter in the outgoing config, keep the Progress.
|
||||
if _, onRight := outgoing(cfg.Voters)[id]; !onRight {
|
||||
delete(prs, id)
|
||||
}
|
||||
}
|
||||
|
||||
// initProgress initializes a new progress for the given node or learner.
|
||||
func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id uint64, isLearner bool) {
|
||||
if !isLearner {
|
||||
incoming(cfg.Voters)[id] = struct{}{}
|
||||
} else {
|
||||
nilAwareAdd(&cfg.Learners, id)
|
||||
}
|
||||
prs[id] = &tracker.Progress{
|
||||
// Initializing the Progress with the last index means that the follower
|
||||
// can be probed (with the last index).
|
||||
//
|
||||
// TODO(tbg): seems awfully optimistic. Using the first index would be
|
||||
// better. The general expectation here is that the follower has no log
|
||||
// at all (and will thus likely need a snapshot), though the app may
|
||||
// have applied a snapshot out of band before adding the replica (thus
|
||||
// making the first index the better choice).
|
||||
Next: c.LastIndex,
|
||||
Match: 0,
|
||||
Inflights: tracker.NewInflights(c.Tracker.MaxInflight),
|
||||
IsLearner: isLearner,
|
||||
// When a node is first added, we should mark it as recently active.
|
||||
// Otherwise, CheckQuorum may cause us to step down if it is invoked
|
||||
// before the added node has had a chance to communicate with us.
|
||||
RecentActive: true,
|
||||
}
|
||||
}
|
||||
|
||||
// checkInvariants makes sure that the config and progress are compatible with
|
||||
// each other. This is used to check both what the Changer is initialized with,
|
||||
// as well as what it returns.
|
||||
func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error {
|
||||
// NB: intentionally allow the empty config. In production we'll never see a
|
||||
// non-empty config (we prevent it from being created) but we will need to
|
||||
// be able to *create* an initial config, for example during bootstrap (or
|
||||
// during tests). Instead of having to hand-code this, we allow
|
||||
// transitioning from an empty config into any other legal and non-empty
|
||||
// config.
|
||||
for _, ids := range []map[uint64]struct{}{
|
||||
cfg.Voters.IDs(),
|
||||
cfg.Learners,
|
||||
cfg.LearnersNext,
|
||||
} {
|
||||
for id := range ids {
|
||||
if _, ok := prs[id]; !ok {
|
||||
return fmt.Errorf("no progress for %d", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Any staged learner was staged because it could not be directly added due
|
||||
// to a conflicting voter in the outgoing config.
|
||||
for id := range cfg.LearnersNext {
|
||||
if _, ok := outgoing(cfg.Voters)[id]; !ok {
|
||||
return fmt.Errorf("%d is in LearnersNext, but not Voters[1]", id)
|
||||
}
|
||||
if prs[id].IsLearner {
|
||||
return fmt.Errorf("%d is in LearnersNext, but is already marked as learner", id)
|
||||
}
|
||||
}
|
||||
// Conversely, Learners and Voters don't intersect at all.
|
||||
for id := range cfg.Learners {
|
||||
if _, ok := outgoing(cfg.Voters)[id]; ok {
|
||||
return fmt.Errorf("%d is in Learners and Voters[1]", id)
|
||||
}
|
||||
if _, ok := incoming(cfg.Voters)[id]; ok {
|
||||
return fmt.Errorf("%d is in Learners and Voters[0]", id)
|
||||
}
|
||||
if !prs[id].IsLearner {
|
||||
return fmt.Errorf("%d is in Learners, but is not marked as learner", id)
|
||||
}
|
||||
}
|
||||
|
||||
if !joint(cfg) {
|
||||
// We enforce that empty maps are nil instead of zero.
|
||||
if outgoing(cfg.Voters) != nil {
|
||||
return fmt.Errorf("Voters[1] must be nil when not joint")
|
||||
}
|
||||
if cfg.LearnersNext != nil {
|
||||
return fmt.Errorf("LearnersNext must be nil when not joint")
|
||||
}
|
||||
if cfg.AutoLeave {
|
||||
return fmt.Errorf("AutoLeave must be false when not joint")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkAndCopy copies the tracker's config and progress map (deeply enough for
|
||||
// the purposes of the Changer) and returns those copies. It returns an error
|
||||
// if checkInvariants does.
|
||||
func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) {
|
||||
cfg := c.Tracker.Config.Clone()
|
||||
prs := tracker.ProgressMap{}
|
||||
|
||||
for id, pr := range c.Tracker.Progress {
|
||||
// A shallow copy is enough because we only mutate the Learner field.
|
||||
ppr := *pr
|
||||
prs[id] = &ppr
|
||||
}
|
||||
return checkAndReturn(cfg, prs)
|
||||
}
|
||||
|
||||
// checkAndReturn calls checkInvariants on the input and returns either the
|
||||
// resulting error or the input.
|
||||
func checkAndReturn(cfg tracker.Config, prs tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) {
|
||||
if err := checkInvariants(cfg, prs); err != nil {
|
||||
return tracker.Config{}, tracker.ProgressMap{}, err
|
||||
}
|
||||
return cfg, prs, nil
|
||||
}
|
||||
|
||||
// err returns zero values and an error.
|
||||
func (c Changer) err(err error) (tracker.Config, tracker.ProgressMap, error) {
|
||||
return tracker.Config{}, nil, err
|
||||
}
|
||||
|
||||
// nilAwareAdd populates a map entry, creating the map if necessary.
|
||||
func nilAwareAdd(m *map[uint64]struct{}, id uint64) {
|
||||
if *m == nil {
|
||||
*m = map[uint64]struct{}{}
|
||||
}
|
||||
(*m)[id] = struct{}{}
|
||||
}
|
||||
|
||||
// nilAwareDelete deletes from a map, nil'ing the map itself if it is empty after.
|
||||
func nilAwareDelete(m *map[uint64]struct{}, id uint64) {
|
||||
if *m == nil {
|
||||
return
|
||||
}
|
||||
delete(*m, id)
|
||||
if len(*m) == 0 {
|
||||
*m = nil
|
||||
}
|
||||
}
|
||||
|
||||
// symdiff returns the count of the symmetric difference between the sets of
|
||||
// uint64s, i.e. len((l - r) ∪ (r - l)).
|
||||
func symdiff(l, r map[uint64]struct{}) int {
|
||||
var n int
|
||||
pairs := [][2]quorum.MajorityConfig{
|
||||
{l, r}, // count elems in l but not in r
|
||||
{r, l}, // count elems in r but not in l
|
||||
}
|
||||
for _, p := range pairs {
|
||||
for id := range p[0] {
|
||||
if _, ok := p[1][id]; !ok {
|
||||
n++
|
||||
}
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func joint(cfg tracker.Config) bool {
|
||||
return len(outgoing(cfg.Voters)) > 0
|
||||
}
|
||||
|
||||
func incoming(voters quorum.JointConfig) quorum.MajorityConfig { return voters[0] }
|
||||
func outgoing(voters quorum.JointConfig) quorum.MajorityConfig { return voters[1] }
|
||||
func outgoingPtr(voters *quorum.JointConfig) *quorum.MajorityConfig { return &voters[1] }
|
||||
|
||||
// Describe prints the type and NodeID of the configuration changes as a
|
||||
// space-delimited string.
|
||||
func Describe(ccs ...pb.ConfChangeSingle) string {
|
||||
var buf strings.Builder
|
||||
for _, cc := range ccs {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
fmt.Fprintf(&buf, "%s(%d)", cc.Type, cc.NodeID)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
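Editor-added sketch of driving Changer by hand: a simple change seeds voter 1, EnterJoint adds voter 2 while keeping the old majority around, and LeaveJoint drops the outgoing config again. It assumes tracker.MakeProgressTracker is the usual way to obtain an empty ProgressTracker.

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/confchange"
	pb "go.etcd.io/etcd/raft/raftpb"
	"go.etcd.io/etcd/raft/tracker"
)

func main() {
	tr := tracker.MakeProgressTracker(256)
	chg := confchange.Changer{Tracker: tr, LastIndex: 1}

	// Simple change: add voter 1 to the (empty) incoming config.
	cfg, prs, err := chg.Simple(pb.ConfChangeSingle{Type: pb.ConfChangeAddNode, NodeID: 1})
	if err != nil {
		panic(err)
	}
	chg.Tracker.Config, chg.Tracker.Progress = cfg, prs

	// Enter a joint configuration that also adds voter 2: (1 2)&&(1).
	cfg, prs, err = chg.EnterJoint(true /* autoLeave */, pb.ConfChangeSingle{Type: pb.ConfChangeAddNode, NodeID: 2})
	if err != nil {
		panic(err)
	}
	chg.Tracker.Config, chg.Tracker.Progress = cfg, prs
	fmt.Println(cfg.Voters) // both majority configs are populated while joint

	// Leave the joint state; only the incoming config (1 2) remains.
	cfg, _, err = chg.LeaveJoint()
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Voters)
}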
155
vendor/go.etcd.io/etcd/raft/confchange/restore.go
generated
vendored
Normal file
155
vendor/go.etcd.io/etcd/raft/confchange/restore.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package confchange
|
||||
|
||||
import (
|
||||
pb "go.etcd.io/etcd/raft/raftpb"
|
||||
"go.etcd.io/etcd/raft/tracker"
|
||||
)
|
||||
|
||||
// toConfChangeSingle translates a conf state into 1) a slice of operations creating
|
||||
// first the config that will become the outgoing one, and then the incoming one, and
|
||||
// 2) another slice that, when applied to the config resulting from 1), represents the
|
||||
// ConfState.
|
||||
func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.ConfChangeSingle) {
|
||||
// Example to follow along this code:
|
||||
// voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4)
|
||||
//
|
||||
// This means that before entering the joint config, the configuration
|
||||
// had voters (1 2 4) and perhaps some learners that are already gone.
|
||||
// The new set of voters is (1 2 3), i.e. (1 2) were kept around, and (4 6)
|
||||
// are no longer voters; however 4 is poised to become a learner upon leaving
|
||||
// the joint state.
|
||||
// We can't tell whether 5 was a learner before entering the joint config,
|
||||
// but it doesn't matter (we'll pretend that it wasn't).
|
||||
//
|
||||
// The code below will construct
|
||||
// outgoing = add 1; add 2; add 4; add 6
|
||||
// incoming = remove 1; remove 2; remove 4; remove 6
|
||||
// add 1; add 2; add 3;
|
||||
// add-learner 5;
|
||||
// add-learner 4;
|
||||
//
|
||||
// So, when starting with an empty config, after applying 'outgoing' we have
|
||||
//
|
||||
// quorum=(1 2 4 6)
|
||||
//
|
||||
// From which we enter a joint state via 'incoming'
|
||||
//
|
||||
// quorum=(1 2 3)&&(1 2 4 6) learners=(5) learners_next=(4)
|
||||
//
|
||||
// as desired.
|
||||
|
||||
for _, id := range cs.VotersOutgoing {
|
||||
// If there are outgoing voters, first add them one by one so that the
|
||||
// (non-joint) config has them all.
|
||||
out = append(out, pb.ConfChangeSingle{
|
||||
Type: pb.ConfChangeAddNode,
|
||||
NodeID: id,
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// We're done constructing the outgoing slice, now on to the incoming one
|
||||
// (which will apply on top of the config created by the outgoing slice).
|
||||
|
||||
// First, we'll remove all of the outgoing voters.
|
||||
for _, id := range cs.VotersOutgoing {
|
||||
in = append(in, pb.ConfChangeSingle{
|
||||
Type: pb.ConfChangeRemoveNode,
|
||||
NodeID: id,
|
||||
})
|
||||
}
|
||||
// Then we'll add the incoming voters and learners.
|
||||
for _, id := range cs.Voters {
|
||||
in = append(in, pb.ConfChangeSingle{
|
||||
Type: pb.ConfChangeAddNode,
|
||||
NodeID: id,
|
||||
})
|
||||
}
|
||||
for _, id := range cs.Learners {
|
||||
in = append(in, pb.ConfChangeSingle{
|
||||
Type: pb.ConfChangeAddLearnerNode,
|
||||
NodeID: id,
|
||||
})
|
||||
}
|
||||
// Same for LearnersNext; these are nodes we want to be learners but which
|
||||
// are currently voters in the outgoing config.
|
||||
for _, id := range cs.LearnersNext {
|
||||
in = append(in, pb.ConfChangeSingle{
|
||||
Type: pb.ConfChangeAddLearnerNode,
|
||||
NodeID: id,
|
||||
})
|
||||
}
|
||||
return out, in
|
||||
}
|
||||
|
||||
func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) {
|
||||
for _, op := range ops {
|
||||
cfg, prs, err := op(chg)
|
||||
if err != nil {
|
||||
return tracker.Config{}, nil, err
|
||||
}
|
||||
chg.Tracker.Config = cfg
|
||||
chg.Tracker.Progress = prs
|
||||
}
|
||||
return chg.Tracker.Config, chg.Tracker.Progress, nil
|
||||
}
|
||||
|
||||
// Restore takes a Changer (which must represent an empty configuration), and
|
||||
// runs a sequence of changes enacting the configuration described in the
|
||||
// ConfState.
|
||||
//
|
||||
// TODO(tbg) it's silly that this takes a Changer. Unravel this by making sure
|
||||
// the Changer only needs a ProgressMap (not a whole Tracker) at which point
|
||||
// this can just take LastIndex and MaxInflight directly instead and cook up
|
||||
// the results from that alone.
|
||||
func Restore(chg Changer, cs pb.ConfState) (tracker.Config, tracker.ProgressMap, error) {
|
||||
outgoing, incoming := toConfChangeSingle(cs)
|
||||
|
||||
var ops []func(Changer) (tracker.Config, tracker.ProgressMap, error)
|
||||
|
||||
if len(outgoing) == 0 {
|
||||
// No outgoing config, so just apply the incoming changes one by one.
|
||||
for _, cc := range incoming {
|
||||
cc := cc // loop-local copy
|
||||
ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
|
||||
return chg.Simple(cc)
|
||||
})
|
||||
}
|
||||
} else {
|
||||
// The ConfState describes a joint configuration.
|
||||
//
|
||||
// First, apply all of the changes of the outgoing config one by one, so
|
||||
// that it temporarily becomes the incoming active config. For example,
|
||||
// if the config is (1 2 3)&(2 3 4), this will establish (2 3 4)&().
|
||||
for _, cc := range outgoing {
|
||||
cc := cc // loop-local copy
|
||||
ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
|
||||
return chg.Simple(cc)
|
||||
})
|
||||
}
|
||||
// Now enter the joint state, which rotates the above additions into the
|
||||
// outgoing config, and adds the incoming config in. Continuing the
|
||||
// example above, we'd get (1 2 3)&(2 3 4), i.e. the incoming operations
|
||||
// would be removing 2,3,4 and then adding in 1,2,3 while transitioning
|
||||
// into a joint state.
|
||||
ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
|
||||
return chg.EnterJoint(cs.AutoLeave, incoming...)
|
||||
})
|
||||
}
|
||||
|
||||
return chain(chg, ops...)
|
||||
}
|
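Editor-added sketch of Restore, which is how a configuration is rebuilt from a snapshot's ConfState; the Changer passed in must still hold an empty configuration, here obtained from tracker.MakeProgressTracker (assumed to be the standard constructor).

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/confchange"
	pb "go.etcd.io/etcd/raft/raftpb"
	"go.etcd.io/etcd/raft/tracker"
)

func main() {
	tr := tracker.MakeProgressTracker(256)
	cs := pb.ConfState{Voters: []uint64{1, 2, 3}, Learners: []uint64{4}}

	cfg, prs, err := confchange.Restore(confchange.Changer{Tracker: tr, LastIndex: 10}, cs)
	if err != nil {
		panic(err)
	}
	tr.Config, tr.Progress = cfg, prs
	fmt.Println(tr.Voters)        // voters 1 2 3
	fmt.Println(len(tr.Learners)) // 1
	fmt.Println(prs[4].IsLearner) // true
}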
300
vendor/go.etcd.io/etcd/raft/doc.go
generated
vendored
Normal file
300
vendor/go.etcd.io/etcd/raft/doc.go
generated
vendored
Normal file
@ -0,0 +1,300 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package raft sends and receives messages in the Protocol Buffer format
|
||||
defined in the raftpb package.
|
||||
|
||||
Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
|
||||
The state machine is kept in sync through the use of a replicated log.
|
||||
For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
|
||||
(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout.
|
||||
|
||||
A simple example application, _raftexample_, is also available to help illustrate
|
||||
how to use this package in practice:
|
||||
https://github.com/etcd-io/etcd/tree/master/contrib/raftexample
|
||||
|
||||
Usage
|
||||
|
||||
The primary object in raft is a Node. You either start a Node from scratch
|
||||
using raft.StartNode or start a Node from some initial state using raft.RestartNode.
|
||||
|
||||
To start a node from scratch:
|
||||
|
||||
storage := raft.NewMemoryStorage()
|
||||
c := &Config{
|
||||
ID: 0x01,
|
||||
ElectionTick: 10,
|
||||
HeartbeatTick: 1,
|
||||
Storage: storage,
|
||||
MaxSizePerMsg: 4096,
|
||||
MaxInflightMsgs: 256,
|
||||
}
|
||||
n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
|
||||
|
||||
To restart a node from previous state:
|
||||
|
||||
storage := raft.NewMemoryStorage()
|
||||
|
||||
// recover the in-memory storage from persistent
|
||||
// snapshot, state and entries.
|
||||
storage.ApplySnapshot(snapshot)
|
||||
storage.SetHardState(state)
|
||||
storage.Append(entries)
|
||||
|
||||
c := &Config{
|
||||
ID: 0x01,
|
||||
ElectionTick: 10,
|
||||
HeartbeatTick: 1,
|
||||
Storage: storage,
|
||||
MaxSizePerMsg: 4096,
|
||||
MaxInflightMsgs: 256,
|
||||
}
|
||||
|
||||
// restart raft without peer information.
|
||||
// peer information is already included in the storage.
|
||||
n := raft.RestartNode(c)
|
||||
|
||||
Now that you are holding onto a Node you have a few responsibilities:
|
||||
|
||||
First, you must read from the Node.Ready() channel and process the updates
|
||||
it contains. These steps may be performed in parallel, except as noted in step
|
||||
2.
|
||||
|
||||
1. Write HardState, Entries, and Snapshot to persistent storage if they are
|
||||
not empty. Note that when writing an Entry with Index i, any
|
||||
previously-persisted entries with Index >= i must be discarded.
|
||||
|
||||
2. Send all Messages to the nodes named in the To field. It is important that
|
||||
no messages be sent until the latest HardState has been persisted to disk,
|
||||
and all Entries written by any previous Ready batch (Messages may be sent while
|
||||
entries from the same batch are being persisted). To reduce the I/O latency, an
|
||||
optimization can be applied to make leader write to disk in parallel with its
|
||||
followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
|
||||
MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
|
||||
large).
|
||||
|
||||
Note: Marshalling messages is not thread-safe; it is important that you
|
||||
make sure that no new entries are persisted while marshalling.
|
||||
The easiest way to achieve this is to serialize the messages directly inside
|
||||
your main raft loop.
|
||||
|
||||
3. Apply Snapshot (if any) and CommittedEntries to the state machine.
|
||||
If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
|
||||
to apply it to the node. The configuration change may be cancelled at this point
|
||||
by setting the NodeID field to zero before calling ApplyConfChange
|
||||
(but ApplyConfChange must be called one way or the other, and the decision to cancel
|
||||
must be based solely on the state machine and not external information such as
|
||||
the observed health of the node).
|
||||
|
||||
4. Call Node.Advance() to signal readiness for the next batch of updates.
|
||||
This may be done at any time after step 1, although all updates must be processed
|
||||
in the order they were returned by Ready.
|
||||
|
||||
Second, all persisted log entries must be made available via an
|
||||
implementation of the Storage interface. The provided MemoryStorage
|
||||
type can be used for this (if you repopulate its state upon a
|
||||
restart), or you can supply your own disk-backed implementation.
|
||||
|
||||
Third, when you receive a message from another node, pass it to Node.Step:
|
||||
|
||||
func recvRaftRPC(ctx context.Context, m raftpb.Message) {
|
||||
n.Step(ctx, m)
|
||||
}
|
||||
|
||||
Finally, you need to call Node.Tick() at regular intervals (probably
|
||||
via a time.Ticker). Raft has two important timeouts: heartbeat and the
|
||||
election timeout. However, internally to the raft package time is
|
||||
represented by an abstract "tick".
|
||||
|
||||
The total state machine handling loop will look something like this:
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-s.Ticker:
|
||||
n.Tick()
|
||||
case rd := <-s.Node.Ready():
|
||||
saveToStorage(rd.State, rd.Entries, rd.Snapshot)
|
||||
send(rd.Messages)
|
||||
if !raft.IsEmptySnap(rd.Snapshot) {
|
||||
processSnapshot(rd.Snapshot)
|
||||
}
|
||||
for _, entry := range rd.CommittedEntries {
|
||||
process(entry)
|
||||
if entry.Type == raftpb.EntryConfChange {
|
||||
var cc raftpb.ConfChange
|
||||
cc.Unmarshal(entry.Data)
|
||||
s.Node.ApplyConfChange(cc)
|
||||
}
|
||||
}
|
||||
s.Node.Advance()
|
||||
case <-s.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
To propose changes to the state machine from your node take your application
|
||||
data, serialize it into a byte slice and call:
|
||||
|
||||
n.Propose(ctx, data)
|
||||
|
||||
If the proposal is committed, data will appear in committed entries with type
|
||||
raftpb.EntryNormal. There is no guarantee that a proposed command will be
|
||||
committed; you may have to re-propose after a timeout.
|
||||
|
||||
To add or remove a node in a cluster, build ConfChange struct 'cc' and call:
|
||||
|
||||
n.ProposeConfChange(ctx, cc)
|
||||
|
||||
After config change is committed, some committed entry with type
|
||||
raftpb.EntryConfChange will be returned. You must apply it to node through:
|
||||
|
||||
var cc raftpb.ConfChange
|
||||
cc.Unmarshal(data)
|
||||
n.ApplyConfChange(cc)
|
||||
|
||||
Note: An ID represents a unique node in a cluster for all time. A
|
||||
given ID MUST be used only once even if the old node has been removed.
|
||||
This means that for example IP addresses make poor node IDs since they
|
||||
may be reused. Node IDs must be non-zero.
|
||||
|
||||
Implementation notes

This implementation is up to date with the final Raft thesis
(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our
implementation of the membership change protocol differs somewhat from
that described in chapter 4. The key invariant that membership changes
happen one node at a time is preserved, but in our implementation the
membership change takes effect when its entry is applied, not when it
is added to the log (so the entry is committed under the old
membership instead of the new). This is equivalent in terms of safety,
since the old and new configurations are guaranteed to overlap.

To ensure that we do not attempt to commit two membership changes at
once by matching log positions (which would be unsafe since they
should have different quorum requirements), we simply disallow any
proposed membership change while any uncommitted change appears in
the leader's log.

This approach introduces a problem when you try to remove a member
from a two-member cluster: If one of the members dies before the
other one receives the commit of the confchange entry, then the member
cannot be removed any more since the cluster cannot make progress.
For this reason it is highly recommended to use three or more nodes in
every cluster.

MessageType

Package raft sends and receives messages in Protocol Buffer format (defined
in the raftpb package). Each state (follower, candidate, leader) implements its
own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
advancing with the given raftpb.Message. Each step is determined by its
raftpb.MessageType. Note that every step is checked by one common method
'Step' that safety-checks the terms of the node and the incoming message to
prevent stale log entries:

'MsgHup' is used for election. If a node is a follower or candidate, the
'tick' function in the 'raft' struct is set as 'tickElection'. If a follower or
candidate has not received any heartbeat before the election timeout, it
passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
start a new election.

'MsgBeat' is an internal type that signals the leader to send a heartbeat of
the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
send periodic 'MsgHeartbeat' messages to its followers.

'MsgProp' proposes to append data to the log entries. This is a special
type used to redirect proposals to the leader. Therefore, the send method
overwrites raftpb.Message's term with its HardState's term to avoid attaching
its local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
method, the leader first calls the 'appendEntry' method to append entries
to its log, and then calls the 'bcastAppend' method to send those entries to
its peers. When passed to a candidate, 'MsgProp' is dropped. When passed to a
follower, 'MsgProp' is stored in the follower's mailbox (msgs) by the send
method. It is stored with the sender's ID and later forwarded to the leader by
the rafthttp package.

'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
messages. When 'MsgApp' is passed to a candidate's Step method, the candidate
reverts to follower, because it indicates that there is a valid leader sending
'MsgApp' messages. Candidates and followers respond to this message with a
'MsgAppResp' message.

'MsgAppResp' is the response to a log replication request ('MsgApp'). When
'MsgApp' is passed to a candidate's or follower's Step method, it responds by
calling the 'handleAppendEntries' method, which sends 'MsgAppResp' to the raft
mailbox.

'MsgVote' requests votes for an election. When a node is a follower or
candidate and 'MsgHup' is passed to its Step method, the node calls the
'campaign' method to campaign to become the leader. Once the 'campaign'
method is called, the node becomes a candidate and sends 'MsgVote' to peers
in the cluster to request votes. When passed to the leader's or a candidate's
Step method and the message's Term is lower than the leader's or candidate's,
'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
If a leader or candidate receives 'MsgVote' with a higher term, it will revert
to follower. When 'MsgVote' is passed to a follower, it votes for the
sender only when the sender's last term is greater than MsgVote's term, or
the sender's last term is equal to MsgVote's term but the sender's last
committed index is greater than or equal to the follower's.

'MsgVoteResp' contains responses to a voting request. When 'MsgVoteResp' is
passed to a candidate, the candidate calculates how many votes it has won. If
it is more than a majority (quorum), it becomes leader and calls 'bcastAppend'.
If a candidate receives a majority of denials, it reverts to follower.

'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
protocol. When Config.PreVote is true, a pre-election is carried out first
(using the same rules as a regular election), and no node increases its term
number unless the pre-election indicates that the campaigning node would win.
This minimizes disruption when a partitioned node rejoins the cluster.

'MsgSnap' requests the installation of a snapshot. When a node has just
become the leader or the leader receives a 'MsgProp' message, it calls the
'bcastAppend' method, which then calls the 'sendAppend' method for each
follower. In 'sendAppend', if the leader fails to get the term or entries,
it requests a snapshot by sending a 'MsgSnap' message.

'MsgSnapStatus' reports the result of a snapshot install. When a follower
rejects 'MsgSnap', it indicates that the snapshot request carried by 'MsgSnap'
failed, typically because network issues prevented the network layer from
delivering the snapshot to the follower; the leader then considers the
follower's progress as probe. When 'MsgSnap' was not rejected, it indicates
that the snapshot succeeded; the leader sets the follower's progress to probe
and resumes its log replication.

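For the application this means that the code which transmits rd.Messages has
to feed the outcome of snapshot transfers back into raft. A minimal sketch
(transport is a hypothetical application-side sender):

  func sendMessages(n raft.Node, msgs []raftpb.Message) {
    for _, m := range msgs {
      err := transport.Send(m) // hypothetical transport layer
      if err != nil {
        n.ReportUnreachable(m.To)
      }
      if m.Type == raftpb.MsgSnap {
        status := raft.SnapshotFinish
        if err != nil {
          status = raft.SnapshotFailure
        }
        n.ReportSnapshot(m.To, status)
      }
    }
  }
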
'MsgHeartbeat' sends a heartbeat from the leader. When 'MsgHeartbeat' is passed
to a candidate and the message's term is higher than the candidate's, the
candidate reverts to follower, updates its committed index from the one in
this heartbeat, and sends the message to its mailbox. When 'MsgHeartbeat' is
passed to a follower's Step method and the message's term is higher than the
follower's, the follower updates its leaderID with the ID from the message.

'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
is passed to the leader's Step method, the leader knows which follower
responded. Only when the leader's last committed index is greater than the
follower's Match index does the leader run the 'sendAppend' method.

'MsgUnreachable' reports that a request (message) was not delivered. When
'MsgUnreachable' is passed to the leader's Step method, the leader discovers
that the follower that sent this 'MsgUnreachable' is not reachable, often
indicating that 'MsgApp' was lost. When the follower's progress state is
replicate, the leader sets it back to probe.

*/
|
||||
package raft
|
372
vendor/go.etcd.io/etcd/raft/log.go
generated
vendored
Normal file
@ -0,0 +1,372 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raft
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
pb "go.etcd.io/etcd/raft/raftpb"
|
||||
)
|
||||
|
||||
type raftLog struct {
|
||||
// storage contains all stable entries since the last snapshot.
|
||||
storage Storage
|
||||
|
||||
// unstable contains all unstable entries and snapshot.
|
||||
// they will be saved into storage.
|
||||
unstable unstable
|
||||
|
||||
// committed is the highest log position that is known to be in
|
||||
// stable storage on a quorum of nodes.
|
||||
committed uint64
|
||||
// applied is the highest log position that the application has
|
||||
// been instructed to apply to its state machine.
|
||||
// Invariant: applied <= committed
|
||||
applied uint64
|
||||
|
||||
logger Logger
|
||||
|
||||
// maxNextEntsSize is the maximum aggregate byte size of the messages
// returned from calls to nextEnts.
|
||||
maxNextEntsSize uint64
|
||||
}
|
||||
|
||||
// newLog returns a log using the given storage and default options. It
// recovers the log to the state where it has just committed and applied the
// latest snapshot.
|
||||
func newLog(storage Storage, logger Logger) *raftLog {
|
||||
return newLogWithSize(storage, logger, noLimit)
|
||||
}
|
||||
|
||||
// newLogWithSize returns a log using the given storage and max
|
||||
// message size.
|
||||
func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog {
|
||||
if storage == nil {
|
||||
log.Panic("storage must not be nil")
|
||||
}
|
||||
log := &raftLog{
|
||||
storage: storage,
|
||||
logger: logger,
|
||||
maxNextEntsSize: maxNextEntsSize,
|
||||
}
|
||||
firstIndex, err := storage.FirstIndex()
|
||||
if err != nil {
|
||||
panic(err) // TODO(bdarnell)
|
||||
}
|
||||
lastIndex, err := storage.LastIndex()
|
||||
if err != nil {
|
||||
panic(err) // TODO(bdarnell)
|
||||
}
|
||||
log.unstable.offset = lastIndex + 1
|
||||
log.unstable.logger = logger
|
||||
// Initialize our committed and applied pointers to the time of the last compaction.
|
||||
log.committed = firstIndex - 1
|
||||
log.applied = firstIndex - 1
|
||||
|
||||
return log
|
||||
}
|
||||
|
||||
func (l *raftLog) String() string {
|
||||
return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
|
||||
}
|
||||
|
||||
// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
|
||||
// it returns (last index of new entries, true).
|
||||
func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
|
||||
if l.matchTerm(index, logTerm) {
|
||||
lastnewi = index + uint64(len(ents))
|
||||
ci := l.findConflict(ents)
|
||||
switch {
|
||||
case ci == 0:
|
||||
case ci <= l.committed:
|
||||
l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
|
||||
default:
|
||||
offset := index + 1
|
||||
l.append(ents[ci-offset:]...)
|
||||
}
|
||||
l.commitTo(min(committed, lastnewi))
|
||||
return lastnewi, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (l *raftLog) append(ents ...pb.Entry) uint64 {
|
||||
if len(ents) == 0 {
|
||||
return l.lastIndex()
|
||||
}
|
||||
if after := ents[0].Index - 1; after < l.committed {
|
||||
l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
|
||||
}
|
||||
l.unstable.truncateAndAppend(ents)
|
||||
return l.lastIndex()
|
||||
}
|
||||
|
||||
// findConflict finds the index of the conflict.
|
||||
// It returns the first pair of conflicting entries between the existing
|
||||
// entries and the given entries, if there are any.
|
||||
// If there are no conflicting entries and the existing entries contain
// all the given entries, zero will be returned.
// If there are no conflicting entries but the given entries contain new
// entries, the index of the first new entry will be returned.
|
||||
// An entry is considered to be conflicting if it has the same index but
|
||||
// a different term.
|
||||
// The first entry MUST have an index equal to the argument 'from'.
|
||||
// The index of the given entries MUST be continuously increasing.
|
||||
func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
|
||||
for _, ne := range ents {
|
||||
if !l.matchTerm(ne.Index, ne.Term) {
|
||||
if ne.Index <= l.lastIndex() {
|
||||
l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
|
||||
ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
|
||||
}
|
||||
return ne.Index
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (l *raftLog) unstableEntries() []pb.Entry {
|
||||
if len(l.unstable.entries) == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.unstable.entries
|
||||
}
|
||||
|
||||
// nextEnts returns all the available entries for execution.
|
||||
// If applied is smaller than the index of snapshot, it returns all committed
|
||||
// entries after the index of snapshot.
|
||||
func (l *raftLog) nextEnts() (ents []pb.Entry) {
|
||||
off := max(l.applied+1, l.firstIndex())
|
||||
if l.committed+1 > off {
|
||||
ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize)
|
||||
if err != nil {
|
||||
l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
|
||||
}
|
||||
return ents
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasNextEnts returns whether there are any available entries for execution. This
|
||||
// is a fast check without heavy raftLog.slice() in raftLog.nextEnts().
|
||||
func (l *raftLog) hasNextEnts() bool {
|
||||
off := max(l.applied+1, l.firstIndex())
|
||||
return l.committed+1 > off
|
||||
}
|
||||
|
||||
func (l *raftLog) snapshot() (pb.Snapshot, error) {
|
||||
if l.unstable.snapshot != nil {
|
||||
return *l.unstable.snapshot, nil
|
||||
}
|
||||
return l.storage.Snapshot()
|
||||
}
|
||||
|
||||
func (l *raftLog) firstIndex() uint64 {
|
||||
if i, ok := l.unstable.maybeFirstIndex(); ok {
|
||||
return i
|
||||
}
|
||||
index, err := l.storage.FirstIndex()
|
||||
if err != nil {
|
||||
panic(err) // TODO(bdarnell)
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
||||
func (l *raftLog) lastIndex() uint64 {
|
||||
if i, ok := l.unstable.maybeLastIndex(); ok {
|
||||
return i
|
||||
}
|
||||
i, err := l.storage.LastIndex()
|
||||
if err != nil {
|
||||
panic(err) // TODO(bdarnell)
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (l *raftLog) commitTo(tocommit uint64) {
|
||||
// never decrease commit
|
||||
if l.committed < tocommit {
|
||||
if l.lastIndex() < tocommit {
|
||||
l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
|
||||
}
|
||||
l.committed = tocommit
|
||||
}
|
||||
}
|
||||
|
||||
func (l *raftLog) appliedTo(i uint64) {
|
||||
if i == 0 {
|
||||
return
|
||||
}
|
||||
if l.committed < i || i < l.applied {
|
||||
l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
|
||||
}
|
||||
l.applied = i
|
||||
}
|
||||
|
||||
func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
|
||||
|
||||
func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
|
||||
|
||||
func (l *raftLog) lastTerm() uint64 {
|
||||
t, err := l.term(l.lastIndex())
|
||||
if err != nil {
|
||||
l.logger.Panicf("unexpected error when getting the last term (%v)", err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (l *raftLog) term(i uint64) (uint64, error) {
|
||||
// the valid term range is [index of dummy entry, last index]
|
||||
dummyIndex := l.firstIndex() - 1
|
||||
if i < dummyIndex || i > l.lastIndex() {
|
||||
// TODO: return an error instead?
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if t, ok := l.unstable.maybeTerm(i); ok {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
t, err := l.storage.Term(i)
|
||||
if err == nil {
|
||||
return t, nil
|
||||
}
|
||||
if err == ErrCompacted || err == ErrUnavailable {
|
||||
return 0, err
|
||||
}
|
||||
panic(err) // TODO(bdarnell)
|
||||
}
|
||||
|
||||
func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
|
||||
if i > l.lastIndex() {
|
||||
return nil, nil
|
||||
}
|
||||
return l.slice(i, l.lastIndex()+1, maxsize)
|
||||
}
|
||||
|
||||
// allEntries returns all entries in the log.
|
||||
func (l *raftLog) allEntries() []pb.Entry {
|
||||
ents, err := l.entries(l.firstIndex(), noLimit)
|
||||
if err == nil {
|
||||
return ents
|
||||
}
|
||||
if err == ErrCompacted { // try again if there was a racing compaction
|
||||
return l.allEntries()
|
||||
}
|
||||
// TODO (xiangli): handle error?
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
|
||||
// by comparing the index and term of the last entries in the existing logs.
|
||||
// If the logs have last entries with different terms, then the log with the
|
||||
// later term is more up-to-date. If the logs end with the same term, then
|
||||
// whichever log has the larger lastIndex is more up-to-date. If the logs are
|
||||
// the same, the given log is up-to-date.
|
||||
func (l *raftLog) isUpToDate(lasti, term uint64) bool {
|
||||
return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
|
||||
}
|
||||
|
||||
func (l *raftLog) matchTerm(i, term uint64) bool {
|
||||
t, err := l.term(i)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return t == term
|
||||
}
|
||||
|
||||
func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
|
||||
if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
|
||||
l.commitTo(maxIndex)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (l *raftLog) restore(s pb.Snapshot) {
|
||||
l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
|
||||
l.committed = s.Metadata.Index
|
||||
l.unstable.restore(s)
|
||||
}
|
||||
|
||||
// slice returns a slice of log entries from lo through hi-1, inclusive.
|
||||
func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
|
||||
err := l.mustCheckOutOfBounds(lo, hi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lo == hi {
|
||||
return nil, nil
|
||||
}
|
||||
var ents []pb.Entry
|
||||
if lo < l.unstable.offset {
|
||||
storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
|
||||
if err == ErrCompacted {
|
||||
return nil, err
|
||||
} else if err == ErrUnavailable {
|
||||
l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
|
||||
} else if err != nil {
|
||||
panic(err) // TODO(bdarnell)
|
||||
}
|
||||
|
||||
// check if ents has reached the size limitation
|
||||
if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
|
||||
return storedEnts, nil
|
||||
}
|
||||
|
||||
ents = storedEnts
|
||||
}
|
||||
if hi > l.unstable.offset {
|
||||
unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
|
||||
if len(ents) > 0 {
|
||||
combined := make([]pb.Entry, len(ents)+len(unstable))
|
||||
n := copy(combined, ents)
|
||||
copy(combined[n:], unstable)
|
||||
ents = combined
|
||||
} else {
|
||||
ents = unstable
|
||||
}
|
||||
}
|
||||
return limitSize(ents, maxSize), nil
|
||||
}
|
||||
|
||||
// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries)
|
||||
func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
|
||||
if lo > hi {
|
||||
l.logger.Panicf("invalid slice %d > %d", lo, hi)
|
||||
}
|
||||
fi := l.firstIndex()
|
||||
if lo < fi {
|
||||
return ErrCompacted
|
||||
}
|
||||
|
||||
length := l.lastIndex() + 1 - fi
|
||||
if lo < fi || hi > fi+length {
|
||||
l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
|
||||
if err == nil {
|
||||
return t
|
||||
}
|
||||
if err == ErrCompacted {
|
||||
return 0
|
||||
}
|
||||
l.logger.Panicf("unexpected error (%v)", err)
|
||||
return 0
|
||||
}
|
157
vendor/go.etcd.io/etcd/raft/log_unstable.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raft
|
||||
|
||||
import pb "go.etcd.io/etcd/raft/raftpb"
|
||||
|
||||
// unstable.entries[i] has raft log position i+unstable.offset.
|
||||
// Note that unstable.offset may be less than the highest log
|
||||
// position in storage; this means that the next write to storage
|
||||
// might need to truncate the log before persisting unstable.entries.
|
||||
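//
// For example, with offset == 5 and three unstable entries, those entries
// occupy raft log positions 5, 6 and 7.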
type unstable struct {
|
||||
// the incoming unstable snapshot, if any.
|
||||
snapshot *pb.Snapshot
|
||||
// all entries that have not yet been written to storage.
|
||||
entries []pb.Entry
|
||||
offset uint64
|
||||
|
||||
logger Logger
|
||||
}
|
||||
|
||||
// maybeFirstIndex returns the index of the first possible entry in entries
|
||||
// if it has a snapshot.
|
||||
func (u *unstable) maybeFirstIndex() (uint64, bool) {
|
||||
if u.snapshot != nil {
|
||||
return u.snapshot.Metadata.Index + 1, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// maybeLastIndex returns the last index if it has at least one
|
||||
// unstable entry or snapshot.
|
||||
func (u *unstable) maybeLastIndex() (uint64, bool) {
|
||||
if l := len(u.entries); l != 0 {
|
||||
return u.offset + uint64(l) - 1, true
|
||||
}
|
||||
if u.snapshot != nil {
|
||||
return u.snapshot.Metadata.Index, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// maybeTerm returns the term of the entry at index i, if there
|
||||
// is any.
|
||||
func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
|
||||
if i < u.offset {
|
||||
if u.snapshot != nil && u.snapshot.Metadata.Index == i {
|
||||
return u.snapshot.Metadata.Term, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
last, ok := u.maybeLastIndex()
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
if i > last {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
return u.entries[i-u.offset].Term, true
|
||||
}
|
||||
|
||||
func (u *unstable) stableTo(i, t uint64) {
|
||||
gt, ok := u.maybeTerm(i)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// if i < offset, term is matched with the snapshot
|
||||
// only update the unstable entries if term is matched with
|
||||
// an unstable entry.
|
||||
if gt == t && i >= u.offset {
|
||||
u.entries = u.entries[i+1-u.offset:]
|
||||
u.offset = i + 1
|
||||
u.shrinkEntriesArray()
|
||||
}
|
||||
}
|
||||
|
||||
// shrinkEntriesArray discards the underlying array used by the entries slice
|
||||
// if most of it isn't being used. This avoids holding references to a bunch of
|
||||
// potentially large entries that aren't needed anymore. Simply clearing the
|
||||
// entries wouldn't be safe because clients might still be using them.
|
||||
func (u *unstable) shrinkEntriesArray() {
|
||||
// We replace the array if we're using less than half of the space in
|
||||
// it. This number is fairly arbitrary, chosen as an attempt to balance
|
||||
// memory usage vs number of allocations. It could probably be improved
|
||||
// with some focused tuning.
|
||||
const lenMultiple = 2
|
||||
if len(u.entries) == 0 {
|
||||
u.entries = nil
|
||||
} else if len(u.entries)*lenMultiple < cap(u.entries) {
|
||||
newEntries := make([]pb.Entry, len(u.entries))
|
||||
copy(newEntries, u.entries)
|
||||
u.entries = newEntries
|
||||
}
|
||||
}
|
||||
|
||||
func (u *unstable) stableSnapTo(i uint64) {
|
||||
if u.snapshot != nil && u.snapshot.Metadata.Index == i {
|
||||
u.snapshot = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (u *unstable) restore(s pb.Snapshot) {
|
||||
u.offset = s.Metadata.Index + 1
|
||||
u.entries = nil
|
||||
u.snapshot = &s
|
||||
}
|
||||
|
||||
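// truncateAndAppend appends ents to the unstable log, discarding any existing
// unstable entries at or after ents[0].Index. For example, with offset == 5
// and unstable entries at positions 5, 6 and 7, appending entries starting at
// index 6 keeps entry 5 and replaces entries 6 and 7 with the new ones.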
func (u *unstable) truncateAndAppend(ents []pb.Entry) {
|
||||
after := ents[0].Index
|
||||
switch {
|
||||
case after == u.offset+uint64(len(u.entries)):
|
||||
// after is the next index in the u.entries
|
||||
// directly append
|
||||
u.entries = append(u.entries, ents...)
|
||||
case after <= u.offset:
|
||||
u.logger.Infof("replace the unstable entries from index %d", after)
|
||||
// The log is being truncated to before our current offset
|
||||
// portion, so set the offset and replace the entries
|
||||
u.offset = after
|
||||
u.entries = ents
|
||||
default:
|
||||
// truncate to after and copy to u.entries
|
||||
// then append
|
||||
u.logger.Infof("truncate the unstable entries before index %d", after)
|
||||
u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
|
||||
u.entries = append(u.entries, ents...)
|
||||
}
|
||||
}
|
||||
|
||||
func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
|
||||
u.mustCheckOutOfBounds(lo, hi)
|
||||
return u.entries[lo-u.offset : hi-u.offset]
|
||||
}
|
||||
|
||||
// u.offset <= lo <= hi <= u.offset+len(u.entries)
|
||||
func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
|
||||
if lo > hi {
|
||||
u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
|
||||
}
|
||||
upper := u.offset + uint64(len(u.entries))
|
||||
if lo < u.offset || hi > upper {
|
||||
u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
|
||||
}
|
||||
}
|
132
vendor/go.etcd.io/etcd/raft/logger.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raft
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Logger interface {
|
||||
Debug(v ...interface{})
|
||||
Debugf(format string, v ...interface{})
|
||||
|
||||
Error(v ...interface{})
|
||||
Errorf(format string, v ...interface{})
|
||||
|
||||
Info(v ...interface{})
|
||||
Infof(format string, v ...interface{})
|
||||
|
||||
Warning(v ...interface{})
|
||||
Warningf(format string, v ...interface{})
|
||||
|
||||
Fatal(v ...interface{})
|
||||
Fatalf(format string, v ...interface{})
|
||||
|
||||
Panic(v ...interface{})
|
||||
Panicf(format string, v ...interface{})
|
||||
}
|
||||
|
||||
func SetLogger(l Logger) {
|
||||
raftLoggerMu.Lock()
|
||||
raftLogger = l
|
||||
raftLoggerMu.Unlock()
|
||||
}
|
||||
|
||||
var (
|
||||
defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
|
||||
discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
|
||||
raftLoggerMu sync.Mutex
|
||||
raftLogger = Logger(defaultLogger)
|
||||
)
|
||||
|
||||
const (
|
||||
calldepth = 2
|
||||
)
|
||||
|
||||
// DefaultLogger is a default implementation of the Logger interface.
|
||||
type DefaultLogger struct {
|
||||
*log.Logger
|
||||
debug bool
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) EnableTimestamps() {
|
||||
l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) EnableDebug() {
|
||||
l.debug = true
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Debug(v ...interface{}) {
|
||||
if l.debug {
|
||||
l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
|
||||
}
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
|
||||
if l.debug {
|
||||
l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
|
||||
}
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Info(v ...interface{}) {
|
||||
l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Infof(format string, v ...interface{}) {
|
||||
l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Error(v ...interface{}) {
|
||||
l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
|
||||
l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Warning(v ...interface{}) {
|
||||
l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
|
||||
l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Fatal(v ...interface{}) {
|
||||
l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
|
||||
l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Panic(v ...interface{}) {
|
||||
l.Logger.Panic(v...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
|
||||
l.Logger.Panicf(format, v...)
|
||||
}
|
||||
|
||||
func header(lvl, msg string) string {
|
||||
return fmt.Sprintf("%s: %s", lvl, msg)
|
||||
}
|
584
vendor/go.etcd.io/etcd/raft/node.go
generated
vendored
Normal file
@ -0,0 +1,584 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raft
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
pb "go.etcd.io/etcd/raft/raftpb"
|
||||
)
|
||||
|
||||
type SnapshotStatus int
|
||||
|
||||
const (
|
||||
SnapshotFinish SnapshotStatus = 1
|
||||
SnapshotFailure SnapshotStatus = 2
|
||||
)
|
||||
|
||||
var (
|
||||
emptyState = pb.HardState{}
|
||||
|
||||
// ErrStopped is returned by methods on Nodes that have been stopped.
|
||||
ErrStopped = errors.New("raft: stopped")
|
||||
)
|
||||
|
||||
// SoftState provides state that is useful for logging and debugging.
|
||||
// The state is volatile and does not need to be persisted to the WAL.
|
||||
type SoftState struct {
|
||||
Lead uint64 // must use atomic operations to access; keep 64-bit aligned.
|
||||
RaftState StateType
|
||||
}
|
||||
|
||||
func (a *SoftState) equal(b *SoftState) bool {
|
||||
return a.Lead == b.Lead && a.RaftState == b.RaftState
|
||||
}
|
||||
|
||||
// Ready encapsulates the entries and messages that are ready to read,
|
||||
// be saved to stable storage, committed or sent to other peers.
|
||||
// All fields in Ready are read-only.
|
||||
type Ready struct {
|
||||
// The current volatile state of a Node.
|
||||
// SoftState will be nil if there is no update.
|
||||
// It is not required to consume or store SoftState.
|
||||
*SoftState
|
||||
|
||||
// The current state of a Node to be saved to stable storage BEFORE
|
||||
// Messages are sent.
|
||||
// HardState will be equal to empty state if there is no update.
|
||||
pb.HardState
|
||||
|
||||
// ReadStates can be used for node to serve linearizable read requests locally
|
||||
// when its applied index is greater than the index in ReadState.
|
||||
// Note that the readState will be returned when raft receives msgReadIndex.
// The returned state is only valid for the request that requested to read.
|
||||
ReadStates []ReadState
|
||||
|
||||
// Entries specifies entries to be saved to stable storage BEFORE
|
||||
// Messages are sent.
|
||||
Entries []pb.Entry
|
||||
|
||||
// Snapshot specifies the snapshot to be saved to stable storage.
|
||||
Snapshot pb.Snapshot
|
||||
|
||||
// CommittedEntries specifies entries to be committed to a
|
||||
// store/state-machine. These have previously been committed to stable
|
||||
// store.
|
||||
CommittedEntries []pb.Entry
|
||||
|
||||
// Messages specifies outbound messages to be sent AFTER Entries are
|
||||
// committed to stable storage.
|
||||
// If it contains a MsgSnap message, the application MUST report back to raft
|
||||
// when the snapshot has been received or has failed by calling ReportSnapshot.
|
||||
Messages []pb.Message
|
||||
|
||||
// MustSync indicates whether the HardState and Entries must be synchronously
|
||||
// written to disk or if an asynchronous write is permissible.
|
||||
MustSync bool
|
||||
}
|
||||
|
||||
func isHardStateEqual(a, b pb.HardState) bool {
|
||||
return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
|
||||
}
|
||||
|
||||
// IsEmptyHardState returns true if the given HardState is empty.
|
||||
func IsEmptyHardState(st pb.HardState) bool {
|
||||
return isHardStateEqual(st, emptyState)
|
||||
}
|
||||
|
||||
// IsEmptySnap returns true if the given Snapshot is empty.
|
||||
func IsEmptySnap(sp pb.Snapshot) bool {
|
||||
return sp.Metadata.Index == 0
|
||||
}
|
||||
|
||||
func (rd Ready) containsUpdates() bool {
|
||||
return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
|
||||
!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
|
||||
len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
|
||||
}
|
||||
|
||||
// appliedCursor extracts from the Ready the highest index the client has
|
||||
// applied (once the Ready is confirmed via Advance). If no information is
|
||||
// contained in the Ready, returns zero.
|
||||
func (rd Ready) appliedCursor() uint64 {
|
||||
if n := len(rd.CommittedEntries); n > 0 {
|
||||
return rd.CommittedEntries[n-1].Index
|
||||
}
|
||||
if index := rd.Snapshot.Metadata.Index; index > 0 {
|
||||
return index
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Node represents a node in a raft cluster.
|
||||
type Node interface {
|
||||
// Tick increments the internal logical clock for the Node by a single tick. Election
|
||||
// timeouts and heartbeat timeouts are in units of ticks.
|
||||
Tick()
|
||||
// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
|
||||
Campaign(ctx context.Context) error
|
||||
// Propose proposes that data be appended to the log. Note that proposals can be lost without
// notice, so it is the user's job to ensure proposal retries.
|
||||
Propose(ctx context.Context, data []byte) error
|
||||
// ProposeConfChange proposes a configuration change. Like any proposal, the
|
||||
// configuration change may be dropped with or without an error being
|
||||
// returned. In particular, configuration changes are dropped unless the
|
||||
// leader has certainty that there is no prior unapplied configuration
|
||||
// change in its log.
|
||||
//
|
||||
// The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2
|
||||
// message. The latter allows arbitrary configuration changes via joint
|
||||
// consensus, notably including replacing a voter. Passing a ConfChangeV2
|
||||
// message is only allowed if all Nodes participating in the cluster run a
|
||||
// version of this library aware of the V2 API. See pb.ConfChangeV2 for
|
||||
// usage details and semantics.
|
||||
ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error
|
||||
|
||||
// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
|
||||
Step(ctx context.Context, msg pb.Message) error
|
||||
|
||||
// Ready returns a channel that returns the current point-in-time state.
|
||||
// Users of the Node must call Advance after retrieving the state returned by Ready.
|
||||
//
|
||||
// NOTE: No committed entries from the next Ready may be applied until all committed entries
|
||||
// and snapshots from the previous one have finished.
|
||||
Ready() <-chan Ready
|
||||
|
||||
// Advance notifies the Node that the application has saved progress up to the last Ready.
|
||||
// It prepares the node to return the next available Ready.
|
||||
//
|
||||
// The application should generally call Advance after it applies the entries in last Ready.
|
||||
//
|
||||
// However, as an optimization, the application may call Advance while it is applying the
|
||||
// commands. For example. when the last Ready contains a snapshot, the application might take
|
||||
// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
|
||||
// progress, it can call Advance before finishing applying the last ready.
|
||||
Advance()
|
||||
// ApplyConfChange applies a config change (previously passed to
|
||||
// ProposeConfChange) to the node. This must be called whenever a config
|
||||
// change is observed in Ready.CommittedEntries.
|
||||
//
|
||||
// Returns an opaque non-nil ConfState protobuf which must be recorded in
|
||||
// snapshots.
|
||||
ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState
|
||||
|
||||
// TransferLeadership attempts to transfer leadership to the given transferee.
|
||||
TransferLeadership(ctx context.Context, lead, transferee uint64)
|
||||
|
||||
// ReadIndex requests a read state. The read state will be set in the ready.
// The read state has a read index. Once the application advances further than the read
// index, any linearizable read requests issued before the read request can be
// processed safely. The read state will have the same rctx attached.
|
||||
ReadIndex(ctx context.Context, rctx []byte) error
|
||||
|
||||
// Status returns the current status of the raft state machine.
|
||||
Status() Status
|
||||
// ReportUnreachable reports the given node is not reachable for the last send.
|
||||
ReportUnreachable(id uint64)
|
||||
// ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower
|
||||
// who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure.
|
||||
// Calling ReportSnapshot with SnapshotFinish is a no-op. But, any failure in applying a
|
||||
// snapshot (for e.g., while streaming it from leader to follower), should be reported to the
|
||||
// leader with SnapshotFailure. When leader sends a snapshot to a follower, it pauses any raft
|
||||
// log probes until the follower can apply the snapshot and advance its state. If the follower
|
||||
// can't do that, for e.g., due to a crash, it could end up in a limbo, never getting any
|
||||
// updates from the leader. Therefore, it is crucial that the application ensures that any
|
||||
// failure in snapshot sending is caught and reported back to the leader; so it can resume raft
|
||||
// log probing in the follower.
|
||||
ReportSnapshot(id uint64, status SnapshotStatus)
|
||||
// Stop performs any necessary termination of the Node.
|
||||
Stop()
|
||||
}
|
||||
|
||||
type Peer struct {
|
||||
ID uint64
|
||||
Context []byte
|
||||
}
|
||||
|
||||
// StartNode returns a new Node given configuration and a list of raft peers.
|
||||
// It appends a ConfChangeAddNode entry for each given peer to the initial log.
|
||||
//
|
||||
// Peers must not be zero length; call RestartNode in that case.
|
||||
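//
// A typical call (the config c and the peer IDs are illustrative):
//
//   n := StartNode(c, []Peer{{ID: 0x02}, {ID: 0x03}})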
func StartNode(c *Config, peers []Peer) Node {
|
||||
if len(peers) == 0 {
|
||||
panic("no peers given; use RestartNode instead")
|
||||
}
|
||||
rn, err := NewRawNode(c)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rn.Bootstrap(peers)
|
||||
|
||||
n := newNode(rn)
|
||||
|
||||
go n.run()
|
||||
return &n
|
||||
}
|
||||
|
||||
// RestartNode is similar to StartNode but does not take a list of peers.
|
||||
// The current membership of the cluster will be restored from the Storage.
|
||||
// If the caller has an existing state machine, pass in the last log index that
|
||||
// has been applied to it; otherwise use zero.
|
||||
func RestartNode(c *Config) Node {
|
||||
rn, err := NewRawNode(c)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
n := newNode(rn)
|
||||
go n.run()
|
||||
return &n
|
||||
}
|
||||
|
||||
type msgWithResult struct {
|
||||
m pb.Message
|
||||
result chan error
|
||||
}
|
||||
|
||||
// node is the canonical implementation of the Node interface
|
||||
type node struct {
|
||||
propc chan msgWithResult
|
||||
recvc chan pb.Message
|
||||
confc chan pb.ConfChangeV2
|
||||
confstatec chan pb.ConfState
|
||||
readyc chan Ready
|
||||
advancec chan struct{}
|
||||
tickc chan struct{}
|
||||
done chan struct{}
|
||||
stop chan struct{}
|
||||
status chan chan Status
|
||||
|
||||
rn *RawNode
|
||||
}
|
||||
|
||||
func newNode(rn *RawNode) node {
|
||||
return node{
|
||||
propc: make(chan msgWithResult),
|
||||
recvc: make(chan pb.Message),
|
||||
confc: make(chan pb.ConfChangeV2),
|
||||
confstatec: make(chan pb.ConfState),
|
||||
readyc: make(chan Ready),
|
||||
advancec: make(chan struct{}),
|
||||
// make tickc a buffered chan, so raft node can buffer some ticks when the node
|
||||
// is busy processing raft messages. Raft node will resume process buffered
|
||||
// ticks when it becomes idle.
|
||||
tickc: make(chan struct{}, 128),
|
||||
done: make(chan struct{}),
|
||||
stop: make(chan struct{}),
|
||||
status: make(chan chan Status),
|
||||
rn: rn,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) Stop() {
|
||||
select {
|
||||
case n.stop <- struct{}{}:
|
||||
// Not already stopped, so trigger it
|
||||
case <-n.done:
|
||||
// Node has already been stopped - no need to do anything
|
||||
return
|
||||
}
|
||||
// Block until the stop has been acknowledged by run()
|
||||
<-n.done
|
||||
}
|
||||
|
||||
func (n *node) run() {
|
||||
var propc chan msgWithResult
|
||||
var readyc chan Ready
|
||||
var advancec chan struct{}
|
||||
var rd Ready
|
||||
|
||||
r := n.rn.raft
|
||||
|
||||
lead := None
|
||||
|
||||
for {
|
||||
if advancec != nil {
|
||||
readyc = nil
|
||||
} else if n.rn.HasReady() {
|
||||
// Populate a Ready. Note that this Ready is not guaranteed to
|
||||
// actually be handled. We will arm readyc, but there's no guarantee
|
||||
// that we will actually send on it. It's possible that we will
|
||||
// service another channel instead, loop around, and then populate
|
||||
// the Ready again. We could instead force the previous Ready to be
|
||||
// handled first, but it's generally good to emit larger Readys plus
|
||||
// it simplifies testing (by emitting less frequently and more
|
||||
// predictably).
|
||||
rd = n.rn.readyWithoutAccept()
|
||||
readyc = n.readyc
|
||||
}
|
||||
|
||||
if lead != r.lead {
|
||||
if r.hasLeader() {
|
||||
if lead == None {
|
||||
r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
|
||||
} else {
|
||||
r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
|
||||
}
|
||||
propc = n.propc
|
||||
} else {
|
||||
r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
|
||||
propc = nil
|
||||
}
|
||||
lead = r.lead
|
||||
}
|
||||
|
||||
select {
|
||||
// TODO: maybe buffer the config propose if there exists one (the way
|
||||
// described in raft dissertation)
|
||||
// Currently it is dropped in Step silently.
|
||||
case pm := <-propc:
|
||||
m := pm.m
|
||||
m.From = r.id
|
||||
err := r.Step(m)
|
||||
if pm.result != nil {
|
||||
pm.result <- err
|
||||
close(pm.result)
|
||||
}
|
||||
case m := <-n.recvc:
|
||||
// filter out response message from unknown From.
|
||||
if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) {
|
||||
r.Step(m)
|
||||
}
|
||||
case cc := <-n.confc:
|
||||
_, okBefore := r.prs.Progress[r.id]
|
||||
cs := r.applyConfChange(cc)
|
||||
// If the node was removed, block incoming proposals. Note that we
|
||||
// only do this if the node was in the config before. Nodes may be
|
||||
// a member of the group without knowing this (when they're catching
|
||||
// up on the log and don't have the latest config) and we don't want
|
||||
// to block the proposal channel in that case.
|
||||
//
|
||||
// NB: propc is reset when the leader changes, which, if we learn
|
||||
// about it, sort of implies that we got readded, maybe? This isn't
|
||||
// very sound and likely has bugs.
|
||||
if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter {
|
||||
var found bool
|
||||
for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
|
||||
for _, id := range sl {
|
||||
if id == r.id {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
propc = nil
|
||||
}
|
||||
}
|
||||
select {
|
||||
case n.confstatec <- cs:
|
||||
case <-n.done:
|
||||
}
|
||||
case <-n.tickc:
|
||||
n.rn.Tick()
|
||||
case readyc <- rd:
|
||||
n.rn.acceptReady(rd)
|
||||
advancec = n.advancec
|
||||
case <-advancec:
|
||||
n.rn.Advance(rd)
|
||||
rd = Ready{}
|
||||
advancec = nil
|
||||
case c := <-n.status:
|
||||
c <- getStatus(r)
|
||||
case <-n.stop:
|
||||
close(n.done)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tick increments the internal logical clock for this Node. Election timeouts
|
||||
// and heartbeat timeouts are in units of ticks.
|
||||
func (n *node) Tick() {
|
||||
select {
|
||||
case n.tickc <- struct{}{}:
|
||||
case <-n.done:
|
||||
default:
|
||||
n.rn.raft.logger.Warningf("%x (leader %v) A tick missed to fire. Node blocks too long!", n.rn.raft.id, n.rn.raft.id == n.rn.raft.lead)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
|
||||
|
||||
func (n *node) Propose(ctx context.Context, data []byte) error {
|
||||
return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
|
||||
}
|
||||
|
||||
func (n *node) Step(ctx context.Context, m pb.Message) error {
|
||||
// ignore unexpected local messages receiving over network
|
||||
if IsLocalMsg(m.Type) {
|
||||
// TODO: return an error?
|
||||
return nil
|
||||
}
|
||||
return n.step(ctx, m)
|
||||
}
|
||||
|
||||
func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) {
|
||||
typ, data, err := pb.MarshalConfChange(c)
|
||||
if err != nil {
|
||||
return pb.Message{}, err
|
||||
}
|
||||
return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil
|
||||
}
|
||||
|
||||
func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error {
|
||||
msg, err := confChangeToMsg(cc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return n.Step(ctx, msg)
|
||||
}
|
||||
|
||||
func (n *node) step(ctx context.Context, m pb.Message) error {
|
||||
return n.stepWithWaitOption(ctx, m, false)
|
||||
}
|
||||
|
||||
func (n *node) stepWait(ctx context.Context, m pb.Message) error {
|
||||
return n.stepWithWaitOption(ctx, m, true)
|
||||
}
|
||||
|
||||
// Step advances the state machine using msgs. The ctx.Err() will be returned,
|
||||
// if any.
|
||||
func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
|
||||
if m.Type != pb.MsgProp {
|
||||
select {
|
||||
case n.recvc <- m:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-n.done:
|
||||
return ErrStopped
|
||||
}
|
||||
}
|
||||
ch := n.propc
|
||||
pm := msgWithResult{m: m}
|
||||
if wait {
|
||||
pm.result = make(chan error, 1)
|
||||
}
|
||||
select {
|
||||
case ch <- pm:
|
||||
if !wait {
|
||||
return nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-n.done:
|
||||
return ErrStopped
|
||||
}
|
||||
select {
|
||||
case err := <-pm.result:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-n.done:
|
||||
return ErrStopped
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *node) Ready() <-chan Ready { return n.readyc }
|
||||
|
||||
func (n *node) Advance() {
|
||||
select {
|
||||
case n.advancec <- struct{}{}:
|
||||
case <-n.done:
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
|
||||
var cs pb.ConfState
|
||||
select {
|
||||
case n.confc <- cc.AsV2():
|
||||
case <-n.done:
|
||||
}
|
||||
select {
|
||||
case cs = <-n.confstatec:
|
||||
case <-n.done:
|
||||
}
|
||||
return &cs
|
||||
}
|
||||
|
||||
func (n *node) Status() Status {
|
||||
c := make(chan Status)
|
||||
select {
|
||||
case n.status <- c:
|
||||
return <-c
|
||||
case <-n.done:
|
||||
return Status{}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) ReportUnreachable(id uint64) {
|
||||
select {
|
||||
case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
|
||||
case <-n.done:
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
|
||||
rej := status == SnapshotFailure
|
||||
|
||||
select {
|
||||
case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
|
||||
case <-n.done:
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
|
||||
select {
|
||||
// manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
|
||||
case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
|
||||
case <-n.done:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
|
||||
return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
|
||||
}
|
||||
|
||||
func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
|
||||
rd := Ready{
|
||||
Entries: r.raftLog.unstableEntries(),
|
||||
CommittedEntries: r.raftLog.nextEnts(),
|
||||
Messages: r.msgs,
|
||||
}
|
||||
if softSt := r.softState(); !softSt.equal(prevSoftSt) {
|
||||
rd.SoftState = softSt
|
||||
}
|
||||
if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
|
||||
rd.HardState = hardSt
|
||||
}
|
||||
if r.raftLog.unstable.snapshot != nil {
|
||||
rd.Snapshot = *r.raftLog.unstable.snapshot
|
||||
}
|
||||
if len(r.readStates) != 0 {
|
||||
rd.ReadStates = r.readStates
|
||||
}
|
||||
rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
|
||||
return rd
|
||||
}
|
||||
|
||||
// MustSync returns true if the hard state and count of Raft entries indicate
|
||||
// that a synchronous write to persistent storage is required.
|
||||
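//
// For example, a Ready that only advances the commit index (no new entries and
// no change to Term or Vote) does not require a synchronous write, while
// appending entries or voting in a new term does.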
func MustSync(st, prevst pb.HardState, entsnum int) bool {
|
||||
// Persistent state on all servers:
|
||||
// (Updated on stable storage before responding to RPCs)
|
||||
// currentTerm
|
||||
// votedFor
|
||||
// log entries[]
|
||||
return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
|
||||
}
|
75
vendor/go.etcd.io/etcd/raft/quorum/joint.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package quorum
|
||||
|
||||
// JointConfig is a configuration of two groups of (possibly overlapping)
|
||||
// majority configurations. Decisions require the support of both majorities.
|
||||
type JointConfig [2]MajorityConfig
|
||||
|
||||
func (c JointConfig) String() string {
|
||||
if len(c[1]) > 0 {
|
||||
return c[0].String() + "&&" + c[1].String()
|
||||
}
|
||||
return c[0].String()
|
||||
}
|
||||
|
||||
// IDs returns a newly initialized map representing the set of voters present
|
||||
// in the joint configuration.
|
||||
func (c JointConfig) IDs() map[uint64]struct{} {
|
||||
m := map[uint64]struct{}{}
|
||||
for _, cc := range c {
|
||||
for id := range cc {
|
||||
m[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Describe returns a (multi-line) representation of the commit indexes for the
|
||||
// given lookuper.
|
||||
func (c JointConfig) Describe(l AckedIndexer) string {
|
||||
return MajorityConfig(c.IDs()).Describe(l)
|
||||
}
|
||||
|
||||
// CommittedIndex returns the largest committed index for the given joint
|
||||
// quorum. An index is jointly committed if it is committed in both constituent
|
||||
// majorities.
|
||||
func (c JointConfig) CommittedIndex(l AckedIndexer) Index {
|
||||
idx0 := c[0].CommittedIndex(l)
|
||||
idx1 := c[1].CommittedIndex(l)
|
||||
if idx0 < idx1 {
|
||||
return idx0
|
||||
}
|
||||
return idx1
|
||||
}
|
||||
|
||||
// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns
|
||||
// a result indicating whether the vote is pending, lost, or won. A joint quorum
|
||||
// requires both majority quorums to vote in favor.
|
||||
func (c JointConfig) VoteResult(votes map[uint64]bool) VoteResult {
|
||||
r1 := c[0].VoteResult(votes)
|
||||
r2 := c[1].VoteResult(votes)
|
||||
|
||||
if r1 == r2 {
|
||||
// If they agree, return the agreed state.
|
||||
return r1
|
||||
}
|
||||
if r1 == VoteLost || r2 == VoteLost {
|
||||
// If either config has lost, loss is the only possible outcome.
|
||||
return VoteLost
|
||||
}
|
||||
// One side won, the other one is pending, so the whole outcome is.
|
||||
return VotePending
|
||||
}
|
210
vendor/go.etcd.io/etcd/raft/quorum/majority.go
generated
vendored
Normal file
@ -0,0 +1,210 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package quorum
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MajorityConfig is a set of IDs that uses majority quorums to make decisions.
|
||||
type MajorityConfig map[uint64]struct{}
|
||||
|
||||
func (c MajorityConfig) String() string {
|
||||
sl := make([]uint64, 0, len(c))
|
||||
for id := range c {
|
||||
sl = append(sl, id)
|
||||
}
|
||||
sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] })
|
||||
var buf strings.Builder
|
||||
buf.WriteByte('(')
|
||||
for i := range sl {
|
||||
if i > 0 {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
fmt.Fprint(&buf, sl[i])
|
||||
}
|
||||
buf.WriteByte(')')
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Describe returns a (multi-line) representation of the commit indexes for the
|
||||
// given lookuper.
|
||||
func (c MajorityConfig) Describe(l AckedIndexer) string {
|
||||
if len(c) == 0 {
|
||||
return "<empty majority quorum>"
|
||||
}
|
||||
type tup struct {
|
||||
id uint64
|
||||
idx Index
|
||||
ok bool // idx found?
|
||||
bar int // length of bar displayed for this tup
|
||||
}
|
||||
|
||||
// Below, populate .bar so that the i-th largest commit index has bar i (we
|
||||
// plot this as sort of a progress bar). The actual code is a bit more
|
||||
// complicated and also makes sure that equal index => equal bar.
|
||||
|
||||
n := len(c)
|
||||
info := make([]tup, 0, n)
|
||||
for id := range c {
|
||||
idx, ok := l.AckedIndex(id)
|
||||
info = append(info, tup{id: id, idx: idx, ok: ok})
|
||||
}
|
||||
|
||||
// Sort by index
|
||||
sort.Slice(info, func(i, j int) bool {
|
||||
if info[i].idx == info[j].idx {
|
||||
return info[i].id < info[j].id
|
||||
}
|
||||
return info[i].idx < info[j].idx
|
||||
})
|
||||
|
||||
// Populate .bar.
|
||||
for i := range info {
|
||||
if i > 0 && info[i-1].idx < info[i].idx {
|
||||
info[i].bar = i
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by ID.
|
||||
sort.Slice(info, func(i, j int) bool {
|
||||
return info[i].id < info[j].id
|
||||
})
|
||||
|
||||
var buf strings.Builder
|
||||
|
||||
// Print.
|
||||
fmt.Fprint(&buf, strings.Repeat(" ", n)+" idx\n")
|
||||
for i := range info {
|
||||
bar := info[i].bar
|
||||
if !info[i].ok {
|
||||
fmt.Fprint(&buf, "?"+strings.Repeat(" ", n))
|
||||
} else {
|
||||
fmt.Fprint(&buf, strings.Repeat("x", bar)+">"+strings.Repeat(" ", n-bar))
|
||||
}
|
||||
fmt.Fprintf(&buf, " %5d (id=%d)\n", info[i].idx, info[i].id)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Slice returns the MajorityConfig as a sorted slice.
|
||||
func (c MajorityConfig) Slice() []uint64 {
|
||||
var sl []uint64
|
||||
for id := range c {
|
||||
sl = append(sl, id)
|
||||
}
|
||||
sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] })
|
||||
return sl
|
||||
}
|
||||
|
||||
func insertionSort(sl []uint64) {
|
||||
a, b := 0, len(sl)
|
||||
for i := a + 1; i < b; i++ {
|
||||
for j := i; j > a && sl[j] < sl[j-1]; j-- {
|
||||
sl[j], sl[j-1] = sl[j-1], sl[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CommittedIndex computes the committed index from those supplied via the
|
||||
// provided AckedIndexer (for the active config).
|
||||
func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index {
|
||||
n := len(c)
|
||||
if n == 0 {
|
||||
// This plays well with joint quorums which, when one half is the zero
|
||||
// MajorityConfig, should behave like the other half.
|
||||
return math.MaxUint64
|
||||
}
|
||||
|
||||
// Use an on-stack slice to collect the committed indexes when n <= 7
|
||||
// (otherwise we alloc). The alternative is to stash a slice on
|
||||
// MajorityConfig, but this impairs usability (as is, MajorityConfig is just
|
||||
// a map, and that's nice). The assumption is that running with a
|
||||
// replication factor of >7 is rare, and in cases in which it happens
|
||||
// performance is a lesser concern (additionally the performance
|
||||
// implications of an allocation here are far from drastic).
|
||||
var stk [7]uint64
|
||||
var srt []uint64
|
||||
if len(stk) >= n {
|
||||
srt = stk[:n]
|
||||
} else {
|
||||
srt = make([]uint64, n)
|
||||
}
|
||||
|
||||
{
|
||||
// Fill the slice with the indexes observed. Any unused slots will be
|
||||
// left as zero; these correspond to voters that may report in, but
|
||||
// haven't yet. We fill from the right (since the zeroes will end up on
|
||||
// the left after sorting below anyway).
|
||||
i := n - 1
|
||||
for id := range c {
|
||||
if idx, ok := l.AckedIndex(id); ok {
|
||||
srt[i] = uint64(idx)
|
||||
i--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by index. Use a bespoke algorithm (copied from the stdlib's sort
|
||||
// package) to keep srt on the stack.
|
||||
insertionSort(srt)
|
||||
|
||||
// The smallest index into the array for which the value is acked by a
|
||||
// quorum. In other words, from the end of the slice, move n/2+1 to the
|
||||
// left (accounting for zero-indexing).
|
||||
pos := n - (n/2 + 1)
|
||||
return Index(srt[pos])
|
||||
}
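As a quick worked example of the rule above (an editor's sketch, not part of the source): with five voters whose acked indexes sort to [0, 5, 5, 7, 9], where 0 stands for a voter that has not reported yet, the quorum position is n-(n/2+1) = 5-3 = 2, so the committed index is 5.

func exampleQuorumPosition() uint64 {
	srt := []uint64{0, 5, 5, 7, 9} // sorted acked indexes; 0 means "not reported yet"
	n := len(srt)
	pos := n - (n/2 + 1) // 5 - 3 = 2
	return srt[pos]      // 5, acked by three of the five voters, i.e. a majority
}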
|
||||
|
||||
// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns
|
||||
// a result indicating whether the vote is pending (i.e. neither a quorum of
|
||||
// yes/no has been reached), won (a quorum of yes has been reached), or lost (a
|
||||
// quorum of no has been reached).
|
||||
func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult {
|
||||
if len(c) == 0 {
|
||||
// By convention, the elections on an empty config win. This comes in
|
||||
// handy with joint quorums because it'll make a half-populated joint
|
||||
// quorum behave like a majority quorum.
|
||||
return VoteWon
|
||||
}
|
||||
|
||||
ny := [2]int{} // vote counts for no and yes, respectively
|
||||
|
||||
var missing int
|
||||
for id := range c {
|
||||
v, ok := votes[id]
|
||||
if !ok {
|
||||
missing++
|
||||
continue
|
||||
}
|
||||
if v {
|
||||
ny[1]++
|
||||
} else {
|
||||
ny[0]++
|
||||
}
|
||||
}
|
||||
|
||||
q := len(c)/2 + 1
|
||||
if ny[1] >= q {
|
||||
return VoteWon
|
||||
}
|
||||
if ny[1]+missing >= q {
|
||||
return VotePending
|
||||
}
|
||||
return VoteLost
|
||||
}
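A small external usage sketch of the counting above (editor's example, assuming the go.etcd.io/etcd/raft/quorum import): with three voters the quorum is two, so two "yes" votes already win even while one vote is still outstanding.

func exampleVote() quorum.VoteResult {
	c := quorum.MajorityConfig{1: {}, 2: {}, 3: {}}
	// Voter 3 has not voted yet; two "yes" votes already reach the quorum of 2.
	return c.VoteResult(map[uint64]bool{1: true, 2: true}) // VoteWon
}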
|
58
vendor/go.etcd.io/etcd/raft/quorum/quorum.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package quorum
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Index is a Raft log position.
|
||||
type Index uint64
|
||||
|
||||
func (i Index) String() string {
|
||||
if i == math.MaxUint64 {
|
||||
return "∞"
|
||||
}
|
||||
return strconv.FormatUint(uint64(i), 10)
|
||||
}
|
||||
|
||||
// AckedIndexer allows looking up a commit index for a given ID of a voter
|
||||
// from a corresponding MajorityConfig.
|
||||
type AckedIndexer interface {
|
||||
AckedIndex(voterID uint64) (idx Index, found bool)
|
||||
}
|
||||
|
||||
type mapAckIndexer map[uint64]Index
|
||||
|
||||
func (m mapAckIndexer) AckedIndex(id uint64) (Index, bool) {
|
||||
idx, ok := m[id]
|
||||
return idx, ok
|
||||
}
|
||||
|
||||
// VoteResult indicates the outcome of a vote.
|
||||
//
|
||||
//go:generate stringer -type=VoteResult
|
||||
type VoteResult uint8
|
||||
|
||||
const (
|
||||
// VotePending indicates that the decision of the vote depends on future
|
||||
// votes, i.e. neither "yes" nor "no" has reached quorum yet.
|
||||
VotePending VoteResult = 1 + iota
|
||||
// VoteLost indicates that the quorum has voted "no".
|
||||
VoteLost
|
||||
// VoteWon indicates that the quorum has voted "yes".
|
||||
VoteWon
|
||||
)
|
26
vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
// Code generated by "stringer -type=VoteResult"; DO NOT EDIT.
|
||||
|
||||
package quorum
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[VotePending-1]
|
||||
_ = x[VoteLost-2]
|
||||
_ = x[VoteWon-3]
|
||||
}
|
||||
|
||||
const _VoteResult_name = "VotePendingVoteLostVoteWon"
|
||||
|
||||
var _VoteResult_index = [...]uint8{0, 11, 19, 26}
|
||||
|
||||
func (i VoteResult) String() string {
|
||||
i -= 1
|
||||
if i >= VoteResult(len(_VoteResult_index)-1) {
|
||||
return "VoteResult(" + strconv.FormatInt(int64(i+1), 10) + ")"
|
||||
}
|
||||
return _VoteResult_name[_VoteResult_index[i]:_VoteResult_index[i+1]]
|
||||
}
|
1656
vendor/go.etcd.io/etcd/raft/raft.go
generated
vendored
Normal file
File diff suppressed because it is too large
170
vendor/go.etcd.io/etcd/raft/raftpb/confchange.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raftpb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow
|
||||
// treating them in a unified manner.
|
||||
type ConfChangeI interface {
|
||||
AsV2() ConfChangeV2
|
||||
AsV1() (ConfChange, bool)
|
||||
}
|
||||
|
||||
// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2
|
||||
// and returns the result along with the corresponding EntryType.
|
||||
func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) {
|
||||
var typ EntryType
|
||||
var ccdata []byte
|
||||
var err error
|
||||
if ccv1, ok := c.AsV1(); ok {
|
||||
typ = EntryConfChange
|
||||
ccdata, err = ccv1.Marshal()
|
||||
} else {
|
||||
ccv2 := c.AsV2()
|
||||
typ = EntryConfChangeV2
|
||||
ccdata, err = ccv2.Marshal()
|
||||
}
|
||||
return typ, ccdata, err
|
||||
}
|
||||
|
||||
// AsV2 returns a V2 configuration change carrying out the same operation.
|
||||
func (c ConfChange) AsV2() ConfChangeV2 {
|
||||
return ConfChangeV2{
|
||||
Changes: []ConfChangeSingle{{
|
||||
Type: c.Type,
|
||||
NodeID: c.NodeID,
|
||||
}},
|
||||
Context: c.Context,
|
||||
}
|
||||
}
|
||||
|
||||
// AsV1 returns the ConfChange and true.
|
||||
func (c ConfChange) AsV1() (ConfChange, bool) {
|
||||
return c, true
|
||||
}
|
||||
|
||||
// AsV2 is the identity.
|
||||
func (c ConfChangeV2) AsV2() ConfChangeV2 { return c }
|
||||
|
||||
// AsV1 returns ConfChange{} and false.
|
||||
func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false }
|
||||
|
||||
// EnterJoint returns two bools. The second bool is true if and only if this
|
||||
// config change will use Joint Consensus, which is the case if it contains more
|
||||
// than one change or if the use of Joint Consensus was requested explicitly.
|
||||
// The first bool can only be true if the second one is, and indicates whether the
|
||||
// Joint State will be left automatically.
|
||||
func (c *ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) {
|
||||
// NB: in theory, more config changes could qualify for the "simple"
|
||||
// protocol but it depends on the config on top of which the changes apply.
|
||||
// For example, adding two learners is not OK if both nodes are part of the
|
||||
// base config (i.e. two voters are turned into learners in the process of
|
||||
// applying the conf change). In practice, these distinctions should not
|
||||
// matter, so we keep it simple and use Joint Consensus liberally.
|
||||
if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 {
|
||||
// Use Joint Consensus.
|
||||
var autoLeave bool
|
||||
switch c.Transition {
|
||||
case ConfChangeTransitionAuto:
|
||||
autoLeave = true
|
||||
case ConfChangeTransitionJointImplicit:
|
||||
autoLeave = true
|
||||
case ConfChangeTransitionJointExplicit:
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown transition: %+v", c))
|
||||
}
|
||||
return autoLeave, true
|
||||
}
|
||||
return false, false
|
||||
}
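As a hypothetical illustration of the rule above (editor's sketch, assuming the go.etcd.io/etcd/raft/raftpb import): a ConfChangeV2 carrying more than one change enters Joint Consensus, and with the default (auto) transition it will also leave the joint state automatically.

func exampleEnterJoint() (autoLeave, joint bool) {
	cc := raftpb.ConfChangeV2{Changes: []raftpb.ConfChangeSingle{
		{Type: raftpb.ConfChangeAddNode, NodeID: 4},
		{Type: raftpb.ConfChangeRemoveNode, NodeID: 1},
	}}
	// Two changes imply Joint Consensus; ConfChangeTransitionAuto (the zero
	// value) implies the joint state will be left automatically.
	return cc.EnterJoint() // (true, true)
}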
|
||||
|
||||
// LeaveJoint is true if the configuration change leaves a joint configuration.
|
||||
// This is the case if the ConfChangeV2 is zero, with the possible exception of
|
||||
// the Context field.
|
||||
func (c *ConfChangeV2) LeaveJoint() bool {
|
||||
cpy := *c
|
||||
cpy.Context = nil
|
||||
return proto.Equal(&cpy, &ConfChangeV2{})
|
||||
}
|
||||
|
||||
// ConfChangesFromString parses a space-delimited sequence of operations into a
|
||||
// slice of ConfChangeSingle. The supported operations are:
|
||||
// - vn: make n a voter,
|
||||
// - ln: make n a learner,
|
||||
// - rn: remove n, and
|
||||
// - un: update n.
|
||||
func ConfChangesFromString(s string) ([]ConfChangeSingle, error) {
|
||||
var ccs []ConfChangeSingle
|
||||
toks := strings.Split(strings.TrimSpace(s), " ")
|
||||
if toks[0] == "" {
|
||||
toks = nil
|
||||
}
|
||||
for _, tok := range toks {
|
||||
if len(tok) < 2 {
|
||||
return nil, fmt.Errorf("unknown token %s", tok)
|
||||
}
|
||||
var cc ConfChangeSingle
|
||||
switch tok[0] {
|
||||
case 'v':
|
||||
cc.Type = ConfChangeAddNode
|
||||
case 'l':
|
||||
cc.Type = ConfChangeAddLearnerNode
|
||||
case 'r':
|
||||
cc.Type = ConfChangeRemoveNode
|
||||
case 'u':
|
||||
cc.Type = ConfChangeUpdateNode
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown input: %s", tok)
|
||||
}
|
||||
id, err := strconv.ParseUint(tok[1:], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cc.NodeID = id
|
||||
ccs = append(ccs, cc)
|
||||
}
|
||||
return ccs, nil
|
||||
}
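A brief usage sketch (editor's example, not part of the source): the string form round-trips through ConfChangesToString below.

func exampleConfChangeString() (string, error) {
	ccs, err := raftpb.ConfChangesFromString("v1 v2 l3 r4")
	if err != nil {
		return "", err
	}
	// ccs now holds: add voter 1, add voter 2, add learner 3, remove 4.
	return raftpb.ConfChangesToString(ccs), nil // "v1 v2 l3 r4"
}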
|
||||
|
||||
// ConfChangesToString is the inverse to ConfChangesFromString.
|
||||
func ConfChangesToString(ccs []ConfChangeSingle) string {
|
||||
var buf strings.Builder
|
||||
for i, cc := range ccs {
|
||||
if i > 0 {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
switch cc.Type {
|
||||
case ConfChangeAddNode:
|
||||
buf.WriteByte('v')
|
||||
case ConfChangeAddLearnerNode:
|
||||
buf.WriteByte('l')
|
||||
case ConfChangeRemoveNode:
|
||||
buf.WriteByte('r')
|
||||
case ConfChangeUpdateNode:
|
||||
buf.WriteByte('u')
|
||||
default:
|
||||
buf.WriteString("unknown")
|
||||
}
|
||||
fmt.Fprintf(&buf, "%d", cc.NodeID)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
45
vendor/go.etcd.io/etcd/raft/raftpb/confstate.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raftpb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Equivalent returns a nil error if the inputs describe the same configuration.
|
||||
// On mismatch, returns a descriptive error showing the differences.
|
||||
func (cs ConfState) Equivalent(cs2 ConfState) error {
|
||||
cs1 := cs
|
||||
orig1, orig2 := cs1, cs2
|
||||
s := func(sl *[]uint64) {
|
||||
*sl = append([]uint64(nil), *sl...)
|
||||
sort.Slice(*sl, func(i, j int) bool { return (*sl)[i] < (*sl)[j] })
|
||||
}
|
||||
|
||||
for _, cs := range []*ConfState{&cs1, &cs2} {
|
||||
s(&cs.Voters)
|
||||
s(&cs.Learners)
|
||||
s(&cs.VotersOutgoing)
|
||||
s(&cs.LearnersNext)
|
||||
cs.XXX_unrecognized = nil
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(cs1, cs2) {
|
||||
return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2)
|
||||
}
|
||||
return nil
|
||||
}
|
2646
vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
239
vendor/go.etcd.io/etcd/raft/rawnode.go
generated
vendored
Normal file
@ -0,0 +1,239 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raft
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
pb "go.etcd.io/etcd/raft/raftpb"
|
||||
"go.etcd.io/etcd/raft/tracker"
|
||||
)
|
||||
|
||||
// ErrStepLocalMsg is returned when trying to step a local raft message.
|
||||
var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
|
||||
|
||||
// ErrStepPeerNotFound is returned when trying to step a response message
|
||||
// but there is no peer found in raft.prs for that node.
|
||||
var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
|
||||
|
||||
// RawNode is a thread-unsafe Node.
|
||||
// The methods of this struct correspond to the methods of Node and are described
|
||||
// more fully there.
|
||||
type RawNode struct {
|
||||
raft *raft
|
||||
prevSoftSt *SoftState
|
||||
prevHardSt pb.HardState
|
||||
}
|
||||
|
||||
// NewRawNode instantiates a RawNode from the given configuration.
|
||||
//
|
||||
// See Bootstrap() for bootstrapping an initial state; this replaces the former
|
||||
// 'peers' argument to this method (with identical behavior). However, it is
|
||||
// recommended that instead of calling Bootstrap, applications bootstrap their
|
||||
// state manually by setting up a Storage that has a first index > 1 and which
|
||||
// stores the desired ConfState as its InitialState.
|
||||
func NewRawNode(config *Config) (*RawNode, error) {
|
||||
r := newRaft(config)
|
||||
rn := &RawNode{
|
||||
raft: r,
|
||||
}
|
||||
rn.prevSoftSt = r.softState()
|
||||
rn.prevHardSt = r.hardState()
|
||||
return rn, nil
|
||||
}
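For orientation, a minimal construction sketch (editor's example): the Config field names below are the usual ones from the raft package, but since raft.go itself is suppressed in this diff they should be read as assumptions rather than as part of this change.

func exampleNewRawNode() (*raft.RawNode, error) {
	storage := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
	return raft.NewRawNode(c)
}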
|
||||
|
||||
// Tick advances the internal logical clock by a single tick.
|
||||
func (rn *RawNode) Tick() {
|
||||
rn.raft.tick()
|
||||
}
|
||||
|
||||
// TickQuiesced advances the internal logical clock by a single tick without
|
||||
// performing any other state machine processing. It allows the caller to avoid
|
||||
// periodic heartbeats and elections when all of the peers in a Raft group are
|
||||
// known to be at the same state. Expected usage is to periodically invoke Tick
|
||||
// or TickQuiesced depending on whether the group is "active" or "quiesced".
|
||||
//
|
||||
// WARNING: Be very careful about using this method as it subverts the Raft
|
||||
// state machine. You should probably be using Tick instead.
|
||||
func (rn *RawNode) TickQuiesced() {
|
||||
rn.raft.electionElapsed++
|
||||
}
|
||||
|
||||
// Campaign causes this RawNode to transition to candidate state.
|
||||
func (rn *RawNode) Campaign() error {
|
||||
return rn.raft.Step(pb.Message{
|
||||
Type: pb.MsgHup,
|
||||
})
|
||||
}
|
||||
|
||||
// Propose proposes data be appended to the raft log.
|
||||
func (rn *RawNode) Propose(data []byte) error {
|
||||
return rn.raft.Step(pb.Message{
|
||||
Type: pb.MsgProp,
|
||||
From: rn.raft.id,
|
||||
Entries: []pb.Entry{
|
||||
{Data: data},
|
||||
}})
|
||||
}
|
||||
|
||||
// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for
|
||||
// details.
|
||||
func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error {
|
||||
m, err := confChangeToMsg(cc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return rn.raft.Step(m)
|
||||
}
|
||||
|
||||
// ApplyConfChange applies a config change to the local node.
|
||||
func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
|
||||
cs := rn.raft.applyConfChange(cc.AsV2())
|
||||
return &cs
|
||||
}
|
||||
|
||||
// Step advances the state machine using the given message.
|
||||
func (rn *RawNode) Step(m pb.Message) error {
|
||||
// Ignore unexpected local messages received over the network.
|
||||
if IsLocalMsg(m.Type) {
|
||||
return ErrStepLocalMsg
|
||||
}
|
||||
if pr := rn.raft.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) {
|
||||
return rn.raft.Step(m)
|
||||
}
|
||||
return ErrStepPeerNotFound
|
||||
}
|
||||
|
||||
// Ready returns the outstanding work that the application needs to handle. This
|
||||
// includes appending and applying entries or a snapshot, updating the HardState,
|
||||
// and sending messages. The returned Ready() *must* be handled and subsequently
|
||||
// passed back via Advance().
|
||||
func (rn *RawNode) Ready() Ready {
|
||||
rd := rn.readyWithoutAccept()
|
||||
rn.acceptReady(rd)
|
||||
return rd
|
||||
}
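The contract described above is typically driven by a loop like the following sketch (editor's example; the Ready fields Entries, Snapshot, HardState, CommittedEntries and Messages are defined in the elided raft.go and are assumed here). Persist first, then send, then apply, then Advance.

func processReady(rn *raft.RawNode, storage *raft.MemoryStorage,
	send func(raftpb.Message), apply func(raftpb.Entry)) {
	if !rn.HasReady() {
		return
	}
	rd := rn.Ready()
	// Persist before sending: entries, snapshot and HardState must be stable.
	_ = storage.Append(rd.Entries)
	if !raft.IsEmptySnap(rd.Snapshot) {
		_ = storage.ApplySnapshot(rd.Snapshot)
	}
	if !raft.IsEmptyHardState(rd.HardState) {
		_ = storage.SetHardState(rd.HardState)
	}
	for _, m := range rd.Messages {
		send(m)
	}
	for _, e := range rd.CommittedEntries {
		apply(e)
	}
	rn.Advance(rd)
}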
|
||||
|
||||
// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there
|
||||
// is no obligation that the Ready must be handled.
|
||||
func (rn *RawNode) readyWithoutAccept() Ready {
|
||||
return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
|
||||
}
|
||||
|
||||
// acceptReady is called when the consumer of the RawNode has decided to go
|
||||
// ahead and handle a Ready. Nothing must alter the state of the RawNode between
|
||||
// this call and the prior call to Ready().
|
||||
func (rn *RawNode) acceptReady(rd Ready) {
|
||||
if rd.SoftState != nil {
|
||||
rn.prevSoftSt = rd.SoftState
|
||||
}
|
||||
if len(rd.ReadStates) != 0 {
|
||||
rn.raft.readStates = nil
|
||||
}
|
||||
rn.raft.msgs = nil
|
||||
}
|
||||
|
||||
// HasReady is called when the RawNode user needs to check whether any Ready is pending.
|
||||
// Checking logic in this method should be consistent with Ready.containsUpdates().
|
||||
func (rn *RawNode) HasReady() bool {
|
||||
r := rn.raft
|
||||
if !r.softState().equal(rn.prevSoftSt) {
|
||||
return true
|
||||
}
|
||||
if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
|
||||
return true
|
||||
}
|
||||
if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
|
||||
return true
|
||||
}
|
||||
if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
|
||||
return true
|
||||
}
|
||||
if len(r.readStates) != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Advance notifies the RawNode that the application has applied and saved progress in the
|
||||
// last Ready results.
|
||||
func (rn *RawNode) Advance(rd Ready) {
|
||||
if !IsEmptyHardState(rd.HardState) {
|
||||
rn.prevHardSt = rd.HardState
|
||||
}
|
||||
rn.raft.advance(rd)
|
||||
}
|
||||
|
||||
// Status returns the current status of the given group. This allocates, see
|
||||
// BasicStatus and WithProgress for allocation-friendlier choices.
|
||||
func (rn *RawNode) Status() Status {
|
||||
status := getStatus(rn.raft)
|
||||
return status
|
||||
}
|
||||
|
||||
// BasicStatus returns a BasicStatus. Notably this does not contain the
|
||||
// Progress map; see WithProgress for an allocation-free way to inspect it.
|
||||
func (rn *RawNode) BasicStatus() BasicStatus {
|
||||
return getBasicStatus(rn.raft)
|
||||
}
|
||||
|
||||
// ProgressType indicates the type of replica a Progress corresponds to.
|
||||
type ProgressType byte
|
||||
|
||||
const (
|
||||
// ProgressTypePeer accompanies a Progress for a regular peer replica.
|
||||
ProgressTypePeer ProgressType = iota
|
||||
// ProgressTypeLearner accompanies a Progress for a learner replica.
|
||||
ProgressTypeLearner
|
||||
)
|
||||
|
||||
// WithProgress is a helper to introspect the Progress for this node and its
|
||||
// peers.
|
||||
func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) {
|
||||
rn.raft.prs.Visit(func(id uint64, pr *tracker.Progress) {
|
||||
typ := ProgressTypePeer
|
||||
if pr.IsLearner {
|
||||
typ = ProgressTypeLearner
|
||||
}
|
||||
p := *pr
|
||||
p.Inflights = nil
|
||||
visitor(id, typ, p)
|
||||
})
|
||||
}
|
||||
|
||||
// ReportUnreachable reports the given node is not reachable for the last send.
|
||||
func (rn *RawNode) ReportUnreachable(id uint64) {
|
||||
_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
|
||||
}
|
||||
|
||||
// ReportSnapshot reports the status of the sent snapshot.
|
||||
func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
|
||||
rej := status == SnapshotFailure
|
||||
|
||||
_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
|
||||
}
|
||||
|
||||
// TransferLeader tries to transfer leadership to the given transferee.
|
||||
func (rn *RawNode) TransferLeader(transferee uint64) {
|
||||
_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
|
||||
}
|
||||
|
||||
// ReadIndex requests a read state. The read state will be set in ready.
|
||||
// Read State has a read index. Once the application advances further than the read
|
||||
// index, any linearizable read requests issued before the read request can be
|
||||
// processed safely. The read state will have the same rctx attached.
|
||||
func (rn *RawNode) ReadIndex(rctx []byte) {
|
||||
_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
|
||||
}
|
121
vendor/go.etcd.io/etcd/raft/read_only.go
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raft
|
||||
|
||||
import pb "go.etcd.io/etcd/raft/raftpb"
|
||||
|
||||
// ReadState provides state for a read-only query.
// It is the caller's responsibility to call ReadIndex before retrieving this
// state from Ready, and it is also the caller's duty to check that the state
// matches its own request via RequestCtx, e.g. by using a unique ID as the
// RequestCtx.
|
||||
type ReadState struct {
|
||||
Index uint64
|
||||
RequestCtx []byte
|
||||
}
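A usage sketch for the above (editor's example, assuming the standard library bytes package and the ReadStates field of Ready, which lives in the elided raft.go): after calling rn.ReadIndex(reqCtx) with a unique context, watch subsequent Ready values for a matching RequestCtx.

func matchReadState(rd raft.Ready, reqCtx []byte) (uint64, bool) {
	for _, rs := range rd.ReadStates {
		if bytes.Equal(rs.RequestCtx, reqCtx) {
			// The read may be served once the applied index reaches rs.Index.
			return rs.Index, true
		}
	}
	return 0, false
}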
|
||||
|
||||
type readIndexStatus struct {
|
||||
req pb.Message
|
||||
index uint64
|
||||
// NB: this never records 'false', but it's more convenient to use this
|
||||
// instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If
|
||||
// this becomes performance sensitive enough (doubtful), quorum.VoteResult
|
||||
// can change to an API that is closer to that of CommittedIndex.
|
||||
acks map[uint64]bool
|
||||
}
|
||||
|
||||
type readOnly struct {
|
||||
option ReadOnlyOption
|
||||
pendingReadIndex map[string]*readIndexStatus
|
||||
readIndexQueue []string
|
||||
}
|
||||
|
||||
func newReadOnly(option ReadOnlyOption) *readOnly {
|
||||
return &readOnly{
|
||||
option: option,
|
||||
pendingReadIndex: make(map[string]*readIndexStatus),
|
||||
}
|
||||
}
|
||||
|
||||
// addRequest adds a read-only request into the readonly struct.
|
||||
// `index` is the commit index of the raft state machine when it received
|
||||
// the read only request.
|
||||
// `m` is the original read only request message from the local or remote node.
|
||||
func (ro *readOnly) addRequest(index uint64, m pb.Message) {
|
||||
s := string(m.Entries[0].Data)
|
||||
if _, ok := ro.pendingReadIndex[s]; ok {
|
||||
return
|
||||
}
|
||||
ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)}
|
||||
ro.readIndexQueue = append(ro.readIndexQueue, s)
|
||||
}
|
||||
|
||||
// recvAck notifies the readonly struct that the raft state machine received
// an acknowledgment of the heartbeat attached to the read-only request
// context.
|
||||
func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool {
|
||||
rs, ok := ro.pendingReadIndex[string(context)]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
rs.acks[id] = true
|
||||
return rs.acks
|
||||
}
|
||||
|
||||
// advance advances the read only request queue kept by the readonly struct.
|
||||
// It dequeues the requests until it finds the read only request that has
|
||||
// the same context as the given `m`.
|
||||
func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
|
||||
var (
|
||||
i int
|
||||
found bool
|
||||
)
|
||||
|
||||
ctx := string(m.Context)
|
||||
rss := []*readIndexStatus{}
|
||||
|
||||
for _, okctx := range ro.readIndexQueue {
|
||||
i++
|
||||
rs, ok := ro.pendingReadIndex[okctx]
|
||||
if !ok {
|
||||
panic("cannot find corresponding read state from pending map")
|
||||
}
|
||||
rss = append(rss, rs)
|
||||
if okctx == ctx {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
ro.readIndexQueue = ro.readIndexQueue[i:]
|
||||
for _, rs := range rss {
|
||||
delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
|
||||
}
|
||||
return rss
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// lastPendingRequestCtx returns the context of the last pending read only
|
||||
// request in readonly struct.
|
||||
func (ro *readOnly) lastPendingRequestCtx() string {
|
||||
if len(ro.readIndexQueue) == 0 {
|
||||
return ""
|
||||
}
|
||||
return ro.readIndexQueue[len(ro.readIndexQueue)-1]
|
||||
}
|
106
vendor/go.etcd.io/etcd/raft/status.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raft
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
pb "go.etcd.io/etcd/raft/raftpb"
|
||||
"go.etcd.io/etcd/raft/tracker"
|
||||
)
|
||||
|
||||
// Status contains information about this Raft peer and its view of the system.
|
||||
// The Progress is only populated on the leader.
|
||||
type Status struct {
|
||||
BasicStatus
|
||||
Config tracker.Config
|
||||
Progress map[uint64]tracker.Progress
|
||||
}
|
||||
|
||||
// BasicStatus contains basic information about the Raft peer. It does not allocate.
|
||||
type BasicStatus struct {
|
||||
ID uint64
|
||||
|
||||
pb.HardState
|
||||
SoftState
|
||||
|
||||
Applied uint64
|
||||
|
||||
LeadTransferee uint64
|
||||
}
|
||||
|
||||
func getProgressCopy(r *raft) map[uint64]tracker.Progress {
|
||||
m := make(map[uint64]tracker.Progress)
|
||||
r.prs.Visit(func(id uint64, pr *tracker.Progress) {
|
||||
p := *pr
p.Inflights = pr.Inflights.Clone()
// Zero the parameter so the shared Progress cannot be modified by accident.
pr = nil
|
||||
|
||||
m[id] = p
|
||||
})
|
||||
return m
|
||||
}
|
||||
|
||||
func getBasicStatus(r *raft) BasicStatus {
|
||||
s := BasicStatus{
|
||||
ID: r.id,
|
||||
LeadTransferee: r.leadTransferee,
|
||||
}
|
||||
s.HardState = r.hardState()
|
||||
s.SoftState = *r.softState()
|
||||
s.Applied = r.raftLog.applied
|
||||
return s
|
||||
}
|
||||
|
||||
// getStatus gets a copy of the current raft status.
|
||||
func getStatus(r *raft) Status {
|
||||
var s Status
|
||||
s.BasicStatus = getBasicStatus(r)
|
||||
if s.RaftState == StateLeader {
|
||||
s.Progress = getProgressCopy(r)
|
||||
}
|
||||
s.Config = r.prs.Config.Clone()
|
||||
return s
|
||||
}
|
||||
|
||||
// MarshalJSON translates the raft status into JSON.
|
||||
// TODO: try to simplify this by introducing ID type into raft
|
||||
func (s Status) MarshalJSON() ([]byte, error) {
|
||||
j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
|
||||
s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
|
||||
|
||||
if len(s.Progress) == 0 {
|
||||
j += "},"
|
||||
} else {
|
||||
for k, v := range s.Progress {
|
||||
subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
|
||||
j += subj
|
||||
}
|
||||
// remove the trailing ","
|
||||
j = j[:len(j)-1] + "},"
|
||||
}
|
||||
|
||||
j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
|
||||
return []byte(j), nil
|
||||
}
|
||||
|
||||
func (s Status) String() string {
|
||||
b, err := s.MarshalJSON()
|
||||
if err != nil {
|
||||
raftLogger.Panicf("unexpected error: %v", err)
|
||||
}
|
||||
return string(b)
|
||||
}
|
273
vendor/go.etcd.io/etcd/raft/storage.go
generated
vendored
Normal file
@ -0,0 +1,273 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package raft
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
pb "go.etcd.io/etcd/raft/raftpb"
|
||||
)
|
||||
|
||||
// ErrCompacted is returned by Storage.Entries/Compact when a requested
|
||||
// index is unavailable because it predates the last snapshot.
|
||||
var ErrCompacted = errors.New("requested index is unavailable due to compaction")
|
||||
|
||||
// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
|
||||
// index is older than the existing snapshot.
|
||||
var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
|
||||
|
||||
// ErrUnavailable is returned by Storage interface when the requested log entries
|
||||
// are unavailable.
|
||||
var ErrUnavailable = errors.New("requested entry at index is unavailable")
|
||||
|
||||
// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
|
||||
// snapshot is temporarily unavailable.
|
||||
var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
|
||||
|
||||
// Storage is an interface that may be implemented by the application
|
||||
// to retrieve log entries from storage.
|
||||
//
|
||||
// If any Storage method returns an error, the raft instance will
|
||||
// become inoperable and refuse to participate in elections; the
|
||||
// application is responsible for cleanup and recovery in this case.
|
||||
type Storage interface {
|
||||
// TODO(tbg): split this into two interfaces, LogStorage and StateStorage.
|
||||
|
||||
// InitialState returns the saved HardState and ConfState information.
|
||||
InitialState() (pb.HardState, pb.ConfState, error)
|
||||
// Entries returns a slice of log entries in the range [lo,hi).
|
||||
// MaxSize limits the total size of the log entries returned, but
|
||||
// Entries returns at least one entry if any.
|
||||
Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
|
||||
// Term returns the term of entry i, which must be in the range
|
||||
// [FirstIndex()-1, LastIndex()]. The term of the entry before
|
||||
// FirstIndex is retained for matching purposes even though the
|
||||
// rest of that entry may not be available.
|
||||
Term(i uint64) (uint64, error)
|
||||
// LastIndex returns the index of the last entry in the log.
|
||||
LastIndex() (uint64, error)
|
||||
// FirstIndex returns the index of the first log entry that is
|
||||
// possibly available via Entries (older entries have been incorporated
|
||||
// into the latest Snapshot; if storage only contains the dummy entry the
|
||||
// first log entry is not available).
|
||||
FirstIndex() (uint64, error)
|
||||
// Snapshot returns the most recent snapshot.
|
||||
// If the snapshot is temporarily unavailable, it should return
// ErrSnapshotTemporarilyUnavailable, so the raft state machine knows that
// Storage needs some time to prepare the snapshot and can call Snapshot later.
|
||||
Snapshot() (pb.Snapshot, error)
|
||||
}
|
||||
|
||||
// MemoryStorage implements the Storage interface backed by an
|
||||
// in-memory array.
|
||||
type MemoryStorage struct {
|
||||
// Protects access to all fields. Most methods of MemoryStorage are
|
||||
// run on the raft goroutine, but Append() is run on an application
|
||||
// goroutine.
|
||||
sync.Mutex
|
||||
|
||||
hardState pb.HardState
|
||||
snapshot pb.Snapshot
|
||||
// ents[i] has raft log position i+snapshot.Metadata.Index
|
||||
ents []pb.Entry
|
||||
}
|
||||
|
||||
// NewMemoryStorage creates an empty MemoryStorage.
|
||||
func NewMemoryStorage() *MemoryStorage {
|
||||
return &MemoryStorage{
|
||||
// When starting from scratch populate the list with a dummy entry at term zero.
|
||||
ents: make([]pb.Entry, 1),
|
||||
}
|
||||
}
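Tying this to the bootstrapping advice in NewRawNode above, a restart sketch (editor's example) rehydrates a MemoryStorage from previously persisted state before handing it to raft:

func restoreStorage(snap raftpb.Snapshot, hs raftpb.HardState, ents []raftpb.Entry) *raft.MemoryStorage {
	storage := raft.NewMemoryStorage()
	_ = storage.ApplySnapshot(snap) // sets the first index and the ConfState
	_ = storage.SetHardState(hs)
	_ = storage.Append(ents) // entries with indexes after the snapshot
	return storage
}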
|
||||
|
||||
// InitialState implements the Storage interface.
|
||||
func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
|
||||
return ms.hardState, ms.snapshot.Metadata.ConfState, nil
|
||||
}
|
||||
|
||||
// SetHardState saves the current HardState.
|
||||
func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
ms.hardState = st
|
||||
return nil
|
||||
}
|
||||
|
||||
// Entries implements the Storage interface.
|
||||
func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
offset := ms.ents[0].Index
|
||||
if lo <= offset {
|
||||
return nil, ErrCompacted
|
||||
}
|
||||
if hi > ms.lastIndex()+1 {
|
||||
raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
|
||||
}
|
||||
// only contains dummy entries.
|
||||
if len(ms.ents) == 1 {
|
||||
return nil, ErrUnavailable
|
||||
}
|
||||
|
||||
ents := ms.ents[lo-offset : hi-offset]
|
||||
return limitSize(ents, maxSize), nil
|
||||
}
|
||||
|
||||
// Term implements the Storage interface.
|
||||
func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
offset := ms.ents[0].Index
|
||||
if i < offset {
|
||||
return 0, ErrCompacted
|
||||
}
|
||||
if int(i-offset) >= len(ms.ents) {
|
||||
return 0, ErrUnavailable
|
||||
}
|
||||
return ms.ents[i-offset].Term, nil
|
||||
}
|
||||
|
||||
// LastIndex implements the Storage interface.
|
||||
func (ms *MemoryStorage) LastIndex() (uint64, error) {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
return ms.lastIndex(), nil
|
||||
}
|
||||
|
||||
func (ms *MemoryStorage) lastIndex() uint64 {
|
||||
return ms.ents[0].Index + uint64(len(ms.ents)) - 1
|
||||
}
|
||||
|
||||
// FirstIndex implements the Storage interface.
|
||||
func (ms *MemoryStorage) FirstIndex() (uint64, error) {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
return ms.firstIndex(), nil
|
||||
}
|
||||
|
||||
func (ms *MemoryStorage) firstIndex() uint64 {
|
||||
return ms.ents[0].Index + 1
|
||||
}
|
||||
|
||||
// Snapshot implements the Storage interface.
|
||||
func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
return ms.snapshot, nil
|
||||
}
|
||||
|
||||
// ApplySnapshot overwrites the contents of this Storage object with
|
||||
// those of the given snapshot.
|
||||
func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
|
||||
//handle check for old snapshot being applied
|
||||
msIndex := ms.snapshot.Metadata.Index
|
||||
snapIndex := snap.Metadata.Index
|
||||
if msIndex >= snapIndex {
|
||||
return ErrSnapOutOfDate
|
||||
}
|
||||
|
||||
ms.snapshot = snap
|
||||
ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
|
||||
// can be used to reconstruct the state at that point.
|
||||
// If any configuration changes have been made since the last compaction,
|
||||
// the result of the last ApplyConfChange must be passed in.
|
||||
func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
if i <= ms.snapshot.Metadata.Index {
|
||||
return pb.Snapshot{}, ErrSnapOutOfDate
|
||||
}
|
||||
|
||||
offset := ms.ents[0].Index
|
||||
if i > ms.lastIndex() {
|
||||
raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
|
||||
}
|
||||
|
||||
ms.snapshot.Metadata.Index = i
|
||||
ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
|
||||
if cs != nil {
|
||||
ms.snapshot.Metadata.ConfState = *cs
|
||||
}
|
||||
ms.snapshot.Data = data
|
||||
return ms.snapshot, nil
|
||||
}
|
||||
|
||||
// Compact discards all log entries prior to compactIndex.
|
||||
// It is the application's responsibility to not attempt to compact an index
|
||||
// greater than raftLog.applied.
|
||||
func (ms *MemoryStorage) Compact(compactIndex uint64) error {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
offset := ms.ents[0].Index
|
||||
if compactIndex <= offset {
|
||||
return ErrCompacted
|
||||
}
|
||||
if compactIndex > ms.lastIndex() {
|
||||
raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
|
||||
}
|
||||
|
||||
i := compactIndex - offset
|
||||
ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
|
||||
ents[0].Index = ms.ents[i].Index
|
||||
ents[0].Term = ms.ents[i].Term
|
||||
ents = append(ents, ms.ents[i+1:]...)
|
||||
ms.ents = ents
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append the new entries to storage.
|
||||
// TODO (xiangli): ensure the entries are continuous and
|
||||
// entries[0].Index > ms.entries[0].Index
|
||||
func (ms *MemoryStorage) Append(entries []pb.Entry) error {
|
||||
if len(entries) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
|
||||
first := ms.firstIndex()
|
||||
last := entries[0].Index + uint64(len(entries)) - 1
|
||||
|
||||
// shortcut if there is no new entry.
|
||||
if last < first {
|
||||
return nil
|
||||
}
|
||||
// truncate compacted entries
|
||||
if first > entries[0].Index {
|
||||
entries = entries[first-entries[0].Index:]
|
||||
}
|
||||
|
||||
offset := entries[0].Index - ms.ents[0].Index
|
||||
switch {
|
||||
case uint64(len(ms.ents)) > offset:
|
||||
ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
|
||||
ms.ents = append(ms.ents, entries...)
|
||||
case uint64(len(ms.ents)) == offset:
|
||||
ms.ents = append(ms.ents, entries...)
|
||||
default:
|
||||
raftLogger.Panicf("missing log entry [last: %d, append at: %d]",
|
||||
ms.lastIndex(), entries[0].Index)
|
||||
}
|
||||
return nil
|
||||
}
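A snapshot-then-compact sketch (editor's example) shows how the calls above cooperate: CreateSnapshot captures the state at an applied index, and Compact then discards the entries the snapshot made redundant. The trailing-entry retention window is a hypothetical application choice.

func snapshotAndCompact(ms *raft.MemoryStorage, appliedIndex uint64,
	cs *raftpb.ConfState, data []byte) error {
	if _, err := ms.CreateSnapshot(appliedIndex, cs, data); err != nil {
		return err
	}
	// Keep some trailing entries so slow followers can still catch up
	// from the log instead of immediately needing the snapshot.
	const trailing = 128 // hypothetical retention window
	if appliedIndex <= trailing {
		return nil
	}
	return ms.Compact(appliedIndex - trailing)
}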
|
132
vendor/go.etcd.io/etcd/raft/tracker/inflights.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tracker
|
||||
|
||||
// Inflights limits the number of MsgApp (represented by the largest index
|
||||
// contained within) sent to followers but not yet acknowledged by them. Callers
|
||||
// use Full() to check whether more messages can be sent, call Add() whenever
|
||||
// they are sending a new append, and release "quota" via FreeLE() whenever an
|
||||
// ack is received.
|
||||
type Inflights struct {
|
||||
// the starting index in the buffer
|
||||
start int
|
||||
// number of inflights in the buffer
|
||||
count int
|
||||
|
||||
// the size of the buffer
|
||||
size int
|
||||
|
||||
// buffer contains the index of the last entry
|
||||
// inside one message.
|
||||
buffer []uint64
|
||||
}
|
||||
|
||||
// NewInflights sets up an Inflights that allows up to 'size' inflight messages.
|
||||
func NewInflights(size int) *Inflights {
|
||||
return &Inflights{
|
||||
size: size,
|
||||
}
|
||||
}
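A small sketch of the quota cycle described above (editor's example, assuming the go.etcd.io/etcd/raft/tracker import): check Full before Add, then release quota with FreeLE once an ack for that index arrives.

func exampleInflights() int {
	in := tracker.NewInflights(3)
	for idx := uint64(1); idx <= 3; idx++ {
		if in.Full() {
			break
		}
		in.Add(idx) // indexes must be added in increasing order
	}
	in.FreeLE(2)      // an ack covering everything up to index 2
	return in.Count() // 1: only index 3 is still in flight
}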
|
||||
|
||||
// Clone returns an *Inflights that is identical to but shares no memory with
|
||||
// the receiver.
|
||||
func (in *Inflights) Clone() *Inflights {
|
||||
ins := *in
|
||||
ins.buffer = append([]uint64(nil), in.buffer...)
|
||||
return &ins
|
||||
}
|
||||
|
||||
// Add notifies the Inflights that a new message with the given index is being
|
||||
// dispatched. Full() must be called prior to Add() to verify that there is room
|
||||
// for one more message, and consecutive calls to Add() must provide a
|
||||
// monotonic sequence of indexes.
|
||||
func (in *Inflights) Add(inflight uint64) {
|
||||
if in.Full() {
|
||||
panic("cannot add into a Full inflights")
|
||||
}
|
||||
next := in.start + in.count
|
||||
size := in.size
|
||||
if next >= size {
|
||||
next -= size
|
||||
}
|
||||
if next >= len(in.buffer) {
|
||||
in.grow()
|
||||
}
|
||||
in.buffer[next] = inflight
|
||||
in.count++
|
||||
}
|
||||
|
||||
// grow the inflight buffer by doubling up to inflights.size. We grow on demand
|
||||
// instead of preallocating to inflights.size to handle systems which have
|
||||
// thousands of Raft groups per process.
|
||||
func (in *Inflights) grow() {
|
||||
newSize := len(in.buffer) * 2
|
||||
if newSize == 0 {
|
||||
newSize = 1
|
||||
} else if newSize > in.size {
|
||||
newSize = in.size
|
||||
}
|
||||
newBuffer := make([]uint64, newSize)
|
||||
copy(newBuffer, in.buffer)
|
||||
in.buffer = newBuffer
|
||||
}
|
||||
|
||||
// FreeLE frees the inflights smaller than or equal to the given `to` flight.
|
||||
func (in *Inflights) FreeLE(to uint64) {
|
||||
if in.count == 0 || to < in.buffer[in.start] {
|
||||
// out of the left side of the window
|
||||
return
|
||||
}
|
||||
|
||||
idx := in.start
|
||||
var i int
|
||||
for i = 0; i < in.count; i++ {
|
||||
if to < in.buffer[idx] { // found the first large inflight
|
||||
break
|
||||
}
|
||||
|
||||
// increase index and maybe rotate
|
||||
size := in.size
|
||||
if idx++; idx >= size {
|
||||
idx -= size
|
||||
}
|
||||
}
|
||||
// free i inflights and set new start index
|
||||
in.count -= i
|
||||
in.start = idx
|
||||
if in.count == 0 {
|
||||
// inflights is empty, reset the start index so that we don't grow the
|
||||
// buffer unnecessarily.
|
||||
in.start = 0
|
||||
}
|
||||
}
|
||||
|
||||
// FreeFirstOne releases the first inflight. This is a no-op if nothing is
|
||||
// inflight.
|
||||
func (in *Inflights) FreeFirstOne() { in.FreeLE(in.buffer[in.start]) }
|
||||
|
||||
// Full returns true if no more messages can be sent at the moment.
|
||||
func (in *Inflights) Full() bool {
|
||||
return in.count == in.size
|
||||
}
|
||||
|
||||
// Count returns the number of inflight messages.
|
||||
func (in *Inflights) Count() int { return in.count }
|
||||
|
||||
// reset frees all inflights.
|
||||
func (in *Inflights) reset() {
|
||||
in.count = 0
|
||||
in.start = 0
|
||||
}
|
259
vendor/go.etcd.io/etcd/raft/tracker/progress.go
generated
vendored
Normal file
@ -0,0 +1,259 @@
|
||||
// Copyright 2019 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tracker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Progress represents a follower’s progress in the view of the leader. Leader
|
||||
// maintains progresses of all followers, and sends entries to the follower
|
||||
// based on its progress.
|
||||
//
|
||||
// NB(tbg): Progress is basically a state machine whose transitions are mostly
|
||||
// strewn around `*raft.raft`. Additionally, some fields are only used when in a
|
||||
// certain State. All of this isn't ideal.
|
||||
type Progress struct {
|
||||
Match, Next uint64
|
||||
// State defines how the leader should interact with the follower.
|
||||
//
|
||||
// When in StateProbe, leader sends at most one replication message
|
||||
// per heartbeat interval. It also probes actual progress of the follower.
|
||||
//
|
||||
// When in StateReplicate, leader optimistically increases next
|
||||
// to the latest entry sent after sending replication message. This is
|
||||
// an optimized state for fast replicating log entries to the follower.
|
||||
//
|
||||
// When in StateSnapshot, leader should have sent out snapshot
|
||||
// before and stops sending any replication message.
|
||||
State StateType
|
||||
|
||||
// PendingSnapshot is used in StateSnapshot.
|
||||
// If there is a pending snapshot, the pendingSnapshot will be set to the
|
||||
// index of the snapshot. If pendingSnapshot is set, the replication process of
|
||||
// this Progress will be paused. raft will not resend snapshot until the pending one
|
||||
// is reported to be failed.
|
||||
PendingSnapshot uint64
|
||||
|
||||
// RecentActive is true if the progress is recently active. Receiving any messages
|
||||
// from the corresponding follower indicates the progress is active.
|
||||
// RecentActive can be reset to false after an election timeout.
|
||||
//
|
||||
// TODO(tbg): the leader should always have this set to true.
|
||||
RecentActive bool
|
||||
|
||||
// ProbeSent is used while this follower is in StateProbe. When ProbeSent is
|
||||
// true, raft should pause sending replication message to this peer until
|
||||
// ProbeSent is reset. See ProbeAcked() and IsPaused().
|
||||
ProbeSent bool
|
||||
|
||||
// Inflights is a sliding window for the inflight messages.
|
||||
// Each inflight message contains one or more log entries.
|
||||
// The max number of entries per message is defined in raft config as MaxSizePerMsg.
|
||||
// Thus inflight effectively limits both the number of inflight messages
|
||||
// and the bandwidth each Progress can use.
|
||||
// When inflights is Full, no more message should be sent.
|
||||
// When a leader sends out a message, the index of the last
|
||||
// entry should be added to inflights. The index MUST be added
|
||||
// into inflights in order.
|
||||
// When a leader receives a reply, the previous inflights should
|
||||
// be freed by calling inflights.FreeLE with the index of the last
|
||||
// received entry.
|
||||
Inflights *Inflights
|
||||
|
||||
// IsLearner is true if this progress is tracked for a learner.
|
||||
IsLearner bool
|
||||
}
|
||||
|
||||
// ResetState moves the Progress into the specified State, resetting ProbeSent,
|
||||
// PendingSnapshot, and Inflights.
|
||||
func (pr *Progress) ResetState(state StateType) {
|
||||
pr.ProbeSent = false
|
||||
pr.PendingSnapshot = 0
|
||||
pr.State = state
|
||||
pr.Inflights.reset()
|
||||
}
|
||||
|
||||
func max(a, b uint64) uint64 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func min(a, b uint64) uint64 {
|
||||
if a > b {
|
||||
return b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// ProbeAcked is called when this peer has accepted an append. It resets
|
||||
// ProbeSent to signal that additional append messages should be sent without
|
||||
// further delay.
|
||||
func (pr *Progress) ProbeAcked() {
|
||||
pr.ProbeSent = false
|
||||
}
|
||||
|
||||
// BecomeProbe transitions into StateProbe. Next is reset to Match+1 or,
|
||||
// optionally and if larger, the index of the pending snapshot.
|
||||
func (pr *Progress) BecomeProbe() {
|
||||
// If the original state is StateSnapshot, progress knows that
|
||||
// the pending snapshot has been sent to this peer successfully, then
|
||||
// probes from pendingSnapshot + 1.
|
||||
if pr.State == StateSnapshot {
|
||||
pendingSnapshot := pr.PendingSnapshot
|
||||
pr.ResetState(StateProbe)
|
||||
pr.Next = max(pr.Match+1, pendingSnapshot+1)
|
||||
} else {
|
||||
pr.ResetState(StateProbe)
|
||||
pr.Next = pr.Match + 1
|
||||
}
|
||||
}
|
||||
|
||||
// BecomeReplicate transitions into StateReplicate, resetting Next to Match+1.
|
||||
func (pr *Progress) BecomeReplicate() {
|
||||
pr.ResetState(StateReplicate)
|
||||
pr.Next = pr.Match + 1
|
||||
}
|
||||
|
||||
// BecomeSnapshot moves the Progress to StateSnapshot with the specified pending
|
||||
// snapshot index.
|
||||
func (pr *Progress) BecomeSnapshot(snapshoti uint64) {
|
||||
pr.ResetState(StateSnapshot)
|
||||
pr.PendingSnapshot = snapshoti
|
||||
}

// MaybeUpdate is called when an MsgAppResp arrives from the follower, with the
// index acked by it. The method returns false if the given index n comes from
// an outdated message. Otherwise it updates the progress and returns true.
func (pr *Progress) MaybeUpdate(n uint64) bool {
	var updated bool
	if pr.Match < n {
		pr.Match = n
		updated = true
		pr.ProbeAcked()
	}
	if pr.Next < n+1 {
		pr.Next = n + 1
	}
	return updated
}

// OptimisticUpdate signals that appends all the way up to and including index n
// are in-flight. As a result, Next is increased to n+1.
func (pr *Progress) OptimisticUpdate(n uint64) { pr.Next = n + 1 }

// MaybeDecrTo adjusts the Progress to the receipt of a MsgApp rejection. The
// arguments are the index the follower rejected to append to its log, and its
// last index.
//
// Rejections can happen spuriously as messages are sent out of order or
// duplicated. In such cases, the rejection pertains to an index that the
// Progress already knows was previously acknowledged, and false is returned
// without changing the Progress.
//
// If the rejection is genuine, Next is lowered sensibly, and the Progress is
// cleared for sending log entries.
func (pr *Progress) MaybeDecrTo(rejected, last uint64) bool {
	if pr.State == StateReplicate {
		// The rejection must be stale if the progress has matched and "rejected"
		// is smaller than "match".
		if rejected <= pr.Match {
			return false
		}
		// Directly decrease next to match + 1.
		//
		// TODO(tbg): why not use last if it's larger?
		pr.Next = pr.Match + 1
		return true
	}

	// The rejection must be stale if "rejected" does not match next - 1. This
	// is because non-replicating followers are probed one entry at a time.
	if pr.Next-1 != rejected {
		return false
	}

	if pr.Next = min(rejected, last+1); pr.Next < 1 {
		pr.Next = 1
	}
	pr.ProbeSent = false
	return true
}
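
// exampleRejectionHandling is an illustrative sketch, not part of upstream
// etcd: a probing follower whose log ends at index 3 rejects the append at
// index 7, so Next drops to min(rejected, last+1) = 4; a stale duplicate of
// the same rejection is then ignored.
func exampleRejectionHandling() {
	pr := &Progress{State: StateProbe, Next: 8}

	if pr.MaybeDecrTo(7, 3) {
		_ = pr.Next // 4: probing resumes just past the follower's last index
	}
	if !pr.MaybeDecrTo(7, 3) {
		_ = pr.Next // still 4: the rejection no longer refers to Next-1
	}
}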

// IsPaused returns whether sending log entries to this node has been throttled.
// This is done when a node has rejected recent MsgApps, is currently waiting
// for a snapshot, or has reached the MaxInflightMsgs limit. In normal
// operation, this is false. A throttled node will be contacted less frequently
// until it has reached a state in which it's able to accept a steady stream of
// log entries again.
func (pr *Progress) IsPaused() bool {
	switch pr.State {
	case StateProbe:
		return pr.ProbeSent
	case StateReplicate:
		return pr.Inflights.Full()
	case StateSnapshot:
		return true
	default:
		panic("unexpected state")
	}
}
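
// exampleMaybeSend is an illustrative sketch, not part of upstream etcd: the
// sendAppend callback is a hypothetical stand-in for the leader-side code that
// builds and sends a MsgApp. It shows how IsPaused is meant to gate sending.
func exampleMaybeSend(pr *Progress, sendAppend func()) {
	if pr.IsPaused() {
		// Probing and awaiting a reply, waiting for a snapshot, or the
		// in-flight window is full: hold off on further appends for now.
		return
	}
	sendAppend()
}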

func (pr *Progress) String() string {
	var buf strings.Builder
	fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next)
	if pr.IsLearner {
		fmt.Fprint(&buf, " learner")
	}
	if pr.IsPaused() {
		fmt.Fprint(&buf, " paused")
	}
	if pr.PendingSnapshot > 0 {
		fmt.Fprintf(&buf, " pendingSnap=%d", pr.PendingSnapshot)
	}
	if !pr.RecentActive {
		fmt.Fprintf(&buf, " inactive")
	}
	if n := pr.Inflights.Count(); n > 0 {
		fmt.Fprintf(&buf, " inflight=%d", n)
		if pr.Inflights.Full() {
			fmt.Fprint(&buf, "[full]")
		}
	}
	return buf.String()
}

// ProgressMap is a map of *Progress.
type ProgressMap map[uint64]*Progress

// String prints the ProgressMap in sorted key order, one Progress per line.
func (m ProgressMap) String() string {
	ids := make([]uint64, 0, len(m))
	for k := range m {
		ids = append(ids, k)
	}
	sort.Slice(ids, func(i, j int) bool {
		return ids[i] < ids[j]
	})
	var buf strings.Builder
	for _, id := range ids {
		fmt.Fprintf(&buf, "%d: %s\n", id, m[id])
	}
	return buf.String()
}

42 vendor/go.etcd.io/etcd/raft/tracker/state.go generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright 2019 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tracker

// StateType is the state of a tracked follower.
type StateType uint64

const (
	// StateProbe indicates a follower whose last index isn't known. Such a
	// follower is "probed" (i.e. an append sent periodically) to narrow down
	// its last index. In the ideal (and common) case, only one round of probing
	// is necessary as the follower will react with a hint. Followers that are
	// probed over extended periods of time are often offline.
	StateProbe StateType = iota
	// StateReplicate is the steady state in which a follower eagerly receives
	// log entries to append to its log.
	StateReplicate
	// StateSnapshot indicates a follower that needs log entries not available
	// from the leader's Raft log. Such a follower needs a full snapshot to
	// return to StateReplicate.
	StateSnapshot
)

var prstmap = [...]string{
	"StateProbe",
	"StateReplicate",
	"StateSnapshot",
}

func (st StateType) String() string { return prstmap[uint64(st)] }
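
// exampleStateNames is an illustrative sketch, not part of upstream etcd: it
// spells out the mapping realized by prstmap and String above, which is also
// what the leading "%s" in Progress.String prints.
func exampleStateNames() []string {
	return []string{
		StateProbe.String(),     // "StateProbe"
		StateReplicate.String(), // "StateReplicate"
		StateSnapshot.String(),  // "StateSnapshot"
	}
}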

288 vendor/go.etcd.io/etcd/raft/tracker/tracker.go generated vendored Normal file
@@ -0,0 +1,288 @@
// Copyright 2019 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tracker

import (
	"fmt"
	"sort"
	"strings"

	"go.etcd.io/etcd/raft/quorum"
	pb "go.etcd.io/etcd/raft/raftpb"
)

// Config reflects the configuration tracked in a ProgressTracker.
type Config struct {
	Voters quorum.JointConfig
	// AutoLeave is true if the configuration is joint and a transition to the
	// incoming configuration should be carried out automatically by Raft when
	// this is possible. If false, the configuration will be joint until the
	// application initiates the transition manually.
	AutoLeave bool
	// Learners is a set of IDs corresponding to the learners active in the
	// current configuration.
	//
	// Invariant: Learners and Voters do not intersect, i.e. if a peer is in
	// either half of the joint config, it can't be a learner; if it is a
	// learner it can't be in either half of the joint config. This invariant
	// simplifies the implementation since it allows peers to have clarity about
	// their current role without taking joint consensus into account.
	Learners map[uint64]struct{}
	// When we turn a voter into a learner during a joint consensus transition,
	// we cannot add the learner directly when entering the joint state. This is
	// because this would violate the invariant that the intersection of
	// voters and learners is empty. For example, assume a Voter is removed and
	// immediately re-added as a learner (or in other words, it is demoted):
	//
	// Initially, the configuration will be
	//
	//   voters:   {1 2 3}
	//   learners: {}
	//
	// and we want to demote 3. Entering the joint configuration, we naively get
	//
	//   voters:   {1 2} & {1 2 3}
	//   learners: {3}
	//
	// but this violates the invariant (3 is both voter and learner). Instead,
	// we get
	//
	//   voters:        {1 2} & {1 2 3}
	//   learners:      {}
	//   next_learners: {3}
	//
	// where 3 is now still purely a voter, but we are remembering the intention
	// to make it a learner upon transitioning into the final configuration:
	//
	//   voters:        {1 2}
	//   learners:      {3}
	//   next_learners: {}
	//
	// Note that next_learners is not used while adding a learner that is not
	// also a voter in the joint config. In this case, the learner is added
	// right away when entering the joint configuration, so that it is caught up
	// as soon as possible.
	LearnersNext map[uint64]struct{}
}

func (c Config) String() string {
	var buf strings.Builder
	fmt.Fprintf(&buf, "voters=%s", c.Voters)
	if c.Learners != nil {
		fmt.Fprintf(&buf, " learners=%s", quorum.MajorityConfig(c.Learners).String())
	}
	if c.LearnersNext != nil {
		fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String())
	}
	if c.AutoLeave {
		fmt.Fprintf(&buf, " autoleave")
	}
	return buf.String()
}
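
// exampleDemotionConfig is an illustrative sketch, not part of upstream etcd:
// it builds the joint configuration from the demotion example in the Config
// comment above (voters {1 2} & {1 2 3}, with 3 remembered in LearnersNext)
// and prints it via the String method. The exact voters layout comes from
// quorum.JointConfig's own String method, so the output shown is approximate.
func exampleDemotionConfig() {
	c := Config{
		Voters: quorum.JointConfig{
			quorum.MajorityConfig{1: {}, 2: {}},        // incoming configuration
			quorum.MajorityConfig{1: {}, 2: {}, 3: {}}, // outgoing configuration
		},
		LearnersNext: map[uint64]struct{}{3: {}},
		AutoLeave:    true,
	}
	fmt.Println(c) // e.g. "voters=(1 2)&&(1 2 3) learners_next=(3) autoleave"
}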

// Clone returns a copy of the Config that shares no memory with the original.
func (c *Config) Clone() Config {
	clone := func(m map[uint64]struct{}) map[uint64]struct{} {
		if m == nil {
			return nil
		}
		mm := make(map[uint64]struct{}, len(m))
		for k := range m {
			mm[k] = struct{}{}
		}
		return mm
	}
	return Config{
		Voters:       quorum.JointConfig{clone(c.Voters[0]), clone(c.Voters[1])},
		Learners:     clone(c.Learners),
		LearnersNext: clone(c.LearnersNext),
	}
}

// ProgressTracker tracks the currently active configuration and the information
// known about the nodes and learners in it. In particular, it tracks the match
// index for each peer, which in turn allows reasoning about the committed index.
type ProgressTracker struct {
	Config

	Progress ProgressMap

	Votes map[uint64]bool

	MaxInflight int
}

// MakeProgressTracker initializes a ProgressTracker.
func MakeProgressTracker(maxInflight int) ProgressTracker {
	p := ProgressTracker{
		MaxInflight: maxInflight,
		Config: Config{
			Voters: quorum.JointConfig{
				quorum.MajorityConfig{},
				nil, // only populated when used
			},
			Learners:     nil, // only populated when used
			LearnersNext: nil, // only populated when used
		},
		Votes:    map[uint64]bool{},
		Progress: map[uint64]*Progress{},
	}
	return p
}

// ConfState returns a ConfState representing the active configuration.
func (p *ProgressTracker) ConfState() pb.ConfState {
	return pb.ConfState{
		Voters:         p.Voters[0].Slice(),
		VotersOutgoing: p.Voters[1].Slice(),
		Learners:       quorum.MajorityConfig(p.Learners).Slice(),
		LearnersNext:   quorum.MajorityConfig(p.LearnersNext).Slice(),
		AutoLeave:      p.AutoLeave,
	}
}

// IsSingleton returns true if (and only if) there is only one voting member
// (i.e. the leader) in the current configuration.
func (p *ProgressTracker) IsSingleton() bool {
	return len(p.Voters[0]) == 1 && len(p.Voters[1]) == 0
}

type matchAckIndexer map[uint64]*Progress

var _ quorum.AckedIndexer = matchAckIndexer(nil)

// AckedIndex implements AckedIndexer.
func (l matchAckIndexer) AckedIndex(id uint64) (quorum.Index, bool) {
	pr, ok := l[id]
	if !ok {
		return 0, false
	}
	return quorum.Index(pr.Match), true
}

// Committed returns the largest log index known to be committed based on what
// the voting members of the group have acknowledged.
func (p *ProgressTracker) Committed() uint64 {
	return uint64(p.Voters.CommittedIndex(matchAckIndexer(p.Progress)))
}
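
// exampleCommitted is an illustrative sketch, not part of upstream etcd: with
// voters {1 2 3} and match indexes 9, 5 and 3, the quorum-acknowledged index
// is the median, 5. It assumes the quorum package's majority/joint committed
// index rules; a real Progress would also carry an Inflights window.
func exampleCommitted() uint64 {
	p := MakeProgressTracker(256)
	p.Voters[0] = quorum.MajorityConfig{1: {}, 2: {}, 3: {}}
	for id, match := range map[uint64]uint64{1: 9, 2: 5, 3: 3} {
		p.Progress[id] = &Progress{Match: match, Next: match + 1}
	}
	return p.Committed() // 5
}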

func insertionSort(sl []uint64) {
	a, b := 0, len(sl)
	for i := a + 1; i < b; i++ {
		for j := i; j > a && sl[j] < sl[j-1]; j-- {
			sl[j], sl[j-1] = sl[j-1], sl[j]
		}
	}
}

// Visit invokes the supplied closure for all tracked progresses in stable order.
func (p *ProgressTracker) Visit(f func(id uint64, pr *Progress)) {
	n := len(p.Progress)
	// We need to sort the IDs and don't want to allocate since this is hot code.
	// The optimization here mirrors that in `(MajorityConfig).CommittedIndex`,
	// see there for details.
	var sl [7]uint64
	ids := sl[:]
	if len(sl) >= n {
		ids = sl[:n]
	} else {
		ids = make([]uint64, n)
	}
	for id := range p.Progress {
		n--
		ids[n] = id
	}
	insertionSort(ids)
	for _, id := range ids {
		f(id, p.Progress[id])
	}
}

// QuorumActive returns true if the quorum is active from the view of the local
// raft state machine. Otherwise, it returns false.
func (p *ProgressTracker) QuorumActive() bool {
	votes := map[uint64]bool{}
	p.Visit(func(id uint64, pr *Progress) {
		if pr.IsLearner {
			return
		}
		votes[id] = pr.RecentActive
	})

	return p.Voters.VoteResult(votes) == quorum.VoteWon
}

// VoterNodes returns a sorted slice of voters.
func (p *ProgressTracker) VoterNodes() []uint64 {
	m := p.Voters.IDs()
	nodes := make([]uint64, 0, len(m))
	for id := range m {
		nodes = append(nodes, id)
	}
	sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] })
	return nodes
}

// LearnerNodes returns a sorted slice of learners.
func (p *ProgressTracker) LearnerNodes() []uint64 {
	if len(p.Learners) == 0 {
		return nil
	}
	nodes := make([]uint64, 0, len(p.Learners))
	for id := range p.Learners {
		nodes = append(nodes, id)
	}
	sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] })
	return nodes
}

// ResetVotes prepares for a new round of vote counting via RecordVote.
func (p *ProgressTracker) ResetVotes() {
	p.Votes = map[uint64]bool{}
}

// RecordVote records that the node with the given id voted for this Raft
// instance if v == true (and declined it otherwise).
func (p *ProgressTracker) RecordVote(id uint64, v bool) {
	_, ok := p.Votes[id]
	if !ok {
		p.Votes[id] = v
	}
}

// TallyVotes returns the number of granted and rejected Votes, and whether the
// election outcome is known.
func (p *ProgressTracker) TallyVotes() (granted int, rejected int, _ quorum.VoteResult) {
	// Make sure to populate granted/rejected correctly even if the Votes map
	// contains members no longer part of the configuration. This doesn't really
	// matter in the way the numbers are used (they're informational), but might
	// as well get it right.
	for id, pr := range p.Progress {
		if pr.IsLearner {
			continue
		}
		v, voted := p.Votes[id]
		if !voted {
			continue
		}
		if v {
			granted++
		} else {
			rejected++
		}
	}
	result := p.Voters.VoteResult(p.Votes)
	return granted, rejected, result
}
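
// exampleElection is an illustrative sketch, not part of upstream etcd: with
// voters {1 2 3}, two grants decide the election. The second message from
// peer 2 is ignored because RecordVote only keeps the first vote per peer.
// The Progress entries are stubbed out; real ones would also carry Inflights.
func exampleElection() {
	p := MakeProgressTracker(256)
	p.Voters[0] = quorum.MajorityConfig{1: {}, 2: {}, 3: {}}
	for _, id := range []uint64{1, 2, 3} {
		p.Progress[id] = &Progress{Next: 1}
	}

	p.ResetVotes()
	p.RecordVote(1, true)
	p.RecordVote(2, true)
	p.RecordVote(2, false) // ignored: peer 2 already voted

	granted, rejected, result := p.TallyVotes()
	_ = granted  // 2
	_ = rejected // 0
	_ = result   // quorum.VoteWon: a majority of the voters granted
}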

233 vendor/go.etcd.io/etcd/raft/util.go generated vendored Normal file
@@ -0,0 +1,233 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package raft

import (
	"bytes"
	"fmt"
	"strings"

	pb "go.etcd.io/etcd/raft/raftpb"
)

func (st StateType) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf("%q", st.String())), nil
}

func min(a, b uint64) uint64 {
	if a > b {
		return b
	}
	return a
}

func max(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

// IsLocalMsg returns true for message types that are generated locally rather
// than received from a peer.
func IsLocalMsg(msgt pb.MessageType) bool {
	return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
		msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
}

// IsResponseMsg returns true for message types that are handled as responses
// to an earlier outbound message.
func IsResponseMsg(msgt pb.MessageType) bool {
	return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
}

// voteRespMsgType maps vote and prevote message types to their corresponding responses.
func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
	switch msgt {
	case pb.MsgVote:
		return pb.MsgVoteResp
	case pb.MsgPreVote:
		return pb.MsgPreVoteResp
	default:
		panic(fmt.Sprintf("not a vote message: %s", msgt))
	}
}

func DescribeHardState(hs pb.HardState) string {
	var buf strings.Builder
	fmt.Fprintf(&buf, "Term:%d", hs.Term)
	if hs.Vote != 0 {
		fmt.Fprintf(&buf, " Vote:%d", hs.Vote)
	}
	fmt.Fprintf(&buf, " Commit:%d", hs.Commit)
	return buf.String()
}

func DescribeSoftState(ss SoftState) string {
	return fmt.Sprintf("Lead:%d State:%s", ss.Lead, ss.RaftState)
}

func DescribeConfState(state pb.ConfState) string {
	return fmt.Sprintf(
		"Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v",
		state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave,
	)
}

func DescribeSnapshot(snap pb.Snapshot) string {
	m := snap.Metadata
	return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState))
}

func DescribeReady(rd Ready, f EntryFormatter) string {
	var buf strings.Builder
	if rd.SoftState != nil {
		fmt.Fprint(&buf, DescribeSoftState(*rd.SoftState))
		buf.WriteByte('\n')
	}
	if !IsEmptyHardState(rd.HardState) {
		fmt.Fprintf(&buf, "HardState %s", DescribeHardState(rd.HardState))
		buf.WriteByte('\n')
	}
	if len(rd.ReadStates) > 0 {
		fmt.Fprintf(&buf, "ReadStates %v\n", rd.ReadStates)
	}
	if len(rd.Entries) > 0 {
		buf.WriteString("Entries:\n")
		fmt.Fprint(&buf, DescribeEntries(rd.Entries, f))
	}
	if !IsEmptySnap(rd.Snapshot) {
		fmt.Fprintf(&buf, "Snapshot %s\n", DescribeSnapshot(rd.Snapshot))
	}
	if len(rd.CommittedEntries) > 0 {
		buf.WriteString("CommittedEntries:\n")
		fmt.Fprint(&buf, DescribeEntries(rd.CommittedEntries, f))
	}
	if len(rd.Messages) > 0 {
		buf.WriteString("Messages:\n")
		for _, msg := range rd.Messages {
			fmt.Fprint(&buf, DescribeMessage(msg, f))
			buf.WriteByte('\n')
		}
	}
	if buf.Len() > 0 {
		return fmt.Sprintf("Ready MustSync=%t:\n%s", rd.MustSync, buf.String())
	}
	return "<empty Ready>"
}

// EntryFormatter can be implemented by the application to provide human-readable formatting
// of entry data. Nil is a valid EntryFormatter and will use a default format.
type EntryFormatter func([]byte) string

// DescribeMessage returns a concise human-readable description of a
// Message for debugging.
func DescribeMessage(m pb.Message, f EntryFormatter) string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
	if m.Reject {
		fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint)
	}
	if m.Commit != 0 {
		fmt.Fprintf(&buf, " Commit:%d", m.Commit)
	}
	if len(m.Entries) > 0 {
		fmt.Fprintf(&buf, " Entries:[")
		for i, e := range m.Entries {
			if i != 0 {
				buf.WriteString(", ")
			}
			buf.WriteString(DescribeEntry(e, f))
		}
		fmt.Fprintf(&buf, "]")
	}
	if !IsEmptySnap(m.Snapshot) {
		fmt.Fprintf(&buf, " Snapshot: %s", DescribeSnapshot(m.Snapshot))
	}
	return buf.String()
}

// PayloadSize is the size of the payload of this Entry. Notably, it does not
// depend on its Index or Term.
func PayloadSize(e pb.Entry) int {
	return len(e.Data)
}

// DescribeEntry returns a concise human-readable description of an
// Entry for debugging.
func DescribeEntry(e pb.Entry, f EntryFormatter) string {
	if f == nil {
		f = func(data []byte) string { return fmt.Sprintf("%q", data) }
	}

	formatConfChange := func(cc pb.ConfChangeI) string {
		// TODO(tbg): give the EntryFormatter a type argument so that it gets
		// a chance to expose the Context.
		return pb.ConfChangesToString(cc.AsV2().Changes)
	}

	var formatted string
	switch e.Type {
	case pb.EntryNormal:
		formatted = f(e.Data)
	case pb.EntryConfChange:
		var cc pb.ConfChange
		if err := cc.Unmarshal(e.Data); err != nil {
			formatted = err.Error()
		} else {
			formatted = formatConfChange(cc)
		}
	case pb.EntryConfChangeV2:
		var cc pb.ConfChangeV2
		if err := cc.Unmarshal(e.Data); err != nil {
			formatted = err.Error()
		} else {
			formatted = formatConfChange(cc)
		}
	}
	if formatted != "" {
		formatted = " " + formatted
	}
	return fmt.Sprintf("%d/%d %s%s", e.Term, e.Index, e.Type, formatted)
}

// DescribeEntries calls DescribeEntry for each Entry, adding a newline to
// each.
func DescribeEntries(ents []pb.Entry, f EntryFormatter) string {
	var buf bytes.Buffer
	for _, e := range ents {
		_, _ = buf.WriteString(DescribeEntry(e, f) + "\n")
	}
	return buf.String()
}
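
// exampleDescribeEntries is an illustrative sketch, not part of upstream etcd:
// it formats two normal entries with a custom EntryFormatter that reports only
// the payload size instead of quoting the raw data.
func exampleDescribeEntries() string {
	ents := []pb.Entry{
		{Term: 1, Index: 4, Type: pb.EntryNormal, Data: []byte("put k=v")},
		{Term: 1, Index: 5, Type: pb.EntryNormal},
	}
	sizeOnly := func(data []byte) string { return fmt.Sprintf("%d bytes", len(data)) }
	return DescribeEntries(ents, sizeOnly)
	// Roughly:
	//   1/4 EntryNormal 7 bytes
	//   1/5 EntryNormal 0 bytes
}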

// limitSize returns the longest prefix of ents whose combined size does not
// exceed maxSize. For a non-empty input it always returns at least one entry,
// even if that single entry already exceeds maxSize.
func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
	if len(ents) == 0 {
		return ents
	}
	size := ents[0].Size()
	var limit int
	for limit = 1; limit < len(ents); limit++ {
		size += ents[limit].Size()
		if uint64(size) > maxSize {
			break
		}
	}
	return ents[:limit]
}

// assertConfStatesEquivalent reports the error via l.Panic if the two
// ConfStates are not Equivalent.
func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) {
	err := cs1.Equivalent(cs2)
	if err == nil {
		return
	}
	l.Panic(err)
}