Vendor cleanup

Signed-off-by: Madhu Rajanna <mrajanna@redhat.com>
Madhu Rajanna
2019-01-16 18:11:54 +05:30
parent 661818bd79
commit 0f836c62fa
16816 changed files with 20 additions and 4611100 deletions

@@ -1,72 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package buffer
// RingGrowing is a growing ring buffer.
// Not thread safe.
type RingGrowing struct {
data []interface{}
n int // Size of Data
beg int // First available element
readable int // Number of data items available
}
// NewRingGrowing constructs a new RingGrowing instance with provided parameters.
func NewRingGrowing(initialSize int) *RingGrowing {
return &RingGrowing{
data: make([]interface{}, initialSize),
n: initialSize,
}
}
// ReadOne reads (consumes) the first item from the buffer if one is available; otherwise it returns false.
func (r *RingGrowing) ReadOne() (data interface{}, ok bool) {
if r.readable == 0 {
return nil, false
}
r.readable--
element := r.data[r.beg]
r.data[r.beg] = nil // Remove reference to the object to help GC
if r.beg == r.n-1 {
// Was the last element
r.beg = 0
} else {
r.beg++
}
return element, true
}
// WriteOne adds an item to the end of the buffer, growing it if it is full.
func (r *RingGrowing) WriteOne(data interface{}) {
if r.readable == r.n {
// Time to grow
newN := r.n * 2
newData := make([]interface{}, newN)
to := r.beg + r.readable
if to <= r.n {
copy(newData, r.data[r.beg:to])
} else {
copied := copy(newData, r.data[r.beg:])
copy(newData[copied:], r.data[:(to%r.n)])
}
r.beg = 0
r.data = newData
r.n = newN
}
r.data[(r.readable+r.beg)%r.n] = data
r.readable++
}
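
For context, a minimal sketch of how the ring buffer above is used (illustrative only; assumes the usual k8s.io/client-go/util/buffer import path):

package main

import (
	"fmt"

	"k8s.io/client-go/util/buffer"
)

func main() {
	rb := buffer.NewRingGrowing(2) // initial capacity of 2
	rb.WriteOne("a")
	rb.WriteOne("b")
	rb.WriteOne("c") // buffer was full, so it doubled its capacity to 4
	for {
		v, ok := rb.ReadOne()
		if !ok {
			break // buffer drained
		}
		fmt.Println(v) // prints a, b, c in FIFO order
	}
}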

@@ -1,50 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package buffer
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestGrowth(t *testing.T) {
t.Parallel()
x := 10
g := NewRingGrowing(1)
for i := 0; i < x; i++ {
assert.Equal(t, i, g.readable)
g.WriteOne(i)
}
read := 0
for g.readable > 0 {
v, ok := g.ReadOne()
assert.True(t, ok)
assert.Equal(t, read, v)
read++
}
assert.Equalf(t, x, read, "expected to have read %d items: %d", x, read)
assert.Zerof(t, g.readable, "expected readable to be zero: %d", g.readable)
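// The buffer started with capacity 1 and doubles whenever a write finds it
// full, so the 10 writes above grew it 1 -> 2 -> 4 -> 8 -> 16.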
assert.Equalf(t, 16, g.n, "expected N to be 16: %d", g.n)
}
func TestEmpty(t *testing.T) {
t.Parallel()
g := NewRingGrowing(1)
_, ok := g.ReadOne()
assert.False(t, ok)
}

@@ -1,7 +0,0 @@
approvers:
- sig-auth-certificates-approvers
reviewers:
- sig-auth-certificates-reviewers
labels:
- sig/auth

@@ -1,75 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"io/ioutil"
"net"
"testing"
)
func TestMakeCSR(t *testing.T) {
keyFile := "testdata/dontUseThisKey.pem"
subject := &pkix.Name{
CommonName: "kube-worker",
}
dnsSANs := []string{"localhost"}
ipSANs := []net.IP{net.ParseIP("127.0.0.1")}
keyData, err := ioutil.ReadFile(keyFile)
if err != nil {
t.Fatal(err)
}
key, err := ParsePrivateKeyPEM(keyData)
if err != nil {
t.Fatal(err)
}
csrPEM, err := MakeCSR(key, subject, dnsSANs, ipSANs)
if err != nil {
t.Error(err)
}
csrBlock, rest := pem.Decode(csrPEM)
if csrBlock == nil {
t.Error("Unable to decode MakeCSR result.")
}
if len(rest) != 0 {
t.Error("Found more than one PEM encoded block in the result.")
}
if csrBlock.Type != CertificateRequestBlockType {
t.Errorf("Found block type %q, wanted 'CERTIFICATE REQUEST'", csrBlock.Type)
}
csr, err := x509.ParseCertificateRequest(csrBlock.Bytes)
if err != nil {
t.Errorf("Found %v parsing MakeCSR result as a CertificateRequest.", err)
}
if csr.Subject.CommonName != subject.CommonName {
t.Errorf("Wanted %v, got %v", subject, csr.Subject)
}
if len(csr.DNSNames) != 1 {
t.Errorf("Wanted 1 DNS name in the result, got %d", len(csr.DNSNames))
} else if csr.DNSNames[0] != dnsSANs[0] {
t.Errorf("Wanted %v, got %v", dnsSANs[0], csr.DNSNames[0])
}
if len(csr.IPAddresses) != 1 {
t.Errorf("Wanted 1 IP address in the result, got %d", len(csr.IPAddresses))
} else if csr.IPAddresses[0].String() != ipSANs[0].String() {
t.Errorf("Wanted %v, got %v", ipSANs[0], csr.IPAddresses[0])
}
}

@@ -1,197 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"io/ioutil"
"os"
"testing"
)
const (
// rsaPrivateKey is an RSA Private Key in PKCS#1 format
// openssl genrsa -out rsa2048.pem 2048
rsaPrivateKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA92mVjhBKOFsdxFzb/Pjq+7b5TJlODAdY5hK+WxLZTIrfhDPq
FWrGKdjSNiHbXrdEtwJh9V+RqPZVSN3aWy1224RgkyNdMJsXhJKuCC24ZKY8SXtW
xuTYmMRaMnCsv6QBGRTIbZ2EFbAObVM7lDyv1VqY3amZIWFQMlZ9CNpxDSPa5yi4
3gopbXkne0oGNmey9X0qtpk7NMZIgAL6Zz4rZ30bcfC2ag6RLOFI2E/c4n8c38R8
9MfXfLkj8/Cxo4JfI9NvRCpPOpFO8d/ZtWVUuIrBQN+Y7tkN2T60Qq/TkKXUrhDe
fwlTlktZVJ/GztLYU41b2GcWsh/XO+PH831rmwIDAQABAoIBAQCC9c6GDjVbM0/E
WurPMusfJjE7zII1d8YkspM0HfwLug6qKdikUYpnKC/NG4rEzfl/bbFwco/lgc6O
7W/hh2U8uQttlvCDA/Uk5YddKOZL0Hpk4vaB/SxxYK3luSKXpjY2knutGg2KdVCN
qdsFkkH4iyYTXuyBcMNEgedZQldI/kEujIH/L7FE+DF5TMzT4lHhozDoG+fy564q
qVGUZXJn0ubc3GaPn2QOLNNM44sfYA4UJCpKBXPu85bvNObjxVQO4WqwwxU1vRnL
UUsaGaelhSVJCo0dVPRvrfPPKZ09HTwpy40EkgQo6VriFc1EBoQDjENLbAJv9OfQ
aCc9wiZhAoGBAP/8oEy48Zbb0P8Vdy4djf5tfBW8yXFLWzXewJ4l3itKS1r42nbX
9q3cJsgRTQm8uRcMIpWxsc3n6zG+lREvTkoTB3ViI7+uQPiqA+BtWyNy7jzufFke
ONKZfg7QxxmYRWZBRnoNGNbMpNeERuLmhvQuom9D1WbhzAYJbfs/O4WTAoGBAPds
2FNDU0gaesFDdkIUGq1nIJqRQDW485LXZm4pFqBFxdOpbdWRuYT2XZjd3fD0XY98
Nhkpb7NTMCuK3BdKcqIptt+cK+quQgYid0hhhgZbpCQ5AL6c6KgyjgpYlh2enzU9
Zo3yg8ej1zbbA11sBlhX+5iO2P1u5DG+JHLwUUbZAoGAUwaU102EzfEtsA4+QW7E
hyjrfgFlNKHES4yb3K9bh57pIfBkqvcQwwMMcQdrfSUAw0DkVrjzel0mI1Q09QXq
1ould6UFAz55RC2gZEITtUOpkYmoOx9aPrQZ9qQwb1S77ZZuTVfCHqjxLhVxCFbM
npYhiQTvShciHTMhwMOZgpECgYAVV5EtVXBYltgh1YTc3EkUzgF087R7LdHsx6Gx
POATwRD4WfP8aQ58lpeqOPEM+LcdSlSMRRO6fyF3kAm+BJDwxfJdRWZQXumZB94M
I0VhRQRaj4Qt7PDwmTPBVrTUJzuKZxpyggm17b8Bn1Ch/VBqzGQKW8AB1E/grosM
UwhfuQKBgQC2JO/iqTQScHClf0qlItCJsBuVukFmSAVCkpOD8YdbdlPdOOwSk1wQ
C0eAlsC3BCMvkpidKQmra6IqIrvTGI6EFgkrb3aknWdup2w8j2udYCNqyE3W+fVe
p8FdYQ1FkACQ+daO5VlClL/9l0sGjKXlNKbpmJ2H4ngZmXj5uGmxuQ==
-----END RSA PRIVATE KEY-----`
// rsaPublicKey is an RSA Public Key in PEM encoded format
// openssl rsa -in rsa2048.pem -pubout -out rsa2048pub.pem
rsaPublicKey = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA92mVjhBKOFsdxFzb/Pjq
+7b5TJlODAdY5hK+WxLZTIrfhDPqFWrGKdjSNiHbXrdEtwJh9V+RqPZVSN3aWy12
24RgkyNdMJsXhJKuCC24ZKY8SXtWxuTYmMRaMnCsv6QBGRTIbZ2EFbAObVM7lDyv
1VqY3amZIWFQMlZ9CNpxDSPa5yi43gopbXkne0oGNmey9X0qtpk7NMZIgAL6Zz4r
Z30bcfC2ag6RLOFI2E/c4n8c38R89MfXfLkj8/Cxo4JfI9NvRCpPOpFO8d/ZtWVU
uIrBQN+Y7tkN2T60Qq/TkKXUrhDefwlTlktZVJ/GztLYU41b2GcWsh/XO+PH831r
mwIDAQAB
-----END PUBLIC KEY-----`
// certificate is an x509 certificate in PEM encoded format
// openssl req -new -key rsa2048.pem -sha256 -nodes -x509 -days 1826 -out x509certificate.pem -subj "/C=US/CN=not-valid"
certificate = `-----BEGIN CERTIFICATE-----
MIIDFTCCAf2gAwIBAgIJAN8B8NOwtiUCMA0GCSqGSIb3DQEBCwUAMCExCzAJBgNV
BAYTAlVTMRIwEAYDVQQDDAlub3QtdmFsaWQwHhcNMTcwMzIyMDI1NjM2WhcNMjIw
MzIyMDI1NjM2WjAhMQswCQYDVQQGEwJVUzESMBAGA1UEAwwJbm90LXZhbGlkMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA92mVjhBKOFsdxFzb/Pjq+7b5
TJlODAdY5hK+WxLZTIrfhDPqFWrGKdjSNiHbXrdEtwJh9V+RqPZVSN3aWy1224Rg
kyNdMJsXhJKuCC24ZKY8SXtWxuTYmMRaMnCsv6QBGRTIbZ2EFbAObVM7lDyv1VqY
3amZIWFQMlZ9CNpxDSPa5yi43gopbXkne0oGNmey9X0qtpk7NMZIgAL6Zz4rZ30b
cfC2ag6RLOFI2E/c4n8c38R89MfXfLkj8/Cxo4JfI9NvRCpPOpFO8d/ZtWVUuIrB
QN+Y7tkN2T60Qq/TkKXUrhDefwlTlktZVJ/GztLYU41b2GcWsh/XO+PH831rmwID
AQABo1AwTjAdBgNVHQ4EFgQU1I5GfinLF7ta+dBJ6UWcrYaexLswHwYDVR0jBBgw
FoAU1I5GfinLF7ta+dBJ6UWcrYaexLswDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
AQsFAAOCAQEAUl0wUD4y41juHFOVMYiziPYr1ShSpQXdwp8FfaHrzI5hsr8UMe8D
dzb9QzZ4bx3yZhiG3ahrSBh956thMTHrKTEwAfJIEXI4cuSVWQAaOJ4Em5SDFxQe
d0E6Ui2nGh1SFGF7oyuEXyzqgRMWFNDFw9HLUNgXaO18Zfouw8+K0BgbfEWEcSi1
JLQbyhCjz088gltrliQGPWDFAg9cHBKtJhuTzZkvuqK1CLEmBhtzP1zFiGBfOJc8
v+aKjAwrPUNX11cXOCPxBv2qXMetxaovBem6AI2hvypCInXaVQfP+yOLubzlTDjS
Y708SlY38hmS1uTwDpyLOn8AKkZ8jtx75g==
-----END CERTIFICATE-----`
// ecdsaPrivateKeyWithParams is an ECDSA Private Key with included EC Parameters block
// openssl ecparam -name prime256v1 -genkey -out ecdsa256params.pem
ecdsaPrivateKeyWithParams = `-----BEGIN EC PARAMETERS-----
BggqhkjOPQMBBw==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIAwSOWQqlMTZNqNF7tgua812Jxib1DVOgb2pHHyIEyNNoAoGCCqGSM49
AwEHoUQDQgAEyxYNrs6a6tsNCFNYn+l+JDUZ0PnUZbcsDgJn2O62D1se8M5iQ5rY
iIv6RpxE3VHvlHEIvYgCZkG0jHszTUopBg==
-----END EC PRIVATE KEY-----`
// ecdsaPrivateKey is an ECDSA Private Key in ASN.1 format
// openssl ecparam -name prime256v1 -genkey -noout -out ecdsa256.pem
ecdsaPrivateKey = `-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIP6Qw6dHDiLsSnLXUhQVTPE0fTQQrj3XSbiQAZPXnk5+oAoGCCqGSM49
AwEHoUQDQgAEZZzi1u5f2/AEGFI/HYUhU+u6cTK1q2bbtE7r1JMK+/sQA5sNAp+7
Vdc3psr1OaNzyTyuhTECyRdFKXm63cMnGg==
-----END EC PRIVATE KEY-----`
// ecdsaPublicKey is an ECDSA Public Key in PEM encoded format
// openssl ec -in ecdsa256.pem -pubout -out ecdsa256pub.pem
ecdsaPublicKey = `-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEZZzi1u5f2/AEGFI/HYUhU+u6cTK1
q2bbtE7r1JMK+/sQA5sNAp+7Vdc3psr1OaNzyTyuhTECyRdFKXm63cMnGg==
-----END PUBLIC KEY-----`
)
func TestReadPrivateKey(t *testing.T) {
f, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("error creating tmpfile: %v", err)
}
defer os.Remove(f.Name())
if _, err := PrivateKeyFromFile(f.Name()); err == nil {
t.Fatalf("Expected error reading key from empty file, got none")
}
if err := ioutil.WriteFile(f.Name(), []byte(rsaPrivateKey), os.FileMode(0600)); err != nil {
t.Fatalf("error writing private key to tmpfile: %v", err)
}
if _, err := PrivateKeyFromFile(f.Name()); err != nil {
t.Fatalf("error reading private RSA key: %v", err)
}
if err := ioutil.WriteFile(f.Name(), []byte(ecdsaPrivateKey), os.FileMode(0600)); err != nil {
t.Fatalf("error writing private key to tmpfile: %v", err)
}
if _, err := PrivateKeyFromFile(f.Name()); err != nil {
t.Fatalf("error reading private ECDSA key: %v", err)
}
if err := ioutil.WriteFile(f.Name(), []byte(ecdsaPrivateKeyWithParams), os.FileMode(0600)); err != nil {
t.Fatalf("error writing private key to tmpfile: %v", err)
}
if _, err := PrivateKeyFromFile(f.Name()); err != nil {
t.Fatalf("error reading private ECDSA key with params: %v", err)
}
}
func TestReadPublicKeys(t *testing.T) {
f, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("error creating tmpfile: %v", err)
}
defer os.Remove(f.Name())
if _, err := PublicKeysFromFile(f.Name()); err == nil {
t.Fatalf("Expected error reading keys from empty file, got none")
}
if err := ioutil.WriteFile(f.Name(), []byte(rsaPublicKey), os.FileMode(0600)); err != nil {
t.Fatalf("error writing public key to tmpfile: %v", err)
}
if keys, err := PublicKeysFromFile(f.Name()); err != nil {
t.Fatalf("error reading RSA public key: %v", err)
} else if len(keys) != 1 {
t.Fatalf("expected 1 key, got %d", len(keys))
}
if err := ioutil.WriteFile(f.Name(), []byte(ecdsaPublicKey), os.FileMode(0600)); err != nil {
t.Fatalf("error writing public key to tmpfile: %v", err)
}
if keys, err := PublicKeysFromFile(f.Name()); err != nil {
t.Fatalf("error reading ECDSA public key: %v", err)
} else if len(keys) != 1 {
t.Fatalf("expected 1 key, got %d", len(keys))
}
if err := ioutil.WriteFile(f.Name(), []byte(rsaPublicKey+"\n"+ecdsaPublicKey), os.FileMode(0600)); err != nil {
t.Fatalf("error writing public key to tmpfile: %v", err)
}
if keys, err := PublicKeysFromFile(f.Name()); err != nil {
t.Fatalf("error reading combined RSA/ECDSA public key file: %v", err)
} else if len(keys) != 2 {
t.Fatalf("expected 2 keys, got %d", len(keys))
}
if err := ioutil.WriteFile(f.Name(), []byte(certificate), os.FileMode(0600)); err != nil {
t.Fatalf("error writing certificate to tmpfile: %v", err)
}
if keys, err := PublicKeysFromFile(f.Name()); err != nil {
t.Fatalf("error reading public key from certificate file: %v", err)
} else if len(keys) != 1 {
t.Fatalf("expected 1 keys, got %d", len(keys))
}
}

@@ -1,6 +0,0 @@
-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDAPEbSXwyDfWf0+61Oofd7aHkmdX69mrzD2Xb1CHF5syfsoRIhnG0dJ
ozBulPZCDDWgBwYFK4EEACKhZANiAATjlMJAtKhEPqU/i7MsrgKcK/RmXHC6He7W
0p69+9qFXg2raJ9zvvbKxkiu2ELOYRDAz0utcFTBOIgoUJEzBVmsjZQ7dvFa1BKP
Ym7MFAKG3O2espBqXn+audgdHGh5B0I=
-----END EC PRIVATE KEY-----

@@ -1,7 +0,0 @@
approvers:
- sig-auth-certificates-approvers
reviewers:
- sig-auth-certificates-reviewers
labels:
- sig/auth

@@ -1,556 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificate
import (
"crypto/ecdsa"
"crypto/elliptic"
cryptorand "crypto/rand"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"reflect"
"sync"
"time"
"k8s.io/klog"
certificates "k8s.io/api/certificates/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/certificate/csr"
)
// certificateWaitBackoff controls the amount and timing of retries when the
// watch for certificate approval is interrupted.
var certificateWaitBackoff = wait.Backoff{Duration: 30 * time.Second, Steps: 4, Factor: 1.5, Jitter: 0.1}
// Manager maintains and updates the certificates in use by this certificate
// manager. In the background it communicates with the API server to get new
// certificates for certificates about to expire.
type Manager interface {
// SetCertificateSigningRequestClient sets the client interface that is used for
// signing new certificates generated as part of rotation.
SetCertificateSigningRequestClient(certificatesclient.CertificateSigningRequestInterface) error
// Start the API server status sync loop.
Start()
// Current returns the currently selected certificate from the
// certificate manager, as well as the associated certificate and key data
// in PEM format.
Current() *tls.Certificate
// ServerHealthy returns true if the manager is able to communicate with
// the server. This allows a caller to determine whether the cert manager
// thinks it can potentially talk to the API server. The cert manager may
// be very conservative and only return true if recent communication has
// occurred with the server.
ServerHealthy() bool
}
// Config is the set of configuration parameters available for a new Manager.
type Config struct {
// CertificateSigningRequestClient will be used for signing new certificate
// requests generated when a key rotation occurs. It must be set either at
// initialization or by using CertificateSigningRequestClient before
// Manager.Start() is called.
CertificateSigningRequestClient certificatesclient.CertificateSigningRequestInterface
// Template is the CertificateRequest that will be used as a template for
// generating certificate signing requests for all new keys generated as
// part of rotation. It follows the same rules as the template parameter of
// crypto.x509.CreateCertificateRequest in the Go standard libraries.
Template *x509.CertificateRequest
// GetTemplate returns the CertificateRequest that will be used as a template for
// generating certificate signing requests for all new keys generated as
// part of rotation. It follows the same rules as the template parameter of
// crypto.x509.CreateCertificateRequest in the Go standard libraries.
// If no template is available, nil may be returned, and no certificate will be requested.
// If specified, takes precedence over Template.
GetTemplate func() *x509.CertificateRequest
// Usages is the types of usages that certificates generated by the manager
// can be used for.
Usages []certificates.KeyUsage
// CertificateStore is a persistent store where the current cert/key is
// kept and future cert/key pairs will be persisted after they are
// generated.
CertificateStore Store
// BootstrapCertificatePEM is the certificate data that will be returned
// from the Manager if the CertificateStore doesn't have any cert/key pairs
// currently available and has not yet had a chance to get a new cert/key
// pair from the API. If the CertificateStore does have a cert/key pair,
// this will be ignored. If there is no cert/key pair available in the
// CertificateStore, as soon as Start is called, it will request a new
// cert/key pair from the CertificateSigningRequestClient. This is intended
// to allow the first boot of a component to be initialized using a
// generic, multi-use cert/key pair which will be quickly replaced with a
// unique cert/key pair.
BootstrapCertificatePEM []byte
// BootstrapKeyPEM is the key data that will be returned from the Manager
// if the CertificateStore doesn't have any cert/key pairs currently
// available. If the CertificateStore does have a cert/key pair, this will
// be ignored. If the bootstrap cert/key pair are used, they will be
// rotated at the first opportunity, possibly well in advance of expiring.
// This is intended to allow the first boot of a component to be
// initialized using a generic, multi-use cert/key pair which will be
// quickly replaced with a unique cert/key pair.
BootstrapKeyPEM []byte
// CertificateExpiration will record a metric that shows the remaining
// lifetime of the certificate.
CertificateExpiration Gauge
}
// Store is responsible for getting and updating the current certificate.
// Depending on the concrete implementation, the backing store for this
// behavior may vary.
type Store interface {
// Current returns the currently selected certificate, as well as the
// associated certificate and key data in PEM format. If the Store doesn't
// have a cert/key pair currently, it should return a NoCertKeyError so
// that the Manager can recover by using bootstrap certificates to request
// a new cert/key pair.
Current() (*tls.Certificate, error)
// Update accepts the PEM data for the cert/key pair and makes the new
// cert/key pair the 'current' pair, which will be returned by future calls
// to Current().
Update(cert, key []byte) (*tls.Certificate, error)
}
// Gauge will record the remaining lifetime of the certificate each time it is
// updated.
type Gauge interface {
Set(float64)
}
// NoCertKeyError indicates there is no cert/key currently available.
type NoCertKeyError string
func (e *NoCertKeyError) Error() string { return string(*e) }
type manager struct {
certSigningRequestClient certificatesclient.CertificateSigningRequestInterface
getTemplate func() *x509.CertificateRequest
lastRequestLock sync.Mutex
lastRequest *x509.CertificateRequest
dynamicTemplate bool
usages []certificates.KeyUsage
certStore Store
certAccessLock sync.RWMutex
cert *tls.Certificate
forceRotation bool
certificateExpiration Gauge
serverHealth bool
}
// NewManager returns a new certificate manager. A certificate manager is
// responsible for being the authoritative source of certificates in the
// Kubelet and handling updates due to rotation.
func NewManager(config *Config) (Manager, error) {
cert, forceRotation, err := getCurrentCertificateOrBootstrap(
config.CertificateStore,
config.BootstrapCertificatePEM,
config.BootstrapKeyPEM)
if err != nil {
return nil, err
}
getTemplate := config.GetTemplate
if getTemplate == nil {
getTemplate = func() *x509.CertificateRequest { return config.Template }
}
m := manager{
certSigningRequestClient: config.CertificateSigningRequestClient,
getTemplate: getTemplate,
dynamicTemplate: config.GetTemplate != nil,
usages: config.Usages,
certStore: config.CertificateStore,
cert: cert,
forceRotation: forceRotation,
certificateExpiration: config.CertificateExpiration,
}
return &m, nil
}
// Current returns the currently selected certificate from the certificate
// manager. This can be nil if the manager was initialized without a
// certificate and has not yet received one from the
// CertificateSigningRequestClient.
func (m *manager) Current() *tls.Certificate {
m.certAccessLock.RLock()
defer m.certAccessLock.RUnlock()
return m.cert
}
// ServerHealthy returns true if the cert manager believes the server
// is currently alive.
func (m *manager) ServerHealthy() bool {
m.certAccessLock.RLock()
defer m.certAccessLock.RUnlock()
return m.serverHealth
}
// SetCertificateSigningRequestClient sets the client interface that is used
// for signing new certificates generated as part of rotation. It must be
// called before Start() and can not be used to change the
// CertificateSigningRequestClient that has already been set. This method is to
// support the one specific scenario where the CertificateSigningRequestClient
// uses the CertificateManager.
func (m *manager) SetCertificateSigningRequestClient(certSigningRequestClient certificatesclient.CertificateSigningRequestInterface) error {
if m.certSigningRequestClient == nil {
m.certSigningRequestClient = certSigningRequestClient
return nil
}
return fmt.Errorf("property CertificateSigningRequestClient is already set")
}
// Start will start the background work of rotating the certificates.
func (m *manager) Start() {
// Certificate rotation depends on access to the API server certificate
// signing API, so don't start the certificate manager if we don't have a
// client.
if m.certSigningRequestClient == nil {
klog.V(2).Infof("Certificate rotation is not enabled, no connection to the apiserver.")
return
}
klog.V(2).Infof("Certificate rotation is enabled.")
templateChanged := make(chan struct{})
go wait.Forever(func() {
deadline := m.nextRotationDeadline()
if sleepInterval := deadline.Sub(time.Now()); sleepInterval > 0 {
klog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval)
timer := time.NewTimer(sleepInterval)
defer timer.Stop()
select {
case <-timer.C:
// unblock when deadline expires
case <-templateChanged:
if reflect.DeepEqual(m.getLastRequest(), m.getTemplate()) {
// if the template now matches what we last requested, restart the rotation deadline loop
return
}
klog.V(2).Infof("Certificate template changed, rotating")
}
}
// Don't enter rotateCerts and trigger backoff if we don't even have a template to request yet
if m.getTemplate() == nil {
return
}
backoff := wait.Backoff{
Duration: 2 * time.Second,
Factor: 2,
Jitter: 0.1,
Steps: 5,
}
if err := wait.ExponentialBackoff(backoff, m.rotateCerts); err != nil {
utilruntime.HandleError(fmt.Errorf("Reached backoff limit, still unable to rotate certs: %v", err))
wait.PollInfinite(32*time.Second, m.rotateCerts)
}
}, time.Second)
if m.dynamicTemplate {
go wait.Forever(func() {
// check if the current template matches what we last requested
if !m.certSatisfiesTemplate() && !reflect.DeepEqual(m.getLastRequest(), m.getTemplate()) {
// if the template is different, queue up an interrupt of the rotation deadline loop.
// if we've requested a CSR that matches the new template by the time the interrupt is handled, the interrupt is disregarded.
templateChanged <- struct{}{}
}
}, time.Second)
}
}
func getCurrentCertificateOrBootstrap(
store Store,
bootstrapCertificatePEM []byte,
bootstrapKeyPEM []byte) (cert *tls.Certificate, shouldRotate bool, errResult error) {
currentCert, err := store.Current()
if err == nil {
// if the current cert is expired, fall back to the bootstrap cert
if currentCert.Leaf != nil && time.Now().Before(currentCert.Leaf.NotAfter) {
return currentCert, false, nil
}
} else {
if _, ok := err.(*NoCertKeyError); !ok {
return nil, false, err
}
}
if bootstrapCertificatePEM == nil || bootstrapKeyPEM == nil {
return nil, true, nil
}
bootstrapCert, err := tls.X509KeyPair(bootstrapCertificatePEM, bootstrapKeyPEM)
if err != nil {
return nil, false, err
}
if len(bootstrapCert.Certificate) < 1 {
return nil, false, fmt.Errorf("no cert/key data found")
}
certs, err := x509.ParseCertificates(bootstrapCert.Certificate[0])
if err != nil {
return nil, false, fmt.Errorf("unable to parse certificate data: %v", err)
}
bootstrapCert.Leaf = certs[0]
if _, err := store.Update(bootstrapCertificatePEM, bootstrapKeyPEM); err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to set the cert/key pair to the bootstrap certificate: %v", err))
} else {
klog.V(4).Infof("Updated the store to contain the initial bootstrap certificate")
}
return &bootstrapCert, true, nil
}
// rotateCerts attempts to request a client cert from the server, wait a reasonable
// period of time for it to be signed, and then update the cert on disk. If it cannot
// retrieve a cert, it will return false. It will only return error in exceptional cases.
// This method also keeps track of "server health" by interpreting the responses it gets
// from the server on the various calls it makes.
func (m *manager) rotateCerts() (bool, error) {
klog.V(2).Infof("Rotating certificates")
template, csrPEM, keyPEM, privateKey, err := m.generateCSR()
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to generate a certificate signing request: %v", err))
return false, nil
}
// Call the Certificate Signing Request API to get a certificate for the
// new private key.
req, err := csr.RequestCertificate(m.certSigningRequestClient, csrPEM, "", m.usages, privateKey)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Failed while requesting a signed certificate from the master: %v", err))
return false, m.updateServerError(err)
}
// Once we've successfully submitted a CSR for this template, record that we did so
m.setLastRequest(template)
// Wait for the certificate to be signed. Instead of one long watch, we retry with slightly longer
// intervals each time in order to tolerate failures from the server AND to preserve the liveliness
// of the cert manager loop. This creates slightly more traffic against the API server in return
// for bounding the amount of time we wait when a certificate expires.
var crtPEM []byte
watchDuration := time.Minute
if err := wait.ExponentialBackoff(certificateWaitBackoff, func() (bool, error) {
data, err := csr.WaitForCertificate(m.certSigningRequestClient, req, watchDuration)
switch {
case err == nil:
crtPEM = data
return true, nil
case err == wait.ErrWaitTimeout:
watchDuration += time.Minute
if watchDuration > 5*time.Minute {
watchDuration = 5 * time.Minute
}
return false, nil
default:
utilruntime.HandleError(fmt.Errorf("Unable to check certificate signing status: %v", err))
return false, m.updateServerError(err)
}
}); err != nil {
utilruntime.HandleError(fmt.Errorf("Certificate request was not signed: %v", err))
return false, nil
}
cert, err := m.certStore.Update(crtPEM, keyPEM)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to store the new cert/key pair: %v", err))
return false, nil
}
m.updateCached(cert)
return true, nil
}
// Check that the current certificate on disk satisfies the requests from the
// current template.
//
// Note that extra items in the certificate's SAN or orgs that don't exist in
// the template will not trigger a renewal.
//
// Requires certAccessLock to be locked.
func (m *manager) certSatisfiesTemplateLocked() bool {
if m.cert == nil {
return false
}
if template := m.getTemplate(); template != nil {
if template.Subject.CommonName != m.cert.Leaf.Subject.CommonName {
klog.V(2).Infof("Current certificate CN (%s) does not match requested CN (%s)", m.cert.Leaf.Subject.CommonName, template.Subject.CommonName)
return false
}
currentDNSNames := sets.NewString(m.cert.Leaf.DNSNames...)
desiredDNSNames := sets.NewString(template.DNSNames...)
missingDNSNames := desiredDNSNames.Difference(currentDNSNames)
if len(missingDNSNames) > 0 {
klog.V(2).Infof("Current certificate is missing requested DNS names %v", missingDNSNames.List())
return false
}
currentIPs := sets.NewString()
for _, ip := range m.cert.Leaf.IPAddresses {
currentIPs.Insert(ip.String())
}
desiredIPs := sets.NewString()
for _, ip := range template.IPAddresses {
desiredIPs.Insert(ip.String())
}
missingIPs := desiredIPs.Difference(currentIPs)
if len(missingIPs) > 0 {
klog.V(2).Infof("Current certificate is missing requested IP addresses %v", missingIPs.List())
return false
}
currentOrgs := sets.NewString(m.cert.Leaf.Subject.Organization...)
desiredOrgs := sets.NewString(template.Subject.Organization...)
missingOrgs := desiredOrgs.Difference(currentOrgs)
if len(missingOrgs) > 0 {
klog.V(2).Infof("Current certificate is missing requested orgs %v", missingOrgs.List())
return false
}
}
return true
}
func (m *manager) certSatisfiesTemplate() bool {
m.certAccessLock.RLock()
defer m.certAccessLock.RUnlock()
return m.certSatisfiesTemplateLocked()
}
// nextRotationDeadline returns a value for the threshold at which the
// current certificate should be rotated: 80%+/-10% of the way through the
// certificate's validity period.
func (m *manager) nextRotationDeadline() time.Time {
// forceRotation is not protected by locks
if m.forceRotation {
m.forceRotation = false
return time.Now()
}
m.certAccessLock.RLock()
defer m.certAccessLock.RUnlock()
if !m.certSatisfiesTemplateLocked() {
return time.Now()
}
notAfter := m.cert.Leaf.NotAfter
totalDuration := float64(notAfter.Sub(m.cert.Leaf.NotBefore))
deadline := m.cert.Leaf.NotBefore.Add(jitteryDuration(totalDuration))
klog.V(2).Infof("Certificate expiration is %v, rotation deadline is %v", notAfter, deadline)
if m.certificateExpiration != nil {
m.certificateExpiration.Set(float64(notAfter.Unix()))
}
return deadline
}
// jitteryDuration uses some jitter to set the rotation threshold so each node
// will rotate at approximately 70-90% of the total lifetime of the
// certificate. With jitter, if a number of nodes are added to a cluster at
// approximately the same time (such as cluster creation time), they won't all
// try to rotate certificates at the same time for the rest of the life of the
// cluster.
//
// This function is represented as a variable to allow replacement during testing.
var jitteryDuration = func(totalDuration float64) time.Duration {
return wait.Jitter(time.Duration(totalDuration), 0.2) - time.Duration(totalDuration*0.3)
}
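// For a total lifetime d, wait.Jitter(d, 0.2) returns a duration in [d, 1.2d),
// so subtracting 0.3d places the rotation deadline in the [0.7d, 0.9d) window,
// i.e. the 70-90% range described above.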
// updateCached sets the most recent retrieved cert. It also sets the server
// as assumed healthy.
func (m *manager) updateCached(cert *tls.Certificate) {
m.certAccessLock.Lock()
defer m.certAccessLock.Unlock()
m.serverHealth = true
m.cert = cert
}
// updateServerError takes an error returned by the server and infers
// the health of the server based on the error. It will return nil if
// the error does not require immediate termination of any wait loops,
// and otherwise it will return the error.
func (m *manager) updateServerError(err error) error {
m.certAccessLock.Lock()
defer m.certAccessLock.Unlock()
switch {
case errors.IsUnauthorized(err):
// SSL terminating proxies may report this error instead of the master
m.serverHealth = true
case errors.IsUnexpectedServerError(err):
// generally indicates a proxy or other load balancer problem, rather than a problem coming
// from the master
m.serverHealth = false
default:
// Identify known errors that could be expected for a cert request that
// indicate everything is working normally
m.serverHealth = errors.IsNotFound(err) || errors.IsForbidden(err)
}
return nil
}
func (m *manager) generateCSR() (template *x509.CertificateRequest, csrPEM []byte, keyPEM []byte, key interface{}, err error) {
// Generate a new private key.
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("unable to generate a new private key: %v", err)
}
der, err := x509.MarshalECPrivateKey(privateKey)
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("unable to marshal the new key to DER: %v", err)
}
keyPEM = pem.EncodeToMemory(&pem.Block{Type: cert.ECPrivateKeyBlockType, Bytes: der})
template = m.getTemplate()
if template == nil {
return nil, nil, nil, nil, fmt.Errorf("unable to create a csr, no template available")
}
csrPEM, err = cert.MakeCSRFromTemplate(privateKey, template)
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("unable to create a csr from the private key: %v", err)
}
return template, csrPEM, keyPEM, privateKey, nil
}
func (m *manager) getLastRequest() *x509.CertificateRequest {
m.lastRequestLock.Lock()
defer m.lastRequestLock.Unlock()
return m.lastRequest
}
func (m *manager) setLastRequest(r *x509.CertificateRequest) {
m.lastRequestLock.Lock()
defer m.lastRequestLock.Unlock()
m.lastRequest = r
}
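
For context, a minimal sketch of how a consumer might wire up the manager defined above (illustrative only; the package and node names are hypothetical, and the CSR client is assumed to be supplied later via SetCertificateSigningRequestClient):

package kubeletcert

import (
	"crypto/x509"
	"crypto/x509/pkix"

	certificates "k8s.io/api/certificates/v1beta1"
	"k8s.io/client-go/util/certificate"
)

func newClientCertManager(store certificate.Store) (certificate.Manager, error) {
	// Template describing the client certificate that should be kept rotated.
	template := &x509.CertificateRequest{
		Subject: pkix.Name{
			CommonName:   "system:node:example-node", // hypothetical node name
			Organization: []string{"system:nodes"},
		},
	}
	return certificate.NewManager(&certificate.Config{
		Template:         template,
		Usages:           []certificates.KeyUsage{certificates.UsageClientAuth},
		CertificateStore: store,
		// CertificateSigningRequestClient is set later, before Start() is
		// called, via SetCertificateSigningRequestClient.
	})
}

The caller would then set the CSR client and call Start() to begin the rotation loop sketched in Start() above.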

File diff suppressed because it is too large

@@ -1,319 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificate
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"k8s.io/klog"
)
const (
keyExtension = ".key"
certExtension = ".crt"
pemExtension = ".pem"
currentPair = "current"
updatedPair = "updated"
)
type fileStore struct {
pairNamePrefix string
certDirectory string
keyDirectory string
certFile string
keyFile string
}
// FileStore is a store that provides certificate retrieval as well as
// the path on disk of the current PEM.
type FileStore interface {
Store
// CurrentPath returns the path on disk of the current certificate/key
// pair encoded as PEM files.
CurrentPath() string
}
// NewFileStore returns a concrete implementation of a Store that is based on
// storing the cert/key pairs in a single file per pair on disk in the
// designated directory. When starting up it will look for the currently
// selected cert/key pair in:
//
// 1. ${certDirectory}/${pairNamePrefix}-current.pem - both cert and key are in the same file.
// 2. ${certFile}, ${keyFile}
// 3. ${certDirectory}/${pairNamePrefix}.crt, ${keyDirectory}/${pairNamePrefix}.key
//
// The first one found will be used. If rotation is enabled, future cert/key
// updates will be written to the ${certDirectory} directory and
// ${certDirectory}/${pairNamePrefix}-current.pem will be created as a soft
// link to the currently selected cert/key pair.
func NewFileStore(
pairNamePrefix string,
certDirectory string,
keyDirectory string,
certFile string,
keyFile string) (FileStore, error) {
s := fileStore{
pairNamePrefix: pairNamePrefix,
certDirectory: certDirectory,
keyDirectory: keyDirectory,
certFile: certFile,
keyFile: keyFile,
}
if err := s.recover(); err != nil {
return nil, err
}
return &s, nil
}
// CurrentPath returns the path to the current version of these certificates.
func (s *fileStore) CurrentPath() string {
return filepath.Join(s.certDirectory, s.filename(currentPair))
}
// recover checks if there is a certificate rotation that was interrupted while
// in progress, and if so, attempts to recover to a good state.
func (s *fileStore) recover() error {
// If the 'current' file doesn't exist, continue on with the recovery process.
currentPath := filepath.Join(s.certDirectory, s.filename(currentPair))
if exists, err := fileExists(currentPath); err != nil {
return err
} else if exists {
return nil
}
// If the 'updated' file exists, and it is a symbolic link, continue on
// with the recovery process.
updatedPath := filepath.Join(s.certDirectory, s.filename(updatedPair))
if fi, err := os.Lstat(updatedPath); err != nil {
if os.IsNotExist(err) {
return nil
}
return err
} else if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
return fmt.Errorf("expected %q to be a symlink but it is a file", updatedPath)
}
// Move the 'updated' symlink to 'current'.
if err := os.Rename(updatedPath, currentPath); err != nil {
return fmt.Errorf("unable to rename %q to %q: %v", updatedPath, currentPath, err)
}
return nil
}
func (s *fileStore) Current() (*tls.Certificate, error) {
pairFile := filepath.Join(s.certDirectory, s.filename(currentPair))
if pairFileExists, err := fileExists(pairFile); err != nil {
return nil, err
} else if pairFileExists {
klog.Infof("Loading cert/key pair from %q.", pairFile)
return loadFile(pairFile)
}
certFileExists, err := fileExists(s.certFile)
if err != nil {
return nil, err
}
keyFileExists, err := fileExists(s.keyFile)
if err != nil {
return nil, err
}
if certFileExists && keyFileExists {
klog.Infof("Loading cert/key pair from (%q, %q).", s.certFile, s.keyFile)
return loadX509KeyPair(s.certFile, s.keyFile)
}
c := filepath.Join(s.certDirectory, s.pairNamePrefix+certExtension)
k := filepath.Join(s.keyDirectory, s.pairNamePrefix+keyExtension)
certFileExists, err = fileExists(c)
if err != nil {
return nil, err
}
keyFileExists, err = fileExists(k)
if err != nil {
return nil, err
}
if certFileExists && keyFileExists {
klog.Infof("Loading cert/key pair from (%q, %q).", c, k)
return loadX509KeyPair(c, k)
}
noKeyErr := NoCertKeyError(
fmt.Sprintf("no cert/key files read at %q, (%q, %q) or (%q, %q)",
pairFile,
s.certFile,
s.keyFile,
s.certDirectory,
s.keyDirectory))
return nil, &noKeyErr
}
func loadFile(pairFile string) (*tls.Certificate, error) {
// LoadX509KeyPair knows how to parse combined cert and private key from
// the same file.
cert, err := tls.LoadX509KeyPair(pairFile, pairFile)
if err != nil {
return nil, fmt.Errorf("could not convert data from %q into cert/key pair: %v", pairFile, err)
}
certs, err := x509.ParseCertificates(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("unable to parse certificate data: %v", err)
}
cert.Leaf = certs[0]
return &cert, nil
}
func (s *fileStore) Update(certData, keyData []byte) (*tls.Certificate, error) {
ts := time.Now().Format("2006-01-02-15-04-05")
pemFilename := s.filename(ts)
if err := os.MkdirAll(s.certDirectory, 0755); err != nil {
return nil, fmt.Errorf("could not create directory %q to store certificates: %v", s.certDirectory, err)
}
certPath := filepath.Join(s.certDirectory, pemFilename)
f, err := os.OpenFile(certPath, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0600)
if err != nil {
return nil, fmt.Errorf("could not open %q: %v", certPath, err)
}
defer f.Close()
certBlock, _ := pem.Decode(certData)
if certBlock == nil {
return nil, fmt.Errorf("invalid certificate data")
}
pem.Encode(f, certBlock)
keyBlock, _ := pem.Decode(keyData)
if keyBlock == nil {
return nil, fmt.Errorf("invalid key data")
}
pem.Encode(f, keyBlock)
cert, err := loadFile(certPath)
if err != nil {
return nil, err
}
if err := s.updateSymlink(certPath); err != nil {
return nil, err
}
return cert, nil
}
// updateSymlink updates the current symlink to point to the file that is
// passed to it. It will fail if a non-symlink file exists where the
// symlink is expected to be.
func (s *fileStore) updateSymlink(filename string) error {
// If the 'current' file either doesn't exist, or is already a symlink,
// proceed. Otherwise, this is an unrecoverable error.
currentPath := filepath.Join(s.certDirectory, s.filename(currentPair))
currentPathExists := false
if fi, err := os.Lstat(currentPath); err != nil {
if !os.IsNotExist(err) {
return err
}
} else if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
return fmt.Errorf("expected %q to be a symlink but it is a file", currentPath)
} else {
currentPathExists = true
}
// If the 'updated' file doesn't exist, proceed. If it exists but it is a
// symlink, delete it. Otherwise, this is an unrecoverable error.
updatedPath := filepath.Join(s.certDirectory, s.filename(updatedPair))
if fi, err := os.Lstat(updatedPath); err != nil {
if !os.IsNotExist(err) {
return err
}
} else if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
return fmt.Errorf("expected %q to be a symlink but it is a file", updatedPath)
} else {
if err := os.Remove(updatedPath); err != nil {
return fmt.Errorf("unable to remove %q: %v", updatedPath, err)
}
}
// Check that the new cert/key pair file exists to avoid rotating to an
// invalid cert/key.
if filenameExists, err := fileExists(filename); err != nil {
return err
} else if !filenameExists {
return fmt.Errorf("file %q does not exist so it can not be used as the currently selected cert/key", filename)
}
// Ensure the source path is absolute to ensure the symlink target is
// correct when certDirectory is a relative path.
filename, err := filepath.Abs(filename)
if err != nil {
return err
}
// Create the 'updated' symlink pointing to the requested file name.
if err := os.Symlink(filename, updatedPath); err != nil {
return fmt.Errorf("unable to create a symlink from %q to %q: %v", updatedPath, filename, err)
}
// Replace the 'current' symlink.
if currentPathExists {
if err := os.Remove(currentPath); err != nil {
return fmt.Errorf("unable to remove %q: %v", currentPath, err)
}
}
if err := os.Rename(updatedPath, currentPath); err != nil {
return fmt.Errorf("unable to rename %q to %q: %v", updatedPath, currentPath, err)
}
return nil
}
func (s *fileStore) filename(qualifier string) string {
return s.pairNamePrefix + "-" + qualifier + pemExtension
}
// withoutExt returns the given filename after removing the extension. The
// extension to remove will be the result of filepath.Ext().
func withoutExt(filename string) string {
return strings.TrimSuffix(filename, filepath.Ext(filename))
}
func loadX509KeyPair(certFile, keyFile string) (*tls.Certificate, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, err
}
certs, err := x509.ParseCertificates(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("unable to parse certificate data: %v", err)
}
cert.Leaf = certs[0]
return &cert, nil
}
// fileExists checks whether the specified file exists.
func fileExists(filename string) (bool, error) {
if _, err := os.Stat(filename); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
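
A minimal sketch of using the file store above (illustrative only; the package name, directory, and prefix are hypothetical values in the style of the kubelet):

package kubeletcert

import "k8s.io/client-go/util/certificate"

func currentServingCert() error {
	// Looks for kubelet-server-current.pem first, then the explicit cert/key
	// files, then kubelet-server.crt/.key, per the lookup order documented
	// in NewFileStore above.
	store, err := certificate.NewFileStore(
		"kubelet-server",       // pairNamePrefix
		"/var/lib/kubelet/pki", // certDirectory
		"/var/lib/kubelet/pki", // keyDirectory
		"",                     // certFile (optional explicit path)
		"",                     // keyFile (optional explicit path)
	)
	if err != nil {
		return err
	}
	cert, err := store.Current()
	if err != nil {
		// A *NoCertKeyError means no pair exists yet; the manager would fall
		// back to its bootstrap credentials in that case.
		return err
	}
	_ = cert // cert is a *tls.Certificate with Leaf populated
	return nil
}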

@@ -1,423 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificate
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"testing"
)
func TestUpdateSymlinkExistingFileError(t *testing.T) {
dir, err := ioutil.TempDir("", "k8s-test-update-symlink")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
pairFile := filepath.Join(dir, "kubelet-current.pem")
if err := ioutil.WriteFile(pairFile, nil, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", pairFile, err)
}
s := fileStore{
certDirectory: dir,
pairNamePrefix: "kubelet",
}
if err := s.updateSymlink(pairFile); err == nil {
t.Errorf("Got no error, wanted to fail updating the symlink because there is a file there.")
}
}
func TestUpdateSymlinkNewFileNotExist(t *testing.T) {
dir, err := ioutil.TempDir("", "k8s-test-update-symlink")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
oldPairFile := filepath.Join(dir, "kubelet-oldpair.pem")
if err := ioutil.WriteFile(oldPairFile, nil, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", oldPairFile, err)
}
s := fileStore{
certDirectory: dir,
pairNamePrefix: "kubelet",
}
if err := s.updateSymlink(oldPairFile); err != nil {
t.Errorf("Got error %v, wanted successful update of the symlink to point to %q", err, oldPairFile)
}
if _, err := os.Stat(oldPairFile); err != nil {
t.Errorf("Got error %v, wanted file %q to be there.", err, oldPairFile)
}
currentPairFile := filepath.Join(dir, "kubelet-current.pem")
if fi, err := os.Lstat(currentPairFile); err != nil {
t.Errorf("Got error %v, wanted file %q to be there", err, currentPairFile)
} else if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
t.Errorf("Got %q not a symlink.", currentPairFile)
}
newPairFile := filepath.Join(dir, "kubelet-newpair.pem")
if err := s.updateSymlink(newPairFile); err == nil {
t.Errorf("Got no error, wanted to fail updating the symlink the file %q does not exist.", newPairFile)
}
}
func TestUpdateSymlinkNoSymlink(t *testing.T) {
dir, err := ioutil.TempDir("", "k8s-test-update-symlink")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
pairFile := filepath.Join(dir, "kubelet-newfile.pem")
if err := ioutil.WriteFile(pairFile, nil, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", pairFile, err)
}
s := fileStore{
certDirectory: dir,
pairNamePrefix: "kubelet",
}
if err := s.updateSymlink(pairFile); err != nil {
t.Errorf("Got error %v, wanted a new symlink to be created", err)
}
if _, err := os.Stat(pairFile); err != nil {
t.Errorf("Got error %v, wanted file %q to be there", err, pairFile)
}
currentPairFile := filepath.Join(dir, "kubelet-current.pem")
if fi, err := os.Lstat(currentPairFile); err != nil {
t.Errorf("Got %v, wanted %q to be there", currentPairFile, err)
} else if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
t.Errorf("%q not a symlink, wanted a symlink.", currentPairFile)
}
}
func TestUpdateSymlinkReplaceExistingSymlink(t *testing.T) {
prefix := "kubelet"
dir, err := ioutil.TempDir("", "k8s-test-update-symlink")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
oldPairFile := filepath.Join(dir, prefix+"-oldfile.pem")
if err := ioutil.WriteFile(oldPairFile, nil, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", oldPairFile, err)
}
newPairFile := filepath.Join(dir, prefix+"-newfile.pem")
if err := ioutil.WriteFile(newPairFile, nil, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", newPairFile, err)
}
currentPairFile := filepath.Join(dir, prefix+"-current.pem")
if err := os.Symlink(oldPairFile, currentPairFile); err != nil {
t.Fatalf("unable to create a symlink from %q to %q: %v", currentPairFile, oldPairFile, err)
}
if resolved, err := os.Readlink(currentPairFile); err != nil {
t.Fatalf("Got %v when attempting to resolve symlink %q", err, currentPairFile)
} else if resolved != oldPairFile {
t.Fatalf("Got %q as resolution of symlink %q, wanted %q", resolved, currentPairFile, oldPairFile)
}
s := fileStore{
certDirectory: dir,
pairNamePrefix: prefix,
}
if err := s.updateSymlink(newPairFile); err != nil {
t.Errorf("Got error %v, wanted a new symlink to be created", err)
}
if _, err := os.Stat(oldPairFile); err != nil {
t.Errorf("Got error %v, wanted file %q to be there", oldPairFile, err)
}
if _, err := os.Stat(newPairFile); err != nil {
t.Errorf("Got error %v, wanted file %q to be there", newPairFile, err)
}
if fi, err := os.Lstat(currentPairFile); err != nil {
t.Errorf("Got %v, wanted %q to be there", currentPairFile, err)
} else if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
t.Errorf("%q not a symlink, wanted a symlink.", currentPairFile)
}
if resolved, err := os.Readlink(currentPairFile); err != nil {
t.Fatalf("Got %v when attempting to resolve symlink %q", err, currentPairFile)
} else if resolved != newPairFile {
t.Fatalf("Got %q as resolution of symlink %q, wanted %q", resolved, currentPairFile, newPairFile)
}
}
func TestLoadFile(t *testing.T) {
dir, err := ioutil.TempDir("", "k8s-test-load-cert-key-blocks")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
pairFile := filepath.Join(dir, "kubelet-pair.pem")
tests := []struct {
desc string
data []byte
}{
{desc: "cert and key", data: bytes.Join([][]byte{storeCertData.certificatePEM, storeCertData.keyPEM}, []byte("\n"))},
{desc: "key and cert", data: bytes.Join([][]byte{storeCertData.keyPEM, storeCertData.certificatePEM}, []byte("\n"))},
}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
if err := ioutil.WriteFile(pairFile, tt.data, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", pairFile, err)
}
cert, err := loadFile(pairFile)
if err != nil {
t.Fatalf("Could not load certificate from disk: %v", err)
}
if cert == nil {
t.Fatalf("There was no error, but no certificate data was returned.")
}
if cert.Leaf == nil {
t.Fatalf("Got an empty leaf, expected private data.")
}
})
}
}
func TestUpdateNoRotation(t *testing.T) {
prefix := "kubelet-server"
dir, err := ioutil.TempDir("", "k8s-test-certstore-current")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
keyFile := filepath.Join(dir, "kubelet.key")
if err := ioutil.WriteFile(keyFile, storeCertData.keyPEM, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", keyFile, err)
}
certFile := filepath.Join(dir, "kubelet.crt")
if err := ioutil.WriteFile(certFile, storeCertData.certificatePEM, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", certFile, err)
}
s, err := NewFileStore(prefix, dir, dir, certFile, keyFile)
if err != nil {
t.Fatalf("Got %v while creating a new store.", err)
}
cert, err := s.Update(storeCertData.certificatePEM, storeCertData.keyPEM)
if err != nil {
t.Errorf("Got %v while updating certificate store.", err)
}
if cert == nil {
t.Errorf("Got nil certificate, expected something real.")
}
}
func TestUpdateRotation(t *testing.T) {
prefix := "kubelet-server"
dir, err := ioutil.TempDir("", "k8s-test-certstore-current")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
keyFile := filepath.Join(dir, "kubelet.key")
if err := ioutil.WriteFile(keyFile, storeCertData.keyPEM, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", keyFile, err)
}
certFile := filepath.Join(dir, "kubelet.crt")
if err := ioutil.WriteFile(certFile, storeCertData.certificatePEM, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", certFile, err)
}
s, err := NewFileStore(prefix, dir, dir, certFile, keyFile)
if err != nil {
t.Fatalf("Got %v while creating a new store.", err)
}
cert, err := s.Update(storeCertData.certificatePEM, storeCertData.keyPEM)
if err != nil {
t.Fatalf("Got %v while updating certificate store.", err)
}
if cert == nil {
t.Fatalf("Got nil certificate, expected something real.")
}
}
func TestUpdateWithBadCertKeyData(t *testing.T) {
prefix := "kubelet-server"
dir, err := ioutil.TempDir("", "k8s-test-certstore-current")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
keyFile := filepath.Join(dir, "kubelet.key")
if err := ioutil.WriteFile(keyFile, storeCertData.keyPEM, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", keyFile, err)
}
certFile := filepath.Join(dir, "kubelet.crt")
if err := ioutil.WriteFile(certFile, storeCertData.certificatePEM, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", certFile, err)
}
s, err := NewFileStore(prefix, dir, dir, certFile, keyFile)
if err != nil {
t.Fatalf("Got %v while creating a new store.", err)
}
cert, err := s.Update([]byte{0, 0}, storeCertData.keyPEM)
if err == nil {
t.Fatalf("Got no error while updating certificate store with invalid data.")
}
if cert != nil {
t.Fatalf("Got %v certificate returned from the update, expected nil.", cert)
}
}
func TestCurrentPairFile(t *testing.T) {
prefix := "kubelet-server"
dir, err := ioutil.TempDir("", "k8s-test-certstore-current")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
pairFile := filepath.Join(dir, prefix+"-pair.pem")
data := append(storeCertData.certificatePEM, []byte("\n")...)
data = append(data, storeCertData.keyPEM...)
if err := ioutil.WriteFile(pairFile, data, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", pairFile, err)
}
currentFile := filepath.Join(dir, prefix+"-current.pem")
if err := os.Symlink(pairFile, currentFile); err != nil {
t.Fatalf("unable to create a symlink from %q to %q: %v", currentFile, pairFile, err)
}
store, err := NewFileStore("kubelet-server", dir, dir, "", "")
if err != nil {
t.Fatalf("Failed to initialize certificate store: %v", err)
}
cert, err := store.Current()
if err != nil {
t.Fatalf("Could not load certificate from disk: %v", err)
}
if cert == nil {
t.Fatalf("There was no error, but no certificate data was returned.")
}
if cert.Leaf == nil {
t.Fatalf("Got an empty leaf, expected private data.")
}
}
func TestCurrentCertKeyFiles(t *testing.T) {
prefix := "kubelet-server"
dir, err := ioutil.TempDir("", "k8s-test-certstore-current")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
certFile := filepath.Join(dir, "kubelet.crt")
if err := ioutil.WriteFile(certFile, storeCertData.certificatePEM, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", certFile, err)
}
keyFile := filepath.Join(dir, "kubelet.key")
if err := ioutil.WriteFile(keyFile, storeCertData.keyPEM, 0600); err != nil {
t.Fatalf("Unable to create the file %q: %v", keyFile, err)
}
store, err := NewFileStore(prefix, dir, dir, certFile, keyFile)
if err != nil {
t.Fatalf("Failed to initialize certificate store: %v", err)
}
cert, err := store.Current()
if err != nil {
t.Fatalf("Could not load certificate from disk: %v", err)
}
if cert == nil {
t.Fatalf("There was no error, but no certificate data was returned.")
}
if cert.Leaf == nil {
t.Fatalf("Got an empty leaf, expected private data.")
}
}
func TestCurrentNoFiles(t *testing.T) {
dir, err := ioutil.TempDir("", "k8s-test-certstore-current")
if err != nil {
t.Fatalf("Unable to create the test directory %q: %v", dir, err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Errorf("Unable to clean up test directory %q: %v", dir, err)
}
}()
store, err := NewFileStore("kubelet-server", dir, dir, "", "")
if err != nil {
t.Fatalf("Failed to initialize certificate store: %v", err)
}
cert, err := store.Current()
if err == nil {
t.Fatalf("Got no error, expected an error because the cert/key files don't exist.")
}
if _, ok := err.(*NoCertKeyError); !ok {
t.Fatalf("Got error %v, expected NoCertKeyError.", err)
}
if cert != nil {
t.Fatalf("Got certificate, expected no certificate because the cert/key files don't exist.")
}
}
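
// Hedged usage sketch (not part of the vendored file above): it wires together the
// FileStore calls exercised by these tests — NewFileStore, Update and Current. The
// import path and exact return types are assumptions inferred from the test calls,
// not confirmed by this diff.
package example

import (
	"fmt"
	"io/ioutil"

	"k8s.io/client-go/util/certificate" // assumed import path for the store under test
)

func demoFileStore(dir, certPEMFile, keyPEMFile string) error {
	// Same five arguments as in the tests: prefix, cert dir, key dir, plus
	// optional explicit cert/key files.
	store, err := certificate.NewFileStore("kubelet-server", dir, dir, "", "")
	if err != nil {
		return fmt.Errorf("initializing store: %v", err)
	}
	certPEM, err := ioutil.ReadFile(certPEMFile)
	if err != nil {
		return err
	}
	keyPEM, err := ioutil.ReadFile(keyPEMFile)
	if err != nil {
		return err
	}
	// Update persists the pair so that Current can load it; the pair-file plus
	// -current.pem symlink layout is the one TestCurrentPairFile sets up by hand.
	if _, err := store.Update(certPEM, keyPEM); err != nil {
		return fmt.Errorf("updating store: %v", err)
	}
	cert, err := store.Current()
	if err != nil {
		return err
	}
	fmt.Printf("current certificate expires %v\n", cert.Leaf.NotAfter)
	return nil
}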

View File

@ -1,187 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csr
import (
"crypto"
"crypto/x509"
"encoding/pem"
"fmt"
"reflect"
"time"
"k8s.io/klog"
certificates "k8s.io/api/certificates/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
"k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
certutil "k8s.io/client-go/util/cert"
)
// RequestCertificate will either reuse an existing certificate signing request (if this
// process has run before but not to completion) or create a new one from the PEM-encoded
// CSR and send it to the API server. It then watches the object's status and, once the
// request is approved by the API server, returns the issued certificate (PEM-encoded).
// If any error occurs, or the watch times out, it returns an error.
func RequestCertificate(client certificatesclient.CertificateSigningRequestInterface, csrData []byte, name string, usages []certificates.KeyUsage, privateKey interface{}) (req *certificates.CertificateSigningRequest, err error) {
csr := &certificates.CertificateSigningRequest{
// Username, UID, Groups will be injected by API server.
TypeMeta: metav1.TypeMeta{Kind: "CertificateSigningRequest"},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: certificates.CertificateSigningRequestSpec{
Request: csrData,
Usages: usages,
},
}
if len(csr.Name) == 0 {
csr.GenerateName = "csr-"
}
req, err = client.Create(csr)
switch {
case err == nil:
case errors.IsAlreadyExists(err) && len(name) > 0:
klog.Infof("csr for this node already exists, reusing")
req, err = client.Get(name, metav1.GetOptions{})
if err != nil {
return nil, formatError("cannot retrieve certificate signing request: %v", err)
}
if err := ensureCompatible(req, csr, privateKey); err != nil {
return nil, fmt.Errorf("retrieved csr is not compatible: %v", err)
}
klog.Infof("csr for this node is still valid")
default:
return nil, formatError("cannot create certificate signing request: %v", err)
}
return req, nil
}
// WaitForCertificate waits for the certificate to be issued, up to the given timeout, and returns an error otherwise.
func WaitForCertificate(client certificatesclient.CertificateSigningRequestInterface, req *certificates.CertificateSigningRequest, timeout time.Duration) (certData []byte, err error) {
fieldSelector := fields.OneTermEqualSelector("metadata.name", req.Name).String()
event, err := watchtools.ListWatchUntil(
timeout,
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector
return client.List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fieldSelector
return client.Watch(options)
},
},
func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Modified, watch.Added:
case watch.Deleted:
return false, fmt.Errorf("csr %q was deleted", req.Name)
default:
return false, nil
}
csr := event.Object.(*certificates.CertificateSigningRequest)
if csr.UID != req.UID {
return false, fmt.Errorf("csr %q changed UIDs", csr.Name)
}
for _, c := range csr.Status.Conditions {
if c.Type == certificates.CertificateDenied {
return false, fmt.Errorf("certificate signing request is not approved, reason: %v, message: %v", c.Reason, c.Message)
}
if c.Type == certificates.CertificateApproved && csr.Status.Certificate != nil {
return true, nil
}
}
return false, nil
},
)
if err == wait.ErrWaitTimeout {
return nil, wait.ErrWaitTimeout
}
if err != nil {
return nil, formatError("cannot watch on the certificate signing request: %v", err)
}
return event.Object.(*certificates.CertificateSigningRequest).Status.Certificate, nil
}
// ensureCompatible ensures that a CSR object is compatible with an original CSR
func ensureCompatible(new, orig *certificates.CertificateSigningRequest, privateKey interface{}) error {
newCSR, err := parseCSR(new)
if err != nil {
return fmt.Errorf("unable to parse new csr: %v", err)
}
origCSR, err := parseCSR(orig)
if err != nil {
return fmt.Errorf("unable to parse original csr: %v", err)
}
if !reflect.DeepEqual(newCSR.Subject, origCSR.Subject) {
return fmt.Errorf("csr subjects differ: new: %#v, orig: %#v", newCSR.Subject, origCSR.Subject)
}
signer, ok := privateKey.(crypto.Signer)
if !ok {
return fmt.Errorf("privateKey is not a signer")
}
newCSR.PublicKey = signer.Public()
if err := newCSR.CheckSignature(); err != nil {
return fmt.Errorf("error validating signature new CSR against old key: %v", err)
}
if len(new.Status.Certificate) > 0 {
certs, err := certutil.ParseCertsPEM(new.Status.Certificate)
if err != nil {
return fmt.Errorf("error parsing signed certificate for CSR: %v", err)
}
now := time.Now()
for _, cert := range certs {
if now.After(cert.NotAfter) {
return fmt.Errorf("one of the certificates for the CSR has expired: %s", cert.NotAfter)
}
}
}
return nil
}
// formatError preserves the type of an API error but alters the message. It expects
// a format string with a single %v argument and returns the wrapped error.
func formatError(format string, err error) error {
if s, ok := err.(errors.APIStatus); ok {
se := &errors.StatusError{ErrStatus: s.Status()}
se.ErrStatus.Message = fmt.Sprintf(format, se.ErrStatus.Message)
return se
}
return fmt.Errorf(format, err)
}
// parseCSR extracts the CSR from the API object and decodes it.
func parseCSR(obj *certificates.CertificateSigningRequest) (*x509.CertificateRequest, error) {
// extract PEM from request object
block, _ := pem.Decode(obj.Spec.Request)
if block == nil || block.Type != "CERTIFICATE REQUEST" {
return nil, fmt.Errorf("PEM block type must be CERTIFICATE REQUEST")
}
return x509.ParseCertificateRequest(block.Bytes)
}
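
// Hedged usage sketch (not part of the vendored file above): it strings together
// RequestCertificate and WaitForCertificate as a caller would. The import path and the
// chosen key usages are assumptions; the function signatures come from the file above.
package example

import (
	"time"

	certificates "k8s.io/api/certificates/v1beta1"
	certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
	"k8s.io/client-go/util/certificate/csr" // assumed import path for the package above
)

func obtainCertificate(client certificatesclient.CertificateSigningRequestInterface, csrPEM []byte, privateKey interface{}) ([]byte, error) {
	usages := []certificates.KeyUsage{
		certificates.UsageDigitalSignature,
		certificates.UsageKeyEncipherment,
		certificates.UsageClientAuth,
	}
	// An empty name makes RequestCertificate fall back to GenerateName "csr-".
	req, err := csr.RequestCertificate(client, csrPEM, "", usages, privateKey)
	if err != nil {
		return nil, err
	}
	// Block until the request is approved and signed, or the timeout expires.
	return csr.WaitForCertificate(client, req, 5*time.Minute)
}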

View File

@ -1,61 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package connrotation
import (
"context"
"net"
"testing"
"time"
)
func TestCloseAll(t *testing.T) {
closed := make(chan struct{})
dialFn := func(ctx context.Context, network, address string) (net.Conn, error) {
return closeOnlyConn{onClose: func() { closed <- struct{}{} }}, nil
}
dialer := NewDialer(dialFn)
const numConns = 10
// Outer loop to ensure Dialer is re-usable after CloseAll.
for i := 0; i < 5; i++ {
for j := 0; j < numConns; j++ {
if _, err := dialer.Dial("", ""); err != nil {
t.Fatal(err)
}
}
dialer.CloseAll()
for j := 0; j < numConns; j++ {
select {
case <-closed:
case <-time.After(time.Second):
t.Fatalf("iteration %d: 1s after CloseAll only %d/%d connections closed", i, j, numConns)
}
}
}
}
type closeOnlyConn struct {
net.Conn
onClose func()
}
func (c closeOnlyConn) Close() error {
go c.onClose()
return nil
}
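
// Hedged usage sketch (not part of the vendored file above): the pattern the test
// exercises — wrap a real dialer so every connection is tracked, then call CloseAll when
// credentials rotate so clients reconnect with the new ones. The import path is an
// assumption; NewDialer, Dial and CloseAll are the calls used in the test.
package main

import (
	"net"
	"time"

	"k8s.io/client-go/util/connrotation" // assumed import path for the package above
)

func main() {
	base := &net.Dialer{Timeout: 30 * time.Second}
	// DialContext matches the func(ctx, network, address) signature the test's dialFn uses.
	d := connrotation.NewDialer(base.DialContext)

	conn, err := d.Dial("tcp", "example.com:443")
	if err == nil {
		defer conn.Close()
	}

	// On certificate or credential rotation, force every tracked connection closed.
	d.CloseAll()
}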

View File

@ -1,52 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exec
// ExitError is an interface that presents an API similar to os.ProcessState, which is
// what ExitError from os/exec wraps. It is designed to make testing a bit easier and
// probably loses some of the cross-platform properties of the underlying library.
type ExitError interface {
String() string
Error() string
Exited() bool
ExitStatus() int
}
// CodeExitError is an implementation of ExitError consisting of an error object
// and an exit code (the upper bits of os.exec.ExitStatus).
type CodeExitError struct {
Err error
Code int
}
var _ ExitError = CodeExitError{}
func (e CodeExitError) Error() string {
return e.Err.Error()
}
func (e CodeExitError) String() string {
return e.Err.Error()
}
func (e CodeExitError) Exited() bool {
return true
}
func (e CodeExitError) ExitStatus() int {
return e.Code
}
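
// Hedged usage sketch (not part of the vendored file above): it shows how a caller
// might wrap a failure in CodeExitError and later recover the exit status through the
// ExitError interface defined above. The import path is an assumption.
package main

import (
	"errors"
	"fmt"

	utilexec "k8s.io/client-go/util/exec" // assumed path; the package above is named exec
)

func run() error {
	// Pretend an external command failed with status 2.
	return utilexec.CodeExitError{Err: errors.New("command failed"), Code: 2}
}

func main() {
	if err := run(); err != nil {
		// Callers can type-assert against the interface instead of the concrete type.
		if exitErr, ok := err.(utilexec.ExitError); ok {
			fmt.Printf("exited=%v status=%d: %v\n", exitErr.Exited(), exitErr.ExitStatus(), err)
		}
	}
}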

View File

@ -1,195 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flowcontrol
import (
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
func TestSlowBackoff(t *testing.T) {
id := "_idSlow"
tc := clock.NewFakeClock(time.Now())
step := time.Second
maxDuration := 50 * step
b := NewFakeBackOff(step, maxDuration, tc)
cases := []time.Duration{0, 1, 2, 4, 8, 16, 32, 50, 50, 50}
for ix, c := range cases {
tc.Step(step)
w := b.Get(id)
if w != c*step {
t.Errorf("input: '%d': expected %s, got %s", ix, c*step, w)
}
b.Next(id, tc.Now())
}
// Now confirm that the Reset cancels backoff.
b.Next(id, tc.Now())
b.Reset(id)
if b.Get(id) != 0 {
t.Errorf("Reset didn't clear the backoff.")
}
}
func TestBackoffReset(t *testing.T) {
id := "_idReset"
tc := clock.NewFakeClock(time.Now())
step := time.Second
maxDuration := step * 5
b := NewFakeBackOff(step, maxDuration, tc)
startTime := tc.Now()
// get to backoff = maxDuration
for i := 0; i <= int(maxDuration/step); i++ {
tc.Step(step)
b.Next(id, tc.Now())
}
// backoff should be capped at maxDuration
if !b.IsInBackOffSince(id, tc.Now()) {
t.Errorf("expected to be in Backoff got %s", b.Get(id))
}
lastUpdate := tc.Now()
tc.Step(2*maxDuration + step) // time += 11s, 11 > 2*maxDuration
if b.IsInBackOffSince(id, lastUpdate) {
t.Errorf("expected to not be in Backoff after reset (start=%s, now=%s, lastUpdate=%s), got %s", startTime, tc.Now(), lastUpdate, b.Get(id))
}
}
func TestBackoffHighWaterMark(t *testing.T) {
id := "_idHiWaterMark"
tc := clock.NewFakeClock(time.Now())
step := time.Second
maxDuration := 5 * step
b := NewFakeBackOff(step, maxDuration, tc)
// get to backoff = maxDuration
for i := 0; i <= int(maxDuration/step); i++ {
tc.Step(step)
b.Next(id, tc.Now())
}
// backoff high watermark expires after 2*maxDuration
tc.Step(maxDuration + step)
b.Next(id, tc.Now())
if b.Get(id) != maxDuration {
t.Errorf("expected Backoff to stay at high watermark %s got %s", maxDuration, b.Get(id))
}
}
func TestBackoffGC(t *testing.T) {
id := "_idGC"
tc := clock.NewFakeClock(time.Now())
step := time.Second
maxDuration := 5 * step
b := NewFakeBackOff(step, maxDuration, tc)
for i := 0; i <= int(maxDuration/step); i++ {
tc.Step(step)
b.Next(id, tc.Now())
}
lastUpdate := tc.Now()
tc.Step(maxDuration + step)
b.GC()
_, found := b.perItemBackoff[id]
if !found {
t.Errorf("expected GC to skip entry, elapsed time=%s maxDuration=%s", tc.Now().Sub(lastUpdate), maxDuration)
}
tc.Step(maxDuration + step)
b.GC()
r, found := b.perItemBackoff[id]
if found {
t.Errorf("expected GC of entry after %s got entry %v", tc.Now().Sub(lastUpdate), r)
}
}
func TestIsInBackOffSinceUpdate(t *testing.T) {
id := "_idIsInBackOffSinceUpdate"
tc := clock.NewFakeClock(time.Now())
step := time.Second
maxDuration := 10 * step
b := NewFakeBackOff(step, maxDuration, tc)
startTime := tc.Now()
cases := []struct {
tick time.Duration
inBackOff bool
value int
}{
{tick: 0, inBackOff: false, value: 0},
{tick: 1, inBackOff: false, value: 1},
{tick: 2, inBackOff: true, value: 2},
{tick: 3, inBackOff: false, value: 2},
{tick: 4, inBackOff: true, value: 4},
{tick: 5, inBackOff: true, value: 4},
{tick: 6, inBackOff: true, value: 4},
{tick: 7, inBackOff: false, value: 4},
{tick: 8, inBackOff: true, value: 8},
{tick: 9, inBackOff: true, value: 8},
{tick: 10, inBackOff: true, value: 8},
{tick: 11, inBackOff: true, value: 8},
{tick: 12, inBackOff: true, value: 8},
{tick: 13, inBackOff: true, value: 8},
{tick: 14, inBackOff: true, value: 8},
{tick: 15, inBackOff: false, value: 8},
{tick: 16, inBackOff: true, value: 10},
{tick: 17, inBackOff: true, value: 10},
{tick: 18, inBackOff: true, value: 10},
{tick: 19, inBackOff: true, value: 10},
{tick: 20, inBackOff: true, value: 10},
{tick: 21, inBackOff: true, value: 10},
{tick: 22, inBackOff: true, value: 10},
{tick: 23, inBackOff: true, value: 10},
{tick: 24, inBackOff: true, value: 10},
{tick: 25, inBackOff: false, value: 10},
{tick: 26, inBackOff: true, value: 10},
{tick: 27, inBackOff: true, value: 10},
{tick: 28, inBackOff: true, value: 10},
{tick: 29, inBackOff: true, value: 10},
{tick: 30, inBackOff: true, value: 10},
{tick: 31, inBackOff: true, value: 10},
{tick: 32, inBackOff: true, value: 10},
{tick: 33, inBackOff: true, value: 10},
{tick: 34, inBackOff: true, value: 10},
{tick: 35, inBackOff: false, value: 10},
{tick: 56, inBackOff: false, value: 0},
{tick: 57, inBackOff: false, value: 1},
}
for _, c := range cases {
tc.SetTime(startTime.Add(c.tick * step))
if c.inBackOff != b.IsInBackOffSinceUpdate(id, tc.Now()) {
t.Errorf("expected IsInBackOffSinceUpdate %v got %v at tick %s", c.inBackOff, b.IsInBackOffSinceUpdate(id, tc.Now()), c.tick*step)
}
if c.inBackOff && (time.Duration(c.value)*step != b.Get(id)) {
t.Errorf("expected backoff value=%s got %s at tick %s", time.Duration(c.value)*step, b.Get(id), c.tick*step)
}
if !c.inBackOff {
b.Next(id, tc.Now())
}
}
}
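
// Hedged usage sketch (not part of the vendored file above): the tests use the fake-clock
// constructor; this shows the same per-item backoff pattern with a real clock.
// NewBackOff(initial, max) is an assumption about the package's non-test constructor;
// Get, Next, Reset and IsInBackOffSinceUpdate are taken directly from the tests.
package example

import (
	"time"

	"k8s.io/client-go/util/flowcontrol" // assumed path; the package above is named flowcontrol
)

func syncWithBackoff(key string, sync func() error) {
	b := flowcontrol.NewBackOff(time.Second, 30*time.Second)
	for {
		if b.IsInBackOffSinceUpdate(key, time.Now()) {
			// Still backing off for this key; wait out the remaining delay.
			time.Sleep(b.Get(key))
			continue
		}
		if err := sync(); err != nil {
			// Record the failure so the next delay for this key doubles (capped at max).
			b.Next(key, time.Now())
			continue
		}
		b.Reset(key)
		return
	}
}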

View File

@ -1,153 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flowcontrol
import (
"sync"
"testing"
"time"
)
func TestMultithreadedThrottling(t *testing.T) {
// Bucket with 100QPS and no burst
r := NewTokenBucketRateLimiter(100, 1)
// channel to collect 100 tokens
taken := make(chan bool, 100)
// Set up goroutines to hammer the throttler
startCh := make(chan bool)
endCh := make(chan bool)
for i := 0; i < 10; i++ {
go func() {
// wait for the starting signal
<-startCh
for {
// get a token
r.Accept()
select {
// try to add it to the taken channel
case taken <- true:
continue
// if taken is full, notify and return
default:
endCh <- true
return
}
}
}()
}
// record wall time
startTime := time.Now()
// take the initial capacity so all tokens are the result of refill
r.Accept()
// start the thundering herd
close(startCh)
// wait for the first signal that we collected 100 tokens
<-endCh
// record wall time
endTime := time.Now()
// tolerate a 1% clock change because these things happen
if duration := endTime.Sub(startTime); duration < (time.Second * 99 / 100) {
// We shouldn't be able to get 100 tokens out of the bucket in less than 1 second of wall clock time, no matter what
t.Errorf("Expected it to take at least 1 second to get 100 tokens, took %v", duration)
} else {
t.Logf("Took %v to get 100 tokens", duration)
}
}
func TestBasicThrottle(t *testing.T) {
r := NewTokenBucketRateLimiter(1, 3)
for i := 0; i < 3; i++ {
if !r.TryAccept() {
t.Error("unexpected false accept")
}
}
if r.TryAccept() {
t.Error("unexpected true accept")
}
}
func TestIncrementThrottle(t *testing.T) {
r := NewTokenBucketRateLimiter(1, 1)
if !r.TryAccept() {
t.Error("unexpected false accept")
}
if r.TryAccept() {
t.Error("unexpected true accept")
}
// Allow to refill
time.Sleep(2 * time.Second)
if !r.TryAccept() {
t.Error("unexpected false accept")
}
}
func TestThrottle(t *testing.T) {
r := NewTokenBucketRateLimiter(10, 5)
// Should consume 5 tokens immediately, then
// the remaining 11 should take at least 1 second (0.1s each)
expectedFinish := time.Now().Add(time.Second * 1)
for i := 0; i < 16; i++ {
r.Accept()
}
if time.Now().Before(expectedFinish) {
t.Error("rate limit was not respected, finished too early")
}
}
func TestAlwaysFake(t *testing.T) {
rl := NewFakeAlwaysRateLimiter()
if !rl.TryAccept() {
t.Error("TryAccept in AlwaysFake should return true.")
}
// If this will block the test will timeout
rl.Accept()
}
func TestNeverFake(t *testing.T) {
rl := NewFakeNeverRateLimiter()
if rl.TryAccept() {
t.Error("TryAccept in NeverFake should return false.")
}
finished := false
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
rl.Accept()
finished = true
wg.Done()
}()
// Wait some time to make sure it never finished.
time.Sleep(time.Second)
if finished {
t.Error("Accept should block forever in NeverFake.")
}
rl.Stop()
wg.Wait()
if !finished {
t.Error("Stop should make Accept unblock in NeverFake.")
}
}
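
// Hedged usage sketch (not part of the vendored file above): it applies the token bucket
// limiter exercised by these tests to a simple request loop. NewTokenBucketRateLimiter,
// TryAccept, Accept and Stop are the calls used in the tests; the import path is an assumption.
package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol" // assumed path; the package above is named flowcontrol
)

func main() {
	// 5 requests per second on average, bursts of up to 10.
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)
	defer limiter.Stop()

	for i := 0; i < 20; i++ {
		if limiter.TryAccept() {
			fmt.Println("request", i, "sent immediately")
			continue
		}
		// No token available; Accept blocks until one is refilled.
		limiter.Accept()
		fmt.Println("request", i, "sent after waiting")
	}
}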

View File

@ -1,244 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integer
import "testing"
func TestIntMax(t *testing.T) {
tests := []struct {
nums []int
expectedMax int
}{
{
nums: []int{-1, 0},
expectedMax: 0,
},
{
nums: []int{-1, -2},
expectedMax: -1,
},
{
nums: []int{0, 1},
expectedMax: 1,
},
{
nums: []int{1, 2},
expectedMax: 2,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
if max := IntMax(test.nums[0], test.nums[1]); max != test.expectedMax {
t.Errorf("expected %v, got %v", test.expectedMax, max)
}
}
}
func TestIntMin(t *testing.T) {
tests := []struct {
nums []int
expectedMin int
}{
{
nums: []int{-1, 0},
expectedMin: -1,
},
{
nums: []int{-1, -2},
expectedMin: -2,
},
{
nums: []int{0, 1},
expectedMin: 0,
},
{
nums: []int{1, 2},
expectedMin: 1,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
if min := IntMin(test.nums[0], test.nums[1]); min != test.expectedMin {
t.Errorf("expected %v, got %v", test.expectedMin, min)
}
}
}
func TestInt32Max(t *testing.T) {
tests := []struct {
nums []int32
expectedMax int32
}{
{
nums: []int32{-1, 0},
expectedMax: 0,
},
{
nums: []int32{-1, -2},
expectedMax: -1,
},
{
nums: []int32{0, 1},
expectedMax: 1,
},
{
nums: []int32{1, 2},
expectedMax: 2,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
if max := Int32Max(test.nums[0], test.nums[1]); max != test.expectedMax {
t.Errorf("expected %v, got %v", test.expectedMax, max)
}
}
}
func TestInt32Min(t *testing.T) {
tests := []struct {
nums []int32
expectedMin int32
}{
{
nums: []int32{-1, 0},
expectedMin: -1,
},
{
nums: []int32{-1, -2},
expectedMin: -2,
},
{
nums: []int32{0, 1},
expectedMin: 0,
},
{
nums: []int32{1, 2},
expectedMin: 1,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
if min := Int32Min(test.nums[0], test.nums[1]); min != test.expectedMin {
t.Errorf("expected %v, got %v", test.expectedMin, min)
}
}
}
func TestInt64Max(t *testing.T) {
tests := []struct {
nums []int64
expectedMax int64
}{
{
nums: []int64{-1, 0},
expectedMax: 0,
},
{
nums: []int64{-1, -2},
expectedMax: -1,
},
{
nums: []int64{0, 1},
expectedMax: 1,
},
{
nums: []int64{1, 2},
expectedMax: 2,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
if max := Int64Max(test.nums[0], test.nums[1]); max != test.expectedMax {
t.Errorf("expected %v, got %v", test.expectedMax, max)
}
}
}
func TestInt64Min(t *testing.T) {
tests := []struct {
nums []int64
expectedMin int64
}{
{
nums: []int64{-1, 0},
expectedMin: -1,
},
{
nums: []int64{-1, -2},
expectedMin: -2,
},
{
nums: []int64{0, 1},
expectedMin: 0,
},
{
nums: []int64{1, 2},
expectedMin: 1,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
if min := Int64Min(test.nums[0], test.nums[1]); min != test.expectedMin {
t.Errorf("expected %v, got %v", test.expectedMin, min)
}
}
}
func TestRoundToInt32(t *testing.T) {
tests := []struct {
num float64
exp int32
}{
{
num: 5.5,
exp: 6,
},
{
num: -3.7,
exp: -4,
},
{
num: 3.49,
exp: 3,
},
{
num: -7.9,
exp: -8,
},
{
num: -4.499999,
exp: -4,
},
{
num: 0,
exp: 0,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
if got := RoundToInt32(test.num); got != test.exp {
t.Errorf("expected %d, got %d", test.exp, got)
}
}
}

View File

@ -1,20 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package jsonpath is a template engine using jsonpath syntax,
// which can be seen at http://goessner.net/articles/JsonPath/.
// In addition, it has a {range} {end} function to iterate over lists and slices.
package jsonpath // import "k8s.io/client-go/util/jsonpath"
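
// Hedged usage sketch (not part of the vendored package files): it exercises the
// New/Parse/Execute flow implemented in the next file, using a {range}{end} template of
// the kind the package comment above describes. The sample types are illustrative only.
package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/util/jsonpath"
)

type pod struct {
	Name   string
	Labels map[string]string
}

type podList struct {
	Items []pod
}

func main() {
	data := podList{Items: []pod{
		{Name: "pod-a", Labels: map[string]string{"app": "web"}},
		{Name: "pod-b", Labels: map[string]string{"app": "db"}},
	}}

	j := jsonpath.New("example")
	// Tolerate missing map keys instead of failing, as AllowMissingKeys documents below.
	j.AllowMissingKeys(true)

	// {range}{end} iterates the slice; prints "pod-a=web pod-b=db ".
	if err := j.Parse(`{range .Items[*]}{.Name}={.Labels.app} {end}`); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	if err := j.Execute(os.Stdout, data); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}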

View File

@ -1,517 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
"k8s.io/client-go/third_party/forked/golang/template"
)
type JSONPath struct {
name string
parser *Parser
stack [][]reflect.Value // push and pop values in different scopes
cur []reflect.Value // current scope values
beginRange int
inRange int
endRange int
allowMissingKeys bool
}
// New creates a new JSONPath with the given name.
func New(name string) *JSONPath {
return &JSONPath{
name: name,
beginRange: 0,
inRange: 0,
endRange: 0,
}
}
// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key
// cannot be located, or simply an empty result. The receiver is returned for chaining.
func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath {
j.allowMissingKeys = allow
return j
}
// Parse parses the given template and returns an error if it is invalid.
func (j *JSONPath) Parse(text string) error {
var err error
j.parser, err = Parse(j.name, text)
return err
}
// Execute binds data into the template and writes the result.
func (j *JSONPath) Execute(wr io.Writer, data interface{}) error {
fullResults, err := j.FindResults(data)
if err != nil {
return err
}
for ix := range fullResults {
if err := j.PrintResults(wr, fullResults[ix]); err != nil {
return err
}
}
return nil
}
func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
if j.parser == nil {
return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
}
j.cur = []reflect.Value{reflect.ValueOf(data)}
nodes := j.parser.Root.Nodes
fullResult := [][]reflect.Value{}
for i := 0; i < len(nodes); i++ {
node := nodes[i]
results, err := j.walk(j.cur, node)
if err != nil {
return nil, err
}
// encounter an end node, break the current block
if j.endRange > 0 && j.endRange <= j.inRange {
j.endRange -= 1
break
}
// encounter a range node, start a range loop
if j.beginRange > 0 {
j.beginRange -= 1
j.inRange += 1
for k, value := range results {
j.parser.Root.Nodes = nodes[i+1:]
if k == len(results)-1 {
j.inRange -= 1
}
nextResults, err := j.FindResults(value.Interface())
if err != nil {
return nil, err
}
fullResult = append(fullResult, nextResults...)
}
break
}
fullResult = append(fullResult, results)
}
return fullResult, nil
}
// PrintResults writes the results to the given writer
func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
for i, r := range results {
text, err := j.evalToText(r)
if err != nil {
return err
}
if i != len(results)-1 {
text = append(text, ' ')
}
if _, err = wr.Write(text); err != nil {
return err
}
}
return nil
}
// walk visits tree rooted at the given node in DFS order
func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) {
switch node := node.(type) {
case *ListNode:
return j.evalList(value, node)
case *TextNode:
return []reflect.Value{reflect.ValueOf(node.Text)}, nil
case *FieldNode:
return j.evalField(value, node)
case *ArrayNode:
return j.evalArray(value, node)
case *FilterNode:
return j.evalFilter(value, node)
case *IntNode:
return j.evalInt(value, node)
case *BoolNode:
return j.evalBool(value, node)
case *FloatNode:
return j.evalFloat(value, node)
case *WildcardNode:
return j.evalWildcard(value, node)
case *RecursiveNode:
return j.evalRecursive(value, node)
case *UnionNode:
return j.evalUnion(value, node)
case *IdentifierNode:
return j.evalIdentifier(value, node)
default:
return value, fmt.Errorf("unexpected Node %v", node)
}
}
// evalInt evaluates IntNode
func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalFloat evaluates FloatNode
func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalBool evaluates BoolNode
func (j *JSONPath) evalBool(input []reflect.Value, node *BoolNode) ([]reflect.Value, error) {
result := make([]reflect.Value, len(input))
for i := range input {
result[i] = reflect.ValueOf(node.Value)
}
return result, nil
}
// evalList evaluates ListNode
func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) {
var err error
curValue := value
for _, node := range node.Nodes {
curValue, err = j.walk(curValue, node)
if err != nil {
return curValue, err
}
}
return curValue, nil
}
// evalIdentifier evaluates IdentifierNode
func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
results := []reflect.Value{}
switch node.Name {
case "range":
j.stack = append(j.stack, j.cur)
j.beginRange += 1
results = input
case "end":
if j.endRange < j.inRange { // inside a loop, break the current block
j.endRange += 1
break
}
// the loop is about to end, pop value and continue the following execution
if len(j.stack) > 0 {
j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1]
} else {
return results, fmt.Errorf("not in range, nothing to end")
}
default:
return input, fmt.Errorf("unrecognized identifier %v", node.Name)
}
return results, nil
}
// evalArray evaluates ArrayNode
func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
value, isNil := template.Indirect(value)
if isNil {
continue
}
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice", value.Type())
}
params := node.Params
if !params[0].Known {
params[0].Value = 0
}
if params[0].Value < 0 {
params[0].Value += value.Len()
}
if !params[1].Known {
params[1].Value = value.Len()
}
if params[1].Value < 0 {
params[1].Value += value.Len()
}
sliceLength := value.Len()
if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through.
if params[0].Value >= sliceLength || params[0].Value < 0 {
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength)
}
if params[1].Value > sliceLength || params[1].Value < 0 {
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength)
}
}
if !params[2].Known {
value = value.Slice(params[0].Value, params[1].Value)
} else {
value = value.Slice3(params[0].Value, params[1].Value, params[2].Value)
}
for i := 0; i < value.Len(); i++ {
result = append(result, value.Index(i))
}
}
return result, nil
}
// evalUnion evaluates UnionNode
func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, listNode := range node.Nodes {
temp, err := j.evalList(input, listNode)
if err != nil {
return input, err
}
result = append(result, temp...)
}
return result, nil
}
func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) {
t := value.Type()
var inlineValue *reflect.Value
for ix := 0; ix < t.NumField(); ix++ {
f := t.Field(ix)
jsonTag := f.Tag.Get("json")
parts := strings.Split(jsonTag, ",")
if len(parts) == 0 {
continue
}
if parts[0] == node.Value {
return value.Field(ix), nil
}
if len(parts[0]) == 0 {
val := value.Field(ix)
inlineValue = &val
}
}
if inlineValue != nil {
if inlineValue.Kind() == reflect.Struct {
// handle 'inline'
match, err := j.findFieldInValue(inlineValue, node)
if err != nil {
return reflect.Value{}, err
}
if match.IsValid() {
return match, nil
}
}
}
return value.FieldByName(node.Value), nil
}
// evalField evaluates a struct field or a map key.
func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) {
results := []reflect.Value{}
// If there's no input, there's no output
if len(input) == 0 {
return results, nil
}
for _, value := range input {
var result reflect.Value
value, isNil := template.Indirect(value)
if isNil {
continue
}
if value.Kind() == reflect.Struct {
var err error
if result, err = j.findFieldInValue(&value, node); err != nil {
return nil, err
}
} else if value.Kind() == reflect.Map {
mapKeyType := value.Type().Key()
nodeValue := reflect.ValueOf(node.Value)
// node value type must be convertible to map key type
if !nodeValue.Type().ConvertibleTo(mapKeyType) {
return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType)
}
result = value.MapIndex(nodeValue.Convert(mapKeyType))
}
if result.IsValid() {
results = append(results, result)
}
}
if len(results) == 0 {
if j.allowMissingKeys {
return results, nil
}
return results, fmt.Errorf("%s is not found", node.Value)
}
return results, nil
}
// evalWildcard extracts all contents of the given value
func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
value, isNil := template.Indirect(value)
if isNil {
continue
}
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalRecursive visits the given values recursively and appends everything it finds to the result
func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) {
result := []reflect.Value{}
for _, value := range input {
results := []reflect.Value{}
value, isNil := template.Indirect(value)
if isNil {
continue
}
kind := value.Kind()
if kind == reflect.Struct {
for i := 0; i < value.NumField(); i++ {
results = append(results, value.Field(i))
}
} else if kind == reflect.Map {
for _, key := range value.MapKeys() {
results = append(results, value.MapIndex(key))
}
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
for i := 0; i < value.Len(); i++ {
results = append(results, value.Index(i))
}
}
if len(results) != 0 {
result = append(result, value)
output, err := j.evalRecursive(results, node)
if err != nil {
return result, err
}
result = append(result, output...)
}
}
return result, nil
}
// evalFilter filters array according to FilterNode
func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) {
results := []reflect.Value{}
for _, value := range input {
value, _ = template.Indirect(value)
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value)
}
for i := 0; i < value.Len(); i++ {
temp := []reflect.Value{value.Index(i)}
lefts, err := j.evalList(temp, node.Left)
//case exists
if node.Operator == "exists" {
if len(lefts) > 0 {
results = append(results, value.Index(i))
}
continue
}
if err != nil {
return input, err
}
var left, right interface{}
switch {
case len(lefts) == 0:
continue
case len(lefts) > 1:
return input, fmt.Errorf("can only compare one element at a time")
}
left = lefts[0].Interface()
rights, err := j.evalList(temp, node.Right)
if err != nil {
return input, err
}
switch {
case len(rights) == 0:
continue
case len(rights) > 1:
return input, fmt.Errorf("can only compare one element at a time")
}
right = rights[0].Interface()
pass := false
switch node.Operator {
case "<":
pass, err = template.Less(left, right)
case ">":
pass, err = template.Greater(left, right)
case "==":
pass, err = template.Equal(left, right)
case "!=":
pass, err = template.NotEqual(left, right)
case "<=":
pass, err = template.LessEqual(left, right)
case ">=":
pass, err = template.GreaterEqual(left, right)
default:
return results, fmt.Errorf("unrecognized filter operator %s", node.Operator)
}
if err != nil {
return results, err
}
if pass {
results = append(results, value.Index(i))
}
}
}
return results, nil
}
// evalToText translates reflect value to corresponding text
func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) {
iface, ok := template.PrintableValue(v)
if !ok {
return nil, fmt.Errorf("can't print type %s", v.Type())
}
var buffer bytes.Buffer
fmt.Fprint(&buffer, iface)
return buffer.Bytes(), nil
}
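
// Hedged sketch (not part of the vendored file above): Execute above is just FindResults
// followed by PrintResults, so callers that need the raw reflect.Values can use the
// two-step form directly. The template and the shape of data are illustrative only.
package example

import (
	"os"

	"k8s.io/client-go/util/jsonpath"
)

func printNames(data interface{}) error {
	j := jsonpath.New("names")
	if err := j.Parse("{.Items[*].Name}"); err != nil {
		return err
	}
	results, err := j.FindResults(data)
	if err != nil {
		return err
	}
	for _, perNode := range results {
		// Each outer element corresponds to one root node of the parsed template.
		if err := j.PrintResults(os.Stdout, perNode); err != nil {
			return err
		}
	}
	return nil
}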

View File

@ -1,371 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"sort"
"strings"
"testing"
)
type jsonpathTest struct {
name string
template string
input interface{}
expect string
expectError bool
}
func testJSONPath(tests []jsonpathTest, allowMissingKeys bool, t *testing.T) {
for _, test := range tests {
j := New(test.name)
j.AllowMissingKeys(allowMissingKeys)
err := j.Parse(test.template)
if err != nil {
t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
}
buf := new(bytes.Buffer)
err = j.Execute(buf, test.input)
if test.expectError {
if test.expectError && err == nil {
t.Errorf("in %s, expected execute error", test.name)
}
continue
} else if err != nil {
t.Errorf("in %s, execute error %v", test.name, err)
}
out := buf.String()
if out != test.expect {
t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out)
}
}
}
// testJSONPathSortOutput tests cases related to maps; since map iteration order is random, the output is sorted before comparison
func testJSONPathSortOutput(tests []jsonpathTest, t *testing.T) {
for _, test := range tests {
j := New(test.name)
err := j.Parse(test.template)
if err != nil {
t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
}
buf := new(bytes.Buffer)
err = j.Execute(buf, test.input)
if err != nil {
t.Errorf("in %s, execute error %v", test.name, err)
}
out := buf.String()
// since map is visited in random order, we need to sort the results.
sortedOut := strings.Fields(out)
sort.Strings(sortedOut)
sortedExpect := strings.Fields(test.expect)
sort.Strings(sortedExpect)
if !reflect.DeepEqual(sortedOut, sortedExpect) {
t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out)
}
}
}
func testFailJSONPath(tests []jsonpathTest, t *testing.T) {
for _, test := range tests {
j := New(test.name)
err := j.Parse(test.template)
if err != nil {
t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
}
buf := new(bytes.Buffer)
err = j.Execute(buf, test.input)
var out string
if err == nil {
out = "nil"
} else {
out = err.Error()
}
if out != test.expect {
t.Errorf("in %s, expect to get error %q, got %q", test.name, test.expect, out)
}
}
}
type book struct {
Category string
Author string
Title string
Price float32
}
func (b book) String() string {
return fmt.Sprintf("{Category: %s, Author: %s, Title: %s, Price: %v}", b.Category, b.Author, b.Title, b.Price)
}
type bicycle struct {
Color string
Price float32
IsNew bool
}
type empName string
type job string
type store struct {
Book []book
Bicycle []bicycle
Name string
Labels map[string]int
Employees map[empName]job
}
func TestStructInput(t *testing.T) {
storeData := store{
Name: "jsonpath",
Book: []book{
{"reference", "Nigel Rees", "Sayings of the Centurey", 8.95},
{"fiction", "Evelyn Waugh", "Sword of Honour", 12.99},
{"fiction", "Herman Melville", "Moby Dick", 8.99},
},
Bicycle: []bicycle{
{"red", 19.95, true},
{"green", 20.01, false},
},
Labels: map[string]int{
"engieer": 10,
"web/html": 15,
"k8s-app": 20,
},
Employees: map[empName]job{
"jason": "manager",
"dan": "clerk",
},
}
storeTests := []jsonpathTest{
{"plain", "hello jsonpath", nil, "hello jsonpath", false},
{"recursive", "{..}", []int{1, 2, 3}, "[1 2 3]", false},
{"filter", "{[?(@<5)]}", []int{2, 6, 3, 7}, "2 3", false},
{"quote", `{"{"}`, nil, "{", false},
{"union", "{[1,3,4]}", []int{0, 1, 2, 3, 4}, "1 3 4", false},
{"array", "{[0:2]}", []string{"Monday", "Tudesday"}, "Monday Tudesday", false},
{"variable", "hello {.Name}", storeData, "hello jsonpath", false},
{"dict/", "{$.Labels.web/html}", storeData, "15", false},
{"dict/", "{$.Employees.jason}", storeData, "manager", false},
{"dict/", "{$.Employees.dan}", storeData, "clerk", false},
{"dict-", "{.Labels.k8s-app}", storeData, "20", false},
{"nest", "{.Bicycle[*].Color}", storeData, "red green", false},
{"allarray", "{.Book[*].Author}", storeData, "Nigel Rees Evelyn Waugh Herman Melville", false},
{"allfileds", "{.Bicycle.*}", storeData, "{red 19.95 true} {green 20.01 false}", false},
{"recurfileds", "{..Price}", storeData, "8.95 12.99 8.99 19.95 20.01", false},
{"lastarray", "{.Book[-1:]}", storeData,
"{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}", false},
{"recurarray", "{..Book[2]}", storeData,
"{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}", false},
{"bool", "{.Bicycle[?(@.IsNew==true)]}", storeData, "{red 19.95 true}", false},
}
testJSONPath(storeTests, false, t)
missingKeyTests := []jsonpathTest{
{"nonexistent field", "{.hello}", storeData, "", false},
}
testJSONPath(missingKeyTests, true, t)
failStoreTests := []jsonpathTest{
{"invalid identifier", "{hello}", storeData, "unrecognized identifier hello", false},
{"nonexistent field", "{.hello}", storeData, "hello is not found", false},
{"invalid array", "{.Labels[0]}", storeData, "map[string]int is not array or slice", false},
{"invalid filter operator", "{.Book[?(@.Price<>10)]}", storeData, "unrecognized filter operator <>", false},
{"redundant end", "{range .Labels.*}{@}{end}{end}", storeData, "not in range, nothing to end", false},
}
testFailJSONPath(failStoreTests, t)
}
func TestJSONInput(t *testing.T) {
var pointsJSON = []byte(`[
{"id": "i1", "x":4, "y":-5},
{"id": "i2", "x":-2, "y":-5, "z":1},
{"id": "i3", "x": 8, "y": 3 },
{"id": "i4", "x": -6, "y": -1 },
{"id": "i5", "x": 0, "y": 2, "z": 1 },
{"id": "i6", "x": 1, "y": 4 }
]`)
var pointsData interface{}
err := json.Unmarshal(pointsJSON, &pointsData)
if err != nil {
t.Error(err)
}
pointsTests := []jsonpathTest{
{"exists filter", "{[?(@.z)].id}", pointsData, "i2 i5", false},
{"bracket key", "{[0]['id']}", pointsData, "i1", false},
}
testJSONPath(pointsTests, false, t)
}
// TestKubernetes tests some use cases from kubernetes
func TestKubernetes(t *testing.T) {
var input = []byte(`{
"kind": "List",
"items":[
{
"kind":"None",
"metadata":{
"name":"127.0.0.1",
"labels":{
"kubernetes.io/hostname":"127.0.0.1"
}
},
"status":{
"capacity":{"cpu":"4"},
"ready": true,
"addresses":[{"type": "LegacyHostIP", "address":"127.0.0.1"}]
}
},
{
"kind":"None",
"metadata":{
"name":"127.0.0.2",
"labels":{
"kubernetes.io/hostname":"127.0.0.2"
}
},
"status":{
"capacity":{"cpu":"8"},
"ready": false,
"addresses":[
{"type": "LegacyHostIP", "address":"127.0.0.2"},
{"type": "another", "address":"127.0.0.3"}
]
}
}
],
"users":[
{
"name": "myself",
"user": {}
},
{
"name": "e2e",
"user": {"username": "admin", "password": "secret"}
}
]
}`)
var nodesData interface{}
err := json.Unmarshal(input, &nodesData)
if err != nil {
t.Error(err)
}
nodesTests := []jsonpathTest{
{"range item", `{range .items[*]}{.metadata.name}, {end}{.kind}`, nodesData, "127.0.0.1, 127.0.0.2, List", false},
{"range item with quote", `{range .items[*]}{.metadata.name}{"\t"}{end}`, nodesData, "127.0.0.1\t127.0.0.2\t", false},
{"range addresss", `{.items[*].status.addresses[*].address}`, nodesData,
"127.0.0.1 127.0.0.2 127.0.0.3", false},
{"double range", `{range .items[*]}{range .status.addresses[*]}{.address}, {end}{end}`, nodesData,
"127.0.0.1, 127.0.0.2, 127.0.0.3, ", false},
{"item name", `{.items[*].metadata.name}`, nodesData, "127.0.0.1 127.0.0.2", false},
{"union nodes capacity", `{.items[*]['metadata.name', 'status.capacity']}`, nodesData,
"127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]", false},
{"range nodes capacity", `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}`, nodesData,
"[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]] ", false},
{"user password", `{.users[?(@.name=="e2e")].user.password}`, &nodesData, "secret", false},
{"hostname", `{.items[0].metadata.labels.kubernetes\.io/hostname}`, &nodesData, "127.0.0.1", false},
{"hostname filter", `{.items[?(@.metadata.labels.kubernetes\.io/hostname=="127.0.0.1")].kind}`, &nodesData, "None", false},
{"bool item", `{.items[?(@..ready==true)].metadata.name}`, &nodesData, "127.0.0.1", false},
}
testJSONPath(nodesTests, false, t)
randomPrintOrderTests := []jsonpathTest{
{"recursive name", "{..name}", nodesData, `127.0.0.1 127.0.0.2 myself e2e`, false},
}
testJSONPathSortOutput(randomPrintOrderTests, t)
}
func TestFilterPartialMatchesSometimesMissingAnnotations(t *testing.T) {
// for https://issues.k8s.io/45546
var input = []byte(`{
"kind": "List",
"items": [
{
"kind": "Pod",
"metadata": {
"name": "pod1",
"annotations": {
"color": "blue"
}
}
},
{
"kind": "Pod",
"metadata": {
"name": "pod2"
}
},
{
"kind": "Pod",
"metadata": {
"name": "pod3",
"annotations": {
"color": "green"
}
}
},
{
"kind": "Pod",
"metadata": {
"name": "pod4",
"annotations": {
"color": "blue"
}
}
}
]
}`)
var data interface{}
err := json.Unmarshal(input, &data)
if err != nil {
t.Fatal(err)
}
testJSONPath(
[]jsonpathTest{
{
"filter, should only match a subset, some items don't have annotations, tolerate missing items",
`{.items[?(@.metadata.annotations.color=="blue")].metadata.name}`,
data,
"pod1 pod4",
false, // expect no error
},
},
true, // allow missing keys
t,
)
testJSONPath(
[]jsonpathTest{
{
"filter, should only match a subset, some items don't have annotations, error on missing items",
`{.items[?(@.metadata.annotations.color=="blue")].metadata.name}`,
data,
"",
true, // expect an error
},
},
false, // don't allow missing keys
t,
)
}

View File

@ -1,255 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import "fmt"
// NodeType identifies the type of a parse tree node.
type NodeType int
// Type returns itself and provides an easy default implementation
func (t NodeType) Type() NodeType {
return t
}
func (t NodeType) String() string {
return NodeTypeName[t]
}
const (
NodeText NodeType = iota
NodeArray
NodeList
NodeField
NodeIdentifier
NodeFilter
NodeInt
NodeFloat
NodeWildcard
NodeRecursive
NodeUnion
NodeBool
)
var NodeTypeName = map[NodeType]string{
NodeText: "NodeText",
NodeArray: "NodeArray",
NodeList: "NodeList",
NodeField: "NodeField",
NodeIdentifier: "NodeIdentifier",
NodeFilter: "NodeFilter",
NodeInt: "NodeInt",
NodeFloat: "NodeFloat",
NodeWildcard: "NodeWildcard",
NodeRecursive: "NodeRecursive",
NodeUnion: "NodeUnion",
NodeBool: "NodeBool",
}
type Node interface {
Type() NodeType
String() string
}
// ListNode holds a sequence of nodes.
type ListNode struct {
NodeType
Nodes []Node // The element nodes in lexical order.
}
func newList() *ListNode {
return &ListNode{NodeType: NodeList}
}
func (l *ListNode) append(n Node) {
l.Nodes = append(l.Nodes, n)
}
func (l *ListNode) String() string {
return l.Type().String()
}
// TextNode holds plain text.
type TextNode struct {
NodeType
Text string // The text; may span newlines.
}
func newText(text string) *TextNode {
return &TextNode{NodeType: NodeText, Text: text}
}
func (t *TextNode) String() string {
return fmt.Sprintf("%s: %s", t.Type(), t.Text)
}
// FieldNode holds field of struct
type FieldNode struct {
NodeType
Value string
}
func newField(value string) *FieldNode {
return &FieldNode{NodeType: NodeField, Value: value}
}
func (f *FieldNode) String() string {
return fmt.Sprintf("%s: %s", f.Type(), f.Value)
}
// IdentifierNode holds an identifier
type IdentifierNode struct {
NodeType
Name string
}
func newIdentifier(value string) *IdentifierNode {
return &IdentifierNode{
NodeType: NodeIdentifier,
Name: value,
}
}
func (f *IdentifierNode) String() string {
return fmt.Sprintf("%s: %s", f.Type(), f.Name)
}
// ParamsEntry holds param information for ArrayNode
type ParamsEntry struct {
Value int
Known bool // whether the value is known at parse time
}
// ArrayNode holds start, end, step information for array index selection
type ArrayNode struct {
NodeType
Params [3]ParamsEntry // start, end, step
}
func newArray(params [3]ParamsEntry) *ArrayNode {
return &ArrayNode{
NodeType: NodeArray,
Params: params,
}
}
func (a *ArrayNode) String() string {
return fmt.Sprintf("%s: %v", a.Type(), a.Params)
}
// FilterNode holds operand and operator information for filter
type FilterNode struct {
NodeType
Left *ListNode
Right *ListNode
Operator string
}
func newFilter(left, right *ListNode, operator string) *FilterNode {
return &FilterNode{
NodeType: NodeFilter,
Left: left,
Right: right,
Operator: operator,
}
}
func (f *FilterNode) String() string {
return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right)
}
// IntNode holds integer value
type IntNode struct {
NodeType
Value int
}
func newInt(num int) *IntNode {
return &IntNode{NodeType: NodeInt, Value: num}
}
func (i *IntNode) String() string {
return fmt.Sprintf("%s: %d", i.Type(), i.Value)
}
// FloatNode holds float value
type FloatNode struct {
NodeType
Value float64
}
func newFloat(num float64) *FloatNode {
return &FloatNode{NodeType: NodeFloat, Value: num}
}
func (i *FloatNode) String() string {
return fmt.Sprintf("%s: %f", i.Type(), i.Value)
}
// WildcardNode means a wildcard
type WildcardNode struct {
NodeType
}
func newWildcard() *WildcardNode {
return &WildcardNode{NodeType: NodeWildcard}
}
func (i *WildcardNode) String() string {
return i.Type().String()
}
// RecursiveNode means a recursive descent operator
type RecursiveNode struct {
NodeType
}
func newRecursive() *RecursiveNode {
return &RecursiveNode{NodeType: NodeRecursive}
}
func (r *RecursiveNode) String() string {
return r.Type().String()
}
// UnionNode is union of ListNode
type UnionNode struct {
NodeType
Nodes []*ListNode
}
func newUnion(nodes []*ListNode) *UnionNode {
return &UnionNode{NodeType: NodeUnion, Nodes: nodes}
}
func (u *UnionNode) String() string {
return u.Type().String()
}
// BoolNode holds bool value
type BoolNode struct {
NodeType
Value bool
}
func newBool(value bool) *BoolNode {
return &BoolNode{NodeType: NodeBool, Value: value}
}
func (b *BoolNode) String() string {
return fmt.Sprintf("%s: %t", b.Type(), b.Value)
}

View File

@ -1,525 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
const eof = -1
const (
leftDelim = "{"
rightDelim = "}"
)
type Parser struct {
Name string
Root *ListNode
input string
cur *ListNode
pos int
start int
width int
}
var (
ErrSyntax = errors.New("invalid syntax")
dictKeyRex = regexp.MustCompile(`^'([^']*)'$`)
sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`)
)
// Parse parses the given text and returns a Parser.
// If an error is encountered, parsing stops and a nil
// Parser is returned with the error.
func Parse(name, text string) (*Parser, error) {
p := NewParser(name)
err := p.Parse(text)
if err != nil {
p = nil
}
return p, err
}
func NewParser(name string) *Parser {
return &Parser{
Name: name,
}
}
// parseAction parses the expression inside the delimiters
func parseAction(name, text string) (*Parser, error) {
p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim))
// when an error happens, p will be nil, so we need to return here
if err != nil {
return p, err
}
p.Root = p.Root.Nodes[0].(*ListNode)
return p, nil
}
func (p *Parser) Parse(text string) error {
p.input = text
p.Root = newList()
p.pos = 0
return p.parseText(p.Root)
}
// consumeText returns the text consumed since the last call to consumeText
func (p *Parser) consumeText() string {
value := p.input[p.start:p.pos]
p.start = p.pos
return value
}
// next returns the next rune in the input.
func (p *Parser) next() rune {
if p.pos >= len(p.input) {
p.width = 0
return eof
}
r, w := utf8.DecodeRuneInString(p.input[p.pos:])
p.width = w
p.pos += p.width
return r
}
// peek returns but does not consume the next rune in the input.
func (p *Parser) peek() rune {
r := p.next()
p.backup()
return r
}
// backup steps back one rune. Can only be called once per call of next.
func (p *Parser) backup() {
p.pos -= p.width
}
func (p *Parser) parseText(cur *ListNode) error {
for {
if strings.HasPrefix(p.input[p.pos:], leftDelim) {
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return p.parseLeftDelim(cur)
}
if p.next() == eof {
break
}
}
// Correctly reached EOF.
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return nil
}
// parseLeftDelim scans the left delimiter, which is known to be present.
func (p *Parser) parseLeftDelim(cur *ListNode) error {
p.pos += len(leftDelim)
p.consumeText()
newNode := newList()
cur.append(newNode)
cur = newNode
return p.parseInsideAction(cur)
}
func (p *Parser) parseInsideAction(cur *ListNode) error {
prefixMap := map[string]func(*ListNode) error{
rightDelim: p.parseRightDelim,
"[?(": p.parseFilter,
"..": p.parseRecursive,
}
for prefix, parseFunc := range prefixMap {
if strings.HasPrefix(p.input[p.pos:], prefix) {
return parseFunc(cur)
}
}
switch r := p.next(); {
case r == eof || isEndOfLine(r):
return fmt.Errorf("unclosed action")
case r == ' ':
p.consumeText()
case r == '@' || r == '$': //the current object, just pass it
p.consumeText()
case r == '[':
return p.parseArray(cur)
case r == '"' || r == '\'':
return p.parseQuote(cur, r)
case r == '.':
return p.parseField(cur)
case r == '+' || r == '-' || unicode.IsDigit(r):
p.backup()
return p.parseNumber(cur)
case isAlphaNumeric(r):
p.backup()
return p.parseIdentifier(cur)
default:
return fmt.Errorf("unrecognized character in action: %#U", r)
}
return p.parseInsideAction(cur)
}
// parseRightDelim scans the right delimiter, which is known to be present.
func (p *Parser) parseRightDelim(cur *ListNode) error {
p.pos += len(rightDelim)
p.consumeText()
cur = p.Root
return p.parseText(cur)
}
// parseIdentifier scans build-in keywords, like "range" "end"
func (p *Parser) parseIdentifier(cur *ListNode) error {
var r rune
for {
r = p.next()
if isTerminator(r) {
p.backup()
break
}
}
value := p.consumeText()
if isBool(value) {
v, err := strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("can not parse bool '%s': %s", value, err.Error())
}
cur.append(newBool(v))
} else {
cur.append(newIdentifier(value))
}
return p.parseInsideAction(cur)
}
// parseRecursive scans the recursive descent operator ..
func (p *Parser) parseRecursive(cur *ListNode) error {
p.pos += len("..")
p.consumeText()
cur.append(newRecursive())
if r := p.peek(); isAlphaNumeric(r) {
return p.parseField(cur)
}
return p.parseInsideAction(cur)
}
// parseNumber scans number
func (p *Parser) parseNumber(cur *ListNode) error {
r := p.peek()
if r == '+' || r == '-' {
r = p.next()
}
for {
r = p.next()
if r != '.' && !unicode.IsDigit(r) {
p.backup()
break
}
}
value := p.consumeText()
i, err := strconv.Atoi(value)
if err == nil {
cur.append(newInt(i))
return p.parseInsideAction(cur)
}
d, err := strconv.ParseFloat(value, 64)
if err == nil {
cur.append(newFloat(d))
return p.parseInsideAction(cur)
}
return fmt.Errorf("cannot parse number %s", value)
}
// parseArray scans array index selection
func (p *Parser) parseArray(cur *ListNode) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated array")
case ']':
break Loop
}
}
text := p.consumeText()
text = text[1 : len(text)-1]
if text == "*" {
text = ":"
}
//union operator
strs := strings.Split(text, ",")
if len(strs) > 1 {
union := []*ListNode{}
for _, str := range strs {
parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " ")))
if err != nil {
return err
}
union = append(union, parser.Root)
}
cur.append(newUnion(union))
return p.parseInsideAction(cur)
}
// dict key
value := dictKeyRex.FindStringSubmatch(text)
if value != nil {
parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1]))
if err != nil {
return err
}
for _, node := range parser.Root.Nodes {
cur.append(node)
}
return p.parseInsideAction(cur)
}
//slice operator
value = sliceOperatorRex.FindStringSubmatch(text)
if value == nil {
return fmt.Errorf("invalid array index %s", text)
}
value = value[1:]
params := [3]ParamsEntry{}
for i := 0; i < 3; i++ {
if value[i] != "" {
if i > 0 {
value[i] = value[i][1:]
}
if i > 0 && value[i] == "" {
params[i].Known = false
} else {
var err error
params[i].Known = true
params[i].Value, err = strconv.Atoi(value[i])
if err != nil {
return fmt.Errorf("array index %s is not a number", value[i])
}
}
} else {
if i == 1 {
params[i].Known = true
params[i].Value = params[0].Value + 1
} else {
params[i].Known = false
params[i].Value = 0
}
}
}
cur.append(newArray(params))
return p.parseInsideAction(cur)
}
// parseFilter scans a filter expression inside an array selection
func (p *Parser) parseFilter(cur *ListNode) error {
p.pos += len("[?(")
p.consumeText()
begin := false
end := false
var pair rune
Loop:
for {
r := p.next()
switch r {
case eof, '\n':
return fmt.Errorf("unterminated filter")
case '"', '\'':
if begin == false {
//save the paired rune
begin = true
pair = r
continue
}
//only mark the end when the matching (unescaped) paired rune is met
if p.input[p.pos-2] != '\\' && r == pair {
end = true
}
case ')':
//in the right-hand parser below, quotes appear either zero times or once,
//and must be paired at the beginning and end
if begin == end {
break Loop
}
}
}
if p.next() != ']' {
return fmt.Errorf("unclosed array expect ]")
}
reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`)
text := p.consumeText()
text = text[:len(text)-2]
value := reg.FindStringSubmatch(text)
if value == nil {
parser, err := parseAction("text", text)
if err != nil {
return err
}
cur.append(newFilter(parser.Root, newList(), "exists"))
} else {
leftParser, err := parseAction("left", value[1])
if err != nil {
return err
}
rightParser, err := parseAction("right", value[3])
if err != nil {
return err
}
cur.append(newFilter(leftParser.Root, rightParser.Root, value[2]))
}
return p.parseInsideAction(cur)
}
// parseQuote unquotes a string enclosed in double or single quotes
func (p *Parser) parseQuote(cur *ListNode, end rune) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated quoted string")
case end:
//if it's not escaped, break the Loop
if p.input[p.pos-2] != '\\' {
break Loop
}
}
}
value := p.consumeText()
s, err := UnquoteExtend(value)
if err != nil {
return fmt.Errorf("unquote string %s error %v", value, err)
}
cur.append(newText(s))
return p.parseInsideAction(cur)
}
// parseField scans a field until a terminator
func (p *Parser) parseField(cur *ListNode) error {
p.consumeText()
for p.advance() {
}
value := p.consumeText()
if value == "*" {
cur.append(newWildcard())
} else {
cur.append(newField(strings.Replace(value, "\\", "", -1)))
}
return p.parseInsideAction(cur)
}
// advance scans until the next non-escaped terminator
func (p *Parser) advance() bool {
r := p.next()
if r == '\\' {
p.next()
} else if isTerminator(r) {
p.backup()
return false
}
return true
}
// isTerminator reports whether the input is at a valid termination character that may appear after an identifier.
func isTerminator(r rune) bool {
if isSpace(r) || isEndOfLine(r) {
return true
}
switch r {
case eof, '.', ',', '[', ']', '$', '@', '{', '}':
return true
}
return false
}
// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
return r == '\r' || r == '\n'
}
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}
// isBool reports whether s is a boolean value.
func isBool(s string) bool {
return s == "true" || s == "false"
}
// UnquoteExtend is almost the same as strconv.Unquote(), but it also supports parsing single-quoted strings
func UnquoteExtend(s string) (string, error) {
n := len(s)
if n < 2 {
return "", ErrSyntax
}
quote := s[0]
if quote != s[n-1] {
return "", ErrSyntax
}
s = s[1 : n-1]
if quote != '"' && quote != '\'' {
return "", ErrSyntax
}
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) {
return s, nil
}
var runeTmp [utf8.UTFMax]byte
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
for len(s) > 0 {
c, multibyte, ss, err := strconv.UnquoteChar(s, quote)
if err != nil {
return "", err
}
s = ss
if c < utf8.RuneSelf || !multibyte {
buf = append(buf, byte(c))
} else {
n := utf8.EncodeRune(runeTmp[:], c)
buf = append(buf, runeTmp[:n]...)
}
}
return string(buf), nil
}
func contains(s string, c byte) bool {
for i := 0; i < len(s); i++ {
if s[i] == c {
return true
}
}
return false
}
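For context, a minimal sketch of driving this parser through the package's exported JSONPath helper (New/Parse/Execute); the k8s.io/client-go/util/jsonpath import path is an assumption and may differ in a vendored tree:

package main

import (
	"os"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	data := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"metadata": map[string]interface{}{"name": "pod-a"}},
			map[string]interface{}{"metadata": map[string]interface{}{"name": "pod-b"}},
		},
	}
	jp := jsonpath.New("example")
	// The template exercises the text, range/end identifier, wildcard array
	// and field paths handled by the parser above.
	if err := jp.Parse(`{range .items[*]}{.metadata.name}{"\n"}{end}`); err != nil {
		panic(err)
	}
	if err := jp.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}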

View File

@ -1,152 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"testing"
)
type parserTest struct {
name string
text string
nodes []Node
shouldError bool
}
var parserTests = []parserTest{
{"plain", `hello jsonpath`, []Node{newText("hello jsonpath")}, false},
{"variable", `hello {.jsonpath}`,
[]Node{newText("hello "), newList(), newField("jsonpath")}, false},
{"arrayfiled", `hello {['jsonpath']}`,
[]Node{newText("hello "), newList(), newField("jsonpath")}, false},
{"quote", `{"{"}`, []Node{newList(), newText("{")}, false},
{"array", `{[1:3]}`, []Node{newList(),
newArray([3]ParamsEntry{{1, true}, {3, true}, {0, false}})}, false},
{"allarray", `{.book[*].author}`,
[]Node{newList(), newField("book"),
newArray([3]ParamsEntry{{0, false}, {0, false}, {0, false}}), newField("author")}, false},
{"wildcard", `{.bicycle.*}`,
[]Node{newList(), newField("bicycle"), newWildcard()}, false},
{"filter", `{[?(@.price<3)]}`,
[]Node{newList(), newFilter(newList(), newList(), "<"),
newList(), newField("price"), newList(), newInt(3)}, false},
{"recursive", `{..}`, []Node{newList(), newRecursive()}, false},
{"recurField", `{..price}`,
[]Node{newList(), newRecursive(), newField("price")}, false},
{"arraydict", `{['book.price']}`, []Node{newList(),
newField("book"), newField("price"),
}, false},
{"union", `{['bicycle.price', 3, 'book.price']}`, []Node{newList(), newUnion([]*ListNode{}),
newList(), newField("bicycle"), newField("price"),
newList(), newArray([3]ParamsEntry{{3, true}, {4, true}, {0, false}}),
newList(), newField("book"), newField("price"),
}, false},
{"range", `{range .items}{.name},{end}`, []Node{
newList(), newIdentifier("range"), newField("items"),
newList(), newField("name"), newText(","),
newList(), newIdentifier("end"),
}, false},
{"malformat input", `{\\\}`, []Node{}, true},
{"paired parentheses in quotes", `{[?(@.status.nodeInfo.osImage == "()")]}`,
[]Node{newList(), newFilter(newList(), newList(), "=="), newList(), newField("status"), newField("nodeInfo"), newField("osImage"), newList(), newText("()")}, false},
{"paired parentheses in double quotes and with double quotes escape", `{[?(@.status.nodeInfo.osImage == "(\"\")")]}`,
[]Node{newList(), newFilter(newList(), newList(), "=="), newList(), newField("status"), newField("nodeInfo"), newField("osImage"), newList(), newText("(\"\")")}, false},
{"unregular parentheses in double quotes", `{[?(@.test == "())(")]}`,
[]Node{newList(), newFilter(newList(), newList(), "=="), newList(), newField("test"), newList(), newText("())(")}, false},
{"plain text in single quotes", `{[?(@.status.nodeInfo.osImage == 'Linux')]}`,
[]Node{newList(), newFilter(newList(), newList(), "=="), newList(), newField("status"), newField("nodeInfo"), newField("osImage"), newList(), newText("Linux")}, false},
{"test filter suffix", `{[?(@.status.nodeInfo.osImage == "{[()]}")]}`,
[]Node{newList(), newFilter(newList(), newList(), "=="), newList(), newField("status"), newField("nodeInfo"), newField("osImage"), newList(), newText("{[()]}")}, false},
{"double inside single", `{[?(@.status.nodeInfo.osImage == "''")]}`,
[]Node{newList(), newFilter(newList(), newList(), "=="), newList(), newField("status"), newField("nodeInfo"), newField("osImage"), newList(), newText("''")}, false},
{"single inside double", `{[?(@.status.nodeInfo.osImage == '""')]}`,
[]Node{newList(), newFilter(newList(), newList(), "=="), newList(), newField("status"), newField("nodeInfo"), newField("osImage"), newList(), newText("\"\"")}, false},
{"single containing escaped single", `{[?(@.status.nodeInfo.osImage == '\\\'')]}`,
[]Node{newList(), newFilter(newList(), newList(), "=="), newList(), newField("status"), newField("nodeInfo"), newField("osImage"), newList(), newText("\\'")}, false},
}
func collectNode(nodes []Node, cur Node) []Node {
nodes = append(nodes, cur)
switch cur.Type() {
case NodeList:
for _, node := range cur.(*ListNode).Nodes {
nodes = collectNode(nodes, node)
}
case NodeFilter:
nodes = collectNode(nodes, cur.(*FilterNode).Left)
nodes = collectNode(nodes, cur.(*FilterNode).Right)
case NodeUnion:
for _, node := range cur.(*UnionNode).Nodes {
nodes = collectNode(nodes, node)
}
}
return nodes
}
func TestParser(t *testing.T) {
for _, test := range parserTests {
parser, err := Parse(test.name, test.text)
if test.shouldError {
if err == nil {
t.Errorf("unexpected non-error when parsing %s", test.name)
}
continue
}
if err != nil {
t.Errorf("parse %s error %v", test.name, err)
}
result := collectNode([]Node{}, parser.Root)[1:]
if len(result) != len(test.nodes) {
t.Errorf("in %s, expect to get %d nodes, got %d nodes", test.name, len(test.nodes), len(result))
t.Error(result)
}
for i, expect := range test.nodes {
if result[i].String() != expect.String() {
t.Errorf("in %s, %dth node, expect %v, got %v", test.name, i, expect, result[i])
}
}
}
}
type failParserTest struct {
name string
text string
err string
}
func TestFailParser(t *testing.T) {
failParserTests := []failParserTest{
{"unclosed action", "{.hello", "unclosed action"},
{"unrecognized character", "{*}", "unrecognized character in action: U+002A '*'"},
{"invalid number", "{+12.3.0}", "cannot parse number +12.3.0"},
{"unterminated array", "{[1}", "unterminated array"},
{"invalid index", "{[::-1]}", "invalid array index ::-1"},
{"unterminated filter", "{[?(.price]}", "unterminated filter"},
}
for _, test := range failParserTests {
_, err := Parse(test.name, test.text)
var out string
if err == nil {
out = "nil"
} else {
out = err.Error()
}
if out != test.err {
t.Errorf("in %s, expect to get error %v, got %v", test.name, test.err, out)
}
}
}

View File

@ -1,2 +0,0 @@
reviewers:
- caesarxuchao

View File

@ -1,79 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package retry
import (
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
)
// DefaultRetry is the recommended retry for a conflict where multiple clients
// are making changes to the same resource.
var DefaultRetry = wait.Backoff{
Steps: 5,
Duration: 10 * time.Millisecond,
Factor: 1.0,
Jitter: 0.1,
}
// DefaultBackoff is the recommended backoff for a conflict where a client
// may be attempting to make an unrelated modification to a resource under
// active management by one or more controllers.
var DefaultBackoff = wait.Backoff{
Steps: 4,
Duration: 10 * time.Millisecond,
Factor: 5.0,
Jitter: 0.1,
}
// RetryOnConflict executes the provided function repeatedly, retrying if the server returns a conflicting
// write. Callers should preserve previous executions if they wish to retry changes. It performs an
// exponential backoff.
//
// var pod *api.Pod
// err := RetryOnConflict(DefaultBackoff, func() (err error) {
// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus)
// return
// })
// if err != nil {
// // may be conflict if max retries were hit
// return err
// }
// ...
//
// TODO: Make Backoff an interface?
func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
var lastConflictErr error
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
err := fn()
switch {
case err == nil:
return true, nil
case errors.IsConflict(err):
lastConflictErr = err
return false, nil
default:
return false, err
}
})
if err == wait.ErrWaitTimeout {
err = lastConflictErr
}
return err
}
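A minimal, self-contained sketch of the intended call pattern, simulating the server-side 409 with errors.NewConflict rather than a real client (the names and import paths below are illustrative; k8s.io/client-go/util/retry is assumed as the package location):

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		attempts++
		if attempts < 3 {
			// Pretend the apiserver rejected the write with a conflict;
			// a real caller would re-read the object and re-apply its change.
			return apierrors.NewConflict(schema.GroupResource{Resource: "pods"}, "example", nil)
		}
		return nil
	})
	fmt.Println(attempts, err) // typically: 3 <nil>
}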

View File

@ -1,71 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package retry
import (
"fmt"
"testing"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
)
func TestRetryOnConflict(t *testing.T) {
opts := wait.Backoff{Factor: 1.0, Steps: 3}
conflictErr := errors.NewConflict(schema.GroupResource{Resource: "test"}, "other", nil)
// never returns
err := RetryOnConflict(opts, func() error {
return conflictErr
})
if err != conflictErr {
t.Errorf("unexpected error: %v", err)
}
// returns immediately
i := 0
err = RetryOnConflict(opts, func() error {
i++
return nil
})
if err != nil || i != 1 {
t.Errorf("unexpected error: %v", err)
}
// returns immediately on error
testErr := fmt.Errorf("some other error")
err = RetryOnConflict(opts, func() error {
return testErr
})
if err != testErr {
t.Errorf("unexpected error: %v", err)
}
// keeps retrying
i = 0
err = RetryOnConflict(opts, func() error {
if i < 2 {
i++
return errors.NewConflict(schema.GroupResource{Resource: "test"}, "other", nil)
}
return nil
})
if err != nil || i != 2 {
t.Errorf("unexpected error: %v", err)
}
}

View File

@ -1,139 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"io/ioutil"
"net/http"
"net/url"
"reflect"
"sync"
)
// TestInterface is a simple interface providing Errorf and Logf, to make injection for
// testing easier (insert 'yo dawg' meme here).
type TestInterface interface {
Errorf(format string, args ...interface{})
Logf(format string, args ...interface{})
}
// LogInterface is a simple interface to allow injection of Logf to report serving errors.
type LogInterface interface {
Logf(format string, args ...interface{})
}
// FakeHandler is to assist in testing HTTP requests. Notice that FakeHandler is
// not thread safe and you must not direct traffic to it except for the request
// you want to test. You can do this by hiding it in an http.ServeMux.
type FakeHandler struct {
RequestReceived *http.Request
RequestBody string
StatusCode int
ResponseBody string
// For logging - you can use a *testing.T
// This will keep log messages associated with the test.
T LogInterface
// Enforce "only one use" constraint.
lock sync.Mutex
requestCount int
hasBeenChecked bool
SkipRequestFn func(verb string, url url.URL) bool
}
func (f *FakeHandler) SetResponseBody(responseBody string) {
f.lock.Lock()
defer f.lock.Unlock()
f.ResponseBody = responseBody
}
func (f *FakeHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
f.lock.Lock()
defer f.lock.Unlock()
if f.SkipRequestFn != nil && f.SkipRequestFn(request.Method, *request.URL) {
response.Header().Set("Content-Type", "application/json")
response.WriteHeader(f.StatusCode)
response.Write([]byte(f.ResponseBody))
return
}
f.requestCount++
if f.hasBeenChecked {
panic("got request after having been validated")
}
f.RequestReceived = request
response.Header().Set("Content-Type", "application/json")
response.WriteHeader(f.StatusCode)
response.Write([]byte(f.ResponseBody))
bodyReceived, err := ioutil.ReadAll(request.Body)
if err != nil && f.T != nil {
f.T.Logf("Received read error: %v", err)
}
f.RequestBody = string(bodyReceived)
if f.T != nil {
f.T.Logf("request body: %s", f.RequestBody)
}
}
func (f *FakeHandler) ValidateRequestCount(t TestInterface, count int) bool {
ok := true
f.lock.Lock()
defer f.lock.Unlock()
if f.requestCount != count {
ok = false
t.Errorf("Expected %d call, but got %d. Only the last call is recorded and checked.", count, f.requestCount)
}
f.hasBeenChecked = true
return ok
}
// ValidateRequest verifies that FakeHandler received a request with expected path, method, and body.
func (f *FakeHandler) ValidateRequest(t TestInterface, expectedPath, expectedMethod string, body *string) {
f.lock.Lock()
defer f.lock.Unlock()
if f.requestCount != 1 {
t.Logf("Expected 1 call, but got %v. Only the last call is recorded and checked.", f.requestCount)
}
f.hasBeenChecked = true
expectURL, err := url.Parse(expectedPath)
if err != nil {
t.Errorf("Couldn't parse %v as a URL.", expectedPath)
}
if f.RequestReceived == nil {
t.Errorf("Unexpected nil request received for %s", expectedPath)
return
}
if f.RequestReceived.URL.Path != expectURL.Path {
t.Errorf("Unexpected request path for request %#v, received: %q, expected: %q", f.RequestReceived, f.RequestReceived.URL.Path, expectURL.Path)
}
if e, a := expectURL.Query(), f.RequestReceived.URL.Query(); !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected query for request %#v, received: %q, expected: %q", f.RequestReceived, a, e)
}
if f.RequestReceived.Method != expectedMethod {
t.Errorf("Unexpected method: %q, expected: %q", f.RequestReceived.Method, expectedMethod)
}
if body != nil {
if *body != f.RequestBody {
t.Errorf("Received body:\n%s\n Doesn't match expected body:\n%s", f.RequestBody, *body)
}
}
}

View File

@ -1,180 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"bytes"
"net/http"
"net/http/httptest"
"testing"
)
func TestFakeHandlerPath(t *testing.T) {
handler := FakeHandler{StatusCode: http.StatusOK}
server := httptest.NewServer(&handler)
defer server.Close()
method := "GET"
path := "/foo/bar"
body := "somebody"
req, err := http.NewRequest(method, server.URL+path, bytes.NewBufferString(body))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
client := http.Client{}
_, err = client.Do(req)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
handler.ValidateRequest(t, path, method, &body)
}
func TestFakeHandlerPathNoBody(t *testing.T) {
handler := FakeHandler{StatusCode: http.StatusOK}
server := httptest.NewServer(&handler)
defer server.Close()
method := "GET"
path := "/foo/bar"
req, err := http.NewRequest(method, server.URL+path, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
client := http.Client{}
_, err = client.Do(req)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
handler.ValidateRequest(t, path, method, nil)
}
type fakeError struct {
errors []string
}
func (f *fakeError) Errorf(format string, args ...interface{}) {
f.errors = append(f.errors, format)
}
func (f *fakeError) Logf(format string, args ...interface{}) {}
func TestFakeHandlerWrongPath(t *testing.T) {
handler := FakeHandler{StatusCode: http.StatusOK}
server := httptest.NewServer(&handler)
defer server.Close()
method := "GET"
path := "/foo/bar"
fakeT := fakeError{}
req, err := http.NewRequest(method, server.URL+"/foo/baz", nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
client := http.Client{}
_, err = client.Do(req)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
handler.ValidateRequest(&fakeT, path, method, nil)
if len(fakeT.errors) != 1 {
t.Errorf("Unexpected error set: %#v", fakeT.errors)
}
}
func TestFakeHandlerWrongMethod(t *testing.T) {
handler := FakeHandler{StatusCode: http.StatusOK}
server := httptest.NewServer(&handler)
defer server.Close()
method := "GET"
path := "/foo/bar"
fakeT := fakeError{}
req, err := http.NewRequest("PUT", server.URL+path, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
client := http.Client{}
_, err = client.Do(req)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
handler.ValidateRequest(&fakeT, path, method, nil)
if len(fakeT.errors) != 1 {
t.Errorf("Unexpected error set: %#v", fakeT.errors)
}
}
func TestFakeHandlerWrongBody(t *testing.T) {
handler := FakeHandler{StatusCode: http.StatusOK}
server := httptest.NewServer(&handler)
defer server.Close()
method := "GET"
path := "/foo/bar"
body := "somebody"
fakeT := fakeError{}
req, err := http.NewRequest(method, server.URL+path, bytes.NewBufferString(body))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
client := http.Client{}
_, err = client.Do(req)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
otherbody := "otherbody"
handler.ValidateRequest(&fakeT, path, method, &otherbody)
if len(fakeT.errors) != 1 {
t.Errorf("Unexpected error set: %#v", fakeT.errors)
}
}
func TestFakeHandlerNilBody(t *testing.T) {
handler := FakeHandler{StatusCode: http.StatusOK}
server := httptest.NewServer(&handler)
defer server.Close()
method := "GET"
path := "/foo/bar"
body := "somebody"
fakeT := fakeError{}
req, err := http.NewRequest(method, server.URL+path, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
client := http.Client{}
_, err = client.Do(req)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
handler.ValidateRequest(&fakeT, path, method, &body)
if len(fakeT.errors) != 1 {
t.Errorf("Unexpected error set: %#v", fakeT.errors)
}
}

View File

@ -1,44 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"io/ioutil"
"os"
)
// MkTmpdir creates a temporary directory based upon the prefix passed in.
// If successful, it returns the temporary directory path. The directory can be
// deleted with a call to "os.RemoveAll(...)".
// In case of error, it'll return an empty string and the error.
func MkTmpdir(prefix string) (string, error) {
tmpDir, err := ioutil.TempDir(os.TempDir(), prefix)
if err != nil {
return "", err
}
return tmpDir, nil
}
// MkTmpdirOrDie does the same work as MkTmpdir, except that in case of
// an error, it'll trigger a panic.
func MkTmpdirOrDie(prefix string) string {
tmpDir, err := MkTmpdir(prefix)
if err != nil {
panic(err)
}
return tmpDir
}
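A small sketch of the expected usage, with cleanup left to the caller as noted above (the k8s.io/client-go/util/testing import path is assumed):

package main

import (
	"fmt"
	"os"

	utiltesting "k8s.io/client-go/util/testing"
)

func main() {
	dir, err := utiltesting.MkTmpdir("demo-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir) // the caller is responsible for deleting the directory
	fmt.Println("working in", dir)
}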

View File

@ -1,211 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"math"
"sync"
"time"
"golang.org/x/time/rate"
)
type RateLimiter interface {
// When gets an item and decides how long that item should wait before it is processed
When(item interface{}) time.Duration
// Forget indicates that an item is finished being retried. Whether it permanently failed
// or succeeded, we'll stop tracking it
Forget(item interface{})
// NumRequeues returns how many failures the item has had
NumRequeues(item interface{}) int
}
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall limiter is a token bucket and the per-item limiter is exponential.
func DefaultControllerRateLimiter() RateLimiter {
return NewMaxOfRateLimiter(
NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item)
&BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
)
}
// BucketRateLimiter adapts a standard token bucket to the workqueue RateLimiter API
type BucketRateLimiter struct {
*rate.Limiter
}
var _ RateLimiter = &BucketRateLimiter{}
func (r *BucketRateLimiter) When(item interface{}) time.Duration {
return r.Limiter.Reserve().Delay()
}
func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
return 0
}
func (r *BucketRateLimiter) Forget(item interface{}) {
}
// ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit;
// dealing with max failures and expiration is up to the caller
type ItemExponentialFailureRateLimiter struct {
failuresLock sync.Mutex
failures map[interface{}]int
baseDelay time.Duration
maxDelay time.Duration
}
var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
return &ItemExponentialFailureRateLimiter{
failures: map[interface{}]int{},
baseDelay: baseDelay,
maxDelay: maxDelay,
}
}
func DefaultItemBasedRateLimiter() RateLimiter {
return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second)
}
func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
exp := r.failures[item]
r.failures[item] = r.failures[item] + 1
// The backoff is capped such that 'calculated' value never overflows.
backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp))
if backoff > math.MaxInt64 {
return r.maxDelay
}
calculated := time.Duration(backoff)
if calculated > r.maxDelay {
return r.maxDelay
}
return calculated
}
func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
type ItemFastSlowRateLimiter struct {
failuresLock sync.Mutex
failures map[interface{}]int
maxFastAttempts int
fastDelay time.Duration
slowDelay time.Duration
}
var _ RateLimiter = &ItemFastSlowRateLimiter{}
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
return &ItemFastSlowRateLimiter{
failures: map[interface{}]int{},
fastDelay: fastDelay,
slowDelay: slowDelay,
maxFastAttempts: maxFastAttempts,
}
}
func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
r.failures[item] = r.failures[item] + 1
if r.failures[item] <= r.maxFastAttempts {
return r.fastDelay
}
return r.slowDelay
}
func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
// MaxOfRateLimiter calls every RateLimiter and returns the worst case response
// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
// were separately delayed a longer time.
type MaxOfRateLimiter struct {
limiters []RateLimiter
}
func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
ret := time.Duration(0)
for _, limiter := range r.limiters {
curr := limiter.When(item)
if curr > ret {
ret = curr
}
}
return ret
}
func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter {
return &MaxOfRateLimiter{limiters: limiters}
}
func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
ret := 0
for _, limiter := range r.limiters {
curr := limiter.NumRequeues(item)
if curr > ret {
ret = curr
}
}
return ret
}
func (r *MaxOfRateLimiter) Forget(item interface{}) {
for _, limiter := range r.limiters {
limiter.Forget(item)
}
}
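A short sketch exercising the per-item exponential limiter in isolation, matching the delays asserted in the tests that follow (the k8s.io/client-go/util/workqueue import path is assumed):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Per-item exponential backoff: 1ms, 2ms, 4ms, ... capped at 1s.
	rl := workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond, time.Second)
	for i := 0; i < 5; i++ {
		fmt.Println(rl.When("object-a")) // 1ms, 2ms, 4ms, 8ms, 16ms
	}
	fmt.Println(rl.NumRequeues("object-a")) // 5
	rl.Forget("object-a")                   // stop tracking the item
	fmt.Println(rl.When("object-a"))        // back to 1ms
}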

View File

@ -1,184 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"testing"
"time"
)
func TestItemExponentialFailureRateLimiter(t *testing.T) {
limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
if e, a := 1*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 2*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 4*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 8*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 16*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5, limiter.NumRequeues("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 1*time.Millisecond, limiter.When("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 2*time.Millisecond, limiter.When("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 2, limiter.NumRequeues("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
limiter.Forget("one")
if e, a := 0, limiter.NumRequeues("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 1*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
}
func TestItemExponentialFailureRateLimiterOverFlow(t *testing.T) {
limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1000*time.Second)
for i := 0; i < 5; i++ {
limiter.When("one")
}
if e, a := 32*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
for i := 0; i < 1000; i++ {
limiter.When("overflow1")
}
if e, a := 1000*time.Second, limiter.When("overflow1"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
limiter = NewItemExponentialFailureRateLimiter(1*time.Minute, 1000*time.Hour)
for i := 0; i < 2; i++ {
limiter.When("two")
}
if e, a := 4*time.Minute, limiter.When("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
for i := 0; i < 1000; i++ {
limiter.When("overflow2")
}
if e, a := 1000*time.Hour, limiter.When("overflow2"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
}
func TestItemFastSlowRateLimiter(t *testing.T) {
limiter := NewItemFastSlowRateLimiter(5*time.Millisecond, 10*time.Second, 3)
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 10*time.Second, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 10*time.Second, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5, limiter.NumRequeues("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 2, limiter.NumRequeues("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
limiter.Forget("one")
if e, a := 0, limiter.NumRequeues("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
}
func TestMaxOfRateLimiter(t *testing.T) {
limiter := NewMaxOfRateLimiter(
NewItemFastSlowRateLimiter(5*time.Millisecond, 3*time.Second, 3),
NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second),
)
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 3*time.Second, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 3*time.Second, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5, limiter.NumRequeues("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 2, limiter.NumRequeues("two"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
limiter.Forget("one")
if e, a := 0, limiter.NumRequeues("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
}

View File

@ -1,255 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"container/heap"
"time"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
type DelayingInterface interface {
Interface
// AddAfter adds an item to the workqueue after the indicated duration has passed
AddAfter(item interface{}, duration time.Duration)
}
// NewDelayingQueue constructs a new workqueue with delayed queuing ability
func NewDelayingQueue() DelayingInterface {
return newDelayingQueue(clock.RealClock{}, "")
}
func NewNamedDelayingQueue(name string) DelayingInterface {
return newDelayingQueue(clock.RealClock{}, name)
}
func newDelayingQueue(clock clock.Clock, name string) DelayingInterface {
ret := &delayingType{
Interface: NewNamed(name),
clock: clock,
heartbeat: clock.NewTicker(maxWait),
stopCh: make(chan struct{}),
waitingForAddCh: make(chan *waitFor, 1000),
metrics: newRetryMetrics(name),
}
go ret.waitingLoop()
return ret
}
// delayingType wraps an Interface and provides delayed re-enqueuing
type delayingType struct {
Interface
// clock tracks time for delayed firing
clock clock.Clock
// stopCh lets us signal a shutdown to the waiting loop
stopCh chan struct{}
// heartbeat ensures we wait no more than maxWait before firing
heartbeat clock.Ticker
// waitingForAddCh is a buffered channel that feeds waitingForAdd
waitingForAddCh chan *waitFor
// metrics counts the number of retries
metrics retryMetrics
}
// waitFor holds the data to add and the time it should be added
type waitFor struct {
data t
readyAt time.Time
// index in the priority queue (heap)
index int
}
// waitForPriorityQueue implements a priority queue for waitFor items.
//
// waitForPriorityQueue implements heap.Interface. The item occurring next in
// time (i.e., the item with the smallest readyAt) is at the root (index 0).
// Peek returns this minimum item at index 0. Pop returns the minimum item after
// it has been removed from the queue and placed at index Len()-1 by
// container/heap. Push adds an item at index Len(), and container/heap
// percolates it into the correct location.
type waitForPriorityQueue []*waitFor
func (pq waitForPriorityQueue) Len() int {
return len(pq)
}
func (pq waitForPriorityQueue) Less(i, j int) bool {
return pq[i].readyAt.Before(pq[j].readyAt)
}
func (pq waitForPriorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
}
// Push adds an item to the queue. Push should not be called directly; instead,
// use `heap.Push`.
func (pq *waitForPriorityQueue) Push(x interface{}) {
n := len(*pq)
item := x.(*waitFor)
item.index = n
*pq = append(*pq, item)
}
// Pop removes an item from the queue. Pop should not be called directly;
// instead, use `heap.Pop`.
func (pq *waitForPriorityQueue) Pop() interface{} {
n := len(*pq)
item := (*pq)[n-1]
item.index = -1
*pq = (*pq)[0:(n - 1)]
return item
}
// Peek returns the item at the beginning of the queue, without removing the
// item or otherwise mutating the queue. It is safe to call directly.
func (pq waitForPriorityQueue) Peek() interface{} {
return pq[0]
}
// ShutDown gives a way to shut off this queue
func (q *delayingType) ShutDown() {
q.Interface.ShutDown()
close(q.stopCh)
q.heartbeat.Stop()
}
// AddAfter adds the given item to the work queue after the given delay
func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
// don't add if we're already shutting down
if q.ShuttingDown() {
return
}
q.metrics.retry()
// immediately add things with no delay
if duration <= 0 {
q.Add(item)
return
}
select {
case <-q.stopCh:
// unblock if ShutDown() is called
case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
}
}
// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
// expired item sitting for more than 10 seconds.
const maxWait = 10 * time.Second
// waitingLoop runs until the workqueue is shut down and keeps a check on the list of items to be added.
func (q *delayingType) waitingLoop() {
defer utilruntime.HandleCrash()
// Make a placeholder channel to use when there are no items in our list
never := make(<-chan time.Time)
waitingForQueue := &waitForPriorityQueue{}
heap.Init(waitingForQueue)
waitingEntryByData := map[t]*waitFor{}
for {
if q.Interface.ShuttingDown() {
return
}
now := q.clock.Now()
// Add ready entries
for waitingForQueue.Len() > 0 {
entry := waitingForQueue.Peek().(*waitFor)
if entry.readyAt.After(now) {
break
}
entry = heap.Pop(waitingForQueue).(*waitFor)
q.Add(entry.data)
delete(waitingEntryByData, entry.data)
}
// Set up a wait for the first item's readyAt (if one exists)
nextReadyAt := never
if waitingForQueue.Len() > 0 {
entry := waitingForQueue.Peek().(*waitFor)
nextReadyAt = q.clock.After(entry.readyAt.Sub(now))
}
select {
case <-q.stopCh:
return
case <-q.heartbeat.C():
// continue the loop, which will add ready items
case <-nextReadyAt:
// continue the loop, which will add ready items
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
drained := false
for !drained {
select {
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
default:
drained = true
}
}
}
}
}
// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) {
// if the entry already exists, update the time only if it would cause the item to be queued sooner
existing, exists := knownEntries[entry.data]
if exists {
if existing.readyAt.After(entry.readyAt) {
existing.readyAt = entry.readyAt
heap.Fix(q, existing.index)
}
return
}
heap.Push(q, entry)
knownEntries[entry.data] = entry
}
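A minimal sketch of the AddAfter flow with the real clock (the k8s.io/client-go/util/workqueue import path is assumed); an item only becomes visible to Get once its readyAt has passed:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewDelayingQueue()
	defer q.ShutDown()

	q.AddAfter("retry-me", 200*time.Millisecond)
	fmt.Println(q.Len()) // usually 0: the item is still parked in the waiting heap

	item, shutdown := q.Get() // blocks until the delay has elapsed
	if !shutdown {
		fmt.Println(item) // "retry-me", roughly 200ms after AddAfter
		q.Done(item)
	}
}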

View File

@ -1,255 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"fmt"
"math/rand"
"reflect"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
)
func TestSimpleQueue(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now())
q := newDelayingQueue(fakeClock, "")
first := "foo"
q.AddAfter(first, 50*time.Millisecond)
if err := waitForWaitingQueueToFill(q); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if q.Len() != 0 {
t.Errorf("should not have added")
}
fakeClock.Step(60 * time.Millisecond)
if err := waitForAdded(q, 1); err != nil {
t.Errorf("should have added")
}
item, _ := q.Get()
q.Done(item)
// step past the next heartbeat
fakeClock.Step(10 * time.Second)
err := wait.Poll(1*time.Millisecond, 30*time.Millisecond, func() (done bool, err error) {
if q.Len() > 0 {
return false, fmt.Errorf("added to queue")
}
return false, nil
})
if err != wait.ErrWaitTimeout {
t.Errorf("expected timeout, got: %v", err)
}
if q.Len() != 0 {
t.Errorf("should not have added")
}
}
func TestDeduping(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now())
q := newDelayingQueue(fakeClock, "")
first := "foo"
q.AddAfter(first, 50*time.Millisecond)
if err := waitForWaitingQueueToFill(q); err != nil {
t.Fatalf("unexpected err: %v", err)
}
q.AddAfter(first, 70*time.Millisecond)
if err := waitForWaitingQueueToFill(q); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if q.Len() != 0 {
t.Errorf("should not have added")
}
// step past the first block, we should receive now
fakeClock.Step(60 * time.Millisecond)
if err := waitForAdded(q, 1); err != nil {
t.Errorf("should have added")
}
item, _ := q.Get()
q.Done(item)
// step past the second add
fakeClock.Step(20 * time.Millisecond)
if q.Len() != 0 {
t.Errorf("should not have added")
}
// test again, but this time the earlier should override
q.AddAfter(first, 50*time.Millisecond)
q.AddAfter(first, 30*time.Millisecond)
if err := waitForWaitingQueueToFill(q); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if q.Len() != 0 {
t.Errorf("should not have added")
}
fakeClock.Step(40 * time.Millisecond)
if err := waitForAdded(q, 1); err != nil {
t.Errorf("should have added")
}
item, _ = q.Get()
q.Done(item)
// step past the second add
fakeClock.Step(20 * time.Millisecond)
if q.Len() != 0 {
t.Errorf("should not have added")
}
if q.Len() != 0 {
t.Errorf("should not have added")
}
}
func TestAddTwoFireEarly(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now())
q := newDelayingQueue(fakeClock, "")
first := "foo"
second := "bar"
third := "baz"
q.AddAfter(first, 1*time.Second)
q.AddAfter(second, 50*time.Millisecond)
if err := waitForWaitingQueueToFill(q); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if q.Len() != 0 {
t.Errorf("should not have added")
}
fakeClock.Step(60 * time.Millisecond)
if err := waitForAdded(q, 1); err != nil {
t.Fatalf("unexpected err: %v", err)
}
item, _ := q.Get()
if !reflect.DeepEqual(item, second) {
t.Errorf("expected %v, got %v", second, item)
}
q.AddAfter(third, 2*time.Second)
fakeClock.Step(1 * time.Second)
if err := waitForAdded(q, 1); err != nil {
t.Fatalf("unexpected err: %v", err)
}
item, _ = q.Get()
if !reflect.DeepEqual(item, first) {
t.Errorf("expected %v, got %v", first, item)
}
fakeClock.Step(2 * time.Second)
if err := waitForAdded(q, 1); err != nil {
t.Fatalf("unexpected err: %v", err)
}
item, _ = q.Get()
if !reflect.DeepEqual(item, third) {
t.Errorf("expected %v, got %v", third, item)
}
}
func TestCopyShifting(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now())
q := newDelayingQueue(fakeClock, "")
first := "foo"
second := "bar"
third := "baz"
q.AddAfter(first, 1*time.Second)
q.AddAfter(second, 500*time.Millisecond)
q.AddAfter(third, 250*time.Millisecond)
if err := waitForWaitingQueueToFill(q); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if q.Len() != 0 {
t.Errorf("should not have added")
}
fakeClock.Step(2 * time.Second)
if err := waitForAdded(q, 3); err != nil {
t.Fatalf("unexpected err: %v", err)
}
actualFirst, _ := q.Get()
if !reflect.DeepEqual(actualFirst, third) {
t.Errorf("expected %v, got %v", third, actualFirst)
}
actualSecond, _ := q.Get()
if !reflect.DeepEqual(actualSecond, second) {
t.Errorf("expected %v, got %v", second, actualSecond)
}
actualThird, _ := q.Get()
if !reflect.DeepEqual(actualThird, first) {
t.Errorf("expected %v, got %v", first, actualThird)
}
}
func BenchmarkDelayingQueue_AddAfter(b *testing.B) {
r := rand.New(rand.NewSource(time.Now().Unix()))
fakeClock := clock.NewFakeClock(time.Now())
q := newDelayingQueue(fakeClock, "")
// Add items
for n := 0; n < b.N; n++ {
data := fmt.Sprintf("%d", n)
q.AddAfter(data, time.Duration(r.Int63n(int64(10*time.Minute))))
}
// Exercise item removal as well
fakeClock.Step(11 * time.Minute)
for n := 0; n < b.N; n++ {
_, _ = q.Get()
}
}
func waitForAdded(q DelayingInterface, depth int) error {
return wait.Poll(1*time.Millisecond, 10*time.Second, func() (done bool, err error) {
if q.Len() == depth {
return true, nil
}
return false, nil
})
}
func waitForWaitingQueueToFill(q DelayingInterface) error {
return wait.Poll(1*time.Millisecond, 10*time.Second, func() (done bool, err error) {
if len(q.(*delayingType).waitingForAddCh) == 0 {
return true, nil
}
return false, nil
})
}

View File

@ -1,26 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package workqueue provides a simple queue that supports the following
// features:
// * Fair: items processed in the order in which they are added.
// * Stingy: a single item will not be processed multiple times concurrently,
// and if an item is added multiple times before it can be processed, it
// will only be processed once.
// * Multiple consumers and producers. In particular, it is allowed for an
// item to be reenqueued while it is being processed.
// * Shutdown notifications.
package workqueue
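A brief producer/consumer sketch of those properties (the k8s.io/client-go/util/workqueue import path is assumed): duplicate adds of a still-queued key collapse into one, and ShutDown lets the worker drain before Get reports shutdown:

package main

import (
	"fmt"
	"sync"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			fmt.Println("processing", item)
			q.Done(item) // mark the item finished so it can be re-added later
		}
	}()

	for _, key := range []string{"a", "b", "a"} {
		q.Add(key)
	}
	q.ShutDown() // the worker drains remaining items, then Get returns shutdown=true
	wg.Wait()
}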

View File

@ -1,258 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
type queueMetrics interface {
add(item t)
get(item t)
done(item t)
updateUnfinishedWork()
}
// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
Inc()
Dec()
}
// SettableGaugeMetric represents a single numerical value that can arbitrarily go up
// and down. (Separate from GaugeMetric to preserve backwards compatibility.)
type SettableGaugeMetric interface {
Set(float64)
}
// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
Inc()
}
// SummaryMetric captures individual observations.
type SummaryMetric interface {
Observe(float64)
}
type noopMetric struct{}
func (noopMetric) Inc() {}
func (noopMetric) Dec() {}
func (noopMetric) Set(float64) {}
func (noopMetric) Observe(float64) {}
// defaultQueueMetrics expects the caller to lock before setting any metrics.
type defaultQueueMetrics struct {
clock clock.Clock
// current depth of a workqueue
depth GaugeMetric
// total number of adds handled by a workqueue
adds CounterMetric
// how long an item stays in a workqueue
latency SummaryMetric
// how long processing an item from a workqueue takes
workDuration SummaryMetric
addTimes map[t]time.Time
processingStartTimes map[t]time.Time
// how long have current threads been working?
unfinishedWorkSeconds SettableGaugeMetric
longestRunningProcessor SettableGaugeMetric
}
func (m *defaultQueueMetrics) add(item t) {
if m == nil {
return
}
m.adds.Inc()
m.depth.Inc()
if _, exists := m.addTimes[item]; !exists {
m.addTimes[item] = m.clock.Now()
}
}
func (m *defaultQueueMetrics) get(item t) {
if m == nil {
return
}
m.depth.Dec()
m.processingStartTimes[item] = m.clock.Now()
if startTime, exists := m.addTimes[item]; exists {
m.latency.Observe(m.sinceInMicroseconds(startTime))
delete(m.addTimes, item)
}
}
func (m *defaultQueueMetrics) done(item t) {
if m == nil {
return
}
if startTime, exists := m.processingStartTimes[item]; exists {
m.workDuration.Observe(m.sinceInMicroseconds(startTime))
delete(m.processingStartTimes, item)
}
}
func (m *defaultQueueMetrics) updateUnfinishedWork() {
// Note that a summary metric would be better for this, but prometheus
// doesn't seem to have non-hacky ways to reset the summary metrics.
var total float64
var oldest float64
for _, t := range m.processingStartTimes {
age := m.sinceInMicroseconds(t)
total += age
if age > oldest {
oldest = age
}
}
// Convert to seconds; microseconds is unhelpfully granular for this.
total /= 1000000
m.unfinishedWorkSeconds.Set(total)
m.longestRunningProcessor.Set(oldest) // in microseconds.
}
type noMetrics struct{}
func (noMetrics) add(item t) {}
func (noMetrics) get(item t) {}
func (noMetrics) done(item t) {}
func (noMetrics) updateUnfinishedWork() {}
// Gets the time since the specified start in microseconds.
func (m *defaultQueueMetrics) sinceInMicroseconds(start time.Time) float64 {
return float64(m.clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}
type retryMetrics interface {
retry()
}
type defaultRetryMetrics struct {
retries CounterMetric
}
func (m *defaultRetryMetrics) retry() {
if m == nil {
return
}
m.retries.Inc()
}
// MetricsProvider generates various metrics used by the queue.
type MetricsProvider interface {
NewDepthMetric(name string) GaugeMetric
NewAddsMetric(name string) CounterMetric
NewLatencyMetric(name string) SummaryMetric
NewWorkDurationMetric(name string) SummaryMetric
NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
NewLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric
NewRetriesMetric(name string) CounterMetric
}
type noopMetricsProvider struct{}
func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLatencyMetric(name string) SummaryMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
return noopMetric{}
}
var globalMetricsFactory = queueMetricsFactory{
metricsProvider: noopMetricsProvider{},
}
type queueMetricsFactory struct {
metricsProvider MetricsProvider
onlyOnce sync.Once
}
func (f *queueMetricsFactory) setProvider(mp MetricsProvider) {
f.onlyOnce.Do(func() {
f.metricsProvider = mp
})
}
func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics {
mp := f.metricsProvider
if len(name) == 0 || mp == (noopMetricsProvider{}) {
return noMetrics{}
}
return &defaultQueueMetrics{
clock: clock,
depth: mp.NewDepthMetric(name),
adds: mp.NewAddsMetric(name),
latency: mp.NewLatencyMetric(name),
workDuration: mp.NewWorkDurationMetric(name),
unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name),
longestRunningProcessor: mp.NewLongestRunningProcessorMicrosecondsMetric(name),
addTimes: map[t]time.Time{},
processingStartTimes: map[t]time.Time{},
}
}
func newRetryMetrics(name string) retryMetrics {
var ret *defaultRetryMetrics
if len(name) == 0 {
return ret
}
return &defaultRetryMetrics{
retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name),
}
}
// SetProvider sets the metrics provider for all subsequently created work
// queues. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
globalMetricsFactory.setProvider(metricsProvider)
}
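A compact sketch of wiring in a custom provider; the statMetric and provider types below are illustrative, only the adds counter is actually inspected, and the k8s.io/client-go/util/workqueue import path is assumed:

package main

import (
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

// statMetric satisfies GaugeMetric, SettableGaugeMetric, CounterMetric and
// SummaryMetric, so one type can back every constructor in the provider.
type statMetric struct{ v int64 }

func (m *statMetric) Inc()            { atomic.AddInt64(&m.v, 1) }
func (m *statMetric) Dec()            { atomic.AddInt64(&m.v, -1) }
func (m *statMetric) Set(float64)     {}
func (m *statMetric) Observe(float64) {}

type provider struct{ adds statMetric }

func (p *provider) NewDepthMetric(name string) workqueue.GaugeMetric          { return &statMetric{} }
func (p *provider) NewAddsMetric(name string) workqueue.CounterMetric         { return &p.adds }
func (p *provider) NewLatencyMetric(name string) workqueue.SummaryMetric      { return &statMetric{} }
func (p *provider) NewWorkDurationMetric(name string) workqueue.SummaryMetric { return &statMetric{} }
func (p *provider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return &statMetric{}
}
func (p *provider) NewLongestRunningProcessorMicrosecondsMetric(name string) workqueue.SettableGaugeMetric {
	return &statMetric{}
}
func (p *provider) NewRetriesMetric(name string) workqueue.CounterMetric { return &statMetric{} }

func main() {
	p := &provider{}
	workqueue.SetProvider(p) // must run before the first named queue is created

	q := workqueue.NewNamed("example") // unnamed queues skip metrics entirely
	q.Add("foo")
	q.ShutDown()
	fmt.Println(atomic.LoadInt64(&p.adds.v)) // 1
}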

View File

@ -1,293 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
type testMetrics struct {
added, gotten, finished int64
updateCalled chan<- struct{}
}
func (m *testMetrics) add(item t) { m.added++ }
func (m *testMetrics) get(item t) { m.gotten++ }
func (m *testMetrics) done(item t) { m.finished++ }
func (m *testMetrics) updateUnfinishedWork() { m.updateCalled <- struct{}{} }
func TestMetricShutdown(t *testing.T) {
ch := make(chan struct{})
m := &testMetrics{
updateCalled: ch,
}
c := clock.NewFakeClock(time.Now())
q := newQueue(c, m, time.Millisecond)
for !c.HasWaiters() {
// Wait for the goroutine to call NewTicker()
time.Sleep(time.Millisecond)
}
c.Step(time.Millisecond)
<-ch
q.ShutDown()
c.Step(time.Hour)
select {
default:
return
case <-ch:
t.Errorf("Unexpected update after shutdown was called.")
}
}
type testMetric struct {
inc int64
dec int64
set float64
observedValue float64
observedCount int
notifyCh chan<- struct{}
lock sync.Mutex
}
func (m *testMetric) Inc() {
m.lock.Lock()
defer m.lock.Unlock()
m.inc++
m.notify()
}
func (m *testMetric) Dec() {
m.lock.Lock()
defer m.lock.Unlock()
m.dec++
m.notify()
}
func (m *testMetric) Set(f float64) {
m.lock.Lock()
defer m.lock.Unlock()
m.set = f
m.notify()
}
func (m *testMetric) Observe(f float64) {
m.lock.Lock()
defer m.lock.Unlock()
m.observedValue = f
m.observedCount++
m.notify()
}
func (m *testMetric) gaugeValue() float64 {
m.lock.Lock()
defer m.lock.Unlock()
if m.set != 0 {
return m.set
}
return float64(m.inc - m.dec)
}
func (m *testMetric) observationValue() float64 {
m.lock.Lock()
defer m.lock.Unlock()
return m.observedValue
}
func (m *testMetric) observationCount() int {
m.lock.Lock()
defer m.lock.Unlock()
return m.observedCount
}
func (m *testMetric) notify() {
if m.notifyCh != nil {
m.notifyCh <- struct{}{}
}
}
type testMetricsProvider struct {
depth testMetric
adds testMetric
latency testMetric
duration testMetric
unfinished testMetric
longest testMetric
retries testMetric
}
func (m *testMetricsProvider) NewDepthMetric(name string) GaugeMetric {
return &m.depth
}
func (m *testMetricsProvider) NewAddsMetric(name string) CounterMetric {
return &m.adds
}
func (m *testMetricsProvider) NewLatencyMetric(name string) SummaryMetric {
return &m.latency
}
func (m *testMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric {
return &m.duration
}
func (m *testMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
return &m.unfinished
}
func (m *testMetricsProvider) NewLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric {
return &m.longest
}
func (m *testMetricsProvider) NewRetriesMetric(name string) CounterMetric {
return &m.retries
}
func TestSinceInMicroseconds(t *testing.T) {
mp := testMetricsProvider{}
c := clock.NewFakeClock(time.Now())
mf := queueMetricsFactory{metricsProvider: &mp}
m := mf.newQueueMetrics("test", c)
dqm := m.(*defaultQueueMetrics)
for _, i := range []int{1, 50, 100, 500, 1000, 10000, 100000, 1000000} {
n := c.Now()
c.Step(time.Duration(i) * time.Microsecond)
if e, a := float64(i), dqm.sinceInMicroseconds(n); e != a {
t.Errorf("Expected %v, got %v", e, a)
}
}
}
func TestMetrics(t *testing.T) {
mp := testMetricsProvider{}
t0 := time.Unix(0, 0)
c := clock.NewFakeClock(t0)
mf := queueMetricsFactory{metricsProvider: &mp}
m := mf.newQueueMetrics("test", c)
q := newQueue(c, m, time.Millisecond)
defer q.ShutDown()
for !c.HasWaiters() {
// Wait for the goroutine to call NewTicker()
time.Sleep(time.Millisecond)
}
q.Add("foo")
if e, a := 1.0, mp.adds.gaugeValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 1.0, mp.depth.gaugeValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
c.Step(50 * time.Microsecond)
// Start processing
i, _ := q.Get()
if i != "foo" {
t.Errorf("Expected %v, got %v", "foo", i)
}
if e, a := 50.0, mp.latency.observationValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 1, mp.latency.observationCount(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 0.0, mp.depth.gaugeValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
// Add it back while processing; multiple adds of the same item are
// de-duped.
q.Add(i)
q.Add(i)
q.Add(i)
q.Add(i)
q.Add(i)
if e, a := 2.0, mp.adds.gaugeValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
// One thing remains in the queue
if e, a := 1.0, mp.depth.gaugeValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
c.Step(25 * time.Microsecond)
// Finish it up
q.Done(i)
if e, a := 25.0, mp.duration.observationValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 1, mp.duration.observationCount(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
// One thing remains in the queue
if e, a := 1.0, mp.depth.gaugeValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
// It should be back on the queue
i, _ = q.Get()
if i != "foo" {
t.Errorf("Expected %v, got %v", "foo", i)
}
if e, a := 25.0, mp.latency.observationValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 2, mp.latency.observationCount(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
// use a channel to ensure we don't look at the metric before it's
// been set.
ch := make(chan struct{}, 1)
mp.unfinished.notifyCh = ch
c.Step(time.Millisecond)
<-ch
mp.unfinished.notifyCh = nil
if e, a := .001, mp.unfinished.gaugeValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 1000.0, mp.longest.gaugeValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
// Finish that one up
q.Done(i)
if e, a := 1000.0, mp.duration.observationValue(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 2, mp.duration.observationCount(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
}

View File

@ -1,71 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"context"
"sync"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
type DoWorkPieceFunc func(piece int)
// Parallelize is a very simple framework that allows for parallelizing
// N independent pieces of work.
//
// Deprecated: Use ParallelizeUntil instead.
func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) {
ParallelizeUntil(nil, workers, pieces, doWorkPiece)
}
// ParallelizeUntil is a framework that allows for parallelizing N
// independent pieces of work until done or the context is canceled.
func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) {
var stop <-chan struct{}
if ctx != nil {
stop = ctx.Done()
}
toProcess := make(chan int, pieces)
for i := 0; i < pieces; i++ {
toProcess <- i
}
close(toProcess)
if pieces < workers {
workers = pieces
}
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
defer utilruntime.HandleCrash()
defer wg.Done()
for piece := range toProcess {
select {
case <-stop:
return
default:
doWorkPiece(piece)
}
}
}()
}
wg.Wait()
}
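
A small usage sketch, assuming only the signature declared above: ParallelizeUntil fans the pieces out over a bounded number of workers and stops handing out new pieces once the context is canceled. The squaring work is a placeholder.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	const pieces = 100
	results := make([]int, pieces) // one slot per piece, so no locking is needed

	// Fan the 100 pieces out over 8 workers; stops early if ctx is canceled.
	workqueue.ParallelizeUntil(ctx, 8, pieces, func(piece int) {
		results[piece] = piece * piece
	})

	fmt.Println(results[10]) // 100
}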

View File

@ -1,212 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
type Interface interface {
Add(item interface{})
Len() int
Get() (item interface{}, shutdown bool)
Done(item interface{})
ShutDown()
ShuttingDown() bool
}
// New constructs a new work queue (see the package comment).
func New() *Type {
return NewNamed("")
}
func NewNamed(name string) *Type {
rc := clock.RealClock{}
return newQueue(
rc,
globalMetricsFactory.newQueueMetrics(name, rc),
defaultUnfinishedWorkUpdatePeriod,
)
}
func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type {
t := &Type{
clock: c,
dirty: set{},
processing: set{},
cond: sync.NewCond(&sync.Mutex{}),
metrics: metrics,
unfinishedWorkUpdatePeriod: updatePeriod,
}
go t.updateUnfinishedWorkLoop()
return t
}
const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
// Type is a work queue (see the package comment).
type Type struct {
// queue defines the order in which we will work on items. Every
// element of queue should be in the dirty set and not in the
// processing set.
queue []t
// dirty defines all of the items that need to be processed.
dirty set
// Things that are currently being processed are in the processing set.
// These things may be simultaneously in the dirty set. When we finish
// processing something and remove it from this set, we'll check if
// it's in the dirty set, and if so, add it to the queue.
processing set
cond *sync.Cond
shuttingDown bool
metrics queueMetrics
unfinishedWorkUpdatePeriod time.Duration
clock clock.Clock
}
type empty struct{}
type t interface{}
type set map[t]empty
func (s set) has(item t) bool {
_, exists := s[item]
return exists
}
func (s set) insert(item t) {
s[item] = empty{}
}
func (s set) delete(item t) {
delete(s, item)
}
// Add marks item as needing processing.
func (q *Type) Add(item interface{}) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if q.shuttingDown {
return
}
if q.dirty.has(item) {
return
}
q.metrics.add(item)
q.dirty.insert(item)
if q.processing.has(item) {
return
}
q.queue = append(q.queue, item)
q.cond.Signal()
}
// Len returns the current queue length, for informational purposes only. You
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
// value; that can't be synchronized properly.
func (q *Type) Len() int {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return len(q.queue)
}
// Get blocks until it can return an item to be processed. If shutdown is true,
// the caller should end their goroutine. You must call Done with item when you
// have finished processing it.
func (q *Type) Get() (item interface{}, shutdown bool) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
for len(q.queue) == 0 && !q.shuttingDown {
q.cond.Wait()
}
if len(q.queue) == 0 {
// We must be shutting down.
return nil, true
}
item, q.queue = q.queue[0], q.queue[1:]
q.metrics.get(item)
q.processing.insert(item)
q.dirty.delete(item)
return item, false
}
// Done marks item as done processing, and if it has been marked as dirty again
// while it was being processed, it will be re-added to the queue for
// re-processing.
func (q *Type) Done(item interface{}) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.metrics.done(item)
q.processing.delete(item)
if q.dirty.has(item) {
q.queue = append(q.queue, item)
q.cond.Signal()
}
}
// ShutDown will cause q to ignore all new items added to it. As soon as the
// worker goroutines have drained the existing items in the queue, they will be
// instructed to exit.
func (q *Type) ShutDown() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.shuttingDown = true
q.cond.Broadcast()
}
func (q *Type) ShuttingDown() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.shuttingDown
}
func (q *Type) updateUnfinishedWorkLoop() {
t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
defer t.Stop()
for range t.C() {
if !func() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if !q.shuttingDown {
q.metrics.updateUnfinishedWork()
return true
}
return false
}() {
return
}
}
}
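
A minimal producer/consumer sketch against the queue above, following the Get/Done contract spelled out in the comments: every Get is paired with a Done, and workers exit once ShutDown has been called and the queue drains. The integer items and the Println "work" are placeholders.

package main

import (
	"fmt"
	"sync"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()

	var wg sync.WaitGroup
	for w := 0; w < 3; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				item, shutdown := q.Get()
				if shutdown {
					return // queue drained and ShutDown() was called
				}
				fmt.Println("processing", item)
				// Every Get must be paired with a Done; if the item was
				// re-Added while we held it, Done re-queues it.
				q.Done(item)
			}
		}()
	}

	for i := 0; i < 10; i++ {
		q.Add(i) // a duplicate Add of a not-yet-processed item is a no-op
	}
	q.ShutDown() // let the workers drain what is queued, then exit
	wg.Wait()
}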

View File

@ -1,161 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue_test
import (
"sync"
"testing"
"time"
"k8s.io/client-go/util/workqueue"
)
func TestBasic(t *testing.T) {
// If something is seriously wrong this test will never complete.
q := workqueue.New()
// Start producers
const producers = 50
producerWG := sync.WaitGroup{}
producerWG.Add(producers)
for i := 0; i < producers; i++ {
go func(i int) {
defer producerWG.Done()
for j := 0; j < 50; j++ {
q.Add(i)
time.Sleep(time.Millisecond)
}
}(i)
}
// Start consumers
const consumers = 10
consumerWG := sync.WaitGroup{}
consumerWG.Add(consumers)
for i := 0; i < consumers; i++ {
go func(i int) {
defer consumerWG.Done()
for {
item, quit := q.Get()
if item == "added after shutdown!" {
t.Errorf("Got an item added after shutdown.")
}
if quit {
return
}
t.Logf("Worker %v: begin processing %v", i, item)
time.Sleep(3 * time.Millisecond)
t.Logf("Worker %v: done processing %v", i, item)
q.Done(item)
}
}(i)
}
producerWG.Wait()
q.ShutDown()
q.Add("added after shutdown!")
consumerWG.Wait()
}
func TestAddWhileProcessing(t *testing.T) {
q := workqueue.New()
// Start producers
const producers = 50
producerWG := sync.WaitGroup{}
producerWG.Add(producers)
for i := 0; i < producers; i++ {
go func(i int) {
defer producerWG.Done()
q.Add(i)
}(i)
}
// Start consumers
const consumers = 10
consumerWG := sync.WaitGroup{}
consumerWG.Add(consumers)
for i := 0; i < consumers; i++ {
go func(i int) {
defer consumerWG.Done()
// Every worker will re-add every item up to two times.
// This tests the dirty-while-processing case.
counters := map[interface{}]int{}
for {
item, quit := q.Get()
if quit {
return
}
counters[item]++
if counters[item] < 2 {
q.Add(item)
}
q.Done(item)
}
}(i)
}
producerWG.Wait()
q.ShutDown()
consumerWG.Wait()
}
func TestLen(t *testing.T) {
q := workqueue.New()
q.Add("foo")
if e, a := 1, q.Len(); e != a {
t.Errorf("Expected %v, got %v", e, a)
}
q.Add("bar")
if e, a := 2, q.Len(); e != a {
t.Errorf("Expected %v, got %v", e, a)
}
q.Add("foo") // should not increase the queue length.
if e, a := 2, q.Len(); e != a {
t.Errorf("Expected %v, got %v", e, a)
}
}
func TestReinsert(t *testing.T) {
q := workqueue.New()
q.Add("foo")
// Start processing
i, _ := q.Get()
if i != "foo" {
t.Errorf("Expected %v, got %v", "foo", i)
}
// Add it back while processing
q.Add(i)
// Finish it up
q.Done(i)
// It should be back on the queue
i, _ = q.Get()
if i != "foo" {
t.Errorf("Expected %v, got %v", "foo", i)
}
// Finish that one up
q.Done(i)
if a := q.Len(); a != 0 {
t.Errorf("Expected queue to be empty. Has %v items", a)
}
}

View File

@ -1,69 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
// RateLimitingInterface is an interface that rate limits items being added to the queue.
type RateLimitingInterface interface {
DelayingInterface
// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
AddRateLimited(item interface{})
// Forget indicates that an item is finished being retried. Whether the retries ended in a
// permanent failure or in success, the rate limiter stops tracking the item. This only clears
// the `rateLimiter`; you still have to call `Done` on the queue.
Forget(item interface{})
// NumRequeues returns how many times the item has been requeued.
NumRequeues(item interface{}) int
}
// NewRateLimitingQueue constructs a new workqueue with rate-limited queuing ability.
// Remember to call Forget! If you don't, you may end up tracking failures forever.
func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
return &rateLimitingType{
DelayingInterface: NewDelayingQueue(),
rateLimiter: rateLimiter,
}
}
func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface {
return &rateLimitingType{
DelayingInterface: NewNamedDelayingQueue(name),
rateLimiter: rateLimiter,
}
}
// rateLimitingType wraps an Interface and provides rate-limited re-enqueuing
type rateLimitingType struct {
DelayingInterface
rateLimiter RateLimiter
}
// AddRateLimited AddAfter's the item using the delay suggested by the rate limiter
func (q *rateLimitingType) AddRateLimited(item interface{}) {
q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item))
}
func (q *rateLimitingType) NumRequeues(item interface{}) int {
return q.rateLimiter.NumRequeues(item)
}
func (q *rateLimitingType) Forget(item interface{}) {
q.rateLimiter.Forget(item)
}
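
A hedged sketch of the retry loop the comments above describe: AddRateLimited on failure, Forget on success, Done in both paths. NewItemExponentialFailureRateLimiter is the limiter used by the test below; processItem and its failure threshold are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

// processItem is a stand-in for real work; it fails the first few attempts.
func processItem(item interface{}, attempt int) error {
	if attempt < 3 {
		return errors.New("transient failure")
	}
	return nil
}

func main() {
	limiter := workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond, time.Second)
	q := workqueue.NewRateLimitingQueue(limiter)
	defer q.ShutDown()

	q.Add("object-key")

	for {
		item, shutdown := q.Get()
		if shutdown {
			return
		}

		if err := processItem(item, q.NumRequeues(item)); err != nil {
			// Failed: re-add after the back-off suggested by the rate limiter.
			q.AddRateLimited(item)
			q.Done(item)
			continue
		}

		// Succeeded: clear the item's failure history, then finish it.
		q.Forget(item)
		q.Done(item)
		fmt.Println("processed", item)
		return
	}
}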

View File

@ -1,75 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
func TestRateLimitingQueue(t *testing.T) {
limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
queue := NewRateLimitingQueue(limiter).(*rateLimitingType)
fakeClock := clock.NewFakeClock(time.Now())
delayingQueue := &delayingType{
Interface: New(),
clock: fakeClock,
heartbeat: fakeClock.NewTicker(maxWait),
stopCh: make(chan struct{}),
waitingForAddCh: make(chan *waitFor, 1000),
metrics: newRetryMetrics(""),
}
queue.DelayingInterface = delayingQueue
queue.AddRateLimited("one")
waitEntry := <-delayingQueue.waitingForAddCh
if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
t.Errorf("expected %v, got %v", e, a)
}
queue.AddRateLimited("one")
waitEntry = <-delayingQueue.waitingForAddCh
if e, a := 2*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := 2, queue.NumRequeues("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
queue.AddRateLimited("two")
waitEntry = <-delayingQueue.waitingForAddCh
if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
t.Errorf("expected %v, got %v", e, a)
}
queue.AddRateLimited("two")
waitEntry = <-delayingQueue.waitingForAddCh
if e, a := 2*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
t.Errorf("expected %v, got %v", e, a)
}
queue.Forget("one")
if e, a := 0, queue.NumRequeues("one"); e != a {
t.Errorf("expected %v, got %v", e, a)
}
queue.AddRateLimited("one")
waitEntry = <-delayingQueue.waitingForAddCh
if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
t.Errorf("expected %v, got %v", e, a)
}
}