Mirror of https://github.com/ceph/ceph-csi.git (synced 2024-12-18 02:50:30 +00:00)
rebase: update packages in go.mod to latest releases

Updated a few packages in go.mod to the latest available releases.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
parent 0f44c6acb7
commit fb7dc13dfe
go.mod (16 lines changed)
@@ -3,26 +3,26 @@ module github.com/ceph/ceph-csi
 go 1.16
 
 require (
-	github.com/aws/aws-sdk-go v1.38.8
+	github.com/aws/aws-sdk-go v1.38.54
 	github.com/ceph/go-ceph v0.8.0
 	github.com/container-storage-interface/spec v1.3.0
 	github.com/csi-addons/replication-lib-utils v0.2.0
 	github.com/csi-addons/spec v0.1.0
 	github.com/go-logr/logr v0.2.1 // indirect
 	github.com/golang/protobuf v1.5.2
-	github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
+	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/hashicorp/vault/api v1.0.5-0.20200902155336-f9d5ce5a171a
-	github.com/kubernetes-csi/csi-lib-utils v0.7.0
+	github.com/kubernetes-csi/csi-lib-utils v0.9.1
 	github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1
 	github.com/libopenstorage/secrets v0.0.0-20201006135900-af310b01fe47
-	github.com/onsi/ginkgo v1.12.0
-	github.com/onsi/gomega v1.9.0
-	github.com/pborman/uuid v1.2.0
-	github.com/prometheus/client_golang v1.7.1
+	github.com/onsi/ginkgo v1.16.4
+	github.com/onsi/gomega v1.13.0
+	github.com/pborman/uuid v1.2.1
+	github.com/prometheus/client_golang v1.10.0
 	github.com/stretchr/testify v1.7.0
 	golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
-	golang.org/x/sys v0.0.0-20201112073958-5cba982894dd
+	golang.org/x/sys v0.0.0-20210423082822-04245dca01da
 	google.golang.org/grpc v1.36.1
 	k8s.io/api v0.20.6
 	k8s.io/apimachinery v0.20.6
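The rest of this commit is the re-vendored fallout of these version bumps. The exact commands are not recorded here, but an update like this is typically produced with the Go toolchain rather than by editing go.mod by hand, roughly as follows (module list abbreviated):

go get github.com/aws/aws-sdk-go@v1.38.54    # repeated for each bumped module
go mod tidy
go mod vendor                                # regenerates the vendor/ tree shown below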
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go (generated, vendored; 2 lines changed)
@@ -178,7 +178,7 @@ func handleSendError(r *request.Request, err error) {
 var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
 	if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
 		// this may be replaced by an UnmarshalError handler
-		r.Error = awserr.New("UnknownError", "unknown error", nil)
+		r.Error = awserr.New("UnknownError", "unknown error", r.Error)
 	}
 }}
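The only functional change is the third argument to awserr.New: the handler now wraps whatever error is already set on the request instead of discarding it. A standalone sketch, not SDK code, of what that preserves for callers (the underlying error is invented for illustration):

// Sketch only: shows the difference between passing nil and passing the
// prior error as origErr to awserr.New.
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	underlying := errors.New("read: connection reset by peer")

	// Old behaviour: the cause of the failed send was dropped.
	oldStyle := awserr.New("UnknownError", "unknown error", nil)

	// New behaviour: the cause is preserved and can still be inspected.
	newStyle := awserr.New("UnknownError", "unknown error", underlying)

	fmt.Println(oldStyle.OrigErr()) // <nil>
	fmt.Println(newStyle.OrigErr()) // read: connection reset by peer
}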
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored; 358 lines changed)
@ -302,6 +302,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -637,7 +638,19 @@ var awsPartition = partition{
|
||||
"api.fleethub.iot": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"api.mediatailor": service{
|
||||
@ -744,6 +757,7 @@ var awsPartition = partition{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@ -823,6 +837,16 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"apprunner": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"appstream2": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
@ -1313,7 +1337,10 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-south-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@ -1374,6 +1401,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -1778,6 +1806,7 @@ var awsPartition = partition{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@ -1789,6 +1818,7 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@ -2833,8 +2863,11 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
@ -2933,6 +2966,26 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"finspace": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"finspace-api": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"firehose": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -3134,6 +3187,24 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"fips-us-east-1": endpoint{
|
||||
Hostname: "forecast-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-east-2": endpoint{
|
||||
Hostname: "forecast-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"fips-us-west-2": endpoint{
|
||||
Hostname: "forecast-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@ -3149,6 +3220,24 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"fips-us-east-1": endpoint{
|
||||
Hostname: "forecastquery-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-east-2": endpoint{
|
||||
Hostname: "forecastquery-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"fips-us-west-2": endpoint{
|
||||
Hostname: "forecastquery-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@ -3212,6 +3301,8 @@ var awsPartition = partition{
|
||||
"gamelift": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"af-south-1": endpoint{},
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
@ -3219,8 +3310,12 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-south-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"me-south-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@ -3359,9 +3454,17 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"af-south-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"fips-us-east-1": endpoint{
|
||||
Hostname: "groundstation-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-east-2": endpoint{
|
||||
Hostname: "groundstation-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
@ -3375,6 +3478,7 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
"me-south-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
@ -3389,6 +3493,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -3870,6 +3975,7 @@ var awsPartition = partition{
|
||||
"lakeformation": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"af-south-1": endpoint{},
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
@ -3972,6 +4078,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -4024,6 +4131,7 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
@ -4082,6 +4190,14 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"lookoutequipment": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-2": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"lookoutvision": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -4127,6 +4243,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -4463,6 +4580,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -4819,6 +4937,22 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"personalize": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"pinpoint": service{
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
@ -4973,6 +5107,7 @@ var awsPartition = partition{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@ -5000,6 +5135,24 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"fips-us-east-1": endpoint{
|
||||
Hostname: "qldb-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-east-2": endpoint{
|
||||
Hostname: "qldb-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"fips-us-west-2": endpoint{
|
||||
Hostname: "qldb-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@ -5012,6 +5165,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -5318,6 +5472,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -5865,6 +6020,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -5975,6 +6131,7 @@ var awsPartition = partition{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-northeast-3": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
@ -6017,6 +6174,61 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"servicecatalog-appregistry": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"af-south-1": endpoint{},
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-south-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"fips-ca-central-1": endpoint{
|
||||
Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ca-central-1",
|
||||
},
|
||||
},
|
||||
"fips-us-east-1": endpoint{
|
||||
Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-east-2": endpoint{
|
||||
Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"fips-us-west-1": endpoint{
|
||||
Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"fips-us-west-2": endpoint{
|
||||
Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
"me-south-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"servicediscovery": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -6085,6 +6297,24 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"fips-us-east-1": endpoint{
|
||||
Hostname: "session.qldb-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-east-2": endpoint{
|
||||
Hostname: "session.qldb-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"fips-us-west-2": endpoint{
|
||||
Hostname: "session.qldb-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@ -6812,6 +7042,7 @@ var awsPartition = partition{
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-south-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
@ -7799,6 +8030,7 @@ var awscnPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"lambda": service{
|
||||
@ -7842,6 +8074,13 @@ var awscnPartition = partition{
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"mq": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"neptune": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -7866,6 +8105,12 @@ var awscnPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"personalize": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"polly": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -7913,6 +8158,15 @@ var awscnPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"route53resolver": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"runtime.sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -7993,6 +8247,13 @@ var awscnPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"servicecatalog": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"servicediscovery": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -8404,6 +8665,18 @@ var awsusgovPartition = partition{
|
||||
"batch": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"fips-us-gov-east-1": endpoint{
|
||||
Hostname: "batch.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-gov-west-1": endpoint{
|
||||
Hostname: "batch.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
@ -8928,6 +9201,27 @@ var awsusgovPartition = partition{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"fms": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"fips-us-gov-east-1": endpoint{
|
||||
Hostname: "fms-fips.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-gov-west-1": endpoint{
|
||||
Hostname: "fms-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"fsx": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -9660,6 +9954,46 @@ var awsusgovPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"servicecatalog-appregistry": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"fips-us-gov-east-1": endpoint{
|
||||
Hostname: "servicecatalog-appregistry.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-gov-west-1": endpoint{
|
||||
Hostname: "servicecatalog-appregistry.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"servicequotas": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"fips-us-gov-east-1": endpoint{
|
||||
Hostname: "servicequotas.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-gov-west-1": endpoint{
|
||||
Hostname: "servicequotas.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"sms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -10151,6 +10485,18 @@ var awsisoPartition = partition{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"elasticfilesystem": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"fips-us-iso-east-1": endpoint{
|
||||
Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-iso-east-1",
|
||||
},
|
||||
},
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"elasticloadbalancing": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -10179,6 +10525,12 @@ var awsisoPartition = partition{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"firehose": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"glacier": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -10260,6 +10612,12 @@ var awsisoPartition = partition{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ram": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"rds": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go (generated, vendored; 8 lines changed)
@@ -178,14 +178,14 @@ type service struct {
 }
 
 func (s *service) endpointForRegion(region string) (endpoint, bool) {
-	if s.IsRegionalized == boxedFalse {
-		return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
-	}
-
 	if e, ok := s.Endpoints[region]; ok {
 		return e, true
 	}
 
+	if s.IsRegionalized == boxedFalse {
+		return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+	}
+
 	// Unable to find any matching endpoint, return
 	// blank that will be used for generic endpoint creation.
 	return endpoint{}, false
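Nothing is added or removed in this function; the non-regionalized fallback is moved after the lookup in the Endpoints map, so an explicit per-region entry now wins. Callers reach this logic through the public resolver; a small usage sketch with arbitrarily chosen service and region:

// Sketch only: resolving an endpoint through the public API that sits on top
// of endpointForRegion. "ec2" and "eu-north-1" are arbitrary examples.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolved, err := endpoints.DefaultResolver().EndpointFor("ec2", "eu-north-1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved.URL)
}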
vendor/github.com/aws/aws-sdk-go/aws/request/request.go (generated, vendored; 17 lines changed)
@@ -129,12 +129,27 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
 	httpReq, _ := http.NewRequest(method, "", nil)
 
 	var err error
-	httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+	httpReq.URL, err = url.Parse(clientInfo.Endpoint)
 	if err != nil {
 		httpReq.URL = &url.URL{}
 		err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
 	}
 
+	if len(operation.HTTPPath) != 0 {
+		opHTTPPath := operation.HTTPPath
+		var opQueryString string
+		if idx := strings.Index(opHTTPPath, "?"); idx >= 0 {
+			opQueryString = opHTTPPath[idx+1:]
+			opHTTPPath = opHTTPPath[:idx]
+		}
+
+		if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") {
+			opHTTPPath = opHTTPPath[1:]
+		}
+		httpReq.URL.Path += opHTTPPath
+		httpReq.URL.RawQuery = opQueryString
+	}
+
 	r := &Request{
 		Config:     cfg,
 		ClientInfo: clientInfo,
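Previously the operation's HTTPPath was concatenated straight into the string handed to url.Parse; the new code parses only the endpoint and then splits the operation path into path and query parts before appending them. A standalone re-implementation of just that splitting, with made-up endpoint and path values, to show the resulting URL:

// Sketch only: mirrors the new path/query handling in request.New.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func join(endpoint, httpPath string) *url.URL {
	u, _ := url.Parse(endpoint)
	if len(httpPath) != 0 {
		opPath, opQuery := httpPath, ""
		if idx := strings.Index(opPath, "?"); idx >= 0 {
			opQuery = opPath[idx+1:]
			opPath = opPath[:idx]
		}
		if strings.HasSuffix(u.Path, "/") && strings.HasPrefix(opPath, "/") {
			opPath = opPath[1:]
		}
		u.Path += opPath
		u.RawQuery = opQuery
	}
	return u
}

func main() {
	u := join("https://example.amazonaws.com/base/", "/things?versions")
	fmt.Println(u) // https://example.amazonaws.com/base/things?versions
}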
vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go (generated, vendored; 14 lines changed)
@@ -101,13 +101,6 @@ func resolveCredsFromProfile(cfg *aws.Config,
 			sharedCfg.Creds,
 		)
 
-	case sharedCfg.hasSSOConfiguration():
-		creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)
-
-	case len(sharedCfg.CredentialProcess) != 0:
-		// Get credentials from CredentialProcess
-		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
-
 	case len(sharedCfg.CredentialSource) != 0:
 		creds, err = resolveCredsFromSource(cfg, envCfg,
 			sharedCfg, handlers, sessOpts,
@@ -123,6 +116,13 @@ func resolveCredsFromProfile(cfg *aws.Config,
 			sharedCfg.RoleSessionName,
 		)
 
+	case sharedCfg.hasSSOConfiguration():
+		creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)
+
+	case len(sharedCfg.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
 	default:
 		// Fallback to default credentials provider, include mock errors for
 		// the credential chain so user can identify why credentials failed to
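The SSO and credential_process cases are not removed; they move later in the switch, so other explicit credential settings in a profile are evaluated first. This switch runs when a named profile is resolved from shared config, for example (the profile name is a placeholder):

// Sketch only: loading a named profile from shared config, which is what
// drives resolveCredsFromProfile above. "my-profile" is a placeholder.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile:           "my-profile",
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess // sess now carries a credentials provider chosen by the switch above
}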
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go (generated, vendored; 5 lines changed)
@@ -401,7 +401,6 @@ func (cfg *sharedConfig) validateCredentialType() error {
 		len(cfg.CredentialSource) != 0,
 		len(cfg.CredentialProcess) != 0,
 		len(cfg.WebIdentityTokenFile) != 0,
-		cfg.hasSSOConfiguration(),
 	) {
 		return ErrSharedConfigSourceCollision
 	}
@@ -459,6 +458,10 @@ func (cfg *sharedConfig) clearCredentialOptions() {
 	cfg.CredentialProcess = ""
 	cfg.WebIdentityTokenFile = ""
 	cfg.Creds = credentials.Value{}
+	cfg.SSOAccountID = ""
+	cfg.SSORegion = ""
+	cfg.SSORoleName = ""
+	cfg.SSOStartURL = ""
 }
 
 func (cfg *sharedConfig) clearAssumeRoleOptions() {
vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored; 2 lines changed)
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.38.8"
+const SDKVersion = "1.38.54"
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go (generated, vendored; 2 lines changed)
@@ -308,6 +308,8 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
 	if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
 		attr := xml.Attr{Name: xname, Value: str}
 		current.Attr = append(current.Attr, attr)
+	} else if len(xname.Local) == 0 {
+		current.Text = str
 	} else { // regular text node
 		current.AddChild(&XMLNode{Name: xname, Text: str})
 	}
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go (generated, vendored; 22 lines changed)
@@ -18,6 +18,14 @@ type XMLNode struct {
 	parent *XMLNode
 }
 
+// textEncoder is a string type alias that implemnts the TextMarshaler interface.
+// This alias type is used to ensure that the line feed (\n) (U+000A) is escaped.
+type textEncoder string
+
+func (t textEncoder) MarshalText() ([]byte, error) {
+	return []byte(t), nil
+}
+
 // NewXMLElement returns a pointer to a new XMLNode initialized to default values.
 func NewXMLElement(name xml.Name) *XMLNode {
 	return &XMLNode{
@@ -130,11 +138,16 @@ func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
 		attrs = sortedAttrs
 	}
 
-	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs})
+	startElement := xml.StartElement{Name: node.Name, Attr: attrs}
 
 	if node.Text != "" {
-		e.EncodeToken(xml.CharData([]byte(node.Text)))
-	} else if sorted {
+		e.EncodeElement(textEncoder(node.Text), startElement)
+		return e.Flush()
+	}
+
+	e.EncodeToken(startElement)
+
+	if sorted {
 		sortedNames := []string{}
 		for k := range node.Children {
 			sortedNames = append(sortedNames, k)
@@ -154,6 +167,7 @@ func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
 		}
 	}
 
-	e.EncodeToken(xml.EndElement{Name: node.Name})
+	e.EncodeToken(startElement.End())
+
 	return e.Flush()
 }
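The vendored comment states the intent: text emitted through the TextMarshaler path is escaped, including the line feed, which the old raw CharData token path did not guarantee. A standalone sketch, not SDK code, contrasting the two encoding paths:

// Sketch only: compare how encoding/xml writes a value containing "\n" when
// emitted as a raw CharData token versus via a TextMarshaler (textEncoder
// here mirrors the vendored alias type).
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
)

type textEncoder string

func (t textEncoder) MarshalText() ([]byte, error) { return []byte(t), nil }

func main() {
	value := "line1\nline2"
	start := xml.StartElement{Name: xml.Name{Local: "Value"}}

	var tokenPath bytes.Buffer
	e := xml.NewEncoder(&tokenPath)
	e.EncodeToken(start)
	e.EncodeToken(xml.CharData(value)) // old StructToXML path
	e.EncodeToken(start.End())
	e.Flush()

	var marshalerPath bytes.Buffer
	e = xml.NewEncoder(&marshalerPath)
	e.EncodeElement(textEncoder(value), start) // new StructToXML path
	e.Flush()

	// The two outputs differ in how the line feed is written.
	fmt.Printf("%q\n%q\n", tokenPath.String(), marshalerPath.String())
}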
vendor/github.com/aws/aws-sdk-go/service/ec2/api.go (generated, vendored; 2220 lines changed): file diff suppressed because it is too large
vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go (generated, vendored; 2 lines changed)
@@ -982,7 +982,7 @@ func (c *EC2) WaitUntilSecurityGroupExistsWithContext(ctx aws.Context, input *De
 		{
 			State:    request.RetryWaiterState,
 			Matcher:  request.ErrorWaiterMatch,
-			Expected: "InvalidGroupNotFound",
+			Expected: "InvalidGroup.NotFound",
 		},
 	},
 	Logger: c.Config.Logger,
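The corrected error code matters to callers that create a security group and immediately wait for it to become describable; with the old string the waiter never matched the error the API actually returns. A usage sketch (the group ID is a placeholder):

// Sketch only: waiting for a just-created security group. With the fixed
// matcher, an InvalidGroup.NotFound response is treated as "retry" instead of
// failing the wait immediately.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	err := svc.WaitUntilSecurityGroupExists(&ec2.DescribeSecurityGroupsInput{
		GroupIds: []*string{aws.String("sg-0123456789abcdef0")},
	})
	if err != nil {
		log.Fatal(err)
	}
}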
vendor/github.com/aws/aws-sdk-go/service/sts/api.go (generated, vendored; 392 lines changed)
@ -65,34 +65,6 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
||||
// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// You cannot use AWS account root user credentials to call AssumeRole. You
|
||||
// must use credentials for an IAM user or an IAM role to call AssumeRole.
|
||||
//
|
||||
// For cross-account access, imagine that you own multiple accounts and need
|
||||
// to access resources in each account. You could create long-term credentials
|
||||
// in each account to access those resources. However, managing all those credentials
|
||||
// and remembering which one can access which account can be time consuming.
|
||||
// Instead, you can create one set of long-term credentials in one account.
|
||||
// Then use temporary security credentials to access all the other accounts
|
||||
// by assuming roles in those accounts. For more information about roles, see
|
||||
// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Session Duration
|
||||
//
|
||||
// By default, the temporary security credentials created by AssumeRole last
|
||||
// for one hour. However, you can use the optional DurationSeconds parameter
|
||||
// to specify the duration of your session. You can provide a value from 900
|
||||
// seconds (15 minutes) up to the maximum session duration setting for the role.
|
||||
// This setting can have a value from 1 hour to 12 hours. To learn how to view
|
||||
// the maximum value for your role, see View the Maximum Session Duration Setting
|
||||
// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
|
||||
// in the IAM User Guide. The maximum session duration limit applies when you
|
||||
// use the AssumeRole* API operations or the assume-role* CLI commands. However
|
||||
// the limit does not apply when you use those operations to create a console
|
||||
// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Permissions
|
||||
//
|
||||
// The temporary security credentials created by AssumeRole can be used to make
|
||||
@ -308,6 +280,15 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
||||
// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining)
|
||||
// limits your AWS CLI or AWS API role session to a maximum of one hour. When
|
||||
// you use the AssumeRole API operation to assume a role, you can specify the
|
||||
// duration of your role session with the DurationSeconds parameter. You can
|
||||
// specify a parameter value of up to 43200 seconds (12 hours), depending on
|
||||
// the maximum session duration setting for your role. However, if you assume
|
||||
// a role using role chaining and provide a DurationSeconds parameter value
|
||||
// greater than one hour, the operation fails.
|
||||
//
|
||||
// Permissions
|
||||
//
|
||||
// The temporary security credentials created by AssumeRoleWithSAML can be used
|
||||
@ -353,9 +334,9 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
//
|
||||
// You can pass a session tag with the same key as a tag that is attached to
|
||||
// the role. When you do, session tags override the role's tags with the same
|
||||
@ -590,9 +571,9 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
//
|
||||
// You can pass a session tag with the same key as a tag that is attached to
|
||||
// the role. When you do, the session tag overrides the role tag with the same
|
||||
@ -619,7 +600,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
||||
//
|
||||
// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
|
||||
// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
|
||||
// of the provided Web Identity Token. We recommend that you avoid using any
|
||||
// of the provided web identity token. We recommend that you avoid using any
|
||||
// personally identifiable information (PII) in this field. For example, you
|
||||
// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
|
||||
// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
|
||||
@ -1136,6 +1117,70 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
|
||||
// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// You can create a mobile-based or browser-based app that can authenticate
|
||||
// users using a web identity provider like Login with Amazon, Facebook, Google,
|
||||
// or an OpenID Connect-compatible identity provider. In this case, we recommend
|
||||
// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
|
||||
// For more information, see Federation Through a Web-based Identity Provider
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// You can also call GetFederationToken using the security credentials of an
|
||||
// AWS account root user, but we do not recommend it. Instead, we recommend
|
||||
// that you create an IAM user for the purpose of the proxy application. Then
|
||||
// attach a policy to the IAM user that limits federated users to only the actions
|
||||
// and resources that they need to access. For more information, see IAM Best
|
||||
// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Session duration
|
||||
//
|
||||
// The temporary credentials are valid for the specified duration, from 900
|
||||
// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
|
||||
// session duration is 43,200 seconds (12 hours). Temporary credentials that
|
||||
// are obtained by using AWS account root user credentials have a maximum duration
|
||||
// of 3,600 seconds (1 hour).
|
||||
//
|
||||
// Permissions
|
||||
//
|
||||
// You can use the temporary credentials created by GetFederationToken in any
|
||||
// AWS service except the following:
|
||||
//
|
||||
// * You cannot call any IAM operations using the AWS CLI or the AWS API.
|
||||
//
|
||||
// * You cannot call any STS operations except GetCallerIdentity.
|
||||
//
|
||||
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// to this operation. You can pass a single JSON policy document to use as an
|
||||
// inline session policy. You can also specify up to 10 managed policies to
|
||||
// use as managed session policies. The plain text that you use for both inline
|
||||
// and managed session policies can't exceed 2,048 characters.
|
||||
//
|
||||
// Though the session policy parameters are optional, if you do not pass a policy,
|
||||
// then the resulting federated user session has no permissions. When you pass
|
||||
// session policies, the session permissions are the intersection of the IAM
|
||||
// user policies and the session policies that you pass. This gives you a way
|
||||
// to further restrict the permissions for a federated user. You cannot use
|
||||
// session policies to grant more permissions than those that are defined in
|
||||
// the permissions policy of the IAM user. For more information, see Session
|
||||
// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// in the IAM User Guide. For information about using GetFederationToken to
|
||||
// create temporary security credentials, see GetFederationToken—Federation
|
||||
// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
|
||||
//
|
||||
// You can use the credentials to access a resource that has a resource-based
|
||||
// policy. If that policy specifically references the federated user session
|
||||
// in the Principal element of the policy, the session has the permissions allowed
|
||||
// by the policy. These permissions are granted in addition to the permissions
|
||||
// granted by the session policies.
|
||||
//
|
||||
// Tags
|
||||
//
|
||||
// (Optional) You can pass tag key-value pairs to your session. These are called
|
||||
// session tags. For more information about session tags, see Passing Session
|
||||
// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// An administrator must grant you the permissions necessary to pass session
|
||||
// tags. The administrator can also create granular permissions to allow you
|
||||
// to pass only specific session tags. For more information, see Tutorial: Using
|
||||
@ -1338,14 +1383,15 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken
|
||||
type AssumeRoleInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The duration, in seconds, of the role session. The value can range from 900
|
||||
// seconds (15 minutes) up to the maximum session duration setting for the role.
|
||||
// This setting can have a value from 1 hour to 12 hours. If you specify a value
|
||||
// higher than this setting, the operation fails. For example, if you specify
|
||||
// a session duration of 12 hours, but your administrator set the maximum session
|
||||
// duration to 6 hours, your operation fails. To learn how to view the maximum
|
||||
// value for your role, see View the Maximum Session Duration Setting for a
|
||||
// Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
|
||||
// The duration, in seconds, of the role session. The value specified can can
|
||||
// range from 900 seconds (15 minutes) up to the maximum session duration that
|
||||
// is set for the role. The maximum session duration setting can have a value
|
||||
// from 1 hour to 12 hours. If you specify a value higher than this setting
|
||||
// or the administrator setting (whichever is lower), the operation fails. For
|
||||
// example, if you specify a session duration of 12 hours, but your administrator
|
||||
// set the maximum session duration to 6 hours, your operation fails. To learn
|
||||
// how to view the maximum value for your role, see View the Maximum Session
|
||||
// Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// By default, the value is set to 3600 seconds.
|
||||
@ -1387,17 +1433,17 @@ type AssumeRoleInput struct {
|
||||
// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The plain text that you use for both inline and managed session policies
|
||||
// can't exceed 2,048 characters. The JSON policy characters can be any ASCII
|
||||
// character from the space character to the end of the valid character list
|
||||
// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
|
||||
// and carriage return (\u000D) characters.
|
||||
// The plaintext that you use for both inline and managed session policies can't
|
||||
// exceed 2,048 characters. The JSON policy characters can be any ASCII character
|
||||
// from the space character to the end of the valid character list (\u0020 through
|
||||
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
|
||||
// return (\u000D) characters.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
Policy *string `min:"1" type:"string"`
|
||||
|
||||
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
|
||||
@ -1405,16 +1451,16 @@ type AssumeRoleInput struct {
|
||||
// as the role.
|
||||
//
|
||||
// This parameter is optional. You can provide up to 10 managed policy ARNs.
|
||||
// However, the plain text that you use for both inline and managed session
|
||||
// policies can't exceed 2,048 characters. For more information about ARNs,
|
||||
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// However, the plaintext that you use for both inline and managed session policies
|
||||
// can't exceed 2,048 characters. For more information about ARNs, see Amazon
|
||||
// Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// in the AWS General Reference.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
//
|
||||
// Passing policies to this operation returns new temporary credentials. The
|
||||
// resulting session's permissions are the intersection of the role's identity-based
|
||||
@ -1459,22 +1505,41 @@ type AssumeRoleInput struct {
|
||||
// also include underscores or any of the following characters: =,.@-
|
||||
SerialNumber *string `min:"9" type:"string"`
|
||||
|
||||
// The source identity specified by the principal that is calling the AssumeRole
|
||||
// operation.
|
||||
//
|
||||
// You can require users to specify a source identity when they assume a role.
|
||||
// You do this by using the sts:SourceIdentity condition key in a role trust
|
||||
// policy. You can use source identity information in AWS CloudTrail logs to
|
||||
// determine who took actions with a role. You can use the aws:SourceIdentity
|
||||
// condition key to further control access to AWS resources based on the value
|
||||
// of source identity. For more information about using source identity, see
|
||||
// Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The regex used to validate this parameter is a string of characters consisting
|
||||
// of upper- and lower-case alphanumeric characters with no spaces. You can
|
||||
// also include underscores or any of the following characters: =,.@-. You cannot
|
||||
// use a value that begins with the text aws:. This prefix is reserved for AWS
|
||||
// internal use.
|
||||
SourceIdentity *string `min:"2" type:"string"`
|
||||
|
||||
// A list of session tags that you want to pass. Each session tag consists of
|
||||
// a key name and an associated value. For more information about session tags,
|
||||
// see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// This parameter is optional. You can pass up to 50 session tags. The plain
|
||||
// text session tag keys can’t exceed 128 characters, and the values can’t
|
||||
// exceed 256 characters. For these and additional limits, see IAM and STS Character
|
||||
// This parameter is optional. You can pass up to 50 session tags. The plaintext
|
||||
// session tag keys can’t exceed 128 characters, and the values can’t exceed
|
||||
// 256 characters. For these and additional limits, see IAM and STS Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
//
|
||||
// You can pass a session tag with the same key as a tag that is already attached
|
||||
// to the role. When you do, session tags override a role tag with the same
|
||||
@ -1495,9 +1560,10 @@ type AssumeRoleInput struct {
|
||||
Tags []*Tag `type:"list"`
|
||||
|
||||
// The value provided by the MFA device, if the trust policy of the role being
|
||||
// assumed requires MFA (that is, if the policy includes a condition that tests
|
||||
// for MFA). If the role being assumed requires MFA and if the TokenCode value
|
||||
// is missing or expired, the AssumeRole call returns an "access denied" error.
|
||||
// assumed requires MFA. (In other words, if the policy includes a condition
|
||||
// that tests for MFA). If the role being assumed requires MFA and if the TokenCode
|
||||
// value is missing or expired, the AssumeRole call returns an "access denied"
|
||||
// error.
|
||||
//
|
||||
// The format for this parameter, as described by its regex pattern, is a sequence
|
||||
// of six numeric digits.
|
||||
@ -1554,6 +1620,9 @@ func (s *AssumeRoleInput) Validate() error {
|
||||
if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
|
||||
}
|
||||
if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2))
|
||||
}
|
||||
if s.TokenCode != nil && len(*s.TokenCode) < 6 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
|
||||
}
|
||||
@ -1626,6 +1695,12 @@ func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
|
||||
return s
|
||||
}
|
||||
|
||||
// SetSourceIdentity sets the SourceIdentity field's value.
|
||||
func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput {
|
||||
s.SourceIdentity = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetTags sets the Tags field's value.
|
||||
func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput {
|
||||
s.Tags = v
|
||||
@ -1668,6 +1743,23 @@ type AssumeRoleOutput struct {
|
||||
// packed size is greater than 100 percent, which means the policies and tags
|
||||
// exceeded the allowed space.
|
||||
PackedPolicySize *int64 `type:"integer"`
|
||||
|
||||
// The source identity specified by the principal that is calling the AssumeRole
|
||||
// operation.
|
||||
//
|
||||
// You can require users to specify a source identity when they assume a role.
|
||||
// You do this by using the sts:SourceIdentity condition key in a role trust
|
||||
// policy. You can use source identity information in AWS CloudTrail logs to
|
||||
// determine who took actions with a role. You can use the aws:SourceIdentity
|
||||
// condition key to further control access to AWS resources based on the value
|
||||
// of source identity. For more information about using source identity, see
|
||||
// Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The regex used to validate this parameter is a string of characters consisting
|
||||
// of upper- and lower-case alphanumeric characters with no spaces. You can
|
||||
// also include underscores or any of the following characters: =,.@-
|
||||
SourceIdentity *string `min:"2" type:"string"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
@ -1698,6 +1790,12 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
|
||||
return s
|
||||
}
|
||||
|
||||
// SetSourceIdentity sets the SourceIdentity field's value.
|
||||
func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput {
|
||||
s.SourceIdentity = &v
|
||||
return s
|
||||
}
|
||||
|
||||
type AssumeRoleWithSAMLInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@ -1736,17 +1834,17 @@ type AssumeRoleWithSAMLInput struct {
|
||||
// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The plain text that you use for both inline and managed session policies
|
||||
// can't exceed 2,048 characters. The JSON policy characters can be any ASCII
|
||||
// character from the space character to the end of the valid character list
|
||||
// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
|
||||
// and carriage return (\u000D) characters.
|
||||
// The plaintext that you use for both inline and managed session policies can't
|
||||
// exceed 2,048 characters. The JSON policy characters can be any ASCII character
|
||||
// from the space character to the end of the valid character list (\u0020 through
|
||||
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
|
||||
// return (\u000D) characters.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
Policy *string `min:"1" type:"string"`
|
||||
|
||||
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
|
||||
@ -1754,16 +1852,16 @@ type AssumeRoleWithSAMLInput struct {
|
||||
// as the role.
|
||||
//
|
||||
// This parameter is optional. You can provide up to 10 managed policy ARNs.
|
||||
// However, the plain text that you use for both inline and managed session
|
||||
// policies can't exceed 2,048 characters. For more information about ARNs,
|
||||
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// However, the plaintext that you use for both inline and managed session policies
|
||||
// can't exceed 2,048 characters. For more information about ARNs, see Amazon
|
||||
// Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// in the AWS General Reference.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
//
|
||||
// Passing policies to this operation returns new temporary credentials. The
|
||||
// resulting session's permissions are the intersection of the role's identity-based
|
||||
@ -1786,7 +1884,7 @@ type AssumeRoleWithSAMLInput struct {
|
||||
// RoleArn is a required field
|
||||
RoleArn *string `min:"20" type:"string" required:"true"`
|
||||
|
||||
// The base-64 encoded SAML authentication response provided by the IdP.
|
||||
// The base64 encoded SAML authentication response provided by the IdP.
|
||||
//
|
||||
// For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
|
||||
// in the IAM User Guide.
|
||||
@ -1908,10 +2006,17 @@ type AssumeRoleWithSAMLOutput struct {
|
||||
// The value of the Issuer element of the SAML assertion.
|
||||
Issuer *string `type:"string"`
|
||||
|
||||
// A hash value based on the concatenation of the Issuer response value, the
|
||||
// AWS account ID, and the friendly name (the last part of the ARN) of the SAML
|
||||
// provider in IAM. The combination of NameQualifier and Subject can be used
|
||||
// to uniquely identify a federated user.
|
||||
// A hash value based on the concatenation of the following:
|
||||
//
|
||||
// * The Issuer response value.
|
||||
//
|
||||
// * The AWS account ID.
|
||||
//
|
||||
// * The friendly name (the last part of the ARN) of the SAML provider in
|
||||
// IAM.
|
||||
//
|
||||
// The combination of NameQualifier and Subject can be used to uniquely identify
|
||||
// a federated user.
|
||||
//
|
||||
// The following pseudocode shows how the hash value is calculated:
|
||||
//
|
||||
@ -1925,6 +2030,26 @@ type AssumeRoleWithSAMLOutput struct {
|
||||
// exceeded the allowed space.
|
||||
PackedPolicySize *int64 `type:"integer"`
|
||||
|
||||
// The value in the SourceIdentity attribute in the SAML assertion.
|
||||
//
|
||||
// You can require users to set a source identity value when they assume a role.
|
||||
// You do this by using the sts:SourceIdentity condition key in a role trust
|
||||
// policy. That way, actions that are taken with the role are associated with
|
||||
// that user. After the source identity is set, the value cannot be changed.
|
||||
// It is present in the request for all actions that are taken by the role and
|
||||
// persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
|
||||
// sessions. You can configure your SAML identity provider to use an attribute
|
||||
// associated with your users, like user name or email, as the source identity
|
||||
// when calling AssumeRoleWithSAML. You do this by adding an attribute to the
|
||||
// SAML assertion. For more information about using source identity, see Monitor
|
||||
// and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The regex used to validate this parameter is a string of characters consisting
|
||||
// of upper- and lower-case alphanumeric characters with no spaces. You can
|
||||
// also include underscores or any of the following characters: =,.@-
|
||||
SourceIdentity *string `min:"2" type:"string"`
|
||||
|
||||
// The value of the NameID element in the Subject element of the SAML assertion.
|
||||
Subject *string `type:"string"`
|
||||
|
||||
@ -1985,6 +2110,12 @@ func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithS
|
||||
return s
|
||||
}
|
||||
|
||||
// SetSourceIdentity sets the SourceIdentity field's value.
|
||||
func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput {
|
||||
s.SourceIdentity = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetSubject sets the Subject field's value.
|
||||
func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
|
||||
s.Subject = &v
|
||||
@ -2032,17 +2163,17 @@ type AssumeRoleWithWebIdentityInput struct {
|
||||
// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The plain text that you use for both inline and managed session policies
|
||||
// can't exceed 2,048 characters. The JSON policy characters can be any ASCII
|
||||
// character from the space character to the end of the valid character list
|
||||
// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
|
||||
// and carriage return (\u000D) characters.
|
||||
// The plaintext that you use for both inline and managed session policies can't
|
||||
// exceed 2,048 characters. The JSON policy characters can be any ASCII character
|
||||
// from the space character to the end of the valid character list (\u0020 through
|
||||
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
|
||||
// return (\u000D) characters.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
Policy *string `min:"1" type:"string"`
|
||||
|
||||
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
|
||||
@ -2050,16 +2181,16 @@ type AssumeRoleWithWebIdentityInput struct {
|
||||
// as the role.
|
||||
//
|
||||
// This parameter is optional. You can provide up to 10 managed policy ARNs.
|
||||
// However, the plain text that you use for both inline and managed session
|
||||
// policies can't exceed 2,048 characters. For more information about ARNs,
|
||||
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// However, the plaintext that you use for both inline and managed session policies
|
||||
// can't exceed 2,048 characters. For more information about ARNs, see Amazon
|
||||
// Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// in the AWS General Reference.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
//
|
||||
// Passing policies to this operation returns new temporary credentials. The
|
||||
// resulting session's permissions are the intersection of the role's identity-based
|
||||
@ -2242,6 +2373,29 @@ type AssumeRoleWithWebIdentityOutput struct {
|
||||
// in the AssumeRoleWithWebIdentity request.
|
||||
Provider *string `type:"string"`
|
||||
|
||||
// The value of the source identity that is returned in the JSON web token (JWT)
|
||||
// from the identity provider.
|
||||
//
|
||||
// You can require users to set a source identity value when they assume a role.
|
||||
// You do this by using the sts:SourceIdentity condition key in a role trust
|
||||
// policy. That way, actions that are taken with the role are associated with
|
||||
// that user. After the source identity is set, the value cannot be changed.
|
||||
// It is present in the request for all actions that are taken by the role and
|
||||
// persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
|
||||
// sessions. You can configure your identity provider to use an attribute associated
|
||||
// with your users, like user name or email, as the source identity when calling
|
||||
// AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web
|
||||
// token. To learn more about OIDC tokens and claims, see Using Tokens with
|
||||
// User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html)
|
||||
// in the Amazon Cognito Developer Guide. For more information about using source
|
||||
// identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The regex used to validate this parameter is a string of characters consisting
|
||||
// of upper- and lower-case alphanumeric characters with no spaces. You can
|
||||
// also include underscores or any of the following characters: =,.@-
|
||||
SourceIdentity *string `min:"2" type:"string"`
|
||||
|
||||
// The unique user identifier that is returned by the identity provider. This
|
||||
// identifier is associated with the WebIdentityToken that was submitted with
|
||||
// the AssumeRoleWithWebIdentity call. The identifier is typically unique to
|
||||
@ -2291,6 +2445,12 @@ func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithW
|
||||
return s
|
||||
}
|
||||
|
||||
// SetSourceIdentity sets the SourceIdentity field's value.
|
||||
func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput {
|
||||
s.SourceIdentity = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
|
||||
func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
|
||||
s.SubjectFromWebIdentityToken = &v
|
||||
@ -2682,17 +2842,17 @@ type GetFederationTokenInput struct {
|
||||
// by the policy. These permissions are granted in addition to the permissions
|
||||
// that are granted by the session policies.
|
||||
//
|
||||
// The plain text that you use for both inline and managed session policies
|
||||
// can't exceed 2,048 characters. The JSON policy characters can be any ASCII
|
||||
// character from the space character to the end of the valid character list
|
||||
// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
|
||||
// and carriage return (\u000D) characters.
|
||||
// The plaintext that you use for both inline and managed session policies can't
|
||||
// exceed 2,048 characters. The JSON policy characters can be any ASCII character
|
||||
// from the space character to the end of the valid character list (\u0020 through
|
||||
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
|
||||
// return (\u000D) characters.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
Policy *string `min:"1" type:"string"`
|
||||
|
||||
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
|
||||
@ -2727,9 +2887,9 @@ type GetFederationTokenInput struct {
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
PolicyArns []*PolicyDescriptorType `type:"list"`
|
||||
|
||||
// A list of session tags. Each session tag consists of a key name and an associated
|
||||
@ -2737,17 +2897,17 @@ type GetFederationTokenInput struct {
|
||||
// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// This parameter is optional. You can pass up to 50 session tags. The plain
|
||||
// text session tag keys can’t exceed 128 characters and the values can’t
|
||||
// exceed 256 characters. For these and additional limits, see IAM and STS Character
|
||||
// This parameter is optional. You can pass up to 50 session tags. The plaintext
|
||||
// session tag keys can’t exceed 128 characters and the values can’t exceed
|
||||
// 256 characters. For these and additional limits, see IAM and STS Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// An AWS conversion compresses the passed session policies and session tags
|
||||
// into a packed binary format that has a separate limit. Your request can fail
|
||||
// for this limit even if your plain text meets the other requirements. The
|
||||
// PackedPolicySize response element indicates by percentage how close the policies
|
||||
// and tags for your request are to the upper size limit.
|
||||
// for this limit even if your plaintext meets the other requirements. The PackedPolicySize
|
||||
// response element indicates by percentage how close the policies and tags
|
||||
// for your request are to the upper size limit.
|
||||
//
|
||||
// You can pass a session tag with the same key as a tag that is already attached
|
||||
// to the user you are federating. When you do, session tags override a user
|
||||
|
12
vendor/github.com/fsnotify/fsnotify/.editorconfig
generated
vendored
Normal file
12
vendor/github.com/fsnotify/fsnotify/.editorconfig
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
root = true
|
||||
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
||||
insert_final_newline = true
|
||||
|
||||
[*.{yml,yaml}]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
insert_final_newline = true
|
||||
trim_trailing_whitespace = true
|
1
vendor/github.com/fsnotify/fsnotify/.gitattributes
generated
vendored
Normal file
1
vendor/github.com/fsnotify/fsnotify/.gitattributes
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
go.sum linguist-generated
|
6
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
Normal file
6
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
# Setup a Global .gitignore for OS and editor generated files:
|
||||
# https://help.github.com/articles/ignoring-files
|
||||
# git config --global core.excludesfile ~/.gitignore_global
|
||||
|
||||
.vagrant
|
||||
*.sublime-project
|
36
vendor/github.com/fsnotify/fsnotify/.travis.yml
generated
vendored
Normal file
36
vendor/github.com/fsnotify/fsnotify/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
sudo: false
|
||||
language: go
|
||||
|
||||
go:
|
||||
- "stable"
|
||||
- "1.11.x"
|
||||
- "1.10.x"
|
||||
- "1.9.x"
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- go: "stable"
|
||||
env: GOLINT=true
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
|
||||
|
||||
before_install:
|
||||
- if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi
|
||||
|
||||
script:
|
||||
- go test --race ./...
|
||||
|
||||
after_script:
|
||||
- test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
|
||||
- if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi
|
||||
- go vet ./...
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
- windows
|
||||
|
||||
notifications:
|
||||
email: false
|
52
vendor/github.com/fsnotify/fsnotify/AUTHORS
generated
vendored
Normal file
52
vendor/github.com/fsnotify/fsnotify/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Aaron L <aaron@bettercoder.net>
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Anmol Sethi <me@anmol.io>
|
||||
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
||||
Bruno Bigras <bigras.bruno@gmail.com>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <chris@howey.me> <howeyc@gmail.com>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Daniel Wagner-Hall <dawagner@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Evan Phoenix <evan@fallingsnow.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Hari haran <hariharan.uno@gmail.com>
|
||||
John C Barstow
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
|
||||
Matt Layher <mdlayher@gmail.com>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Nickolai Zeldovich <nickolai@csail.mit.edu>
|
||||
Patrick <patrick@dropbox.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pawel Knap <pawelknap88@gmail.com>
|
||||
Pieter Droogendijk <pieter@binky.org.uk>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Riku Voipio <riku.voipio@linaro.org>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Rodrigo Chiossi <rodrigochiossi@gmail.com>
|
||||
Slawek Ligus <root@ooz.ie>
|
||||
Soge Zhang <zhssoge@gmail.com>
|
||||
Tiffany Jernigan <tiffany.jernigan@intel.com>
|
||||
Tilak Sharma <tilaks@google.com>
|
||||
Tom Payne <twpayne@gmail.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
Vahe Khachikyan <vahe@live.ca>
|
||||
Yukang <moorekang@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
||||
铁哥 <guotie.9@gmail.com>
|
317
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
Normal file
317
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,317 @@
|
||||
# Changelog
|
||||
|
||||
## v1.4.7 / 2018-01-09
|
||||
|
||||
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
|
||||
* Tests: Fix missing verb on format string (thanks @rchiossi)
|
||||
* Linux: Fix deadlock in Remove (thanks @aarondl)
|
||||
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
|
||||
* Docs: Moved FAQ into the README (thanks @vahe)
|
||||
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
|
||||
* Docs: replace references to OS X with macOS
|
||||
|
||||
## v1.4.2 / 2016-10-10
|
||||
|
||||
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||
|
||||
## v1.4.1 / 2016-10-04
|
||||
|
||||
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||
|
||||
## v1.4.0 / 2016-10-01
|
||||
|
||||
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||
|
||||
## v1.3.1 / 2016-06-28
|
||||
|
||||
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||
|
||||
## v1.3.0 / 2016-04-19
|
||||
|
||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||
|
||||
## v1.2.10 / 2016-03-02
|
||||
|
||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||
|
||||
## v1.2.9 / 2016-01-13
|
||||
|
||||
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||
|
||||
## v1.2.8 / 2015-12-17
|
||||
|
||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||
* inotify: fix race in test
|
||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||
|
||||
## v1.2.5 / 2015-10-17
|
||||
|
||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||
|
||||
## v1.2.1 / 2015-10-14
|
||||
|
||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||
|
||||
## v1.2.0 / 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||
|
||||
## v1.1.1 / 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## v1.1.0 / 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v1.0.4 / 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## v1.0.3 / 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||
|
||||
## v1.0.2 / 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## v1.0.0 / 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## v0.9.3 / 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v0.9.2 / 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## v0.9.1 / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## v0.9.0 / 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## v0.8.12 / 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## v0.8.11 / 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
||||
|
||||
## v0.8.10 / 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## v0.8.9 / 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## v0.8.8 / 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## v0.8.7 / 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## v0.8.6 / 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## v0.8.5 / 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## v0.8.4 / 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## v0.8.3 / 2013-03-13
|
||||
|
||||
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## v0.8.2 / 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## v0.8.1 / 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## v0.8.0 / 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## v0.7.4 / 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## v0.7.3 / 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## v0.7.2 / 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## v0.7.1 / 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## v0.7.0 / 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## v0.6.0 / 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## v0.5.1 / 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## v0.5.0 / 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## v0.4.0 / 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## v0.3.0 / 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## v0.2.0 / 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## v0.1.0 / 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
77
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
Normal file
77
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
# Contributing
|
||||
|
||||
## Issues
|
||||
|
||||
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
|
||||
* Please indicate the platform you are using fsnotify on.
|
||||
* A code example to reproduce the problem is appreciated.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
### Contributor License Agreement
|
||||
|
||||
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
|
||||
|
||||
Please indicate that you have signed the CLA in your pull request.
|
||||
|
||||
### How fsnotify is Developed
|
||||
|
||||
* Development is done on feature branches.
|
||||
* Tests are run on BSD, Linux, macOS and Windows.
|
||||
* Pull requests are reviewed and [applied to master][am] using [hub][].
|
||||
* Maintainers may modify or squash commits rather than asking contributors to.
|
||||
* To issue a new release, the maintainers will:
|
||||
* Update the CHANGELOG
|
||||
* Tag a version, which will become available through gopkg.in.
|
||||
|
||||
### How to Fork
|
||||
|
||||
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
|
||||
|
||||
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
|
||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||
3. Ensure everything works and the tests pass (see below)
|
||||
4. Commit your changes (`git commit -am 'Add some feature'`)
|
||||
|
||||
Contribute upstream:
|
||||
|
||||
1. Fork fsnotify on GitHub
|
||||
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
|
||||
3. Push to the branch (`git push fork my-new-feature`)
|
||||
4. Create a new Pull Request on GitHub
|
||||
|
||||
This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
|
||||
|
||||
### Testing
|
||||
|
||||
fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
|
||||
|
||||
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||
|
||||
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
|
||||
|
||||
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
|
||||
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
|
||||
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
|
||||
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
|
||||
* When you're done, you will want to halt or destroy the Vagrant boxes.
|
||||
|
||||
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
|
||||
|
||||
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
|
||||
|
||||
### Maintainers
|
||||
|
||||
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
|
||||
* Submit a pull request and sign the CLA as above.
|
||||
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||
|
||||
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
|
||||
|
||||
All code changes should be internal pull requests.
|
||||
|
||||
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||
|
||||
[hub]: https://github.com/github/hub
|
||||
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
|
3
vendor/golang.org/x/xerrors/LICENSE → vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
3
vendor/golang.org/x/xerrors/LICENSE → vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
@ -1,4 +1,5 @@
|
||||
Copyright (c) 2019 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
130
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
Normal file
130
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
# File system notifications for Go
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
|
||||
|
||||
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
|
||||
|
||||
```console
|
||||
go get -u golang.org/x/sys/...
|
||||
```
|
||||
|
||||
Cross platform: Windows, Linux, BSD and macOS.
|
||||
|
||||
| Adapter | OS | Status |
|
||||
| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
|
||||
| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
|
||||
| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
|
||||
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
|
||||
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
|
||||
| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
|
||||
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
|
||||
\* Android and iOS are untested.
|
||||
|
||||
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
|
||||
|
||||
## API stability
|
||||
|
||||
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
|
||||
|
||||
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
|
||||
|
||||
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
func main() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-watcher.Events:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
log.Println("event:", event)
|
||||
if event.Op&fsnotify.Write == fsnotify.Write {
|
||||
log.Println("modified file:", event.Name)
|
||||
}
|
||||
case err, ok := <-watcher.Errors:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
log.Println("error:", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
err = watcher.Add("/tmp/foo")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
<-done
|
||||
}
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
|
||||
|
||||
## Example
|
||||
|
||||
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
|
||||
|
||||
## FAQ
|
||||
|
||||
**When a file is moved to another directory is it still being watched?**
|
||||
|
||||
No (it shouldn't be, unless you are watching where it was moved to).
|
||||
|
||||
**When I watch a directory, are all subdirectories watched as well?**
|
||||
|
||||
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
|
||||
|
||||
**Do I have to watch the Error and Event channels in a separate goroutine?**
|
||||
|
||||
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
|
||||
|
||||
**Why am I receiving multiple events for the same file on OS X?**
|
||||
|
||||
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
|
||||
|
||||
**How many files can be watched at once?**
|
||||
|
||||
There are OS-specific limits as to how many watches can be created:
|
||||
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
|
||||
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
|
||||
|
||||
**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
|
||||
|
||||
fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.
|
||||
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#18]: https://github.com/fsnotify/fsnotify/issues/18
|
||||
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
[#7]: https://github.com/howeyc/fsnotify/issues/7
|
||||
|
||||
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
|
||||
|
||||
## Related Projects
|
||||
|
||||
* [notify](https://github.com/rjeczalik/notify)
|
||||
* [fsevents](https://github.com/fsnotify/fsevents)
|
||||
|
37
vendor/github.com/fsnotify/fsnotify/fen.go
generated
vendored
Normal file
37
vendor/github.com/fsnotify/fsnotify/fen.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build solaris
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
68
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
68
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !plan9
|
||||
|
||||
// Package fsnotify provides a platform-independent interface for file system notifications.
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Event represents a single file system notification.
|
||||
type Event struct {
|
||||
Name string // Relative path to the file or directory.
|
||||
Op Op // File operation that triggered the event.
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
type Op uint32
|
||||
|
||||
// These are the generalized file operations that can trigger a notification.
|
||||
const (
|
||||
Create Op = 1 << iota
|
||||
Write
|
||||
Remove
|
||||
Rename
|
||||
Chmod
|
||||
)
|
||||
|
||||
func (op Op) String() string {
|
||||
// Use a buffer for efficient string concatenation
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if op&Create == Create {
|
||||
buffer.WriteString("|CREATE")
|
||||
}
|
||||
if op&Remove == Remove {
|
||||
buffer.WriteString("|REMOVE")
|
||||
}
|
||||
if op&Write == Write {
|
||||
buffer.WriteString("|WRITE")
|
||||
}
|
||||
if op&Rename == Rename {
|
||||
buffer.WriteString("|RENAME")
|
||||
}
|
||||
if op&Chmod == Chmod {
|
||||
buffer.WriteString("|CHMOD")
|
||||
}
|
||||
if buffer.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
return buffer.String()[1:] // Strip leading pipe
|
||||
}
|
||||
|
||||
// String returns a string representation of the event in the form
|
||||
// "file: REMOVE|WRITE|..."
|
||||
func (e Event) String() string {
|
||||
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
|
||||
}
|
||||
|
||||
// Common errors that can be reported by a watcher
|
||||
var (
|
||||
ErrEventOverflow = errors.New("fsnotify queue overflow")
|
||||
)
|
5
vendor/github.com/fsnotify/fsnotify/go.mod
generated
vendored
Normal file
5
vendor/github.com/fsnotify/fsnotify/go.mod
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
module github.com/fsnotify/fsnotify
|
||||
|
||||
go 1.13
|
||||
|
||||
require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9
|
2
vendor/github.com/fsnotify/fsnotify/go.sum
generated
vendored
Normal file
2
vendor/github.com/fsnotify/fsnotify/go.sum
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
337
vendor/github.com/fsnotify/fsnotify/inotify.go
generated
vendored
Normal file
337
vendor/github.com/fsnotify/fsnotify/inotify.go
generated
vendored
Normal file
@ -0,0 +1,337 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
mu sync.Mutex // Map access
|
||||
fd int
|
||||
poller *fdPoller
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
// Create inotify fd
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create epoll
|
||||
poller, err := newFdPoller(fd)
|
||||
if err != nil {
|
||||
unix.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
poller: poller,
|
||||
watches: make(map[string]*watch),
|
||||
paths: make(map[int]string),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||
close(w.done)
|
||||
|
||||
// Wake up goroutine
|
||||
w.poller.wake()
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
if w.isClosed() {
|
||||
return errors.New("inotify instance already closed")
|
||||
}
|
||||
|
||||
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
|
||||
var flags uint32 = agnosticEvents
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watchEntry := w.watches[name]
|
||||
if watchEntry != nil {
|
||||
flags |= watchEntry.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
|
||||
if watchEntry == nil {
|
||||
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = name
|
||||
} else {
|
||||
watchEntry.wd = uint32(wd)
|
||||
watchEntry.flags = flags
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
|
||||
// Fetch the watch.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[name]
|
||||
|
||||
// Remove it from inotify.
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
||||
}
|
||||
|
||||
// We successfully removed the watch if InotifyRmWatch doesn't return an
|
||||
// error, we need to clean up our internal state to ensure it matches
|
||||
// inotify's kernel state.
|
||||
delete(w.paths, int(watch.wd))
|
||||
delete(w.watches, name)
|
||||
|
||||
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||
// the inotify will already have been removed.
|
||||
// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
|
||||
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
|
||||
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||
// by another thread and we have not received IN_IGNORE event.
|
||||
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case.
|
||||
// the only two possible errors are:
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
|
||||
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
|
||||
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
|
||||
return errno
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
n int // Number of bytes read with read()
|
||||
errno error // Syscall errno
|
||||
ok bool // For poller.wait
|
||||
)
|
||||
|
||||
defer close(w.doneResp)
|
||||
defer close(w.Errors)
|
||||
defer close(w.Events)
|
||||
defer unix.Close(w.fd)
|
||||
defer w.poller.close()
|
||||
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
ok, errno = w.poller.wait()
|
||||
if errno != nil {
|
||||
select {
|
||||
case w.Errors <- errno:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
n, errno = unix.Read(w.fd, buf[:])
|
||||
// If a signal interrupted execution, see if we've been asked to close, and try again.
|
||||
// http://man7.org/linux/man-pages/man7/signal.7.html :
|
||||
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
|
||||
// unix.Read might have been woken up by Close. If so, we're done.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
if n == 0 {
|
||||
// EOF was received, which should really never happen.
|
||||
err = io.EOF
|
||||
} else if n < 0 {
|
||||
// If an error occurred while reading.
|
||||
err = errno
|
||||
} else {
|
||||
// Read was too short.
|
||||
err = errors.New("notify: short read in readEvents()")
|
||||
}
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
mask := uint32(raw.Mask)
|
||||
nameLen := uint32(raw.Len)
|
||||
|
||||
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
select {
|
||||
case w.Errors <- ErrEventOverflow:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
name, ok := w.paths[int(raw.Wd)]
|
||||
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
|
||||
// This is a sign to clean up the maps, otherwise we are no longer in sync
|
||||
// with the inotify kernel state which has already deleted the watch
|
||||
// automatically.
|
||||
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
delete(w.paths, int(raw.Wd))
|
||||
delete(w.watches, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
|
||||
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
event := newEvent(name, mask)
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if !event.ignoreLinux(mask) {
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + nameLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Certain types of events can be "ignored" and not sent over the Events
|
||||
// channel, such as events marked ignored by the kernel, or MODIFY events
|
||||
// against files that do not exist.
|
||||
func (e *Event) ignoreLinux(mask uint32) bool {
|
||||
// Ignore anything the inotify API says to ignore
|
||||
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the event is not a DELETE or RENAME, the file must exist.
|
||||
// Otherwise the event is ignored.
|
||||
// *Note*: this was put in place because it was seen that a MODIFY
|
||||
// event was sent after the DELETE. This ignores that MODIFY and
|
||||
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||
_, statErr := os.Lstat(e.Name)
|
||||
return os.IsNotExist(statErr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on an inotify mask.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
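For context only (not part of this diff): the inotify backend above is consumed through fsnotify's public API of Events and Errors channels. The following is a minimal usage sketch, not the project's own code; the watched path "/tmp" is an arbitrary example and error handling is reduced to log.Fatal.

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewWatcher picks the platform backend (inotify on Linux, kqueue on the BSDs
	// and macOS, ReadDirectoryChangesW on Windows) and starts the reader goroutine.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch a single directory, non-recursively. "/tmp" is just an example path.
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Both channels must be drained, otherwise the reader goroutine blocks.
	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			log.Printf("%s %s", event.Op, event.Name)
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}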
187
vendor/github.com/fsnotify/fsnotify/inotify_poller.go
generated
vendored
Normal file
187
vendor/github.com/fsnotify/fsnotify/inotify_poller.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type fdPoller struct {
|
||||
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||
epfd int // Epoll file descriptor
|
||||
pipe [2]int // Pipe for waking up
|
||||
}
|
||||
|
||||
func emptyPoller(fd int) *fdPoller {
|
||||
poller := new(fdPoller)
|
||||
poller.fd = fd
|
||||
poller.epfd = -1
|
||||
poller.pipe[0] = -1
|
||||
poller.pipe[1] = -1
|
||||
return poller
|
||||
}
|
||||
|
||||
// Create a new inotify poller.
|
||||
// This creates an inotify handler, and an epoll handler.
|
||||
func newFdPoller(fd int) (*fdPoller, error) {
|
||||
var errno error
|
||||
poller := emptyPoller(fd)
|
||||
defer func() {
|
||||
if errno != nil {
|
||||
poller.close()
|
||||
}
|
||||
}()
|
||||
poller.fd = fd
|
||||
|
||||
// Create epoll fd
|
||||
poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
|
||||
if poller.epfd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
|
||||
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register inotify fd with epoll
|
||||
event := unix.EpollEvent{
|
||||
Fd: int32(poller.fd),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register pipe fd with epoll
|
||||
event = unix.EpollEvent{
|
||||
Fd: int32(poller.pipe[0]),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
return poller, nil
|
||||
}
|
||||
|
||||
// Wait using epoll.
|
||||
// Returns true if something is ready to be read,
|
||||
// false if there is not.
|
||||
func (poller *fdPoller) wait() (bool, error) {
|
||||
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
|
||||
// I don't know whether epoll_wait returns the number of events returned,
|
||||
// or the total number of events ready.
|
||||
// I decided to catch both by making the buffer one larger than the maximum.
|
||||
events := make([]unix.EpollEvent, 7)
|
||||
for {
|
||||
n, errno := unix.EpollWait(poller.epfd, events, -1)
|
||||
if n == -1 {
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
return false, errno
|
||||
}
|
||||
if n == 0 {
|
||||
// If there are no events, try again.
|
||||
continue
|
||||
}
|
||||
if n > 6 {
|
||||
// This should never happen. More events were returned than should be possible.
|
||||
return false, errors.New("epoll_wait returned more events than I know what to do with")
|
||||
}
|
||||
ready := events[:n]
|
||||
epollhup := false
|
||||
epollerr := false
|
||||
epollin := false
|
||||
for _, event := range ready {
|
||||
if event.Fd == int32(poller.fd) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// This should not happen, but if it does, treat it as a wakeup.
|
||||
epollhup = true
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// If an error is waiting on the file descriptor, we should pretend
|
||||
// something is ready to read, and let unix.Read pick up the error.
|
||||
epollerr = true
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// There is data to read.
|
||||
epollin = true
|
||||
}
|
||||
}
|
||||
if event.Fd == int32(poller.pipe[0]) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// Write pipe descriptor was closed, by us. This means we're closing down the
|
||||
// watcher, and we should wake up.
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// If an error is waiting on the pipe file descriptor.
|
||||
// This is an absolute mystery, and should never ever happen.
|
||||
return false, errors.New("error on the pipe descriptor")
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// This is a regular wakeup, so we have to clear the buffer.
|
||||
err := poller.clearWake()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if epollhup || epollerr || epollin {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Wake up the poller by writing a byte to the wake pipe.
|
||||
func (poller *fdPoller) wake() error {
|
||||
buf := make([]byte, 1)
|
||||
n, errno := unix.Write(poller.pipe[1], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is full, poller will wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (poller *fdPoller) clearWake() error {
|
||||
// You have to be woken up a LOT in order to get to 100!
|
||||
buf := make([]byte, 100)
|
||||
n, errno := unix.Read(poller.pipe[0], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is empty, someone else cleared our wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close all poller file descriptors, but not the one passed to it.
|
||||
func (poller *fdPoller) close() {
|
||||
if poller.pipe[1] != -1 {
|
||||
unix.Close(poller.pipe[1])
|
||||
}
|
||||
if poller.pipe[0] != -1 {
|
||||
unix.Close(poller.pipe[0])
|
||||
}
|
||||
if poller.epfd != -1 {
|
||||
unix.Close(poller.epfd)
|
||||
}
|
||||
}
|
521
vendor/github.com/fsnotify/fsnotify/kqueue.go
generated
vendored
Normal file
521
vendor/github.com/fsnotify/fsnotify/kqueue.go
generated
vendored
Normal file
@ -0,0 +1,521 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
|
||||
mu sync.Mutex // Protects access to watcher data
|
||||
watches map[string]int // Map of watched file descriptors (key: path).
|
||||
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
|
||||
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
|
||||
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
type pathInfo struct {
|
||||
name string
|
||||
isDir bool
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
kq, err := kqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
kq: kq,
|
||||
watches: make(map[string]int),
|
||||
dirFlags: make(map[string]uint32),
|
||||
paths: make(map[int]pathInfo),
|
||||
fileExists: make(map[string]bool),
|
||||
externalWatches: make(map[string]bool),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// copy paths to remove while locked
|
||||
var pathsToRemove = make([]string, 0, len(w.watches))
|
||||
for name := range w.watches {
|
||||
pathsToRemove = append(pathsToRemove, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
// unlock before calling Remove, which also locks
|
||||
|
||||
for _, name := range pathsToRemove {
|
||||
w.Remove(name)
|
||||
}
|
||||
|
||||
// send a "quit" message to the reader goroutine
|
||||
close(w.done)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
w.externalWatches[name] = true
|
||||
w.mu.Unlock()
|
||||
_, err := w.addWatch(name, noteAllEvents)
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
w.mu.Lock()
|
||||
watchfd, ok := w.watches[name]
|
||||
w.mu.Unlock()
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
|
||||
}
|
||||
|
||||
const registerRemove = unix.EV_DELETE
|
||||
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(watchfd)
|
||||
|
||||
w.mu.Lock()
|
||||
isDir := w.paths[watchfd].isDir
|
||||
delete(w.watches, name)
|
||||
delete(w.paths, watchfd)
|
||||
delete(w.dirFlags, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if isDir {
|
||||
var pathsToRemove []string
|
||||
w.mu.Lock()
|
||||
for _, path := range w.paths {
|
||||
wdir, _ := filepath.Split(path.name)
|
||||
if filepath.Clean(wdir) == name {
|
||||
if !w.externalWatches[path.name] {
|
||||
pathsToRemove = append(pathsToRemove, path.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// keventWaitTime to block on each read from kevent
|
||||
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||
|
||||
// addWatch adds name to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
var isDir bool
|
||||
// Make ./name and name equivalent
|
||||
name = filepath.Clean(name)
|
||||
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return "", errors.New("kevent instance already closed")
|
||||
}
|
||||
watchfd, alreadyWatching := w.watches[name]
|
||||
// We already have a watch, but we can still override flags.
|
||||
if alreadyWatching {
|
||||
isDir = w.paths[watchfd].isDir
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets.
|
||||
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Don't watch named pipes.
|
||||
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||
// consistency, we will act like everything is fine. There will simply
|
||||
// be no file events for broken symlinks.
|
||||
// Hence the returns of nil on errors.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
name, err = filepath.EvalSymlinks(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
_, alreadyWatching = w.watches[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
if alreadyWatching {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
watchfd, err = unix.Open(name, openMode, 0700)
|
||||
if watchfd == -1 {
|
||||
return "", err
|
||||
}
|
||||
|
||||
isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
|
||||
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
|
||||
unix.Close(watchfd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.mu.Lock()
|
||||
w.watches[name] = watchfd
|
||||
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if isDir {
|
||||
// Watch the directory if it has not been watched before,
|
||||
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
w.mu.Lock()
|
||||
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
// Store flags so this watch can be updated later
|
||||
w.dirFlags[name] = flags
|
||||
w.mu.Unlock()
|
||||
|
||||
if watchDir {
|
||||
if err := w.watchDirectoryFiles(name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *Watcher) readEvents() {
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
|
||||
loop:
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
select {
|
||||
case <-w.done:
|
||||
break loop
|
||||
default:
|
||||
}
|
||||
|
||||
// Get new events
|
||||
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
break loop
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Flush the events we received to the Events channel
|
||||
for len(kevents) > 0 {
|
||||
kevent := &kevents[0]
|
||||
watchfd := int(kevent.Ident)
|
||||
mask := uint32(kevent.Fflags)
|
||||
w.mu.Lock()
|
||||
path := w.paths[watchfd]
|
||||
w.mu.Unlock()
|
||||
event := newEvent(path.name, mask)
|
||||
|
||||
if path.isDir && !(event.Op&Remove == Remove) {
|
||||
// Double check to make sure the directory exists. This can happen when
|
||||
// we do an rm -fr on a recursively watched folder and we receive a
|
||||
// modification event first but the folder has been deleted and later
|
||||
// receive the delete event
|
||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||
// mark it as a delete event
|
||||
event.Op |= Remove
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||
w.Remove(event.Name)
|
||||
w.mu.Lock()
|
||||
delete(w.fileExists, event.Name)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
w.sendDirectoryChangeEvents(event.Name)
|
||||
} else {
|
||||
// Send the event on the Events channel.
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Remove == Remove {
|
||||
// Look for a file that may have overwritten this.
|
||||
// For example, mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
w.mu.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
// make sure the directory exists before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the parent directory.
|
||||
if _, err := os.Lstat(fileDir); err == nil {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filePath := filepath.Clean(event.Name)
|
||||
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move to next event
|
||||
kevents = kevents[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup
|
||||
err := unix.Close(w.kq)
|
||||
if err != nil {
|
||||
// The only way the previous loop breaks is if w.done was closed, so we need a non-blocking send to w.Errors.
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on kqueue Fflags.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func newCreateEvent(name string) Event {
|
||||
return Event{Name: name, Op: Create}
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendDirectoryChangeEvents searches the directory for newly created files
|
||||
// and sends them over the event channel. This functionality is to have
|
||||
// the BSD version of fsnotify match Linux inotify which provides a
|
||||
// create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
|
||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||
w.mu.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.mu.Unlock()
|
||||
if !doesExist {
|
||||
// Send create event
|
||||
select {
|
||||
case w.Events <- newCreateEvent(filePath):
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||
if fileInfo.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories
|
||||
// but preserve the flags used if currently watching subdirectory
|
||||
w.mu.Lock()
|
||||
flags := w.dirFlags[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
||||
return w.addWatch(name, flags)
|
||||
}
|
||||
|
||||
// watch file to mimic Linux inotify
|
||||
return w.addWatch(name, noteAllEvents)
|
||||
}
|
||||
|
||||
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||
func kqueue() (kq int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if kq == -1 {
|
||||
return kq, err
|
||||
}
|
||||
return kq, nil
|
||||
}
|
||||
|
||||
// register events with the queue
|
||||
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types:
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// register the events
|
||||
success, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(kq, nil, events, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
// durationToTimespec prepares a timeout value
|
||||
func durationToTimespec(d time.Duration) unix.Timespec {
|
||||
return unix.NsecToTimespec(d.Nanoseconds())
|
||||
}
|
11
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
generated
vendored
Normal file
11
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
|
12
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
generated
vendored
Normal file
12
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// note: this constant is not defined on BSD
|
||||
const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
|
561
vendor/github.com/fsnotify/fsnotify/windows.go
generated
vendored
Normal file
561
vendor/github.com/fsnotify/fsnotify/windows.go
generated
vendored
Normal file
@ -0,0 +1,561 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
mu sync.Mutex // Map access
|
||||
port syscall.Handle // Handle to completion port
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
if w.isClosed {
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
const (
|
||||
// Options for AddWatch
|
||||
sysFSONESHOT = 0x80000000
|
||||
sysFSONLYDIR = 0x1000000
|
||||
|
||||
// Events
|
||||
sysFSACCESS = 0x1
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSATTRIB = 0x4
|
||||
sysFSCLOSE = 0x18
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
sysFSMODIFY = 0x2
|
||||
sysFSMOVE = 0xc0
|
||||
sysFSMOVEDFROM = 0x40
|
||||
sysFSMOVEDTO = 0x80
|
||||
sysFSMOVESELF = 0x800
|
||||
|
||||
// Special events
|
||||
sysFSIGNORED = 0x8000
|
||||
sysFSQOVERFLOW = 0x4000
|
||||
)
|
||||
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle syscall.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov syscall.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [4096]byte
|
||||
}
|
||||
|
||||
type indexMap map[uint64]*watch
|
||||
type watchMap map[uint32]indexMap
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if e != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDir(pathname string) (dir string, err error) {
|
||||
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||
if e != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||
}
|
||||
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getIno(path string) (ino *inode, err error) {
|
||||
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||
syscall.FILE_LIST_DIRECTORY,
|
||||
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||
nil, syscall.OPEN_EXISTING,
|
||||
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", e)
|
||||
}
|
||||
var fi syscall.ByHandleFileInformation
|
||||
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||
syscall.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if flags&sysFSONLYDIR != 0 && pathname != dir {
|
||||
return nil
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
if err = w.startRead(watchEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watch == nil {
|
||||
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CancelIo", e)
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
if e != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n, key uint32
|
||||
ov *syscall.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if e := syscall.CloseHandle(w.port); e != nil {
|
||||
err = os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch e {
|
||||
case syscall.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case syscall.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case syscall.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.Events <- newEvent("", sysFSQOVERFLOW)
|
||||
w.Errors <- errors.New("short read in readEvents()")
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||
if watch.names[name]&sysFSONESHOT != 0 {
|
||||
delete(watch.names, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = filepath.Join(watch.path, watch.rename)
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
event := newEvent(name, uint32(mask))
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSACCESS != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||
}
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sysFSATTRIB != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case syscall.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
|
6
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
6
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cmp determines equality of values.
|
||||
//
|
||||
@ -100,8 +100,8 @@ func Equal(x, y interface{}, opts ...Option) bool {
|
||||
// same input values and options.
|
||||
//
|
||||
// The output is displayed as a literal in pseudo-Go syntax.
|
||||
// At the start of each line, a "-" prefix indicates an element removed from y,
|
||||
// a "+" prefix to indicates an element added to y, and the lack of a prefix
|
||||
// At the start of each line, a "-" prefix indicates an element removed from x,
|
||||
// a "+" prefix to indicates an element added from y, and the lack of a prefix
|
||||
// indicates an element common to both x and y. If possible, the output
|
||||
// uses fmt.Stringer.String or error.Error methods to produce more humanly
|
||||
// readable outputs. In such cases, the string is prefixed with either an
|
||||
|
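For context only (not part of this diff): the doc-comment fix above clarifies how cmp.Diff output is read, with "-" marking elements removed from x and "+" marking elements added to y. A small, hypothetical usage sketch:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := []int{1, 2, 3}
	y := []int{1, 3, 4}

	// Diff reports the edits needed to go from x to y: lines starting with "-"
	// are elements removed from x, lines starting with "+" are elements added to y,
	// and unprefixed lines are common to both. An empty string means x and y are equal.
	if diff := cmp.Diff(x, y); diff != "" {
		fmt.Printf("mismatch (-x +y):\n%s", diff)
	}
}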
2
vendor/github.com/google/go-cmp/cmp/export_panic.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/export_panic.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build purego
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/export_unsafe.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/export_unsafe.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !purego
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !cmp_debug
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build cmp_debug
|
||||
|
||||
|
50
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
50
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package diff implements an algorithm for producing edit-scripts.
|
||||
// The edit-script is a sequence of operations needed to transform one list
|
||||
@ -119,7 +119,7 @@ func (r Result) Similar() bool {
|
||||
return r.NumSame+1 >= r.NumDiff
|
||||
}
|
||||
|
||||
var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2)
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
// Difference reports whether two lists of lengths nx and ny are equal
|
||||
// given the definition of equality provided as f.
|
||||
@ -168,17 +168,6 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
// A vertical edge is equivalent to inserting a symbol from list Y.
|
||||
// A diagonal edge is equivalent to a matching symbol between both X and Y.
|
||||
|
||||
// To ensure flexibility in changing the algorithm in the future,
|
||||
// introduce some degree of deliberate instability.
|
||||
// This is achieved by fiddling the zigzag iterator to start searching
|
||||
// the graph starting from the bottom-right versus than the top-left.
|
||||
// The result may differ depending on the starting search location,
|
||||
// but still produces a valid edit script.
|
||||
zigzagInit := randInt // either 0 or 1
|
||||
if flags.Deterministic {
|
||||
zigzagInit = 0
|
||||
}
|
||||
|
||||
// Invariants:
|
||||
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
|
||||
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
|
||||
@ -197,6 +186,11 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
// approximately the square-root of the search budget.
|
||||
searchBudget := 4 * (nx + ny) // O(n)
|
||||
|
||||
// Running the tests with the "cmp_debug" build tag prints a visualization
|
||||
// of the algorithm running in real-time. This is educational for
|
||||
// understanding how the algorithm works. See debug_enable.go.
|
||||
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
|
||||
|
||||
// The algorithm below is a greedy, meet-in-the-middle algorithm for
|
||||
// computing sub-optimal edit-scripts between two lists.
|
||||
//
|
||||
@ -214,22 +208,28 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
// frontier towards the opposite corner.
|
||||
// • This algorithm terminates when either the X coordinates or the
|
||||
// Y coordinates of the forward and reverse frontier points ever intersect.
|
||||
//
|
||||
|
||||
// This algorithm is correct even if searching only in the forward direction
|
||||
// or in the reverse direction. We do both because it is commonly observed
|
||||
// that two lists commonly differ because elements were added to the front
|
||||
// or end of the other list.
|
||||
//
|
||||
// Running the tests with the "cmp_debug" build tag prints a visualization
|
||||
// of the algorithm running in real-time. This is educational for
|
||||
// understanding how the algorithm works. See debug_enable.go.
|
||||
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
|
||||
for {
|
||||
// Non-deterministically start with either the forward or reverse direction
|
||||
// to introduce some deliberate instability so that we have the flexibility
|
||||
// to change this algorithm in the future.
|
||||
if flags.Deterministic || randBool {
|
||||
goto forwardSearch
|
||||
} else {
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
forwardSearch:
|
||||
{
|
||||
// Forward search from the beginning.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
break
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
|
||||
@ -262,10 +262,14 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
} else {
|
||||
fwdFrontier.Y++
|
||||
}
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
reverseSearch:
|
||||
{
|
||||
// Reverse search from the end.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
break
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
@ -300,8 +304,10 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
} else {
|
||||
revFrontier.Y--
|
||||
}
|
||||
goto forwardSearch
|
||||
}
|
||||
|
||||
finishSearch:
|
||||
// Join the forward and reverse paths and then append the reverse path.
|
||||
fwdPath.connect(revPath.point, f)
|
||||
for i := len(revPath.es) - 1; i >= 0; i-- {
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flags
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.10
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/function/func.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/function/func.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package function provides functionality for identifying function types.
|
||||
package function
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/value/name.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/value/name.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2018, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build purego
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2018, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !purego
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/options.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/options.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/report.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/report.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/report_compare.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/report_compare.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/report_references.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/report_references.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
|
4
vendor/github.com/google/go-cmp/cmp/report_reflect.go
generated
vendored
@ -1,6 +1,6 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// license that can be found in the LICENSE file.

package cmp
@ -351,6 +351,8 @@ func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) s
	opts.PrintAddresses = disambiguate
	opts.AvoidStringer = disambiguate
	opts.QualifiedNames = disambiguate
	opts.VerbosityLevel = maxVerbosityPreset
	opts.LimitVerbosity = true
	s := opts.FormatValue(v, reflect.Map, ptrs).String()
	return strings.TrimSpace(s)
}
2
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
|
2
vendor/github.com/google/go-cmp/cmp/report_value.go
generated
vendored
2
vendor/github.com/google/go-cmp/cmp/report_value.go
generated
vendored
@ -1,6 +1,6 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
|
2
vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
generated
vendored
@ -200,3 +200,5 @@ coverage.txt

#vendor
vendor/

.envrc
17
vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
generated
vendored
@ -1,18 +1,13 @@
sudo: false
language: go
go:
  - 1.8.x
  - 1.13.x
  - 1.14.x
  - 1.15.x

env:
  - DEP_VERSION="0.3.2"

before_install:
  # Download the binary to bin folder in $GOPATH
  - curl -L -s https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -o $GOPATH/bin/dep
  # Make the binary executable
  - chmod +x $GOPATH/bin/dep

install:
  - dep ensure
  global:
    - GO111MODULE=on

script:
  - make test
29
vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
generated
vendored
@ -13,10 +13,30 @@ Types of changes:
- `Security` in case of vulnerabilities.

## [Unreleased]
### Added
- This CHANGELOG file to keep track of changes.

## 1.0.0 - 2018-05-08
### Added

- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f)

## [v1.1.0] - 2019-09-12
### Added
- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules.
- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2 - [kush-patel-hs](https://github.com/kush-patel-hs)
- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao)
- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad)
- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd)
- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for ctxtags extraction - [vporoshok](https://github.com/vporoshok)
- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled

### Deprecated
- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42)
- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of <godoc.org>.

### Fixed
- [172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst)
- Numerious documentation fixes.

## v1.0.0 - 2018-05-08
### Added
- grpc_auth
- grpc_ctxtags
@ -27,4 +47,5 @@ Types of changes:
- grpc_validator
- grpc_recovery

[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...HEAD
[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD
[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0
123
vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock
generated
vendored
123
vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock
generated
vendored
@ -1,123 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata"]
|
||||
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
|
||||
version = "v0.16.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = ["gogoproto","proto","protoc-gen-gogo/descriptor"]
|
||||
revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
|
||||
version = "v0.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["jsonpb","proto","ptypes","ptypes/any","ptypes/duration","ptypes/struct","ptypes/timestamp"]
|
||||
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/opentracing/opentracing-go"
|
||||
packages = [".","ext","log","mocktracer"]
|
||||
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
|
||||
version = "v1.0.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = ["assert","require","suite"]
|
||||
revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
|
||||
version = "v1.1.4"
|
||||
|
||||
[[projects]]
|
||||
name = "go.uber.org/atomic"
|
||||
packages = ["."]
|
||||
revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8"
|
||||
version = "v1.3.1"
|
||||
|
||||
[[projects]]
|
||||
name = "go.uber.org/multierr"
|
||||
packages = ["."]
|
||||
revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "go.uber.org/zap"
|
||||
packages = [".","buffer","internal/bufferpool","internal/color","internal/exit","zapcore"]
|
||||
revision = "35aad584952c3e7020db7b839f6b102de6271f89"
|
||||
version = "v1.7.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
|
||||
revision = "a8b9294777976932365dabb6640cf1468d95c70f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [".","google","internal","jws","jwt"]
|
||||
revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix","windows"]
|
||||
revision = "13fcbd661c8ececa8807a29b48407d674b1d8ed8"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
|
||||
revision = "75cc3cad82b5f47d3fb229ddda8c5167da14f294"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
revision = "7f0da29060c682909f650ad8ed4e515bd74fa12a"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","credentials/oauth","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
|
||||
revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
|
||||
version = "v1.8.0"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "b24c6670412eb0bc44ed1db77fecc52333f8725f3e3272bdc568f5683a63031f"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
35
vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml
generated
vendored
35
vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml
generated
vendored
@ -1,35 +0,0 @@
|
||||
[[constraint]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
version = "0.5.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/opentracing/opentracing-go"
|
||||
version = "1.0.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/sirupsen/logrus"
|
||||
version = "1.0.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "1.1.4"
|
||||
|
||||
[[constraint]]
|
||||
name = "go.uber.org/zap"
|
||||
version = "1.7.1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/grpc"
|
||||
version = "1.8.0"
|
12
vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
generated
vendored
@ -7,7 +7,7 @@
[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
[![Slack](slack.png)](https://join.slack.com/t/improbable-eng/shared_invite/enQtMzQ1ODcyMzQ5MjM4LWY5ZWZmNGM2ODc5MmViNmQ3ZTA3ZTY3NzQwOTBlMTkzZmIxZTIxODk0OWU3YjZhNWVlNDU3MDlkZGViZjhkMjc)
[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE)

[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.

@ -15,7 +15,7 @@

[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client either around the user call. It is a perfect way to implement
that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement
common patterns: auth, logging, message, validation, retries or monitoring.

These are generic building blocks that make it easy to build multiple microservices easily.
@ -29,20 +29,20 @@ import "github.com/grpc-ecosystem/go-grpc-middleware"

myServer := grpc.NewServer(
    grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
        grpc_recovery.StreamServerInterceptor(),
        grpc_ctxtags.StreamServerInterceptor(),
        grpc_opentracing.StreamServerInterceptor(),
        grpc_prometheus.StreamServerInterceptor,
        grpc_zap.StreamServerInterceptor(zapLogger),
        grpc_auth.StreamServerInterceptor(myAuthFunction),
        grpc_recovery.StreamServerInterceptor(),
    )),
    grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
        grpc_recovery.UnaryServerInterceptor(),
        grpc_ctxtags.UnaryServerInterceptor(),
        grpc_opentracing.UnaryServerInterceptor(),
        grpc_prometheus.UnaryServerInterceptor,
        grpc_zap.UnaryServerInterceptor(zapLogger),
        grpc_auth.UnaryServerInterceptor(myAuthFunction),
        grpc_recovery.UnaryServerInterceptor(),
    )),
)
```
@ -58,7 +58,8 @@ myServer := grpc.NewServer(
* [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
* [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
* [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
* [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers.
* [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows to replace loggers in runtime (thread-safe).

#### Monitoring
* [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
@ -71,6 +72,7 @@ myServer := grpc.NewServer(
#### Server
* [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
* [`grpc_recovery`](recovery/) - turn panics into gRPC errors
* [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter

## Status
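Reviewer note: the README change above keeps only a server-side chaining example. For completeness, a client-side equivalent using `grpc_middleware.ChainUnaryClient` is sketched below. It is not part of this commit; the target address and the two interceptors are placeholders defined inline so nothing is assumed about other packages.

```go
package main

import (
	"context"
	"log"
	"time"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

// timingInterceptor logs how long each unary call took.
func timingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("call %s took %s (err=%v)", method, time.Since(start), err)
	return err
}

// headerInterceptor is a stand-in for auth/metadata logic; here it simply
// passes the call through.
func headerInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	return invoker(ctx, method, req, reply, cc, opts...)
}

func main() {
	// ChainUnaryClient runs timingInterceptor first, then headerInterceptor,
	// and finally the real invoker.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
			timingInterceptor,
			headerInterceptor,
		)),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```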
125
vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
generated
vendored
@ -6,7 +6,8 @@
package grpc_middleware

import (
	"golang.org/x/net/context"
	"context"

	"google.golang.org/grpc"
)

@ -18,35 +19,19 @@ import (
func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
	n := len(interceptors)

	if n > 1 {
		lastI := n - 1
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
			var (
				chainHandler grpc.UnaryHandler
				curI         int
			)

			chainHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
				if curI == lastI {
					return handler(currentCtx, currentReq)
				}
				curI++
				resp, err := interceptors[curI](currentCtx, currentReq, info, chainHandler)
				curI--
				return resp, err
			}

			return interceptors[0](ctx, req, info, chainHandler)
		chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
			return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
				return currentInter(currentCtx, currentReq, info, currentHandler)
			}
		}

	if n == 1 {
		return interceptors[0]
		chainedHandler := handler
		for i := n - 1; i >= 0; i-- {
			chainedHandler = chainer(interceptors[i], chainedHandler)
		}

	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
	return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		return handler(ctx, req)
		return chainedHandler(ctx, req)
	}
}

@ -58,35 +43,19 @@ func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnarySer
func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
	n := len(interceptors)

	if n > 1 {
		lastI := n - 1
		return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
			var (
				chainHandler grpc.StreamHandler
				curI         int
			)

			chainHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
				if curI == lastI {
					return handler(currentSrv, currentStream)
				}
				curI++
				err := interceptors[curI](currentSrv, currentStream, info, chainHandler)
				curI--
				return err
			}

			return interceptors[0](srv, stream, info, chainHandler)
	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
			return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
				return currentInter(currentSrv, currentStream, info, currentHandler)
			}
		}

	if n == 1 {
		return interceptors[0]
		chainedHandler := handler
		for i := n - 1; i >= 0; i-- {
			chainedHandler = chainer(interceptors[i], chainedHandler)
		}

	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
	return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		return handler(srv, stream)
		return chainedHandler(srv, ss)
	}
}

@ -97,35 +66,19 @@ func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.Stream
func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
	n := len(interceptors)

	if n > 1 {
		lastI := n - 1
	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
			var (
				chainHandler grpc.UnaryInvoker
				curI         int
			)

			chainHandler = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
				if curI == lastI {
					return invoker(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentOpts...)
				}
				curI++
				err := interceptors[curI](currentCtx, currentMethod, currentReq, currentRepl, currentConn, chainHandler, currentOpts...)
				curI--
				return err
			}

			return interceptors[0](ctx, method, req, reply, cc, chainHandler, opts...)
		chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
			return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
				return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
			}
		}

	if n == 1 {
		return interceptors[0]
		chainedInvoker := invoker
		for i := n - 1; i >= 0; i-- {
			chainedInvoker = chainer(interceptors[i], chainedInvoker)
		}

	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		return invoker(ctx, method, req, reply, cc, opts...)
		return chainedInvoker(ctx, method, req, reply, cc, opts...)
	}
}

@ -136,35 +89,19 @@ func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryCli
func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
	n := len(interceptors)

	if n > 1 {
		lastI := n - 1
	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
			var (
				chainHandler grpc.Streamer
				curI         int
			)

			chainHandler = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
				if curI == lastI {
					return streamer(currentCtx, currentDesc, currentConn, currentMethod, currentOpts...)
				}
				curI++
				stream, err := interceptors[curI](currentCtx, currentDesc, currentConn, currentMethod, chainHandler, currentOpts...)
				curI--
				return stream, err
			}

			return interceptors[0](ctx, desc, cc, method, chainHandler, opts...)
		chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
			return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
				return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
			}
		}

	if n == 1 {
		return interceptors[0]
		chainedStreamer := streamer
		for i := n - 1; i >= 0; i-- {
			chainedStreamer = chainer(interceptors[i], chainedStreamer)
		}

	// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		return streamer(ctx, desc, cc, method, opts...)
		return chainedStreamer(ctx, desc, cc, method, opts...)
	}
}

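Reviewer note: the rewritten chain functions above drop the index-juggling recursion in favour of wrapping the final handler with the interceptors from last to first, so the first interceptor in the list runs outermost. The standalone sketch below shows the same fold pattern with plain types; it is not part of this commit, and the names are illustrative only.

```go
package main

import "fmt"

// Handler is the innermost unit of work.
type Handler func(req string) string

// Interceptor wraps a Handler and may run code before and after it.
type Interceptor func(req string, next Handler) string

// chain mirrors the new ChainUnaryServer approach: start from the real
// handler and wrap it with the interceptors from last to first, so that
// interceptors[0] ends up outermost and runs first.
func chain(handler Handler, interceptors ...Interceptor) Handler {
	chained := handler
	for i := len(interceptors) - 1; i >= 0; i-- {
		next := chained
		ic := interceptors[i]
		chained = func(req string) string { return ic(req, next) }
	}
	return chained
}

func main() {
	logging := func(req string, next Handler) string { return "log(" + next(req) + ")" }
	auth := func(req string, next Handler) string { return "auth(" + next(req) + ")" }
	handler := func(req string) string { return "handle:" + req }

	// logging runs first (outermost), then auth, then the handler.
	fmt.Println(chain(handler, logging, auth)("req")) // prints log(auth(handle:req))
}
```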
4
vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
generated
vendored
@ -23,7 +23,7 @@ server chaining:

myServer := grpc.NewServer(
    grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)),
    grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary),
    grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)),
)

These interceptors will be executed from left to right: logging, monitoring and auth.
@ -63,7 +63,7 @@ needed. For example:
func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
    newStream := grpc_middleware.WrapServerStream(stream)
    newStream.WrappedContext = context.WithValue(ctx, "user_id", "john@example.com")
    return handler(srv, stream)
    return handler(srv, newStream)
}
*/
package grpc_middleware
22
vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod
generated
vendored
Normal file
@ -0,0 +1,22 @@
module github.com/grpc-ecosystem/go-grpc-middleware

require (
	github.com/go-kit/kit v0.9.0
	github.com/go-logfmt/logfmt v0.4.0 // indirect
	github.com/go-stack/stack v1.8.0 // indirect
	github.com/gogo/protobuf v1.3.2
	github.com/golang/protobuf v1.3.3
	github.com/opentracing/opentracing-go v1.1.0
	github.com/pkg/errors v0.8.1 // indirect
	github.com/sirupsen/logrus v1.4.2
	github.com/stretchr/testify v1.4.0
	go.uber.org/atomic v1.4.0 // indirect
	go.uber.org/multierr v1.1.0 // indirect
	go.uber.org/zap v1.10.0
	golang.org/x/net v0.0.0-20201021035429-f5854403a974
	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
	google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215 // indirect
	google.golang.org/grpc v1.29.1
)

go 1.14
122
vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum
generated
vendored
Normal file
122
vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215 h1:0Uz5jLJQioKgVozXa1gzGbzYxbb/rhQEVvSWxzw5oUs=
|
||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
3
vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
generated
vendored
@ -8,7 +8,8 @@ fmt:
	go fmt $(GOFILES_NOVENDOR)

vet:
	go vet $(GOFILES_NOVENDOR)
	# do not check lostcancel, they are intentional.
	go vet -lostcancel=false $(GOFILES_NOVENDOR)

test: vet
	./scripts/test_all.sh
3
vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
generated
vendored
@ -4,7 +4,8 @@
package grpc_middleware

import (
	"golang.org/x/net/context"
	"context"

	"google.golang.org/grpc"
)

3
vendor/github.com/hpcloud/tail/.gitignore
generated
vendored
3
vendor/github.com/hpcloud/tail/.gitignore
generated
vendored
@ -1,3 +0,0 @@
|
||||
.test
|
||||
.go
|
||||
|
18
vendor/github.com/hpcloud/tail/.travis.yml
generated
vendored
18
vendor/github.com/hpcloud/tail/.travis.yml
generated
vendored
@ -1,18 +0,0 @@
|
||||
language: go
|
||||
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
install:
|
||||
- go get gopkg.in/fsnotify.v1
|
||||
- go get gopkg.in/tomb.v1
|
63
vendor/github.com/hpcloud/tail/CHANGES.md
generated
vendored
63
vendor/github.com/hpcloud/tail/CHANGES.md
generated
vendored
@ -1,63 +0,0 @@
|
||||
# API v1 (gopkg.in/hpcloud/tail.v1)
|
||||
|
||||
## April, 2016
|
||||
|
||||
* Migrated to godep, as depman is not longer supported
|
||||
* Introduced golang vendoring feature
|
||||
* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file
|
||||
|
||||
## July, 2015
|
||||
|
||||
* Fix inotify watcher leak; remove `Cleanup` (#51)
|
||||
|
||||
# API v0 (gopkg.in/hpcloud/tail.v0)
|
||||
|
||||
## June, 2015
|
||||
|
||||
* Don't return partial lines (PR #40)
|
||||
* Use stable version of fsnotify (#46)
|
||||
|
||||
## July, 2014
|
||||
|
||||
* Fix tail for Windows (PR #36)
|
||||
|
||||
## May, 2014
|
||||
|
||||
* Improved rate limiting using leaky bucket (PR #29)
|
||||
* Fix odd line splitting (PR #30)
|
||||
|
||||
## Apr, 2014
|
||||
|
||||
* LimitRate now discards read buffer (PR #28)
|
||||
* allow reading of longer lines if MaxLineSize is unset (PR #24)
|
||||
* updated deps.json to latest fsnotify (441bbc86b1)
|
||||
|
||||
## Feb, 2014
|
||||
|
||||
* added `Config.Logger` to suppress library logging
|
||||
|
||||
## Nov, 2013
|
||||
|
||||
* add Cleanup to remove leaky inotify watches (PR #20)
|
||||
|
||||
## Aug, 2013
|
||||
|
||||
* redesigned Location field (PR #12)
|
||||
* add tail.Tell (PR #14)
|
||||
|
||||
## July, 2013
|
||||
|
||||
* Rate limiting (PR #10)
|
||||
|
||||
## May, 2013
|
||||
|
||||
* Detect file deletions/renames in polling file watcher (PR #1)
|
||||
* Detect file truncation
|
||||
* Fix potential race condition when reopening the file (issue 5)
|
||||
* Fix potential blocking of `tail.Stop` (issue 4)
|
||||
* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
|
||||
* Support Follow=false
|
||||
|
||||
## Feb, 2013
|
||||
|
||||
* Initial open source release
|
19
vendor/github.com/hpcloud/tail/Dockerfile
generated
vendored
19
vendor/github.com/hpcloud/tail/Dockerfile
generated
vendored
@ -1,19 +0,0 @@
|
||||
FROM golang
|
||||
|
||||
RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
|
||||
ADD . $GOPATH/src/github.com/hpcloud/tail/
|
||||
|
||||
# expecting to fetch dependencies successfully.
|
||||
RUN go get -v github.com/hpcloud/tail
|
||||
|
||||
# expecting to run the test successfully.
|
||||
RUN go test -v github.com/hpcloud/tail
|
||||
|
||||
# expecting to install successfully
|
||||
RUN go install -v github.com/hpcloud/tail
|
||||
RUN go install -v github.com/hpcloud/tail/cmd/gotail
|
||||
|
||||
RUN $GOPATH/bin/gotail -h || true
|
||||
|
||||
ENV PATH $GOPATH/bin:$PATH
|
||||
CMD ["gotail"]
|
11
vendor/github.com/hpcloud/tail/Makefile
generated
vendored
11
vendor/github.com/hpcloud/tail/Makefile
generated
vendored
@ -1,11 +0,0 @@
|
||||
default: test
|
||||
|
||||
test: *.go
|
||||
go test -v -race ./...
|
||||
|
||||
fmt:
|
||||
gofmt -w .
|
||||
|
||||
# Run the test in an isolated environment.
|
||||
fulltest:
|
||||
docker build -t hpcloud/tail .
|
28
vendor/github.com/hpcloud/tail/README.md
generated
vendored
28
vendor/github.com/hpcloud/tail/README.md
generated
vendored
@ -1,28 +0,0 @@
|
||||
[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail)
|
||||
[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail)
|
||||
|
||||
# Go package for tail-ing files
|
||||
|
||||
A Go package striving to emulate the features of the BSD `tail` program.
|
||||
|
||||
```Go
|
||||
t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
|
||||
for line := range t.Lines {
|
||||
fmt.Println(line.Text)
|
||||
}
|
||||
```
|
||||
|
||||
See [API documentation](http://godoc.org/github.com/hpcloud/tail).
|
||||
|
||||
## Log rotation
|
||||
|
||||
Tail comes with full support for truncation/move detection as it is
|
||||
designed to work with log rotation tools.
|
||||
|
||||
## Installing
|
||||
|
||||
go get github.com/hpcloud/tail/...
|
||||
|
||||
## Windows support
|
||||
|
||||
This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support.
|
11
vendor/github.com/hpcloud/tail/appveyor.yml
generated
vendored
11
vendor/github.com/hpcloud/tail/appveyor.yml
generated
vendored
@ -1,11 +0,0 @@
|
||||
version: 0.{build}
|
||||
skip_tags: true
|
||||
cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
|
||||
build_script:
|
||||
- SET GOPATH=c:\workspace
|
||||
- go test -v -race ./...
|
||||
test: off
|
||||
clone_folder: c:\workspace\src\github.com\hpcloud\tail
|
||||
branches:
|
||||
only:
|
||||
- master
|
11
vendor/github.com/hpcloud/tail/tail_posix.go
generated
vendored
11
vendor/github.com/hpcloud/tail/tail_posix.go
generated
vendored
@ -1,11 +0,0 @@
|
||||
// +build linux darwin freebsd netbsd openbsd
|
||||
|
||||
package tail
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
func OpenFile(name string) (file *os.File, err error) {
|
||||
return os.Open(name)
|
||||
}
|
12
vendor/github.com/hpcloud/tail/tail_windows.go
generated
vendored
12
vendor/github.com/hpcloud/tail/tail_windows.go
generated
vendored
@ -1,12 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package tail
|
||||
|
||||
import (
|
||||
"github.com/hpcloud/tail/winfile"
|
||||
"os"
|
||||
)
|
||||
|
||||
func OpenFile(name string) (file *os.File, err error) {
|
||||
return winfile.OpenFile(name, os.O_RDONLY, 0)
|
||||
}
|
62
vendor/github.com/kubernetes-csi/csi-lib-utils/connection/connection.go
generated
vendored
@ -27,7 +27,7 @@ import (
	"github.com/kubernetes-csi/csi-lib-utils/metrics"
	"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
	"google.golang.org/grpc"
	"k8s.io/klog"
	"k8s.io/klog/v2"
)

const (
@ -109,7 +109,7 @@ func connect(
		grpc.WithBlock(), // Block until connection succeeds.
		grpc.WithChainUnaryInterceptor(
			LogGRPC, // Log all messages.
			extendedCSIMetricsManager{metricsManager}.recordMetricsInterceptor, // Record metrics for each gRPC call.
			ExtendedCSIMetricsManager{metricsManager}.RecordMetricsClientInterceptor, // Record metrics for each gRPC call.
		),
	)
	unixPrefix := "unix://"
@ -140,7 +140,7 @@ func connect(
			}
			conn, err := net.DialTimeout("unix", address[len(unixPrefix):], timeout)
			if err == nil {
				// Connection restablished.
				// Connection reestablished.
				haveConnected = true
				lostConnection = false
			}
@ -150,7 +150,7 @@ func connect(
		return nil, errors.New("OnConnectionLoss callback only supported for unix:// addresses")
	}

	klog.Infof("Connecting to %s", address)
	klog.V(5).Infof("Connecting to %s", address)

	// Connect in background.
	var conn *grpc.ClientConn
@ -187,12 +187,20 @@ func LogGRPC(ctx context.Context, method string, req, reply interface{}, cc *grp
	return err
}

type extendedCSIMetricsManager struct {
type ExtendedCSIMetricsManager struct {
	metrics.CSIMetricsManager
}

// recordMetricsInterceptor is a gPRC unary interceptor for recording metrics for CSI operations.
func (cmm extendedCSIMetricsManager) recordMetricsInterceptor(
type AdditionalInfo struct {
	Migrated string
}
type AdditionalInfoKeyType struct{}

var AdditionalInfoKey AdditionalInfoKeyType

// RecordMetricsClientInterceptor is a gPRC unary interceptor for recording metrics for CSI operations
// in a gRPC client.
func (cmm ExtendedCSIMetricsManager) RecordMetricsClientInterceptor(
	ctx context.Context,
	method string,
	req, reply interface{},
@ -202,10 +210,48 @@ func (cmm extendedCSIMetricsManager) recordMetricsInterceptor(
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	duration := time.Since(start)
	cmm.RecordMetrics(

	var cmmBase metrics.CSIMetricsManager
	cmmBase = cmm
	if cmm.HaveAdditionalLabel(metrics.LabelMigrated) {
		// record migration status
		additionalInfo := ctx.Value(AdditionalInfoKey)
		migrated := "false"
		if additionalInfo != nil {
			additionalInfoVal, ok := additionalInfo.(AdditionalInfo)
			if !ok {
				klog.Errorf("Failed to record migrated status, cannot convert additional info %v", additionalInfo)
				return err
			}
			migrated = additionalInfoVal.Migrated
		}
		cmmv, metricsErr := cmm.WithLabelValues(map[string]string{metrics.LabelMigrated: migrated})
		if metricsErr != nil {
			klog.Errorf("Failed to record migrated status, error: %v", metricsErr)
		} else {
			cmmBase = cmmv
		}
	}
	// Record the default metric
	cmmBase.RecordMetrics(
		method, /* operationName */
		err, /* operationErr */
		duration, /* operationDuration */
	)

	return err
}

// RecordMetricsServerInterceptor is a gPRC unary interceptor for recording metrics for CSI operations
// in a gRCP server.
func (cmm ExtendedCSIMetricsManager) RecordMetricsServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	duration := time.Since(start)
	cmm.RecordMetrics(
		info.FullMethod, /* operationName */
		err, /* operationErr */
		duration, /* operationDuration */
	)
	return resp, err
}

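Reviewer note: with the updated csi-lib-utils, the new `RecordMetricsClientInterceptor` reads an optional `AdditionalInfo` value from the call context to label a sample as migrated. A sketch of how a caller could set that value follows. It is not part of this commit; it assumes the `*grpc.ClientConn` was produced by `connection.Connect` (so the interceptor is installed) and that the metrics manager was created with the migrated label enabled, and the `createVolume` helper is purely illustrative.

```go
package example

import (
	"context"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/kubernetes-csi/csi-lib-utils/connection"
	"google.golang.org/grpc"
)

// createVolume issues a CSI CreateVolume call and tags it as a migrated
// operation so that RecordMetricsClientInterceptor records migrated="true".
func createVolume(ctx context.Context, conn *grpc.ClientConn, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	// Attach the additional info that the interceptor looks up via
	// connection.AdditionalInfoKey (see the diff above).
	ctx = context.WithValue(ctx, connection.AdditionalInfoKey, connection.AdditionalInfo{Migrated: "true"})

	return csi.NewControllerClient(conn).CreateVolume(ctx, req)
}
```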
311
vendor/github.com/kubernetes-csi/csi-lib-utils/metrics/metrics.go
generated
vendored
311
vendor/github.com/kubernetes-csi/csi-lib-utils/metrics/metrics.go
generated
vendored
@ -20,23 +20,36 @@ import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/component-base/metrics"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
// SubsystemSidecar is the default subsystem name in a metrics
|
||||
// (= the prefix in the final metrics name). It is to be used
|
||||
// by CSI sidecars. Using the same subsystem in different CSI
|
||||
// drivers makes it possible to reuse dashboards because
|
||||
// the metrics names will be identical. Data from different
|
||||
// drivers can be selected via the "driver_name" tag.
|
||||
SubsystemSidecar = "csi_sidecar"
|
||||
// SubsystemPlugin is what CSI driver's should use as
|
||||
// subsystem name.
|
||||
SubsystemPlugin = "csi_plugin"
|
||||
|
||||
// Common metric strings
|
||||
subsystem = "csi_sidecar"
|
||||
labelCSIDriverName = "driver_name"
|
||||
labelCSIOperationName = "method_name"
|
||||
labelGrpcStatusCode = "grpc_status_code"
|
||||
unknownCSIDriverName = "unknown-driver"
|
||||
|
||||
// LabelMigrated is the Label that indicate whether this is a CSI migration operation
|
||||
LabelMigrated = "migrated"
|
||||
|
||||
// CSI Operation Latency with status code total - Histogram Metric
|
||||
operationsLatencyMetricName = "operations_seconds"
|
||||
operationsLatencyHelp = "Container Storage Interface operation duration with gRPC error code status total"
|
||||
@ -56,42 +69,184 @@ type CSIMetricsManager interface {
|
||||
// operationName - Name of the CSI operation.
|
||||
// operationErr - Error, if any, that resulted from execution of operation.
|
||||
// operationDuration - time it took for the operation to complete
|
||||
//
|
||||
// If WithLabelNames was used to define additional labels when constructing
|
||||
// the manager, then WithLabelValues should be used to create a wrapper which
|
||||
// holds the corresponding values before calling RecordMetrics of the wrapper.
|
||||
// Labels with missing values are recorded as empty.
|
||||
RecordMetrics(
|
||||
operationName string,
|
||||
operationErr error,
|
||||
operationDuration time.Duration)
|
||||
|
||||
// WithLabelValues must be used to add the additional label
|
||||
// values defined via WithLabelNames. When calling RecordMetrics
|
||||
// without it or with too few values, the missing values are
|
||||
// recorded as empty. WithLabelValues can be called multiple times
|
||||
// and then accumulates values.
|
||||
WithLabelValues(labels map[string]string) (CSIMetricsManager, error)
|
||||
|
||||
// HaveAdditionalLabel can be used to check if the additional label
|
||||
// value is defined in the metrics manager
|
||||
HaveAdditionalLabel(name string) bool
|
||||
|
||||
// SetDriverName is called to update the CSI driver name. This should be done
|
||||
// as soon as possible, otherwise metrics recorded by this manager will be
|
||||
// recorded with an "unknown-driver" driver_name.
|
||||
// driverName - Name of the CSI driver against which this operation was executed.
|
||||
SetDriverName(driverName string)
|
||||
|
||||
// StartMetricsEndpoint starts the metrics endpoint at the specified address/path
|
||||
// for this metrics manager.
|
||||
// If the metricsAddress is an empty string, this will be a no op.
|
||||
StartMetricsEndpoint(metricsAddress, metricsPath string)
|
||||
// RegisterToServer registers an HTTP handler for this metrics manager to the
|
||||
// given server at the specified address/path.
|
||||
RegisterToServer(s Server, metricsPath string)
|
||||
}
|
||||
|
||||
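As a hedged usage sketch of the CSIMetricsManager interface added above (based only on the API visible in this diff; the driver name, operation name and doCSICall helper are illustrative placeholders, not ceph-csi's actual wiring):

```go
package main

import (
	"time"

	"github.com/kubernetes-csi/csi-lib-utils/metrics"
)

func main() {
	// Create a manager for a sidecar; the driver name can also be set later
	// via SetDriverName once it is known.
	cmm := metrics.NewCSIMetricsManagerForSidecar("")
	cmm.SetDriverName("rbd.csi.ceph.com") // illustrative driver name

	// Record one observation after a (hypothetical) CSI call.
	start := time.Now()
	err := doCSICall() // placeholder for a real gRPC call to the CSI driver
	cmm.RecordMetrics("/csi.v1.Controller/CreateVolume", err, time.Since(start))
}

// doCSICall stands in for an actual CSI RPC; it only exists for this sketch.
func doCSICall() error { return nil }
```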
// NewCSIMetricsManager creates and registers metrics for for CSI Sidecars and
|
||||
// returns an object that can be used to trigger the metrics.
|
||||
// Server represents any type that could serve HTTP requests for the metrics
|
||||
// endpoint.
|
||||
type Server interface {
|
||||
Handle(pattern string, handler http.Handler)
|
||||
}
|
||||
|
||||
// MetricsManagerOption is used to pass optional configuration to a
|
||||
// new metrics manager.
|
||||
type MetricsManagerOption func(*csiMetricsManager)
|
||||
|
||||
// WithSubsystem overrides the default subsystem name.
|
||||
func WithSubsystem(subsystem string) MetricsManagerOption {
|
||||
return func(cmm *csiMetricsManager) {
|
||||
cmm.subsystem = subsystem
|
||||
}
|
||||
}
|
||||
|
||||
// WithStabilityLevel overrides the default stability level. The recommended
|
||||
// usage is to keep metrics at a lower level when csi-lib-utils switches
|
||||
// to beta or GA. Overriding the alpha default with beta or GA is risky
|
||||
// because the metrics can still change in the library.
|
||||
func WithStabilityLevel(stabilityLevel metrics.StabilityLevel) MetricsManagerOption {
|
||||
return func(cmm *csiMetricsManager) {
|
||||
cmm.stabilityLevel = stabilityLevel
|
||||
}
|
||||
}
|
||||
|
||||
// WithLabelNames defines labels for each sample that get added to the
|
||||
// default labels (driver, method call, and gRPC result). This makes
|
||||
// it possible to partition the histograms along additional
|
||||
// dimensions.
|
||||
//
|
||||
// To record a metrics with additional values, use
|
||||
// CSIMetricManager.WithLabelValues().RecordMetrics().
|
||||
func WithLabelNames(labelNames ...string) MetricsManagerOption {
|
||||
return func(cmm *csiMetricsManager) {
|
||||
cmm.additionalLabelNames = labelNames
|
||||
}
|
||||
}
|
||||
|
||||
// WithLabels defines some label name and value pairs that are added to all
|
||||
// samples. They get recorded sorted by name.
|
||||
func WithLabels(labels map[string]string) MetricsManagerOption {
|
||||
return func(cmm *csiMetricsManager) {
|
||||
var l []label
|
||||
for name, value := range labels {
|
||||
l = append(l, label{name, value})
|
||||
}
|
||||
sort.Slice(l, func(i, j int) bool {
|
||||
return l[i].name < l[j].name
|
||||
})
|
||||
cmm.additionalLabels = l
|
||||
}
|
||||
}
|
||||
|
||||
// WithMigration adds the migrated field to the current metrics label
|
||||
func WithMigration() MetricsManagerOption {
|
||||
return func(cmm *csiMetricsManager) {
|
||||
cmm.additionalLabelNames = append(cmm.additionalLabelNames, LabelMigrated)
|
||||
}
|
||||
}
|
||||
|
||||
// WithProcessStartTime controls whether process_start_time_seconds is registered

|
||||
// in the registry of the metrics manager. It's enabled by default out of convenience
|
||||
// (no need to do anything special in most sidecars) but should be disabled in more
|
||||
// complex scenarios (more than one metrics manager per process, metric already
|
||||
// provided elsewhere like via the Prometheus Golang collector).
|
||||
//
|
||||
// In particular, registering this metric via metric manager and thus the Kubernetes
|
||||
// component base conflicts with the Prometheus Golang collector (gathered metric family
|
||||
// process_start_time_seconds has help "[ALPHA] Start time of the process since unix epoch in seconds."
|
||||
// but should have "Start time of the process since unix epoch in seconds."
|
||||
func WithProcessStartTime(registerProcessStartTime bool) MetricsManagerOption {
|
||||
return func(cmm *csiMetricsManager) {
|
||||
cmm.registerProcessStartTime = registerProcessStartTime
|
||||
}
|
||||
}
|
||||
|
||||
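To illustrate the option functions above, a minimal sketch of building a customized manager with the constructor introduced in this version; the driver name, the extra label names and values are assumptions for the example, not ceph-csi configuration:

```go
package example

import "github.com/kubernetes-csi/csi-lib-utils/metrics"

// newPluginMetrics builds a manager with a custom subsystem, one extra
// per-call label, one constant label, and process_start_time_seconds
// registration disabled (useful when another collector already exports it).
func newPluginMetrics() metrics.CSIMetricsManager {
	return metrics.NewCSIMetricsManagerWithOptions(
		"rbd.csi.ceph.com", // illustrative driver name
		metrics.WithSubsystem(metrics.SubsystemPlugin),         // report csi_plugin_* instead of csi_sidecar_*
		metrics.WithLabelNames("persistentvolumeclaim"),        // additional per-call label (hypothetical name)
		metrics.WithLabels(map[string]string{"instance": "a"}), // constant label added to every sample
		metrics.WithProcessStartTime(false),                    // skip process_start_time_seconds registration
	)
}
```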
// NewCSIMetricsManagerForSidecar creates and registers metrics for CSI Sidecars and
|
||||
// returns an object that can be used to trigger the metrics. It uses "csi_sidecar"
|
||||
// as subsystem.
|
||||
//
|
||||
// driverName - Name of the CSI driver against which this operation was executed.
|
||||
// If unknown, leave empty, and use SetDriverName method to update later.
|
||||
func NewCSIMetricsManager(driverName string) CSIMetricsManager {
|
||||
func NewCSIMetricsManagerForSidecar(driverName string) CSIMetricsManager {
|
||||
return NewCSIMetricsManagerWithOptions(driverName)
|
||||
}
|
||||
|
||||
// NewCSIMetricsManager is provided for backwards-compatibility.
|
||||
var NewCSIMetricsManager = NewCSIMetricsManagerForSidecar
|
||||
|
||||
// NewCSIMetricsManagerForPlugin creates and registers metrics for CSI drivers and
|
||||
// returns an object that can be used to trigger the metrics. It uses "csi_plugin"
|
||||
// as subsystem.
|
||||
//
|
||||
// driverName - Name of the CSI driver against which this operation was executed.
|
||||
// If unknown, leave empty, and use SetDriverName method to update later.
|
||||
func NewCSIMetricsManagerForPlugin(driverName string) CSIMetricsManager {
|
||||
return NewCSIMetricsManagerWithOptions(driverName,
|
||||
WithSubsystem(SubsystemPlugin),
|
||||
)
|
||||
}
|
||||
|
||||
// NewCSIMetricsManagerWithOptions is a customizable constructor, to be used only
|
||||
// if there are special needs like changing the default subsystems.
|
||||
//
|
||||
// driverName - Name of the CSI driver against which this operation was executed.
|
||||
// If unknown, leave empty, and use SetDriverName method to update later.
|
||||
func NewCSIMetricsManagerWithOptions(driverName string, options ...MetricsManagerOption) CSIMetricsManager {
|
||||
cmm := csiMetricsManager{
|
||||
registry: metrics.NewKubeRegistry(),
|
||||
csiOperationsLatencyMetric: metrics.NewHistogramVec(
|
||||
subsystem: SubsystemSidecar,
|
||||
stabilityLevel: metrics.ALPHA,
|
||||
registerProcessStartTime: true,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(&cmm)
|
||||
}
|
||||
|
||||
if cmm.registerProcessStartTime {
|
||||
// https://github.com/open-telemetry/opentelemetry-collector/issues/969
|
||||
// Add process_start_time_seconds into the metric to let the start time be parsed correctly
|
||||
metrics.RegisterProcessStartTime(cmm.registry.Register)
|
||||
// TODO: This is a bug in component-base library. We need to remove this after upgrade component-base dependency
|
||||
// BugFix: https://github.com/kubernetes/kubernetes/pull/96435
|
||||
// The first call to RegisterProcessStartTime can only create the metric, so we need a second call to actually
|
||||
// register the metric.
|
||||
metrics.RegisterProcessStartTime(cmm.registry.Register)
|
||||
}
|
||||
|
||||
labels := []string{labelCSIDriverName, labelCSIOperationName, labelGrpcStatusCode}
|
||||
labels = append(labels, cmm.additionalLabelNames...)
|
||||
for _, label := range cmm.additionalLabels {
|
||||
labels = append(labels, label.name)
|
||||
}
|
||||
cmm.csiOperationsLatencyMetric = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: subsystem,
|
||||
Subsystem: cmm.subsystem,
|
||||
Name: operationsLatencyMetricName,
|
||||
Help: operationsLatencyHelp,
|
||||
Buckets: operationsLatencyBuckets,
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
StabilityLevel: cmm.stabilityLevel,
|
||||
},
|
||||
[]string{labelCSIDriverName, labelCSIOperationName, labelGrpcStatusCode},
|
||||
),
|
||||
}
|
||||
|
||||
labels,
|
||||
)
|
||||
cmm.SetDriverName(driverName)
|
||||
cmm.registerMetrics()
|
||||
return &cmm
|
||||
@ -101,26 +256,106 @@ var _ CSIMetricsManager = &csiMetricsManager{}
|
||||
|
||||
type csiMetricsManager struct {
|
||||
registry metrics.KubeRegistry
|
||||
subsystem string
|
||||
stabilityLevel metrics.StabilityLevel
|
||||
driverName string
|
||||
csiOperationsMetric *metrics.CounterVec
|
||||
additionalLabelNames []string
|
||||
additionalLabels []label
|
||||
csiOperationsLatencyMetric *metrics.HistogramVec
|
||||
registerProcessStartTime bool
|
||||
}
|
||||
|
||||
type label struct {
|
||||
name, value string
|
||||
}
|
||||
|
||||
func (cmm *csiMetricsManager) GetRegistry() metrics.KubeRegistry {
|
||||
return cmm.registry
|
||||
}
|
||||
|
||||
// RecordMetrics must be called upon CSI Operation completion to record
|
||||
// the operation's metric.
|
||||
// operationName - Name of the CSI operation.
|
||||
// operationErr - Error, if any, that resulted from execution of operation.
|
||||
// operationDuration - time it took for the operation to complete
|
||||
// RecordMetrics implements CSIMetricsManager.RecordMetrics.
|
||||
func (cmm *csiMetricsManager) RecordMetrics(
|
||||
operationName string,
|
||||
operationErr error,
|
||||
operationDuration time.Duration) {
|
||||
cmm.csiOperationsLatencyMetric.WithLabelValues(
|
||||
cmm.driverName, operationName, getErrorCode(operationErr)).Observe(operationDuration.Seconds())
|
||||
cmm.recordMetricsWithLabels(operationName, operationErr, operationDuration, nil)
|
||||
}
|
||||
|
||||
// recordMetricsWithLabels is the internal implementation of RecordMetrics.
|
||||
func (cmm *csiMetricsManager) recordMetricsWithLabels(
|
||||
operationName string,
|
||||
operationErr error,
|
||||
operationDuration time.Duration,
|
||||
labelValues map[string]string) {
|
||||
values := []string{cmm.driverName, operationName, getErrorCode(operationErr)}
|
||||
for _, name := range cmm.additionalLabelNames {
|
||||
values = append(values, labelValues[name])
|
||||
}
|
||||
for _, label := range cmm.additionalLabels {
|
||||
values = append(values, label.value)
|
||||
}
|
||||
cmm.csiOperationsLatencyMetric.WithLabelValues(values...).Observe(operationDuration.Seconds())
|
||||
}
|
||||
|
||||
type csiMetricsManagerWithValues struct {
|
||||
*csiMetricsManager
|
||||
|
||||
// additionalValues holds the values passed via WithLabelValues.
|
||||
additionalValues map[string]string
|
||||
}
|
||||
|
||||
// WithLabelValues in the base metrics manager creates a fresh wrapper with no labels and let's
|
||||
// that deal with adding the label values.
|
||||
func (cmm *csiMetricsManager) WithLabelValues(labels map[string]string) (CSIMetricsManager, error) {
|
||||
cmmv := &csiMetricsManagerWithValues{
|
||||
csiMetricsManager: cmm,
|
||||
additionalValues: map[string]string{},
|
||||
}
|
||||
return cmmv.WithLabelValues(labels)
|
||||
}
|
||||
|
||||
// WithLabelValues in the wrapper creates a wrapper which has all existing labels and
|
||||
// adds the new ones, with error checking. Can be called multiple times. Each call then
|
||||
// can add some new value(s). It is an error to overwrite an already set value.
|
||||
// If RecordMetrics is called before setting all additional values, the missing ones will
|
||||
// be empty.
|
||||
func (cmmv *csiMetricsManagerWithValues) WithLabelValues(labels map[string]string) (CSIMetricsManager, error) {
|
||||
extended := &csiMetricsManagerWithValues{
|
||||
csiMetricsManager: cmmv.csiMetricsManager,
|
||||
additionalValues: map[string]string{},
|
||||
}
|
||||
// We need to copy the old values to avoid modifying the map in cmmv.
|
||||
for name, value := range cmmv.additionalValues {
|
||||
extended.additionalValues[name] = value
|
||||
}
|
||||
// Now add all new values.
|
||||
for name, value := range labels {
|
||||
if !extended.HaveAdditionalLabel(name) {
|
||||
return nil, fmt.Errorf("label %q was not defined via WithLabelNames", name)
|
||||
}
|
||||
if v, ok := extended.additionalValues[name]; ok {
|
||||
return nil, fmt.Errorf("label %q already has value %q", name, v)
|
||||
}
|
||||
extended.additionalValues[name] = value
|
||||
}
|
||||
return extended, nil
|
||||
}
|
||||
|
||||
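A hedged sketch of the wrapper flow described above: the additional label has to be declared when the manager is constructed (here via WithMigration, which registers the "migrated" label name), then bound with WithLabelValues before recording. Only the API shown in this diff is used; the driver and operation names are placeholders.

```go
package main

import (
	"time"

	"github.com/kubernetes-csi/csi-lib-utils/metrics"
)

func main() {
	cmm := metrics.NewCSIMetricsManagerWithOptions("rbd.csi.ceph.com", metrics.WithMigration())

	// Bind a value for the additional label; an error is returned for labels
	// that were never declared or that already have a value.
	wrapped, err := cmm.WithLabelValues(map[string]string{metrics.LabelMigrated: "true"})
	if err != nil {
		panic(err)
	}

	// RecordMetrics on the wrapper records the bound label values as well.
	wrapped.RecordMetrics("/csi.v1.Controller/CreateVolume", nil, 15*time.Millisecond)
}
```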
func (cmm *csiMetricsManager) HaveAdditionalLabel(name string) bool {
|
||||
for _, n := range cmm.additionalLabelNames {
|
||||
if n == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RecordMetrics passes the stored values as to the implementation.
|
||||
func (cmmv *csiMetricsManagerWithValues) RecordMetrics(
|
||||
operationName string,
|
||||
operationErr error,
|
||||
operationDuration time.Duration) {
|
||||
cmmv.recordMetricsWithLabels(operationName, operationErr, operationDuration, cmmv.additionalValues)
|
||||
}
|
||||
|
||||
// SetDriverName is called to update the CSI driver name. This should be done
|
||||
@ -134,27 +369,13 @@ func (cmm *csiMetricsManager) SetDriverName(driverName string) {
|
||||
}
|
||||
}
|
||||
|
||||
// StartMetricsEndpoint starts the metrics endpoint at the specified address/path
|
||||
// for this metrics manager on a new go routine.
|
||||
// If the metricsAddress is an empty string, this will be a no op.
|
||||
func (cmm *csiMetricsManager) StartMetricsEndpoint(metricsAddress, metricsPath string) {
|
||||
if metricsAddress == "" {
|
||||
klog.Warningf("metrics endpoint will not be started because `metrics-address` was not specified.")
|
||||
return
|
||||
}
|
||||
|
||||
http.Handle(metricsPath, metrics.HandlerFor(
|
||||
// RegisterToServer registers an HTTP handler for this metrics manager to the
|
||||
// given server at the specified address/path.
|
||||
func (cmm *csiMetricsManager) RegisterToServer(s Server, metricsPath string) {
|
||||
s.Handle(metricsPath, metrics.HandlerFor(
|
||||
cmm.GetRegistry(),
|
||||
metrics.HandlerOpts{
|
||||
ErrorHandling: metrics.ContinueOnError}))
|
||||
|
||||
// Spawn a new go routine to listen on specified endpoint
|
||||
go func() {
|
||||
err := http.ListenAndServe(metricsAddress, nil)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to start prometheus metrics endpoint on specified address (%q) and path (%q): %s", metricsAddress, metricsPath, err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
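Since this change drops StartMetricsEndpoint in favour of RegisterToServer, a hedged sketch of the replacement wiring: any value with a Handle(pattern, handler) method satisfies the Server interface, so *http.ServeMux works, and the caller now owns the listener. Address, path and driver name are placeholders.

```go
package main

import (
	"net/http"

	"github.com/kubernetes-csi/csi-lib-utils/metrics"
	"k8s.io/klog/v2"
)

func main() {
	cmm := metrics.NewCSIMetricsManagerForSidecar("rbd.csi.ceph.com") // illustrative driver name

	// *http.ServeMux has Handle(pattern, handler) and therefore satisfies Server.
	mux := http.NewServeMux()
	cmm.RegisterToServer(mux, "/metrics")

	// The removed StartMetricsEndpoint used to start this listener internally.
	if err := http.ListenAndServe(":8080", mux); err != nil {
		klog.Fatalf("failed to serve metrics endpoint: %v", err)
	}
}
```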
// VerifyMetricsMatch is a helper function that verifies that the expected and
|
||||
@ -171,7 +392,11 @@ func VerifyMetricsMatch(expectedMetrics, actualMetrics string, metricToIgnore st
|
||||
wantScanner.Scan()
|
||||
wantLine := strings.TrimSpace(wantScanner.Text())
|
||||
gotLine := strings.TrimSpace(gotScanner.Text())
|
||||
if wantLine != gotLine && (metricToIgnore == "" || !strings.HasPrefix(gotLine, metricToIgnore)) {
|
||||
if wantLine != gotLine &&
|
||||
(metricToIgnore == "" || !strings.HasPrefix(gotLine, metricToIgnore)) &&
|
||||
// We should ignore the comments from metricToIgnore, otherwise the verification will
|
||||
// fail because of the comments.
|
||||
!strings.HasPrefix(gotLine, "#") {
|
||||
return fmt.Errorf("\r\nMetric Want: %q\r\nMetric Got: %q\r\n", wantLine, gotLine)
|
||||
}
|
||||
}
|
||||
|
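The VerifyMetricsMatch helper in the hunk above is mainly intended for tests. A rough sketch of how it might be called, assuming a hypothetical scrapeMetrics helper and an abbreviated, illustrative expected line (comment lines and the metricToIgnore prefix are skipped by the comparison, per the change above):

```go
package example

import (
	"testing"

	"github.com/kubernetes-csi/csi-lib-utils/metrics"
)

// scrapeMetrics is a hypothetical helper that would return the text
// exposition scraped from the manager's /metrics endpoint.
func scrapeMetrics() string { return "" }

func TestOperationMetric(t *testing.T) {
	// Illustrative expected sample; a real test would use the full exposition.
	expected := `csi_sidecar_operations_seconds_bucket{driver_name="rbd.csi.ceph.com",grpc_status_code="OK",le="0.1",method_name="/csi.v1.Controller/CreateVolume"} 1`
	actual := scrapeMetrics()

	if err := metrics.VerifyMetricsMatch(expected, actual, "process_start_time_seconds"); err != nil {
		t.Fatal(err)
	}
}
```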
2
vendor/github.com/kubernetes-csi/csi-lib-utils/rpc/common.go
generated
vendored
2
vendor/github.com/kubernetes-csi/csi-lib-utils/rpc/common.go
generated
vendored
@ -27,7 +27,7 @@ import (
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
|
||||
"k8s.io/klog"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
|
3
vendor/github.com/nxadm/tail/.gitignore
generated
vendored
Normal file
3
vendor/github.com/nxadm/tail/.gitignore
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
.idea/
|
||||
.test/
|
||||
examples/_*
|
56
vendor/github.com/nxadm/tail/CHANGES.md
generated
vendored
Normal file
56
vendor/github.com/nxadm/tail/CHANGES.md
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
# Version v1.4.7-v1.4.8
|
||||
* Documentation updates.
|
||||
* Small linter cleanups.
|
||||
* Added example in test.
|
||||
|
||||
# Version v1.4.6
|
||||
|
||||
* Document the usage of Cleanup when re-reading a file (thanks to @lesovsky) for issue #18.
|
||||
* Add example directories with example and tests for issues.
|
||||
|
||||
# Version v1.4.4-v1.4.5
|
||||
|
||||
* Fix of checksum problem because of forced tag. No changes to the code.
|
||||
|
||||
# Version v1.4.1
|
||||
|
||||
* Incorporated PR 162 by Mohammed902: "Simplify non-Windows build tag".
|
||||
|
||||
# Version v1.4.0
|
||||
|
||||
* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail".
|
||||
|
||||
# Version v1.3.1
|
||||
|
||||
* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer",
|
||||
fixes upstream issue 93.
|
||||
|
||||
|
||||
# Version v1.3.0
|
||||
|
||||
* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num
|
||||
to Line struct".
|
||||
|
||||
# Version v1.2.1
|
||||
|
||||
* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able
|
||||
code in readme".
|
||||
* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change
|
||||
to comment wording".
|
||||
* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed
|
||||
spurious newlines from log messages".
|
||||
|
||||
# Version v1.2.0
|
||||
|
||||
* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the
|
||||
problem for never return the last line if it's not followed by a newline".
|
||||
* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove
|
||||
deprecated os.SEEK consts". The changes bumped the minimal supported Go
|
||||
release to 1.9.
|
||||
|
||||
# Version v1.1.0
|
||||
|
||||
* migration to go modules.
|
||||
* release of master branch of the dormant upstream, because it contains
|
||||
fixes and improvements not present in the tagged release.
|
||||
|
19
vendor/github.com/nxadm/tail/Dockerfile
generated
vendored
Normal file
19
vendor/github.com/nxadm/tail/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
FROM golang
|
||||
|
||||
RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/
|
||||
ADD . $GOPATH/src/github.com/nxadm/tail/
|
||||
|
||||
# expecting to fetch dependencies successfully.
|
||||
RUN go get -v github.com/nxadm/tail
|
||||
|
||||
# expecting to run the test successfully.
|
||||
RUN go test -v github.com/nxadm/tail
|
||||
|
||||
# expecting to install successfully
|
||||
RUN go install -v github.com/nxadm/tail
|
||||
RUN go install -v github.com/nxadm/tail/cmd/gotail
|
||||
|
||||
RUN $GOPATH/bin/gotail -h || true
|
||||
|
||||
ENV PATH $GOPATH/bin:$PATH
|
||||
CMD ["gotail"]
|
0
vendor/github.com/hpcloud/tail/LICENSE.txt → vendor/github.com/nxadm/tail/LICENSE
generated
vendored
0
vendor/github.com/hpcloud/tail/LICENSE.txt → vendor/github.com/nxadm/tail/LICENSE
generated
vendored
44
vendor/github.com/nxadm/tail/README.md
generated
vendored
Normal file
44
vendor/github.com/nxadm/tail/README.md
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
![ci](https://github.com/nxadm/tail/workflows/ci/badge.svg)[![Go Reference](https://pkg.go.dev/badge/github.com/nxadm/tail.svg)](https://pkg.go.dev/github.com/nxadm/tail)
|
||||
|
||||
# tail functionality in Go
|
||||
|
||||
nxadm/tail provides a Go library that emulates the features of the BSD `tail`
|
||||
program. The library comes with full support for truncation/move detection as
|
||||
it is designed to work with log rotation tools. The library works on all
|
||||
operating systems supported by Go, including POSIX systems like Linux and
|
||||
*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
|
||||
|
||||
A simple example:
|
||||
|
||||
```Go
|
||||
// Create a tail
|
||||
t, err := tail.TailFile(
|
||||
"/var/log/nginx.log", tail.Config{Follow: true, ReOpen: true})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Print the text of each received line
|
||||
for line := range t.Lines {
|
||||
fmt.Println(line.Text)
|
||||
}
|
||||
```
|
||||
|
||||
See [API documentation](https://pkg.go.dev/github.com/nxadm/tail).
|
||||
|
||||
## Installing
|
||||
|
||||
go get github.com/nxadm/tail/...
|
||||
|
||||
## History
|
||||
|
||||
This project is an active, drop-in replacement for the
|
||||
[abandoned](https://en.wikipedia.org/wiki/HPE_Helion) Go tail library at
|
||||
[hpcloud](https://github.com/hpcloud/tail). Next to
|
||||
[addressing open issues/PRs of the original project](https://github.com/nxadm/tail/issues/6),
|
||||
nxadm/tail continues the development by keeping up to date with the Go toolchain
|
||||
(e.g. go modules) and dependencies, completing the documentation, adding features
|
||||
and fixing bugs.
|
||||
|
||||
## Examples
|
||||
Examples, e.g. used to debug an issue, are kept in the [examples directory](/examples).
|
8
vendor/github.com/nxadm/tail/go.mod
generated
vendored
Normal file
8
vendor/github.com/nxadm/tail/go.mod
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
module github.com/nxadm/tail
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/fsnotify/fsnotify v1.4.9
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
|
||||
)
|
6
vendor/github.com/nxadm/tail/go.sum
generated
vendored
Normal file
6
vendor/github.com/nxadm/tail/go.sum
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
@ -5,7 +5,10 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const GC_SIZE int = 100
|
||||
const (
|
||||
GC_SIZE int = 100
|
||||
GC_PERIOD time.Duration = 60 * time.Second
|
||||
)
|
||||
|
||||
type Memory struct {
|
||||
store map[string]LeakyBucket
|
||||
@ -44,11 +47,10 @@ func (m *Memory) GarbageCollect() {
|
||||
now := time.Now()
|
||||
|
||||
// rate limit GC to once per minute
|
||||
if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() {
|
||||
|
||||
if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
|
||||
for key, bucket := range m.store {
|
||||
// if the bucket is drained, then GC
|
||||
if bucket.DrainedAt().Unix() > now.Unix() {
|
||||
if bucket.DrainedAt().Unix() < now.Unix() {
|
||||
delete(m.store, key)
|
||||
}
|
||||
}
|
123
vendor/github.com/hpcloud/tail/tail.go → vendor/github.com/nxadm/tail/tail.go
generated
vendored
123
vendor/github.com/hpcloud/tail/tail.go → vendor/github.com/nxadm/tail/tail.go
generated
vendored
@ -1,6 +1,12 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
//nxadm/tail provides a Go library that emulates the features of the BSD `tail`
|
||||
//program. The library comes with full support for truncation/move detection as
|
||||
//it is designed to work with log rotation tools. The library works on all
|
||||
//operating systems supported by Go, including POSIX systems like Linux and
|
||||
//*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
|
||||
package tail
|
||||
|
||||
import (
|
||||
@ -15,31 +21,38 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hpcloud/tail/ratelimiter"
|
||||
"github.com/hpcloud/tail/util"
|
||||
"github.com/hpcloud/tail/watch"
|
||||
"github.com/nxadm/tail/ratelimiter"
|
||||
"github.com/nxadm/tail/util"
|
||||
"github.com/nxadm/tail/watch"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrStop = fmt.Errorf("tail should now stop")
|
||||
// ErrStop is returned when the tail of a file has been marked to be stopped.
|
||||
ErrStop = errors.New("tail should now stop")
|
||||
)
|
||||
|
||||
type Line struct {
|
||||
Text string
|
||||
Time time.Time
|
||||
Text string // The contents of the file
|
||||
Num int // The line number
|
||||
SeekInfo SeekInfo // SeekInfo
|
||||
Time time.Time // Present time
|
||||
Err error // Error from tail
|
||||
}
|
||||
|
||||
// NewLine returns a Line with present time.
|
||||
func NewLine(text string) *Line {
|
||||
return &Line{text, time.Now(), nil}
|
||||
// Deprecated: this function is no longer used internally and it has little or no
|
||||
// use in the API. As such, it will be removed from the API in a future major
|
||||
// release.
|
||||
//
|
||||
// NewLine returns a * pointer to a Line struct.
|
||||
func NewLine(text string, lineNum int) *Line {
|
||||
return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
|
||||
}
|
||||
|
||||
// SeekInfo represents arguments to `os.Seek`
|
||||
// SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek
|
||||
type SeekInfo struct {
|
||||
Offset int64
|
||||
Whence int // os.SEEK_*
|
||||
Whence int
|
||||
}
|
||||
|
||||
type logger interface {
|
||||
@ -57,29 +70,32 @@ type logger interface {
|
||||
// Config is used to specify how a file must be tailed.
|
||||
type Config struct {
|
||||
// File-specific
|
||||
Location *SeekInfo // Seek to this location before tailing
|
||||
Location *SeekInfo // Tail from this location. If nil, start at the beginning of the file
|
||||
ReOpen bool // Reopen recreated files (tail -F)
|
||||
MustExist bool // Fail early if the file does not exist
|
||||
Poll bool // Poll for file changes instead of using inotify
|
||||
Pipe bool // Is a named pipe (mkfifo)
|
||||
RateLimiter *ratelimiter.LeakyBucket
|
||||
Poll bool // Poll for file changes instead of using the default inotify
|
||||
Pipe bool // The file is a named pipe (mkfifo)
|
||||
|
||||
// Generic IO
|
||||
Follow bool // Continue looking for new lines (tail -f)
|
||||
MaxLineSize int // If non-zero, split longer lines into multiple lines
|
||||
|
||||
// Logger, when nil, is set to tail.DefaultLogger
|
||||
// To disable logging: set field to tail.DiscardingLogger
|
||||
// Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function)
|
||||
RateLimiter *ratelimiter.LeakyBucket
|
||||
|
||||
// Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger.
|
||||
// To disable logging, set it to tail.DiscardingLogger
|
||||
Logger logger
|
||||
}
|
||||
|
||||
type Tail struct {
|
||||
Filename string
|
||||
Lines chan *Line
|
||||
Config
|
||||
Filename string // The filename
|
||||
Lines chan *Line // A consumable channel of *Line
|
||||
Config // Tail.Configuration
|
||||
|
||||
file *os.File
|
||||
reader *bufio.Reader
|
||||
lineNum int
|
||||
|
||||
watcher watch.FileWatcher
|
||||
changes *watch.FileChanges
|
||||
@ -90,16 +106,17 @@ type Tail struct {
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultLogger is used when Config.Logger == nil
|
||||
// DefaultLogger logs to os.Stderr and it is used when Config.Logger == nil
|
||||
DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
// DiscardingLogger can be used to disable logging output
|
||||
DiscardingLogger = log.New(ioutil.Discard, "", 0)
|
||||
)
|
||||
|
||||
// TailFile begins tailing the file. Output stream is made available
|
||||
// via the `Tail.Lines` channel. To handle errors during tailing,
|
||||
// invoke the `Wait` or `Err` method after finishing reading from the
|
||||
// `Lines` channel.
|
||||
// TailFile begins tailing the file. And returns a pointer to a Tail struct
|
||||
// and an error. An output stream is made available via the Tail.Lines
|
||||
// channel (e.g. to be looped and printed). To handle errors during tailing,
|
||||
// after finishing reading from the Lines channel, invoke the `Wait` or `Err`
|
||||
// method on the returned *Tail.
|
||||
func TailFile(filename string, config Config) (*Tail, error) {
|
||||
if config.ReOpen && !config.Follow {
|
||||
util.Fatal("cannot set ReOpen without Follow.")
|
||||
@ -113,7 +130,7 @@ func TailFile(filename string, config Config) (*Tail, error) {
|
||||
|
||||
// when Logger was not specified in config, use default logger
|
||||
if t.Logger == nil {
|
||||
t.Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
t.Logger = DefaultLogger
|
||||
}
|
||||
|
||||
if t.Poll {
|
||||
@ -135,15 +152,14 @@ func TailFile(filename string, config Config) (*Tail, error) {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Return the file's current position, like stdio's ftell().
|
||||
// But this value is not very accurate.
|
||||
// it may readed one line in the chan(tail.Lines),
|
||||
// so it may lost one line.
|
||||
// Tell returns the file's current position, like stdio's ftell() and an error.
|
||||
// Beware that this value may not be completely accurate because one line from
|
||||
// the chan(tail.Lines) may have been read already.
|
||||
func (tail *Tail) Tell() (offset int64, err error) {
|
||||
if tail.file == nil {
|
||||
return
|
||||
}
|
||||
offset, err = tail.file.Seek(0, os.SEEK_CUR)
|
||||
offset, err = tail.file.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -164,7 +180,8 @@ func (tail *Tail) Stop() error {
|
||||
return tail.Wait()
|
||||
}
|
||||
|
||||
// StopAtEOF stops tailing as soon as the end of the file is reached.
|
||||
// StopAtEOF stops tailing as soon as the end of the file is reached. The function
|
||||
// returns an error.
|
||||
func (tail *Tail) StopAtEOF() error {
|
||||
tail.Kill(errStopAtEOF)
|
||||
return tail.Wait()
|
||||
@ -186,6 +203,7 @@ func (tail *Tail) closeFile() {
|
||||
|
||||
func (tail *Tail) reopen() error {
|
||||
tail.closeFile()
|
||||
tail.lineNum = 0
|
||||
for {
|
||||
var err error
|
||||
tail.file, err = OpenFile(tail.Filename)
|
||||
@ -241,7 +259,6 @@ func (tail *Tail) tailFileSync() {
|
||||
// Seek to requested location on first open of the file.
|
||||
if tail.Location != nil {
|
||||
_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
|
||||
tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
|
||||
if err != nil {
|
||||
tail.Killf("Seek error on %s: %s", tail.Filename, err)
|
||||
return
|
||||
@ -250,16 +267,12 @@ func (tail *Tail) tailFileSync() {
|
||||
|
||||
tail.openReader()
|
||||
|
||||
var offset int64 = 0
|
||||
var err error
|
||||
|
||||
// Read line by line.
|
||||
for {
|
||||
// do not seek in named pipes
|
||||
if !tail.Pipe {
|
||||
// grab the position in case we need to back up in the event of a half-line
|
||||
offset, err = tail.Tell()
|
||||
if err != nil {
|
||||
if _, err := tail.Tell(); err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
@ -273,10 +286,9 @@ func (tail *Tail) tailFileSync() {
|
||||
if cooloff {
|
||||
// Wait a second before seeking till the end of
|
||||
// file when rate limit is reached.
|
||||
msg := fmt.Sprintf(
|
||||
"Too much log activity; waiting a second " +
|
||||
"before resuming tailing")
|
||||
tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)}
|
||||
msg := ("Too much log activity; waiting a second before resuming tailing")
|
||||
offset, _ := tail.Tell()
|
||||
tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)}
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
case <-tail.Dying():
|
||||
@ -296,10 +308,8 @@ func (tail *Tail) tailFileSync() {
|
||||
}
|
||||
|
||||
if tail.Follow && line != "" {
|
||||
// this has the potential to never return the last line if
|
||||
// it's not followed by a newline; seems a fair trade here
|
||||
err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})
|
||||
if err != nil {
|
||||
tail.sendLine(line)
|
||||
if err := tail.seekEnd(); err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
@ -337,7 +347,7 @@ func (tail *Tail) tailFileSync() {
|
||||
// reopened if ReOpen is true. Truncated files are always reopened.
|
||||
func (tail *Tail) waitForChanges() error {
|
||||
if tail.changes == nil {
|
||||
pos, err := tail.file.Seek(0, os.SEEK_CUR)
|
||||
pos, err := tail.file.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -361,10 +371,9 @@ func (tail *Tail) waitForChanges() error {
|
||||
tail.Logger.Printf("Successfully reopened %s", tail.Filename)
|
||||
tail.openReader()
|
||||
return nil
|
||||
} else {
|
||||
}
|
||||
tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
|
||||
return ErrStop
|
||||
}
|
||||
case <-tail.changes.Truncated:
|
||||
// Always reopen truncated files (Follow is true)
|
||||
tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
|
||||
@ -377,20 +386,21 @@ func (tail *Tail) waitForChanges() error {
|
||||
case <-tail.Dying():
|
||||
return ErrStop
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (tail *Tail) openReader() {
|
||||
tail.lk.Lock()
|
||||
if tail.MaxLineSize > 0 {
|
||||
// add 2 to account for newline characters
|
||||
tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
|
||||
} else {
|
||||
tail.reader = bufio.NewReader(tail.file)
|
||||
}
|
||||
tail.lk.Unlock()
|
||||
}
|
||||
|
||||
func (tail *Tail) seekEnd() error {
|
||||
return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END})
|
||||
return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd})
|
||||
}
|
||||
|
||||
func (tail *Tail) seekTo(pos SeekInfo) error {
|
||||
@ -415,13 +425,19 @@ func (tail *Tail) sendLine(line string) bool {
|
||||
}
|
||||
|
||||
for _, line := range lines {
|
||||
tail.Lines <- &Line{line, now, nil}
|
||||
tail.lineNum++
|
||||
offset, _ := tail.Tell()
|
||||
select {
|
||||
case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}:
|
||||
case <-tail.Dying():
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if tail.Config.RateLimiter != nil {
|
||||
ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
|
||||
if !ok {
|
||||
tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
|
||||
tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.",
|
||||
tail.Filename)
|
||||
return false
|
||||
}
|
||||
@ -433,6 +449,7 @@ func (tail *Tail) sendLine(line string) bool {
|
||||
// Cleanup removes inotify watches added by the tail package. This function is
|
||||
// meant to be invoked from a process's exit handler. Linux kernel may not
|
||||
// automatically remove inotify watches after the process exits.
|
||||
// If you plan to re-read a file, don't call Cleanup in between.
|
||||
func (tail *Tail) Cleanup() {
|
||||
watch.Cleanup(tail.Filename)
|
||||
}
|
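To tie the tail.go changes above together (io.Seek* constants replacing os.SEEK_*, plus the new Num, SeekInfo and Err fields on Line), a hedged usage sketch against github.com/nxadm/tail; the log path is a placeholder.

```go
package main

import (
	"fmt"
	"io"

	"github.com/nxadm/tail"
)

func main() {
	// Start at the end of the file and keep following across rotations, like `tail -F`.
	t, err := tail.TailFile("/var/log/ceph/ceph.log", tail.Config{
		Follow:   true,
		ReOpen:   true,
		Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd},
	})
	if err != nil {
		panic(err)
	}

	for line := range t.Lines {
		if line.Err != nil { // per-line errors are carried on Line.Err
			fmt.Println("tail error:", line.Err)
			continue
		}
		fmt.Printf("%d: %s\n", line.Num, line.Text) // Num is the new line-number field
	}
}
```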
17
vendor/github.com/nxadm/tail/tail_posix.go
generated
vendored
Normal file
17
vendor/github.com/nxadm/tail/tail_posix.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// +build !windows
|
||||
|
||||
package tail
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Deprecated: this function is only useful internally and, as such,
|
||||
// it will be removed from the API in a future major release.
|
||||
//
|
||||
// OpenFile proxies a os.Open call for a file so it can be correctly tailed
|
||||
// on POSIX and non-POSIX OSes like MS Windows.
|
||||
func OpenFile(name string) (file *os.File, err error) {
|
||||
return os.Open(name)
|
||||
}
|
19
vendor/github.com/nxadm/tail/tail_windows.go
generated
vendored
Normal file
19
vendor/github.com/nxadm/tail/tail_windows.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// +build windows
|
||||
|
||||
package tail
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/nxadm/tail/winfile"
|
||||
)
|
||||
|
||||
// Deprecated: this function is only useful internally and, as such,
|
||||
// it will be removed from the API in a future major release.
|
||||
//
|
||||
// OpenFile proxies a os.Open call for a file so it can be correctly tailed
|
||||
// on POSIX and non-POSIX OSes like MS Windows.
|
||||
func OpenFile(name string) (file *os.File, err error) {
|
||||
return winfile.OpenFile(name, os.O_RDONLY, 0)
|
||||
}
|
@ -1,3 +1,4 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
@ -18,7 +19,7 @@ var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
|
||||
|
||||
// fatal is like panic except it displays only the current goroutine's stack.
|
||||
func Fatal(format string, v ...interface{}) {
|
||||
// https://github.com/hpcloud/log/blob/master/log.go#L45
|
||||
// https://github.com/nxadm/log/blob/master/log.go#L45
|
||||
LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
|
||||
os.Exit(1)
|
||||
}
|
@ -1,3 +1,4 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
package watch
|
||||
|
||||
type FileChanges struct {
|
||||
@ -8,7 +9,7 @@ type FileChanges struct {
|
||||
|
||||
func NewFileChanges() *FileChanges {
|
||||
return &FileChanges{
|
||||
make(chan bool), make(chan bool), make(chan bool)}
|
||||
make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)}
|
||||
}
|
||||
|
||||
func (fc *FileChanges) NotifyModified() {
|
@ -1,3 +1,4 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
@ -8,9 +9,9 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
"github.com/nxadm/tail/util"
|
||||
|
||||
"gopkg.in/fsnotify.v1"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
@ -75,7 +76,6 @@ func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChange
|
||||
fw.Size = pos
|
||||
|
||||
go func() {
|
||||
defer RemoveWatch(fw.Filename)
|
||||
|
||||
events := Events(fw.Filename)
|
||||
|
||||
@ -88,9 +88,11 @@ func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChange
|
||||
select {
|
||||
case evt, ok = <-events:
|
||||
if !ok {
|
||||
RemoveWatch(fw.Filename)
|
||||
return
|
||||
}
|
||||
case <-t.Dying():
|
||||
RemoveWatch(fw.Filename)
|
||||
return
|
||||
}
|
||||
|
||||
@ -99,13 +101,19 @@ func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChange
|
||||
fallthrough
|
||||
|
||||
case evt.Op&fsnotify.Rename == fsnotify.Rename:
|
||||
RemoveWatch(fw.Filename)
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
|
||||
//With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod)
|
||||
case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
|
||||
fallthrough
|
||||
|
||||
case evt.Op&fsnotify.Write == fsnotify.Write:
|
||||
fi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
RemoveWatch(fw.Filename)
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
@ -1,3 +1,4 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
@ -10,9 +11,9 @@ import (
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
"github.com/nxadm/tail/util"
|
||||
|
||||
"gopkg.in/fsnotify.v1"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
type InotifyTracker struct {
|
||||
@ -83,21 +84,21 @@ func watch(winfo *watchInfo) error {
|
||||
}
|
||||
|
||||
// RemoveWatch signals the run goroutine to remove the watch for the input filename
|
||||
func RemoveWatch(fname string) {
|
||||
remove(&watchInfo{
|
||||
func RemoveWatch(fname string) error {
|
||||
return remove(&watchInfo{
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
// RemoveWatch create signals the run goroutine to remove the watch for the input filename
|
||||
func RemoveWatchCreate(fname string) {
|
||||
remove(&watchInfo{
|
||||
func RemoveWatchCreate(fname string) error {
|
||||
return remove(&watchInfo{
|
||||
op: fsnotify.Create,
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
func remove(winfo *watchInfo) {
|
||||
func remove(winfo *watchInfo) error {
|
||||
// start running the shared InotifyTracker if not already running
|
||||
once.Do(goRun)
|
||||
|
||||
@ -108,6 +109,67 @@ func remove(winfo *watchInfo) {
|
||||
delete(shared.done, winfo.fname)
|
||||
close(done)
|
||||
}
|
||||
shared.mux.Unlock()
|
||||
|
||||
shared.remove <- winfo
|
||||
return <-shared.error
|
||||
}
|
||||
|
||||
// Events returns a channel to which FileEvents corresponding to the input filename
|
||||
// will be sent. This channel will be closed when removeWatch is called on this
|
||||
// filename.
|
||||
func Events(fname string) <-chan fsnotify.Event {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
return shared.chans[fname]
|
||||
}
|
||||
|
||||
// Cleanup removes the watch for the input filename if necessary.
|
||||
func Cleanup(fname string) error {
|
||||
return RemoveWatch(fname)
|
||||
}
|
||||
|
||||
// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating
|
||||
// a new Watcher if the previous Watcher was closed.
|
||||
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
if shared.chans[winfo.fname] == nil {
|
||||
shared.chans[winfo.fname] = make(chan fsnotify.Event)
|
||||
}
|
||||
if shared.done[winfo.fname] == nil {
|
||||
shared.done[winfo.fname] = make(chan bool)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
|
||||
var err error
|
||||
// already in inotify watch
|
||||
if shared.watchNums[fname] == 0 {
|
||||
err = shared.watcher.Add(fname)
|
||||
}
|
||||
if err == nil {
|
||||
shared.watchNums[fname]++
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the
|
||||
// corresponding events channel.
|
||||
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error {
|
||||
shared.mux.Lock()
|
||||
|
||||
ch := shared.chans[winfo.fname]
|
||||
if ch != nil {
|
||||
delete(shared.chans, winfo.fname)
|
||||
close(ch)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
@ -121,91 +183,18 @@ func remove(winfo *watchInfo) {
|
||||
}
|
||||
shared.mux.Unlock()
|
||||
|
||||
var err error
|
||||
// If we were the last ones to watch this file, unsubscribe from inotify.
|
||||
// This needs to happen after releasing the lock because fsnotify waits
|
||||
// synchronously for the kernel to acknowledge the removal of the watch
|
||||
// for this file, which causes us to deadlock if we still held the lock.
|
||||
if watchNum == 0 {
|
||||
shared.watcher.Remove(fname)
|
||||
}
|
||||
shared.remove <- winfo
|
||||
err = shared.watcher.Remove(fname)
|
||||
}
|
||||
|
||||
// Events returns a channel to which FileEvents corresponding to the input filename
|
||||
// will be sent. This channel will be closed when removeWatch is called on this
|
||||
// filename.
|
||||
func Events(fname string) <-chan fsnotify.Event {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
return shared.chans[fname]
|
||||
}
|
||||
|
||||
// Cleanup removes the watch for the input filename if necessary.
|
||||
func Cleanup(fname string) {
|
||||
RemoveWatch(fname)
|
||||
}
|
||||
|
||||
// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating
|
||||
// a new Watcher if the previous Watcher was closed.
|
||||
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
if shared.chans[winfo.fname] == nil {
|
||||
shared.chans[winfo.fname] = make(chan fsnotify.Event)
|
||||
shared.done[winfo.fname] = make(chan bool)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
|
||||
// already in inotify watch
|
||||
if shared.watchNums[fname] > 0 {
|
||||
shared.watchNums[fname]++
|
||||
if winfo.isCreate() {
|
||||
shared.watchNums[winfo.fname]++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err := shared.watcher.Add(fname)
|
||||
if err == nil {
|
||||
shared.watchNums[fname]++
|
||||
if winfo.isCreate() {
|
||||
shared.watchNums[winfo.fname]++
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the
|
||||
// corresponding events channel.
|
||||
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
ch := shared.chans[winfo.fname]
|
||||
if ch == nil {
|
||||
return
|
||||
}
|
||||
|
||||
delete(shared.chans, winfo.fname)
|
||||
close(ch)
|
||||
|
||||
if !winfo.isCreate() {
|
||||
return
|
||||
}
|
||||
|
||||
shared.watchNums[winfo.fname]--
|
||||
if shared.watchNums[winfo.fname] == 0 {
|
||||
delete(shared.watchNums, winfo.fname)
|
||||
}
|
||||
}
|
||||
|
||||
// sendEvent sends the input event to the appropriate Tail.
|
||||
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
|
||||
name := filepath.Clean(event.Name)
|
||||
@ -238,7 +227,7 @@ func (shared *InotifyTracker) run() {
|
||||
shared.error <- shared.addWatch(winfo)
|
||||
|
||||
case winfo := <-shared.remove:
|
||||
shared.removeWatch(winfo)
|
||||
shared.error <- shared.removeWatch(winfo)
|
||||
|
||||
case event, open := <-shared.watcher.Events:
|
||||
if !open {
|
@ -1,3 +1,4 @@
|
||||
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
@ -8,7 +9,7 @@ import (
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
"github.com/nxadm/tail/util"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
Some files were not shown because too many files have changed in this diff.