rebase: update github.com/libopenstorage/secrets to latest

With this update, we no longer import github.com/hashicorp/vault,
which is now under the BSL license.
https://github.com/hashicorp/vault/blob/main/LICENSE

resolves: #4196

Signed-off-by: Rakshith R <rar@redhat.com>
Rakshith R 2023-10-17 10:29:04 +05:30 committed by mergify[bot]
parent 5ff0607360
commit e46a007961
131 changed files with 3931 additions and 16000 deletions
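
Most of the churn below stems from a signature change in the updated libopenstorage/secrets API: GetSecret and PutSecret now also return a version value, which ceph-csi simply discards. A minimal sketch of the new calling convention, inferred from the internal/kms hunks below (the Version type and secretStore interface are illustrative stand-ins for the loss.Secrets types, not the library's actual declarations):

package kms

import "fmt"

// Version is an illustrative stand-in for the loss.Version value that the
// updated GetSecret/PutSecret now return; ceph-csi ignores it.
type Version string

// secretStore is an illustrative stand-in for the part of the
// libopenstorage/secrets interface whose signatures changed.
type secretStore interface {
	GetSecret(secretID string, keyContext map[string]string) (map[string]interface{}, Version, error)
	PutSecret(secretID string, plainText map[string]interface{}, keyContext map[string]string) (Version, error)
}

// fetchPassphrase mirrors the FetchDEK change below: the extra version
// return value is dropped with the blank identifier.
func fetchPassphrase(s secretStore, key string, keyContext map[string]string) (string, error) {
	data, _, err := s.GetSecret(key, keyContext)
	if err != nil {
		return "", err
	}
	passphrase, ok := data["passphrase"].(string)
	if !ok {
		return "", fmt.Errorf("passphrase at %s is not a string", key)
	}
	return passphrase, nil
}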

go.mod (21 changes)

@@ -21,7 +21,7 @@ require (
github.com/hashicorp/vault/api v1.10.0
github.com/kubernetes-csi/csi-lib-utils v0.14.0
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0
github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a
github.com/libopenstorage/secrets v0.0.0-20231011182615-5f4b25ceede1
github.com/onsi/ginkgo/v2 v2.13.0
github.com/onsi/gomega v1.28.0
github.com/pkg/xattr v0.4.9
@@ -52,7 +52,6 @@ require (
github.com/ansel1/merry v1.6.2 // indirect
github.com/ansel1/merry/v2 v2.0.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/armon/go-metrics v0.3.10 // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/aws/aws-sdk-go-v2 v1.21.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.42 // indirect
@@ -66,14 +65,12 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/frankban/quicktest v1.13.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gemalto/flume v0.13.0 // indirect
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect
@@ -86,27 +83,25 @@ require (
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/cel-go v0.16.1 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.2.2 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.1-vault-3 // indirect
github.com/hashicorp/vault v1.11.11 // indirect
github.com/hashicorp/vault/sdk v0.7.0 // indirect
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
github.com/hashicorp/vault/api/auth/approle v0.5.0 // indirect
github.com/hashicorp/vault/api/auth/kubernetes v0.5.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -127,13 +122,13 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/selinux v1.10.0 // indirect
github.com/openshift/api v0.0.0-20230320192226-1fc631efd341 // indirect
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect

go.sum (556 changes)

File diff suppressed because it is too large

internal/kms/vault.go

@@ -396,7 +396,8 @@ func initVaultKMS(args ProviderInitArgs) (EncryptionKMS, error) {
// FetchDEK returns passphrase from Vault. The passphrase is stored in a
// data.data.passphrase structure.
func (kms *vaultKMS) FetchDEK(key string) (string, error) {
s, err := kms.secrets.GetSecret(filepath.Join(kms.vaultPassphrasePath, key), kms.keyContext)
// Since the second return value, loss.Version, is not used, it is ignored.
s, _, err := kms.secrets.GetSecret(filepath.Join(kms.vaultPassphrasePath, key), kms.keyContext)
if err != nil {
return "", err
}
@@ -422,7 +423,8 @@ func (kms *vaultKMS) StoreDEK(key, value string) error {
}
pathKey := filepath.Join(kms.vaultPassphrasePath, key)
err := kms.secrets.PutSecret(pathKey, data, kms.keyContext)
// Since the first return value, loss.Version, is not used, it is ignored.
_, err := kms.secrets.PutSecret(pathKey, data, kms.keyContext)
if err != nil {
return fmt.Errorf("saving passphrase at %s request to vault failed: %w", pathKey, err)
}

internal/kms/vault_tokens.go

@@ -460,7 +460,8 @@ func (vtc *vaultTenantConnection) getK8sClient() (*kubernetes.Clientset, error)
// FetchDEK returns passphrase from Vault. The passphrase is stored in a
// data.data.passphrase structure.
func (vtc *vaultTenantConnection) FetchDEK(key string) (string, error) {
s, err := vtc.secrets.GetSecret(key, vtc.keyContext)
// Since the second return value, loss.Version, is not used, it is ignored.
s, _, err := vtc.secrets.GetSecret(key, vtc.keyContext)
if err != nil {
return "", err
}
@@ -485,7 +486,8 @@ func (vtc *vaultTenantConnection) StoreDEK(key, value string) error {
},
}
err := vtc.secrets.PutSecret(key, data, vtc.keyContext)
// Since the first return value, loss.Version, is not used, it is ignored.
_, err := vtc.secrets.PutSecret(key, data, vtc.keyContext)
if err != nil {
return fmt.Errorf("saving passphrase at %s request to vault failed: %w", key, err)
}

vendor/github.com/armon/go-metrics/.gitignore

@@ -1,26 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
/metrics.out
.idea

vendor/github.com/armon/go-metrics/.travis.yml

@@ -1,13 +0,0 @@
language: go
go:
- "1.x"
env:
- GO111MODULE=on
install:
- go get ./...
script:
- go test ./...

vendor/github.com/armon/go-metrics/LICENSE

@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 Armon Dadgar
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/armon/go-metrics/README.md

@@ -1,91 +0,0 @@
go-metrics
==========
This library provides a `metrics` package which can be used to instrument code,
expose application metrics, and profile runtime performance in a flexible manner.
Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
Sinks
-----
The `metrics` package makes use of a `MetricSink` interface to support delivery
to any type of backend. Currently the following sinks are provided:
* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
* InmemSink : Provides in-memory aggregation, can be used to export stats
* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example.
* BlackholeSink : Sinks to nowhere
In addition to the sinks, the `InmemSignal` can be used to catch a signal,
and dump a formatted output of recent metrics. For example, when a process gets
a SIGUSR1, it can dump to stderr recent performance metrics for debugging.
Labels
------
Most metrics have an equivalent ending in `WithLabels`. These methods allow
pushing metrics with labels and using some features of the underlying sinks
(e.g. translation into Prometheus labels).
Since some of these labels may greatly increase the cardinality of metrics, the
library allows labels to be filtered using a blacklist/whitelist system which
is global to all metrics.
* If `Config.AllowedLabels` is not nil, then only the labels specified in this value will be sent to the underlying sink; otherwise, all labels are sent by default.
* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to the underlying sinks.
By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
no labels are filtered at all, but this allows a user to globally block some labels with
high cardinality at the application level.
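For example, a global filter can be configured like this (a sketch; `sink` stands for any configured `MetricSink`, and the label names are illustrative):
```go
// Allow only the "method" label and explicitly block the
// high-cardinality "request_id" label across all metrics.
cfg := metrics.DefaultConfig("service-name")
cfg.AllowedLabels = []string{"method"}
cfg.BlockedLabels = []string{"request_id"}
metrics.NewGlobal(cfg, sink)
```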
Examples
--------
Here is an example of using the package:
```go
func SlowMethod() {
// Profiling the runtime of a method
defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
}
// Configure a statsite sink as the global metrics sink
sink, _ := metrics.NewStatsiteSink("statsite:8125")
metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
// Emit a Key/Value pair
metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
```
Here is an example of setting up a signal handler:
```go
// Setup the inmem sink and signal handler
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
sig := metrics.DefaultInmemSignal(inm)
metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
// Run some code
inm.SetGauge([]string{"foo"}, 42)
inm.EmitKey([]string{"bar"}, 30)
inm.IncrCounter([]string{"baz"}, 42)
inm.IncrCounter([]string{"baz"}, 1)
inm.IncrCounter([]string{"baz"}, 80)
inm.AddSample([]string{"method", "wow"}, 42)
inm.AddSample([]string{"method", "wow"}, 100)
inm.AddSample([]string{"method", "wow"}, 22)
....
```
When a signal comes in, output like the following will be dumped to stderr:
[2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
[2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
[2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
[2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513

vendor/github.com/armon/go-metrics/const_unix.go

@@ -1,12 +0,0 @@
// +build !windows
package metrics
import (
"syscall"
)
const (
// DefaultSignal is used with DefaultInmemSignal
DefaultSignal = syscall.SIGUSR1
)

vendor/github.com/armon/go-metrics/const_windows.go

@@ -1,13 +0,0 @@
// +build windows
package metrics
import (
"syscall"
)
const (
// DefaultSignal is used with DefaultInmemSignal
// Windows has no SIGUSR1, use SIGBREAK
DefaultSignal = syscall.Signal(21)
)

vendor/github.com/armon/go-metrics/inmem.go

@@ -1,339 +0,0 @@
package metrics
import (
"bytes"
"fmt"
"math"
"net/url"
"strings"
"sync"
"time"
)
var spaceReplacer = strings.NewReplacer(" ", "_")
// InmemSink provides a MetricSink that does in-memory aggregation
// without sending metrics over a network. It can be embedded within
// an application to provide profiling information.
type InmemSink struct {
// How long is each aggregation interval
interval time.Duration
// Retain controls how many metrics interval we keep
retain time.Duration
// maxIntervals is the maximum length of intervals.
// It is retain / interval.
maxIntervals int
// intervals is a slice of the retained intervals
intervals []*IntervalMetrics
intervalLock sync.RWMutex
rateDenom float64
}
// IntervalMetrics stores the aggregated metrics
// for a specific interval
type IntervalMetrics struct {
sync.RWMutex
// The start time of the interval
Interval time.Time
// Gauges maps the key to the last set value
Gauges map[string]GaugeValue
// Points maps the string to the list of emitted values
// from EmitKey
Points map[string][]float32
// Counters maps the string key to a sum of the counter
// values
Counters map[string]SampledValue
// Samples maps the key to an AggregateSample,
// which has the rolled up view of a sample
Samples map[string]SampledValue
// done is closed when this interval has ended, and a new IntervalMetrics
// has been created to receive any future metrics.
done chan struct{}
}
// NewIntervalMetrics creates a new IntervalMetrics for a given interval
func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
return &IntervalMetrics{
Interval: intv,
Gauges: make(map[string]GaugeValue),
Points: make(map[string][]float32),
Counters: make(map[string]SampledValue),
Samples: make(map[string]SampledValue),
done: make(chan struct{}),
}
}
// AggregateSample is used to hold aggregate metrics
// about a sample
type AggregateSample struct {
Count int // The count of emitted pairs
Rate float64 // The values rate per time unit (usually 1 second)
Sum float64 // The sum of values
SumSq float64 `json:"-"` // The sum of squared values
Min float64 // Minimum value
Max float64 // Maximum value
LastUpdated time.Time `json:"-"` // When value was last updated
}
// Computes a Stddev of the values
func (a *AggregateSample) Stddev() float64 {
num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
div := float64(a.Count * (a.Count - 1))
if div == 0 {
return 0
}
return math.Sqrt(num / div)
}
// Computes a mean of the values
func (a *AggregateSample) Mean() float64 {
if a.Count == 0 {
return 0
}
return a.Sum / float64(a.Count)
}
// Ingest is used to update a sample
func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
a.Count++
a.Sum += v
a.SumSq += (v * v)
if v < a.Min || a.Count == 1 {
a.Min = v
}
if v > a.Max || a.Count == 1 {
a.Max = v
}
a.Rate = float64(a.Sum) / rateDenom
a.LastUpdated = time.Now()
}
func (a *AggregateSample) String() string {
if a.Count == 0 {
return "Count: 0"
} else if a.Stddev() == 0 {
return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
} else {
return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
}
}
// NewInmemSinkFromURL creates an InmemSink from a URL. It is used
// (and tested) from NewMetricSinkFromURL.
func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {
params := u.Query()
interval, err := time.ParseDuration(params.Get("interval"))
if err != nil {
return nil, fmt.Errorf("Bad 'interval' param: %s", err)
}
retain, err := time.ParseDuration(params.Get("retain"))
if err != nil {
return nil, fmt.Errorf("Bad 'retain' param: %s", err)
}
return NewInmemSink(interval, retain), nil
}
// NewInmemSink is used to construct a new in-memory sink.
// Uses an aggregation interval and maximum retention period.
func NewInmemSink(interval, retain time.Duration) *InmemSink {
rateTimeUnit := time.Second
i := &InmemSink{
interval: interval,
retain: retain,
maxIntervals: int(retain / interval),
rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
}
i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
return i
}
func (i *InmemSink) SetGauge(key []string, val float32) {
i.SetGaugeWithLabels(key, val, nil)
}
func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
}
func (i *InmemSink) EmitKey(key []string, val float32) {
k := i.flattenKey(key)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
vals := intv.Points[k]
intv.Points[k] = append(vals, val)
}
func (i *InmemSink) IncrCounter(key []string, val float32) {
i.IncrCounterWithLabels(key, val, nil)
}
func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
agg, ok := intv.Counters[k]
if !ok {
agg = SampledValue{
Name: name,
AggregateSample: &AggregateSample{},
Labels: labels,
}
intv.Counters[k] = agg
}
agg.Ingest(float64(val), i.rateDenom)
}
func (i *InmemSink) AddSample(key []string, val float32) {
i.AddSampleWithLabels(key, val, nil)
}
func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
agg, ok := intv.Samples[k]
if !ok {
agg = SampledValue{
Name: name,
AggregateSample: &AggregateSample{},
Labels: labels,
}
intv.Samples[k] = agg
}
agg.Ingest(float64(val), i.rateDenom)
}
// Data is used to retrieve all the aggregated metrics
// Intervals may be in use, and a read lock should be acquired
func (i *InmemSink) Data() []*IntervalMetrics {
// Get the current interval, forces creation
i.getInterval()
i.intervalLock.RLock()
defer i.intervalLock.RUnlock()
n := len(i.intervals)
intervals := make([]*IntervalMetrics, n)
copy(intervals[:n-1], i.intervals[:n-1])
current := i.intervals[n-1]
// make its own copy for current interval
intervals[n-1] = &IntervalMetrics{}
copyCurrent := intervals[n-1]
current.RLock()
*copyCurrent = *current
// RWMutex is not safe to copy, so create a new instance on the copy
copyCurrent.RWMutex = sync.RWMutex{}
copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
for k, v := range current.Gauges {
copyCurrent.Gauges[k] = v
}
// saved values will not change, so just copy the reference
copyCurrent.Points = make(map[string][]float32, len(current.Points))
for k, v := range current.Points {
copyCurrent.Points[k] = v
}
copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
for k, v := range current.Counters {
copyCurrent.Counters[k] = v.deepCopy()
}
copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
for k, v := range current.Samples {
copyCurrent.Samples[k] = v.deepCopy()
}
current.RUnlock()
return intervals
}
// getInterval returns the current interval. A new interval is created if no
// previous interval exists, or if the current time is beyond the window for the
// current interval.
func (i *InmemSink) getInterval() *IntervalMetrics {
intv := time.Now().Truncate(i.interval)
// Attempt to return the existing interval first, because it only requires
// a read lock.
i.intervalLock.RLock()
n := len(i.intervals)
if n > 0 && i.intervals[n-1].Interval == intv {
defer i.intervalLock.RUnlock()
return i.intervals[n-1]
}
i.intervalLock.RUnlock()
i.intervalLock.Lock()
defer i.intervalLock.Unlock()
// Re-check for an existing interval now that the lock is re-acquired.
n = len(i.intervals)
if n > 0 && i.intervals[n-1].Interval == intv {
return i.intervals[n-1]
}
current := NewIntervalMetrics(intv)
i.intervals = append(i.intervals, current)
if n > 0 {
close(i.intervals[n-1].done)
}
n++
// Prune old intervals if the count exceeds the max.
if n >= i.maxIntervals {
copy(i.intervals[0:], i.intervals[n-i.maxIntervals:])
i.intervals = i.intervals[:i.maxIntervals]
}
return current
}
// Flattens the key for formatting, removes spaces
func (i *InmemSink) flattenKey(parts []string) string {
buf := &bytes.Buffer{}
joined := strings.Join(parts, ".")
spaceReplacer.WriteString(buf, joined)
return buf.String()
}
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
key := i.flattenKey(parts)
buf := bytes.NewBufferString(key)
for _, label := range labels {
spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
}
return buf.String(), key
}

vendor/github.com/armon/go-metrics/inmem_endpoint.go

@@ -1,162 +0,0 @@
package metrics
import (
"context"
"fmt"
"net/http"
"sort"
"time"
)
// MetricsSummary holds a roll-up of metrics info for a given interval
type MetricsSummary struct {
Timestamp string
Gauges []GaugeValue
Points []PointValue
Counters []SampledValue
Samples []SampledValue
}
type GaugeValue struct {
Name string
Hash string `json:"-"`
Value float32
Labels []Label `json:"-"`
DisplayLabels map[string]string `json:"Labels"`
}
type PointValue struct {
Name string
Points []float32
}
type SampledValue struct {
Name string
Hash string `json:"-"`
*AggregateSample
Mean float64
Stddev float64
Labels []Label `json:"-"`
DisplayLabels map[string]string `json:"Labels"`
}
// deepCopy allocates a new instance of AggregateSample
func (source *SampledValue) deepCopy() SampledValue {
dest := *source
if source.AggregateSample != nil {
dest.AggregateSample = &AggregateSample{}
*dest.AggregateSample = *source.AggregateSample
}
return dest
}
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
data := i.Data()
var interval *IntervalMetrics
n := len(data)
switch {
case n == 0:
return nil, fmt.Errorf("no metric intervals have been initialized yet")
case n == 1:
// Show the current interval if it's all we have
interval = data[0]
default:
// Show the most recent finished interval if we have one
interval = data[n-2]
}
return newMetricSummaryFromInterval(interval), nil
}
func newMetricSummaryFromInterval(interval *IntervalMetrics) MetricsSummary {
interval.RLock()
defer interval.RUnlock()
summary := MetricsSummary{
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
Points: make([]PointValue, 0, len(interval.Points)),
}
// Format and sort the output of each metric type, so it gets displayed in a
// deterministic order.
for name, points := range interval.Points {
summary.Points = append(summary.Points, PointValue{name, points})
}
sort.Slice(summary.Points, func(i, j int) bool {
return summary.Points[i].Name < summary.Points[j].Name
})
for hash, value := range interval.Gauges {
value.Hash = hash
value.DisplayLabels = make(map[string]string)
for _, label := range value.Labels {
value.DisplayLabels[label.Name] = label.Value
}
value.Labels = nil
summary.Gauges = append(summary.Gauges, value)
}
sort.Slice(summary.Gauges, func(i, j int) bool {
return summary.Gauges[i].Hash < summary.Gauges[j].Hash
})
summary.Counters = formatSamples(interval.Counters)
summary.Samples = formatSamples(interval.Samples)
return summary
}
func formatSamples(source map[string]SampledValue) []SampledValue {
output := make([]SampledValue, 0, len(source))
for hash, sample := range source {
displayLabels := make(map[string]string)
for _, label := range sample.Labels {
displayLabels[label.Name] = label.Value
}
output = append(output, SampledValue{
Name: sample.Name,
Hash: hash,
AggregateSample: sample.AggregateSample,
Mean: sample.AggregateSample.Mean(),
Stddev: sample.AggregateSample.Stddev(),
DisplayLabels: displayLabels,
})
}
sort.Slice(output, func(i, j int) bool {
return output[i].Hash < output[j].Hash
})
return output
}
type Encoder interface {
Encode(interface{}) error
}
// Stream writes metrics using encoder.Encode each time an interval ends. Runs
// until the request context is cancelled, or the encoder returns an error.
// The caller is responsible for logging any errors from encoder.
func (i *InmemSink) Stream(ctx context.Context, encoder Encoder) {
interval := i.getInterval()
for {
select {
case <-interval.done:
summary := newMetricSummaryFromInterval(interval)
if err := encoder.Encode(summary); err != nil {
return
}
// update interval to the next one
interval = i.getInterval()
case <-ctx.Done():
return
}
}
}

vendor/github.com/armon/go-metrics/inmem_signal.go

@@ -1,117 +0,0 @@
package metrics
import (
"bytes"
"fmt"
"io"
"os"
"os/signal"
"strings"
"sync"
"syscall"
)
// InmemSignal is used to listen for a given signal, and when received,
// to dump the current metrics from the InmemSink to an io.Writer
type InmemSignal struct {
signal syscall.Signal
inm *InmemSink
w io.Writer
sigCh chan os.Signal
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
// NewInmemSignal creates a new InmemSignal which listens for a given signal,
// and dumps the current metrics out to a writer
func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
i := &InmemSignal{
signal: sig,
inm: inmem,
w: w,
sigCh: make(chan os.Signal, 1),
stopCh: make(chan struct{}),
}
signal.Notify(i.sigCh, sig)
go i.run()
return i
}
// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
// and writes output to stderr. Windows uses SIGBREAK
func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
}
// Stop is used to stop the InmemSignal from listening
func (i *InmemSignal) Stop() {
i.stopLock.Lock()
defer i.stopLock.Unlock()
if i.stop {
return
}
i.stop = true
close(i.stopCh)
signal.Stop(i.sigCh)
}
// run is a long running routine that handles signals
func (i *InmemSignal) run() {
for {
select {
case <-i.sigCh:
i.dumpStats()
case <-i.stopCh:
return
}
}
}
// dumpStats is used to dump the data to output writer
func (i *InmemSignal) dumpStats() {
buf := bytes.NewBuffer(nil)
data := i.inm.Data()
// Skip the last period which is still being aggregated
for j := 0; j < len(data)-1; j++ {
intv := data[j]
intv.RLock()
for _, val := range intv.Gauges {
name := i.flattenLabels(val.Name, val.Labels)
fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
}
for name, vals := range intv.Points {
for _, val := range vals {
fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
}
}
for _, agg := range intv.Counters {
name := i.flattenLabels(agg.Name, agg.Labels)
fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
}
for _, agg := range intv.Samples {
name := i.flattenLabels(agg.Name, agg.Labels)
fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
}
intv.RUnlock()
}
// Write out the bytes
i.w.Write(buf.Bytes())
}
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
buf := bytes.NewBufferString(name)
replacer := strings.NewReplacer(" ", "_", ":", "_")
for _, label := range labels {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, label.Value)
}
return buf.String()
}

vendor/github.com/armon/go-metrics/metrics.go

@@ -1,293 +0,0 @@
package metrics
import (
"runtime"
"strings"
"time"
"github.com/hashicorp/go-immutable-radix"
)
type Label struct {
Name string
Value string
}
func (m *Metrics) SetGauge(key []string, val float32) {
m.SetGaugeWithLabels(key, val, nil)
}
func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" {
if m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
} else if m.EnableHostname {
key = insert(0, m.HostName, key)
}
}
if m.EnableTypePrefix {
key = insert(0, "gauge", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
m.sink.SetGaugeWithLabels(key, val, labelsFiltered)
}
func (m *Metrics) EmitKey(key []string, val float32) {
if m.EnableTypePrefix {
key = insert(0, "kv", key)
}
if m.ServiceName != "" {
key = insert(0, m.ServiceName, key)
}
allowed, _ := m.allowMetric(key, nil)
if !allowed {
return
}
m.sink.EmitKey(key, val)
}
func (m *Metrics) IncrCounter(key []string, val float32) {
m.IncrCounterWithLabels(key, val, nil)
}
func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix {
key = insert(0, "counter", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
m.sink.IncrCounterWithLabels(key, val, labelsFiltered)
}
func (m *Metrics) AddSample(key []string, val float32) {
m.AddSampleWithLabels(key, val, nil)
}
func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix {
key = insert(0, "sample", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
m.sink.AddSampleWithLabels(key, val, labelsFiltered)
}
func (m *Metrics) MeasureSince(key []string, start time.Time) {
m.MeasureSinceWithLabels(key, start, nil)
}
func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix {
key = insert(0, "timer", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
now := time.Now()
elapsed := now.Sub(start)
msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
m.sink.AddSampleWithLabels(key, msec, labelsFiltered)
}
// UpdateFilter overwrites the existing filter with the given rules.
func (m *Metrics) UpdateFilter(allow, block []string) {
m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels)
}
// UpdateFilterAndLabels overwrites the existing filter with the given rules.
func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
m.filterLock.Lock()
defer m.filterLock.Unlock()
m.AllowedPrefixes = allow
m.BlockedPrefixes = block
if allowedLabels == nil {
// Having a white list means we take only elements from it
m.allowedLabels = nil
} else {
m.allowedLabels = make(map[string]bool)
for _, v := range allowedLabels {
m.allowedLabels[v] = true
}
}
m.blockedLabels = make(map[string]bool)
for _, v := range blockedLabels {
m.blockedLabels[v] = true
}
m.AllowedLabels = allowedLabels
m.BlockedLabels = blockedLabels
m.filter = iradix.New()
for _, prefix := range m.AllowedPrefixes {
m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
}
for _, prefix := range m.BlockedPrefixes {
m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
}
}
// labelIsAllowed returns true if a label should be included in a metric.
// The caller should hold m.filterLock while calling this method.
func (m *Metrics) labelIsAllowed(label *Label) bool {
labelName := (*label).Name
if m.blockedLabels != nil {
_, ok := m.blockedLabels[labelName]
if ok {
// If present, let's remove this label
return false
}
}
if m.allowedLabels != nil {
_, ok := m.allowedLabels[labelName]
return ok
}
// Allow by default
return true
}
// filterLabels returns only the allowed labels.
// The caller should hold m.filterLock while calling this method.
func (m *Metrics) filterLabels(labels []Label) []Label {
if labels == nil {
return nil
}
toReturn := []Label{}
for _, label := range labels {
if m.labelIsAllowed(&label) {
toReturn = append(toReturn, label)
}
}
return toReturn
}
// Returns whether the metric should be allowed based on configured prefix filters
// It also returns the applicable labels.
func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
m.filterLock.RLock()
defer m.filterLock.RUnlock()
if m.filter == nil || m.filter.Len() == 0 {
return m.Config.FilterDefault, m.filterLabels(labels)
}
_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
if !ok {
return m.Config.FilterDefault, m.filterLabels(labels)
}
return allowed.(bool), m.filterLabels(labels)
}
// Periodically collects runtime stats to publish
func (m *Metrics) collectStats() {
for {
time.Sleep(m.ProfileInterval)
m.EmitRuntimeStats()
}
}
// Emits various runtime statistics
func (m *Metrics) EmitRuntimeStats() {
// Export number of Goroutines
numRoutines := runtime.NumGoroutine()
m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
// Export memory stats
var stats runtime.MemStats
runtime.ReadMemStats(&stats)
m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))
// Export info about the last few GC runs
num := stats.NumGC
// Handle wrap around
if num < m.lastNumGC {
m.lastNumGC = 0
}
// Ensure we don't scan more than 256
if num-m.lastNumGC >= 256 {
m.lastNumGC = num - 255
}
for i := m.lastNumGC; i < num; i++ {
pause := stats.PauseNs[i%256]
m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
}
m.lastNumGC = num
}
// Creates a new slice with the provided string value as the first element
// and the provided slice values as the remaining values.
// Ordering of the values in the provided input slice is kept intact in the output slice.
func insert(i int, v string, s []string) []string {
// Allocate new slice to avoid modifying the input slice
newS := make([]string, len(s)+1)
// Copy s[0, i-1] into newS
for j := 0; j < i; j++ {
newS[j] = s[j]
}
// Insert provided element at index i
newS[i] = v
// Copy s[i, len(s)-1] into newS starting at newS[i+1]
for j := i; j < len(s); j++ {
newS[j+1] = s[j]
}
return newS
}

vendor/github.com/armon/go-metrics/sink.go

@@ -1,115 +0,0 @@
package metrics
import (
"fmt"
"net/url"
)
// The MetricSink interface is used to transmit metrics information
// to an external system
type MetricSink interface {
// A Gauge should retain the last value it is set to
SetGauge(key []string, val float32)
SetGaugeWithLabels(key []string, val float32, labels []Label)
// Should emit a Key/Value pair for each call
EmitKey(key []string, val float32)
// Counters should accumulate values
IncrCounter(key []string, val float32)
IncrCounterWithLabels(key []string, val float32, labels []Label)
// Samples are for timing information, where quantiles are used
AddSample(key []string, val float32)
AddSampleWithLabels(key []string, val float32, labels []Label)
}
// BlackholeSink is used to just blackhole messages
type BlackholeSink struct{}
func (*BlackholeSink) SetGauge(key []string, val float32) {}
func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {}
func (*BlackholeSink) EmitKey(key []string, val float32) {}
func (*BlackholeSink) IncrCounter(key []string, val float32) {}
func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
func (*BlackholeSink) AddSample(key []string, val float32) {}
func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {}
// FanoutSink is used to sink to fanout values to multiple sinks
type FanoutSink []MetricSink
func (fh FanoutSink) SetGauge(key []string, val float32) {
fh.SetGaugeWithLabels(key, val, nil)
}
func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh {
s.SetGaugeWithLabels(key, val, labels)
}
}
func (fh FanoutSink) EmitKey(key []string, val float32) {
for _, s := range fh {
s.EmitKey(key, val)
}
}
func (fh FanoutSink) IncrCounter(key []string, val float32) {
fh.IncrCounterWithLabels(key, val, nil)
}
func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh {
s.IncrCounterWithLabels(key, val, labels)
}
}
func (fh FanoutSink) AddSample(key []string, val float32) {
fh.AddSampleWithLabels(key, val, nil)
}
func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh {
s.AddSampleWithLabels(key, val, labels)
}
}
// sinkURLFactoryFunc is a generic interface around the *SinkFromURL() function provided
// by each sink type
type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
// sinkRegistry supports the generic NewMetricSink function by mapping URL
// schemes to metric sink factory functions
var sinkRegistry = map[string]sinkURLFactoryFunc{
"statsd": NewStatsdSinkFromURL,
"statsite": NewStatsiteSinkFromURL,
"inmem": NewInmemSinkFromURL,
}
// NewMetricSinkFromURL allows a generic URL input to configure any of the
// supported sinks. The scheme of the URL identifies the type of the sink,
// and the query parameters are used to set options.
//
// "statsd://" - Initializes a StatsdSink. The host and port are passed through
// as the "addr" of the sink
//
// "statsite://" - Initializes a StatsiteSink. The host and port become the
// "addr" of the sink
//
// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
// "interval" and "duration" query parameters must be specified with valid
// durations, see NewInmemSink for details.
func NewMetricSinkFromURL(urlStr string) (MetricSink, error) {
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
sinkURLFactoryFunc := sinkRegistry[u.Scheme]
if sinkURLFactoryFunc == nil {
return nil, fmt.Errorf(
"cannot create metric sink, unrecognized sink name: %q", u.Scheme)
}
return sinkURLFactoryFunc(u)
}
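
The doc comment above describes the URL-based sink configuration; here is a minimal, self-contained usage sketch (the statsd address and service name are illustrative):

package main

import (
	"log"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Build a sink from a URL, per the NewMetricSinkFromURL doc comment.
	sink, err := metrics.NewMetricSinkFromURL("statsd://127.0.0.1:8125")
	if err != nil {
		log.Fatalf("sink setup failed: %v", err)
	}
	// Install it globally and emit a test counter through it.
	if _, err := metrics.NewGlobal(metrics.DefaultConfig("my-service"), sink); err != nil {
		log.Fatalf("metrics setup failed: %v", err)
	}
	metrics.IncrCounter([]string{"requests"}, 1)
}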

vendor/github.com/armon/go-metrics/start.go

@@ -1,146 +0,0 @@
package metrics
import (
"os"
"sync"
"sync/atomic"
"time"
iradix "github.com/hashicorp/go-immutable-radix"
)
// Config is used to configure metrics settings
type Config struct {
ServiceName string // Prefixed with keys to separate services
HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
EnableHostname bool // Enable prefixing gauge values with hostname
EnableHostnameLabel bool // Enable adding hostname to labels
EnableServiceLabel bool // Enable adding service to labels
EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory)
EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer")
TimerGranularity time.Duration // Granularity of timers.
ProfileInterval time.Duration // Interval to profile runtime metrics
AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
AllowedLabels []string // A list of metric labels to allow, with '.' as the separator
BlockedLabels []string // A list of metric labels to block, with '.' as the separator
FilterDefault bool // Whether to allow metrics by default
}
// Metrics represents an instance of a metrics sink that can
// be used to emit
type Metrics struct {
Config
lastNumGC uint32
sink MetricSink
filter *iradix.Tree
allowedLabels map[string]bool
blockedLabels map[string]bool
filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access
}
// Shared global metrics instance
var globalMetrics atomic.Value // *Metrics
func init() {
// Initialize to a blackhole sink to avoid errors
globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
}
// Default returns the shared global metrics instance.
func Default() *Metrics {
return globalMetrics.Load().(*Metrics)
}
// DefaultConfig provides a sane default configuration
func DefaultConfig(serviceName string) *Config {
c := &Config{
ServiceName: serviceName, // Use client provided service
HostName: "",
EnableHostname: true, // Enable hostname prefix
EnableRuntimeMetrics: true, // Enable runtime profiling
EnableTypePrefix: false, // Disable type prefix
TimerGranularity: time.Millisecond, // Timers are in milliseconds
ProfileInterval: time.Second, // Poll runtime every second
FilterDefault: true, // Don't filter metrics by default
}
// Try to get the hostname
name, _ := os.Hostname()
c.HostName = name
return c
}
// New is used to create a new instance of Metrics
func New(conf *Config, sink MetricSink) (*Metrics, error) {
met := &Metrics{}
met.Config = *conf
met.sink = sink
met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels)
// Start the runtime collector
if conf.EnableRuntimeMetrics {
go met.collectStats()
}
return met, nil
}
// NewGlobal is the same as New, but it assigns the metrics object to be
// used globally as well as returning it.
func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
metrics, err := New(conf, sink)
if err == nil {
globalMetrics.Store(metrics)
}
return metrics, err
}
// Proxy all the methods to the globalMetrics instance
func SetGauge(key []string, val float32) {
globalMetrics.Load().(*Metrics).SetGauge(key, val)
}
func SetGaugeWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
}
func EmitKey(key []string, val float32) {
globalMetrics.Load().(*Metrics).EmitKey(key, val)
}
func IncrCounter(key []string, val float32) {
globalMetrics.Load().(*Metrics).IncrCounter(key, val)
}
func IncrCounterWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
}
func AddSample(key []string, val float32) {
globalMetrics.Load().(*Metrics).AddSample(key, val)
}
func AddSampleWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
}
func MeasureSince(key []string, start time.Time) {
globalMetrics.Load().(*Metrics).MeasureSince(key, start)
}
func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
}
func UpdateFilter(allow, block []string) {
globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
}
// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels
// and blockedLabels - when not nil - allow filtering of labels in order to
// block/allow globally labels (especially useful when having large number of
// values for a given label). See README.md for more information about usage.
func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
}

vendor/github.com/armon/go-metrics/statsd.go

@@ -1,184 +0,0 @@
package metrics
import (
"bytes"
"fmt"
"log"
"net"
"net/url"
"strings"
"time"
)
const (
// statsdMaxLen is the maximum size of a packet
// to send to statsd
statsdMaxLen = 1400
)
// StatsdSink provides a MetricSink that can be used
// with a statsite or statsd metrics server. It uses
// only UDP packets, while StatsiteSink uses TCP.
type StatsdSink struct {
addr string
metricQueue chan string
}
// NewStatsdSinkFromURL creates a StatsdSink from a URL. It is used
// (and tested) from NewMetricSinkFromURL.
func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) {
return NewStatsdSink(u.Host)
}
// NewStatsdSink is used to create a new StatsdSink
func NewStatsdSink(addr string) (*StatsdSink, error) {
s := &StatsdSink{
addr: addr,
metricQueue: make(chan string, 4096),
}
go s.flushMetrics()
return s, nil
}
// Shutdown is used to stop flushing to statsd
func (s *StatsdSink) Shutdown() {
close(s.metricQueue)
}
func (s *StatsdSink) SetGauge(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsdSink) EmitKey(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
}
func (s *StatsdSink) IncrCounter(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsdSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
// Flattens the key for formatting, removes spaces
func (s *StatsdSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".")
return strings.Map(func(r rune) rune {
switch r {
case ':':
fallthrough
case ' ':
return '_'
default:
return r
}
}, joined)
}
// Flattens the key along with labels for formatting, removes spaces
func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
for _, label := range labels {
parts = append(parts, label.Value)
}
return s.flattenKey(parts)
}
// Does a non-blocking push to the metrics queue
func (s *StatsdSink) pushMetric(m string) {
select {
case s.metricQueue <- m:
default:
}
}
// Flushes metrics
func (s *StatsdSink) flushMetrics() {
var sock net.Conn
var err error
var wait <-chan time.Time
ticker := time.NewTicker(flushInterval)
defer ticker.Stop()
CONNECT:
// Create a buffer
buf := bytes.NewBuffer(nil)
// Attempt to connect
sock, err = net.Dial("udp", s.addr)
if err != nil {
log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
goto WAIT
}
for {
select {
case metric, ok := <-s.metricQueue:
// Get a metric from the queue
if !ok {
goto QUIT
}
// Check if this would overflow the packet size
if len(metric)+buf.Len() > statsdMaxLen {
_, err := sock.Write(buf.Bytes())
buf.Reset()
if err != nil {
log.Printf("[ERR] Error writing to statsd! Err: %s", err)
goto WAIT
}
}
// Append to the buffer
buf.WriteString(metric)
case <-ticker.C:
if buf.Len() == 0 {
continue
}
_, err := sock.Write(buf.Bytes())
buf.Reset()
if err != nil {
log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
goto WAIT
}
}
}
WAIT:
// Wait for a while
wait = time.After(time.Duration(5) * time.Second)
for {
select {
// Dequeue the messages to avoid backlog
case _, ok := <-s.metricQueue:
if !ok {
goto QUIT
}
case <-wait:
goto CONNECT
}
}
QUIT:
s.metricQueue = nil
}

vendor/github.com/armon/go-metrics/statsite.go

@@ -1,172 +0,0 @@
package metrics
import (
"bufio"
"fmt"
"log"
"net"
"net/url"
"strings"
"time"
)
const (
// We force flush the statsite metrics after this period of
// inactivity. Prevents stats from getting stuck in a buffer
// forever.
flushInterval = 100 * time.Millisecond
)
// NewStatsiteSinkFromURL creates a StatsiteSink from a URL. It is used
// (and tested) from NewMetricSinkFromURL.
func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) {
return NewStatsiteSink(u.Host)
}
// StatsiteSink provides a MetricSink that can be used with a
// statsite metrics server
type StatsiteSink struct {
addr string
metricQueue chan string
}
// NewStatsiteSink is used to create a new StatsiteSink
func NewStatsiteSink(addr string) (*StatsiteSink, error) {
s := &StatsiteSink{
addr: addr,
metricQueue: make(chan string, 4096),
}
go s.flushMetrics()
return s, nil
}
// Shutdown is used to stop flushing to statsite
func (s *StatsiteSink) Shutdown() {
close(s.metricQueue)
}
func (s *StatsiteSink) SetGauge(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsiteSink) EmitKey(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
}
func (s *StatsiteSink) IncrCounter(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsiteSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
// Flattens the key for formatting, removes spaces
func (s *StatsiteSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".")
return strings.Map(func(r rune) rune {
switch r {
case ':':
fallthrough
case ' ':
return '_'
default:
return r
}
}, joined)
}
// Flattens the key along with labels for formatting, removes spaces
func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
for _, label := range labels {
parts = append(parts, label.Value)
}
return s.flattenKey(parts)
}
// Does a non-blocking push to the metrics queue
func (s *StatsiteSink) pushMetric(m string) {
select {
case s.metricQueue <- m:
default:
}
}
// Flushes metrics
func (s *StatsiteSink) flushMetrics() {
var sock net.Conn
var err error
var wait <-chan time.Time
var buffered *bufio.Writer
ticker := time.NewTicker(flushInterval)
defer ticker.Stop()
CONNECT:
// Attempt to connect
sock, err = net.Dial("tcp", s.addr)
if err != nil {
log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
goto WAIT
}
// Create a buffered writer
buffered = bufio.NewWriter(sock)
for {
select {
case metric, ok := <-s.metricQueue:
// Get a metric from the queue
if !ok {
goto QUIT
}
// Try to send to statsite
_, err := buffered.Write([]byte(metric))
if err != nil {
log.Printf("[ERR] Error writing to statsite! Err: %s", err)
goto WAIT
}
case <-ticker.C:
if err := buffered.Flush(); err != nil {
log.Printf("[ERR] Error flushing to statsite! Err: %s", err)
goto WAIT
}
}
}
WAIT:
// Wait for a while
wait = time.After(time.Duration(5) * time.Second)
for {
select {
// Dequeue the messages to avoid backlog
case _, ok := <-s.metricQueue:
if !ok {
goto QUIT
}
case <-wait:
goto CONNECT
}
}
QUIT:
s.metricQueue = nil
}

vendor/github.com/fatih/color/LICENSE.md

@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/fatih/color/README.md

@@ -1,178 +0,0 @@
# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color)
Color lets you use colorized outputs in terms of [ANSI Escape
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
has support for Windows too! The API can be used in several ways, pick one that
suits you.
![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg)
## Install
```bash
go get github.com/fatih/color
```
## Examples
### Standard colors
```go
// Print with default helper functions
color.Cyan("Prints text in cyan.")
// A newline will be appended automatically
color.Blue("Prints %s in blue.", "text")
// These are using the default foreground colors
color.Red("We have red")
color.Magenta("And many others ..")
```
### Mix and reuse colors
```go
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")
// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")
// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)
boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")
whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with white background.")
```
### Use your own output (io.Writer)
```go
// Use your own io.Writer output
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
blue := color.New(color.FgBlue)
blue.Fprint(writer, "This will print text in blue.")
```
### Custom print functions (PrintFunc)
```go
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("Warning")
red("Error: %s", err)
// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("Don't forget this...")
```
### Custom fprint functions (FprintFunc)
```go
blue := color.New(color.FgBlue).FprintfFunc()
blue(myWriter, "important notice: %s", stars)
// Mix up with multiple attributes
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
success(myWriter, "Don't forget this...")
```
### Insert into noncolor strings (SprintFunc)
```go
// Create SprintXxx functions to mix strings with other non-colorized strings:
yellow := color.New(color.FgYellow).SprintFunc()
red := color.New(color.FgRed).SprintFunc()
fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
fmt.Printf("This %s rocks!\n", info("package"))
// Use helper functions
fmt.Println("This", color.RedString("warning"), "should be not neglected.")
fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
// Windows supported too! Just don't forget to change the output to color.Output
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
```
### Plug into existing code
```go
// Use handy standard colors
color.Set(color.FgYellow)
fmt.Println("Existing text will now be in yellow")
fmt.Printf("This one %s\n", "too")
color.Unset() // Don't forget to unset
// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // Use it in your function
fmt.Println("All text will now be bold magenta.")
```
### Disable/Enable color
There might be a case where you want to explicitly disable/enable color output. The
`go-isatty` package will automatically disable color output for non-tty output streams
(for example if the output were piped directly to `less`).
The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment
variable is set (regardless of its value).
`Color` supports disabling/enabling colors programmatically, both globally and
for single color definitions. For example, suppose you have a CLI app and a
`--no-color` bool flag. You can easily disable the color output with:
```go
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
if *flagNoColor {
color.NoColor = true // disables colorized output
}
```
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
```go
c := color.New(color.FgCyan)
c.Println("Prints cyan text")
c.DisableColor()
c.Println("This is printed without any color")
c.EnableColor()
c.Println("This prints again cyan...")
```
## GitHub Actions
To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams.
## Todo
* Save/Return previous values
* Evaluate fmt.Formatter interface
## Credits
* [Fatih Arslan](https://github.com/fatih)
* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
## License
The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details


@ -1,618 +0,0 @@
package color
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
var (
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. It's also set to true if the NO_COLOR environment variable is
// set (regardless of its value). This is a global option and affects all
// colors. For more control over each color block, use the
// DisableColor() method on individual Color objects.
NoColor = noColorExists() || os.Getenv("TERM") == "dumb" ||
(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
// Output defines the standard output of the print functions. By default
// os.Stdout is used.
Output = colorable.NewColorableStdout()
// Error defines a color supporting writer for os.Stderr.
Error = colorable.NewColorableStderr()
// colorsCache is used to reduce the count of created Color objects and
// allows reusing already created objects with the required Attribute.
colorsCache = make(map[Attribute]*Color)
colorsCacheMu sync.Mutex // protects colorsCache
)
// noColorExists returns true if the environment variable NO_COLOR exists.
func noColorExists() bool {
_, exists := os.LookupEnv("NO_COLOR")
return exists
}
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Foreground Hi-Intensity text colors
const (
FgHiBlack Attribute = iota + 90
FgHiRed
FgHiGreen
FgHiYellow
FgHiBlue
FgHiMagenta
FgHiCyan
FgHiWhite
)
// Background text colors
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// Background Hi-Intensity text colors
const (
BgHiBlack Attribute = iota + 100
BgHiRed
BgHiGreen
BgHiYellow
BgHiBlue
BgHiMagenta
BgHiCyan
BgHiWhite
)
// New returns a newly created color object.
func New(value ...Attribute) *Color {
c := &Color{
params: make([]Attribute, 0),
}
if noColorExists() {
c.noColor = boolPtr(true)
}
c.Add(value...)
return c
}
// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set().
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
// Set sets the SGR sequence.
func (c *Color) Set() *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(Output, c.format())
return c
}
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
func (c *Color) setWriter(w io.Writer) *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(w, c.format())
return c
}
func (c *Color) unsetWriter(w io.Writer) {
if c.isNoColorSet() {
return
}
if NoColor {
return
}
fmt.Fprintf(w, "%s[%dm", escape, Reset)
}
// Add is used to chain SGR parameters. Use as many parameters as needed to
// combine and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
func (c *Color) prepend(value Attribute) {
c.params = append(c.params, 0)
copy(c.params[1:], c.params[0:])
c.params[0] = value
}
// Fprint formats using the default formats for its operands and writes to w.
// Spaces are added between operands when neither is a string.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprint(w, a...)
}
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Fprintf formats according to a format specifier and writes to w.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprintf(w, format, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Fprintln formats using the default formats for its operands and writes to w.
// Spaces are always added between operands and a newline is appended.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprintln(w, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// Sprint is just like Print, but returns a string instead of printing it.
func (c *Color) Sprint(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
// Sprintln is just like Println, but returns a string instead of printing it.
func (c *Color) Sprintln(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
// Sprintf is just like Printf, but returns a string instead of printing it.
func (c *Color) Sprintf(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
// FprintFunc returns a new function that prints the passed arguments as
// colorized with color.Fprint().
func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprint(w, a...)
}
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Print(a...)
}
}
// FprintfFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintf().
func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
return func(w io.Writer, format string, a ...interface{}) {
c.Fprintf(w, format, a...)
}
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
c.Printf(format, a...)
}
}
// FprintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintln().
func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprintln(w, a...)
}
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Println(a...)
}
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
// escape; an example output might be: "1;36" -> bold cyan
func (c *Color) sequence() string {
format := make([]string, len(c.params))
for i, v := range c.params {
format[i] = strconv.Itoa(int(v))
}
return strings.Join(format, ";")
}
// wrap wraps the string s with the color's attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful for keeping existing code
// unchanged while still controlling its output. Can be used for flags like
// "--no-color". To enable color again, use the EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
func (c *Color) isNoColorSet() bool {
// check first if we have user set action
if c.noColor != nil {
return *c.noColor
}
// if not return the global option, which is disabled by default
return NoColor
}
// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
if len(c.params) != len(c2.params) {
return false
}
for _, attr := range c.params {
if !c2.attrExists(attr) {
return false
}
}
return true
}
func (c *Color) attrExists(a Attribute) bool {
for _, attr := range c.params {
if attr == a {
return true
}
}
return false
}
func boolPtr(v bool) *bool {
return &v
}
func getCachedColor(p Attribute) *Color {
colorsCacheMu.Lock()
defer colorsCacheMu.Unlock()
c, ok := colorsCache[p]
if !ok {
c = New(p)
colorsCache[p] = c
}
return c
}
func colorPrint(format string, p Attribute, a ...interface{}) {
c := getCachedColor(p)
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
if len(a) == 0 {
c.Print(format)
} else {
c.Printf(format, a...)
}
}
func colorString(format string, p Attribute, a ...interface{}) string {
c := getCachedColor(p)
if len(a) == 0 {
return c.SprintFunc()(format)
}
return c.SprintfFunc()(format, a...)
}
// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return colorString(format, FgMagenta, a...)
}
// CyanString is a convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
// WhiteString is a convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
// HiBlack is a convenient helper function to print with hi-intensity black foreground. A
// newline is appended to format by default.
func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) }
// HiRed is a convenient helper function to print with hi-intensity red foreground. A
// newline is appended to format by default.
func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) }
// HiGreen is a convenient helper function to print with hi-intensity green foreground. A
// newline is appended to format by default.
func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) }
// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
// A newline is appended to format by default.
func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
// newline is appended to format by default.
func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
// A newline is appended to format by default.
func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
// newline is appended to format by default.
func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
// newline is appended to format by default.
func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
// HiBlackString is a convenient helper function to return a string with hi-intensity black
// foreground.
func HiBlackString(format string, a ...interface{}) string {
return colorString(format, FgHiBlack, a...)
}
// HiRedString is a convenient helper function to return a string with hi-intensity red
// foreground.
func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
// HiGreenString is a convenient helper function to return a string with hi-intensity green
// foreground.
func HiGreenString(format string, a ...interface{}) string {
return colorString(format, FgHiGreen, a...)
}
// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
// foreground.
func HiYellowString(format string, a ...interface{}) string {
return colorString(format, FgHiYellow, a...)
}
// HiBlueString is a convenient helper function to return a string with hi-intensity blue
// foreground.
func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
// foreground.
func HiMagentaString(format string, a ...interface{}) string {
return colorString(format, FgHiMagenta, a...)
}
// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
// foreground.
func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
// HiWhiteString is a convenient helper function to return a string with hi-intensity white
// foreground.
func HiWhiteString(format string, a ...interface{}) string {
return colorString(format, FgHiWhite, a...)
}

135
vendor/github.com/fatih/color/doc.go generated vendored

@ -1,135 +0,0 @@
/*
Package color is an ANSI color package to output colorized or SGR defined
output to the standard output. The API can be used in several ways; pick one
that suits you.
Use simple and default helper functions with predefined foreground colors:
color.Cyan("Prints text in cyan.")
// a newline will be appended automatically
color.Blue("Prints %s in blue.", "text")
// More default foreground colors..
color.Red("We have red")
color.Yellow("Yellow color too!")
color.Magenta("And many others ..")
// Hi-intensity colors
color.HiGreen("Bright green color.")
color.HiBlack("Bright black means gray..")
color.HiWhite("Shiny white color!")
However there are times where custom color mixes are required. Below are some
examples to create custom color objects and use the print functions of each
separate color object.
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")
// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")
// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)
boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")
whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with White background.")
// Use your own io.Writer output
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
blue := color.New(color.FgBlue)
blue.Fprint(myWriter, "This will print text in blue.")
You can create PrintXxx functions to simplify even more:
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("warning")
red("error: %s", err)
// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("don't forget this...")
You can also use FprintXxx functions to pass your own io.Writer:
blue := color.New(color.FgBlue).FprintfFunc()
blue(myWriter, "important notice: %s", stars)
// Mix up with multiple attributes
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
success(myWriter, "don't forget this...")
Or create SprintXxx functions to mix strings with other non-colorized strings:
yellow := New(FgYellow).SprintFunc()
red := New(FgRed).SprintFunc()
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Printf("this %s rocks!\n", info("package"))
Windows support is enabled by default. All Print functions work as intended.
However, for the color.SprintXXX functions, the user should use fmt.FprintXXX and
set the output to color.Output:
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
Using with existing code is possible. Just use the Set() method to set the
standard output to the given parameters. That way a rewrite of existing
code is not required.
// Use handy standard colors.
color.Set(color.FgYellow)
fmt.Println("Existing text will be now in Yellow")
fmt.Printf("This one %s\n", "too")
color.Unset() // don't forget to unset
// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // use it in your function
fmt.Println("All text will be now bold magenta.")
There might be a case where you want to disable color output (for example to
pipe the standard output of your app to somewhere else). `Color` has support to
disable colors both globally and for single color definition. For example
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
the color output with:
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
if *flagNoColor {
color.NoColor = true // disables colorized output
}
You can also disable the color by setting the NO_COLOR environment variable to any value.
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
c := color.New(color.FgCyan)
c.Println("Prints cyan text")
c.DisableColor()
c.Println("This is printed without any color")
c.EnableColor()
c.Println("This prints again cyan...")
*/
package color


@ -1,16 +0,0 @@
cmd/snappytool/snappytool
testdata/bench
# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K


@ -1,18 +0,0 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Amazon.com, Inc
Damian Gryski <dgryski@gmail.com>
Eric Buth <eric@topos.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Klaus Post <klauspost@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>


@ -1,41 +0,0 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the Snappy-Go repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# http://code.google.com/legal/individual-cla-v1.0.html
# http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
# Please keep the list sorted.
Alex Legg <alexlegg@google.com>
Damian Gryski <dgryski@gmail.com>
Eric Buth <eric@topos.com>
Jan Mercl <0xjnml@gmail.com>
Jonathan Swinney <jswinney@amazon.com>
Kai Backman <kaib@golang.org>
Klaus Post <klauspost@gmail.com>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>


@ -1,27 +0,0 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,107 +0,0 @@
The Snappy compression format in the Go programming language.
To download and install from source:
$ go get github.com/golang/snappy
Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.
Benchmarks.
The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
"go test -test.bench=."
_UFlat0-8 2.19GB/s ± 0% html
_UFlat1-8 1.41GB/s ± 0% urls
_UFlat2-8 23.5GB/s ± 2% jpg
_UFlat3-8 1.91GB/s ± 0% jpg_200
_UFlat4-8 14.0GB/s ± 1% pdf
_UFlat5-8 1.97GB/s ± 0% html4
_UFlat6-8 814MB/s ± 0% txt1
_UFlat7-8 785MB/s ± 0% txt2
_UFlat8-8 857MB/s ± 0% txt3
_UFlat9-8 719MB/s ± 1% txt4
_UFlat10-8 2.84GB/s ± 0% pb
_UFlat11-8 1.05GB/s ± 0% gaviota
_ZFlat0-8 1.04GB/s ± 0% html
_ZFlat1-8 534MB/s ± 0% urls
_ZFlat2-8 15.7GB/s ± 1% jpg
_ZFlat3-8 740MB/s ± 3% jpg_200
_ZFlat4-8 9.20GB/s ± 1% pdf
_ZFlat5-8 991MB/s ± 0% html4
_ZFlat6-8 379MB/s ± 0% txt1
_ZFlat7-8 352MB/s ± 0% txt2
_ZFlat8-8 396MB/s ± 1% txt3
_ZFlat9-8 327MB/s ± 1% txt4
_ZFlat10-8 1.33GB/s ± 1% pb
_ZFlat11-8 605MB/s ± 1% gaviota
"go test -test.bench=. -tags=noasm"
_UFlat0-8 621MB/s ± 2% html
_UFlat1-8 494MB/s ± 1% urls
_UFlat2-8 23.2GB/s ± 1% jpg
_UFlat3-8 1.12GB/s ± 1% jpg_200
_UFlat4-8 4.35GB/s ± 1% pdf
_UFlat5-8 609MB/s ± 0% html4
_UFlat6-8 296MB/s ± 0% txt1
_UFlat7-8 288MB/s ± 0% txt2
_UFlat8-8 309MB/s ± 1% txt3
_UFlat9-8 280MB/s ± 1% txt4
_UFlat10-8 753MB/s ± 0% pb
_UFlat11-8 400MB/s ± 0% gaviota
_ZFlat0-8 409MB/s ± 1% html
_ZFlat1-8 250MB/s ± 1% urls
_ZFlat2-8 12.3GB/s ± 1% jpg
_ZFlat3-8 132MB/s ± 0% jpg_200
_ZFlat4-8 2.92GB/s ± 0% pdf
_ZFlat5-8 405MB/s ± 1% html4
_ZFlat6-8 179MB/s ± 1% txt1
_ZFlat7-8 170MB/s ± 1% txt2
_ZFlat8-8 189MB/s ± 1% txt3
_ZFlat9-8 164MB/s ± 1% txt4
_ZFlat10-8 479MB/s ± 1% pb
_ZFlat11-8 270MB/s ± 1% gaviota
For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
are the numbers from C++ Snappy's
make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
BM_UFlat/0 2.4GB/s html
BM_UFlat/1 1.4GB/s urls
BM_UFlat/2 21.8GB/s jpg
BM_UFlat/3 1.5GB/s jpg_200
BM_UFlat/4 13.3GB/s pdf
BM_UFlat/5 2.1GB/s html4
BM_UFlat/6 1.0GB/s txt1
BM_UFlat/7 959.4MB/s txt2
BM_UFlat/8 1.0GB/s txt3
BM_UFlat/9 864.5MB/s txt4
BM_UFlat/10 2.9GB/s pb
BM_UFlat/11 1.2GB/s gaviota
BM_ZFlat/0 944.3MB/s html (22.31 %)
BM_ZFlat/1 501.6MB/s urls (47.78 %)
BM_ZFlat/2 14.3GB/s jpg (99.95 %)
BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
BM_ZFlat/4 8.3GB/s pdf (83.30 %)
BM_ZFlat/5 903.5MB/s html4 (22.52 %)
BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
BM_ZFlat/10 1.2GB/s pb (19.68 %)
BM_ZFlat/11 527.4MB/s gaviota (37.72 %)


@ -1,264 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrTooLarge reports that the uncompressed length is too large.
ErrTooLarge = errors.New("snappy: decoded block is too large")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
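// For example, a src beginning with the bytes 0xac 0x02 uvarint-decodes to
// blockLen = 300 (0x2c | 0x02<<7) with headerLen = 2.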
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n <= 0 || v > 0xffffffff {
return 0, 0, ErrCorrupt
}
const wordSize = 32 << (^uint(0) >> 32 & 1)
if wordSize == 32 && v > 0x7fffffff {
return 0, 0, ErrTooLarge
}
return int(v), n, nil
}
const (
decodeErrCodeCorrupt = 1
decodeErrCodeUnsupportedLiteralLength = 2
)
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// Decode handles the Snappy block format, not the Snappy stream format.
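//
// A minimal usage sketch (the compressed input variable is illustrative):
//
//	decoded, err := snappy.Decode(nil, compressed)
//	if err != nil {
//		// src was not a valid Snappy block
//	}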
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if dLen <= len(dst) {
dst = dst[:dLen]
} else {
dst = make([]byte, dLen)
}
switch decode(dst, src[s:]) {
case 0:
return dst, nil
case decodeErrCodeUnsupportedLiteralLength:
return nil, errUnsupportedLiteralLength
}
return nil, ErrCorrupt
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
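//
// A minimal usage sketch (assumes the framed stream is available as an
// io.Reader named src):
//
//	sr := snappy.NewReader(src)
//	data, err := io.ReadAll(sr)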
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxBlockSize),
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
//
// Reader handles the Snappy stream format, not the Snappy block format.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
r.err = ErrCorrupt
}
return false
}
return true
}
func (r *Reader) fill() error {
for r.i >= r.j {
if !r.readFull(r.buf[:4], true) {
return r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return r.err
}
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf, false) {
return r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf, false) {
return r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if n > len(r.decoded) {
r.err = ErrCorrupt
return r.err
}
if !r.readFull(r.decoded[:n], false) {
return r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
return r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen], false) {
return r.err
}
}
return nil
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
if err := r.fill(); err != nil {
return 0, err
}
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
// ReadByte satisfies the io.ByteReader interface.
func (r *Reader) ReadByte() (byte, error) {
if r.err != nil {
return 0, r.err
}
if err := r.fill(); err != nil {
return 0, err
}
c := r.decoded[r.i]
r.i++
return c, nil
}


@ -1,490 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - AX scratch
// - BX scratch
// - CX length or x
// - DX offset
// - SI &src[s]
// - DI &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
// Initialize SI, DI and R8-R13.
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, DI
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, SI
MOVQ R11, R13
ADDQ R12, R13
loop:
// for s < len(src)
CMPQ SI, R13
JEQ end
// CX = uint32(src[s])
//
// switch src[s] & 0x03
MOVBLZX (SI), CX
MOVL CX, BX
ANDL $3, BX
CMPL BX, $1
JAE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
SHRL $2, CX
CMPL CX, $60
JAE tagLit60Plus
// case x < 60:
// s++
INCQ SI
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that CX == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// CX can hold 64 bits, so the increment cannot overflow.
INCQ CX
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// AX = len(dst) - d
// BX = len(src) - s
MOVQ R10, AX
SUBQ DI, AX
MOVQ R13, BX
SUBQ SI, BX
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMPQ CX, $16
JGT callMemmove
CMPQ AX, $16
JLT callMemmove
CMPQ BX, $16
JLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(SI), X0
MOVOU X0, 0(DI)
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMPQ CX, AX
JGT errCorrupt
CMPQ CX, BX
JGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, R13
ADDQ R12, R13
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// case x == 60:
CMPL CX, $61
JEQ tagLit61
JA tagLit62Plus
// x = uint32(src[s-1])
MOVBLZX -1(SI), CX
JMP doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVWLZX -2(SI), CX
JMP doLit
tagLit62Plus:
CMPL CX, $62
JA tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVWLZX -3(SI), CX
MOVBLZX -1(SI), BX
SHLL $16, BX
ORL BX, CX
JMP doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVL -4(SI), CX
JMP doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADDQ $5, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-5])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVLQZX -4(SI), DX
JMP doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-3])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVWQZX -2(SI), DX
JMP doCopy
tagCopy:
// We have a copy tag. We assume that:
// - BX == src[s] & 0x03
// - CX == src[s]
CMPQ BX, $2
JEQ tagCopy2
JA tagCopy4
// case tagCopy1:
// s += 2
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
MOVQ CX, DX
ANDQ $0xe0, DX
SHLQ $3, DX
MOVBQZX -1(SI), BX
ORQ BX, DX
// length = 4 + int(src[s-2])>>2&0x7
SHRQ $2, CX
ANDQ $7, CX
ADDQ $4, CX
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - CX == length && CX > 0
// - DX == offset
// if offset <= 0 { etc }
CMPQ DX, $0
JLE errCorrupt
// if d < offset { etc }
MOVQ DI, BX
SUBQ R8, BX
CMPQ BX, DX
JLT errCorrupt
// if length > len(dst)-d { etc }
MOVQ R10, BX
SUBQ DI, BX
CMPQ CX, BX
JGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVQ R10, R14
SUBQ DI, R14
MOVQ DI, R15
SUBQ DX, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMPQ CX, $16
JGT slowForwardCopy
CMPQ DX, $8
JLT slowForwardCopy
CMPQ R14, $16
JLT slowForwardCopy
MOVQ 0(R15), AX
MOVQ AX, 0(DI)
MOVQ 8(R15), BX
MOVQ BX, 8(DI)
ADDQ CX, DI
JMP loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUBQ $10, R14
CMPQ CX, R14
JGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together means that d-offset, and therefore
// // R15, is unchanged.
// }
CMPQ DX, $8
JGE fixUpSlowForwardCopy
MOVQ (R15), BX
MOVQ BX, (DI)
SUBQ DX, CX
ADDQ DX, DI
ADDQ DX, DX
JMP makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by DI being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save DI to AX so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVQ DI, AX
ADDQ CX, DI
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
CMPQ CX, $0
JLE loop
MOVQ (R15), BX
MOVQ BX, (AX)
ADDQ $8, R15
ADDQ $8, AX
SUBQ $8, CX
JMP finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), BX
MOVB BX, (DI)
INCQ R15
INCQ DI
DECQ CX
JNZ verySlowForwardCopy
JMP loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMPQ DI, R10
JNE errCorrupt
// return 0
MOVQ $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVQ $1, ret+48(FP)
RET


@ -1,494 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - R2 scratch
// - R3 scratch
// - R4 length or x
// - R5 offset
// - R6 &src[s]
// - R7 &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7.
// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6.
TEXT ·decode(SB), NOSPLIT, $56-56
// Initialize R6, R7 and R8-R13.
MOVD dst_base+0(FP), R8
MOVD dst_len+8(FP), R9
MOVD R8, R7
MOVD R8, R10
ADD R9, R10, R10
MOVD src_base+24(FP), R11
MOVD src_len+32(FP), R12
MOVD R11, R6
MOVD R11, R13
ADD R12, R13, R13
loop:
// for s < len(src)
CMP R13, R6
BEQ end
// R4 = uint32(src[s])
//
// switch src[s] & 0x03
MOVBU (R6), R4
MOVW R4, R3
ANDW $3, R3
MOVW $1, R1
CMPW R1, R3
BGE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
MOVW $60, R1
LSRW $2, R4, R4
CMPW R4, R1
BLS tagLit60Plus
// case x < 60:
// s++
ADD $1, R6, R6
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that R4 == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// R4 can hold 64 bits, so the increment cannot overflow.
ADD $1, R4, R4
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// R2 = len(dst) - d
// R3 = len(src) - s
MOVD R10, R2
SUB R7, R2, R2
MOVD R13, R3
SUB R6, R3, R3
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMP $16, R4
BGT callMemmove
CMP $16, R2
BLT callMemmove
CMP $16, R3
BLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
LDP 0(R6), (R14, R15)
STP (R14, R15), 0(R7)
// d += length
// s += length
ADD R4, R7, R7
ADD R4, R6, R6
B loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMP R2, R4
BGT errCorrupt
CMP R3, R4
BGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// R7, R6 and R4 as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVD R7, 8(RSP)
MOVD R6, 16(RSP)
MOVD R4, 24(RSP)
MOVD R7, 32(RSP)
MOVD R6, 40(RSP)
MOVD R4, 48(RSP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVD 32(RSP), R7
MOVD 40(RSP), R6
MOVD 48(RSP), R4
MOVD dst_base+0(FP), R8
MOVD dst_len+8(FP), R9
MOVD R8, R10
ADD R9, R10, R10
MOVD src_base+24(FP), R11
MOVD src_len+32(FP), R12
MOVD R11, R13
ADD R12, R13, R13
// d += length
// s += length
ADD R4, R7, R7
ADD R4, R6, R6
B loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADD R4, R6, R6
SUB $58, R6, R6
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// case x == 60:
MOVW $61, R1
CMPW R1, R4
BEQ tagLit61
BGT tagLit62Plus
// x = uint32(src[s-1])
MOVBU -1(R6), R4
B doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVHU -2(R6), R4
B doLit
tagLit62Plus:
CMPW $62, R4
BHI tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVHU -3(R6), R4
MOVBU -1(R6), R3
ORR R3<<16, R4
B doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVWU -4(R6), R4
B doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADD $5, R6, R6
// if uint(s) > uint(len(src)) { etc }
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// length = 1 + int(src[s-5])>>2
MOVD $1, R1
ADD R4>>2, R1, R4
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVWU -4(R6), R5
B doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADD $3, R6, R6
// if uint(s) > uint(len(src)) { etc }
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// length = 1 + int(src[s-3])>>2
MOVD $1, R1
ADD R4>>2, R1, R4
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVHU -2(R6), R5
B doCopy
tagCopy:
// We have a copy tag. We assume that:
// - R3 == src[s] & 0x03
// - R4 == src[s]
CMP $2, R3
BEQ tagCopy2
BGT tagCopy4
// case tagCopy1:
// s += 2
ADD $2, R6, R6
// if uint(s) > uint(len(src)) { etc }
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
MOVD R4, R5
AND $0xe0, R5
MOVBU -1(R6), R3
ORR R5<<3, R3, R5
// length = 4 + int(src[s-2])>>2&0x7
MOVD $7, R1
AND R4>>2, R1, R4
ADD $4, R4, R4
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - R4 == length && R4 > 0
// - R5 == offset
// if offset <= 0 { etc }
MOVD $0, R1
CMP R1, R5
BLE errCorrupt
// if d < offset { etc }
MOVD R7, R3
SUB R8, R3, R3
CMP R5, R3
BLT errCorrupt
// if length > len(dst)-d { etc }
MOVD R10, R3
SUB R7, R3, R3
CMP R3, R4
BGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVD R10, R14
SUB R7, R14, R14
MOVD R7, R15
SUB R5, R15, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMP $16, R4
BGT slowForwardCopy
CMP $8, R5
BLT slowForwardCopy
CMP $16, R14
BLT slowForwardCopy
MOVD 0(R15), R2
MOVD R2, 0(R7)
MOVD 8(R15), R3
MOVD R3, 8(R7)
ADD R4, R7, R7
B loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUB $10, R14, R14
CMP R14, R4
BGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together mean that d-offset, and therefore
// // R15, is unchanged.
// }
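// Worked example (added for clarity, not in the upstream source): starting
// with offset == 3, each pass copies 8 bytes and doubles the offset
// (3 -> 6 -> 12), so only a couple of iterations are needed before
// offset >= 8 and the plain 8-byte load/stores become safe.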
CMP $8, R5
BGE fixUpSlowForwardCopy
MOVD (R15), R3
MOVD R3, (R7)
SUB R5, R4, R4
ADD R5, R7, R7
ADD R5, R5, R5
B makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by R7 being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save R7 to R2 so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVD R7, R2
ADD R4, R7, R7
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
MOVD $0, R1
CMP R1, R4
BLE loop
MOVD (R15), R3
MOVD R3, (R2)
ADD $8, R15, R15
ADD $8, R2, R2
SUB $8, R4, R4
B finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), R3
MOVB R3, (R7)
ADD $1, R15, R15
ADD $1, R7, R7
SUB $1, R4, R4
CBNZ R4, verySlowForwardCopy
B loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMP R10, R7
BNE errCorrupt
// return 0
MOVD $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVD $1, R2
MOVD R2, ret+48(FP)
RET


@ -1,15 +0,0 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
// +build amd64 arm64
package snappy
// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int


@ -1,115 +0,0 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!arm64 appengine !gc noasm
package snappy
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
var d, s, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-1])
case x == 61:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length <= 0 {
return decodeErrCodeUnsupportedLiteralLength
}
if length > len(dst)-d || length > len(src)-s {
return decodeErrCodeCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
case tagCopy2:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
case tagCopy4:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-5])>>2
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
}
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
// Copy from an earlier sub-slice of dst to a later sub-slice.
// If no overlap, use the built-in copy:
if offset >= length {
copy(dst[d:d+length], dst[d-offset:])
d += length
continue
}
// Unlike the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
//
// We align the slices into a and b and show the compiler they are the same size.
// This allows the loop to run without bounds checks.
a := dst[d : d+length]
b := dst[d-offset:]
b = b[:len(a)]
for i := range a {
a[i] = b[i]
}
d += length
}
if d != len(dst) {
return decodeErrCodeCorrupt
}
return 0
}
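The forward-copy requirement above is easy to demonstrate in isolation. Below is a minimal standalone sketch (not part of the vendored source) contrasting the byte-by-byte forward copy with the built-in copy, whose memmove semantics do not propagate a repeating pattern:
```go
package main

import "fmt"

func main() {
	// offset == 1, length == 5: source and destination overlap at every step.
	dst := []byte{'a', 0, 0, 0, 0, 0}
	d, offset, length := 1, 1, 5

	// Byte-by-byte forward copy, as in the decode loop above.
	for i := 0; i < length; i++ {
		dst[d+i] = dst[d+i-offset]
	}
	fmt.Printf("%q\n", dst) // "aaaaaa": the one-byte pattern propagates.

	// The built-in copy has memmove semantics and does not propagate:
	other := []byte{'a', 0, 0, 0, 0, 0}
	copy(other[d:d+length], other[d-offset:])
	fmt.Printf("%q\n", other) // "aa\x00\x00\x00\x00"
}
```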


@ -1,289 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// Encode handles the Snappy block format, not the Snappy stream format.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return dst[:d]
}
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
const minNonLiteralBlockSize = 1 + 1 + inputMargin
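// Worked value (added for clarity, not in the upstream source): inputMargin
// is 16 - 1 = 15, so minNonLiteralBlockSize is 1 + 1 + 15 = 17; Encode above
// emits any input shorter than 17 bytes as a single literal chunk.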
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
n := uint64(srcLen)
if n > 0xffffffff {
return -1
}
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
n = 32 + n + n/6
if n > 0xffffffff {
return -1
}
return int(n)
}
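// Worked example (added for clarity, not in the upstream source): for
// srcLen == 65536 (maxBlockSize), the bound is 32 + 65536 + 65536/6 = 76490,
// which matches the maxEncodedLenOfMaxBlockSize constant in snappy.go.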
var errClosed = errors.New("snappy: Writer is closed")
// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
obuf: make([]byte, obufLen),
}
}
// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
return &Writer{
w: w,
ibuf: make([]byte, 0, maxBlockSize),
obuf: make([]byte, obufLen),
}
}
// Writer is an io.Writer that can write Snappy-compressed bytes.
//
// Writer handles the Snappy stream format, not the Snappy block format.
type Writer struct {
w io.Writer
err error
// ibuf is a buffer for the incoming (uncompressed) bytes.
//
// Its use is optional. For backwards compatibility, Writers created by the
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
// therefore do not need to be Flush'ed or Close'd.
ibuf []byte
// obuf is a buffer for the outgoing (compressed) bytes.
obuf []byte
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
if w.ibuf != nil {
w.ibuf = w.ibuf[:0]
}
w.wroteStreamHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
if w.ibuf == nil {
// Do not buffer incoming bytes. This does not perform or compress well
// if the caller of Writer.Write writes many small slices. This
// behavior is therefore deprecated, but still supported for backwards
// compatibility with code that doesn't explicitly Flush or Close.
return w.write(p)
}
// The remainder of this method is based on bufio.Writer.Write from the
// standard library.
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
var n int
if len(w.ibuf) == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, _ = w.write(p)
} else {
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
w.Flush()
}
nRet += n
p = p[n:]
}
if w.err != nil {
return nRet, w.err
}
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
nRet += n
return nRet, nil
}
func (w *Writer) write(p []byte) (nRet int, errRet error) {
if w.err != nil {
return 0, w.err
}
for len(p) > 0 {
obufStart := len(magicChunk)
if !w.wroteStreamHeader {
w.wroteStreamHeader = true
copy(w.obuf, magicChunk)
obufStart = 0
}
var uncompressed []byte
if len(p) > maxBlockSize {
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
chunkType := uint8(chunkTypeCompressedData)
chunkLen := 4 + len(compressed)
obufEnd := obufHeaderLen + len(compressed)
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
chunkType = chunkTypeUncompressedData
chunkLen = 4 + len(uncompressed)
obufEnd = obufHeaderLen
}
// Fill in the per-chunk header that comes before the body.
w.obuf[len(magicChunk)+0] = chunkType
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
w.err = err
return nRet, err
}
if chunkType == chunkTypeUncompressedData {
if _, err := w.w.Write(uncompressed); err != nil {
w.err = err
return nRet, err
}
}
nRet += len(uncompressed)
}
return nRet, nil
}
// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
if w.err != nil {
return w.err
}
if len(w.ibuf) == 0 {
return nil
}
w.write(w.ibuf)
w.ibuf = w.ibuf[:0]
return w.err
}
// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
w.Flush()
ret := w.err
if w.err == nil {
w.err = errClosed
}
return ret
}
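Per the doc comments above, a Writer from NewBufferedWriter must be Closed to flush buffered data. A minimal usage sketch (assuming the upstream import path github.com/golang/snappy; NewReader belongs to this package's decode side, which is not shown in this hunk):
```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/golang/snappy" // upstream path; the vendored copy mirrors it
)

func main() {
	var buf bytes.Buffer
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte("hello, snappy stream")); err != nil {
		panic(err)
	}
	// Close is required for a buffered Writer: it flushes any pending block.
	if err := w.Close(); err != nil {
		panic(err)
	}

	r := snappy.NewReader(&buf)
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello, snappy stream
}
```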


@ -1,730 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
// https://github.com/golang/snappy/issues/29
//
// As a workaround, the package was built with a known good assembler, and
// those instructions were disassembled by "objdump -d" to yield the
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
// style comments, in AT&T asm syntax. Note that rsp here is a physical
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
// fine on Go 1.6.
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
// ----------------------------------------------------------------------------
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - AX len(lit)
// - BX n
// - DX return value
// - DI &dst[i]
// - R10 &lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
MOVQ dst_base+0(FP), DI
MOVQ lit_base+24(FP), R10
MOVQ lit_len+32(FP), AX
MOVQ AX, DX
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT oneByte
CMPL BX, $256
JLT twoBytes
threeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
ADDQ $3, DX
JMP memmove
twoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
ADDQ $2, DX
JMP memmove
oneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
ADDQ $1, DX
memmove:
MOVQ DX, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
CALL runtime·memmove(SB)
RET
// ----------------------------------------------------------------------------
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - AX length
// - SI &dst[0]
// - DI &dst[i]
// - R11 offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVQ dst_base+0(FP), DI
MOVQ DI, SI
MOVQ offset+24(FP), R11
MOVQ length+32(FP), AX
loop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP loop0
step1:
// if length > 64 { etc }
CMPL AX, $64
JLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMPL AX, $12
JGE step3
CMPL R11, $2048
JGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - DX &src[0]
// - SI &src[j]
// - R13 &src[len(src) - 8]
// - R14 &src[len(src)]
// - R15 &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVQ src_base+0(FP), DX
MOVQ src_len+8(FP), R14
MOVQ i+24(FP), R15
MOVQ j+32(FP), SI
ADDQ DX, R14
ADDQ DX, R15
ADDQ DX, SI
MOVQ R14, R13
SUBQ $8, R13
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA cmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE bsf
ADDQ $8, R15
ADDQ $8, SI
JMP cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE extendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE extendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
// - AX . .
// - BX . .
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
// - DX 64 &src[0], tableSize
// - SI 72 &src[s]
// - DI 80 &dst[d]
// - R9 88 sLimit
// - R10 . &src[nextEmit]
// - R11 96 prevHash, currHash, nextHash, offset
// - R12 104 &src[base], skip
// - R13 . &src[nextS], &src[len(src) - 8]
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
// - R15 112 candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
TEXT ·encodeBlock(SB), 0, $32888-56
MOVQ dst_base+0(FP), DI
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R14
// shift, tableSize := uint32(32-8), 1<<8
MOVQ $24, CX
MOVQ $256, DX
calcShift:
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
// shift--
// }
CMPQ DX, $16384
JGE varTable
CMPQ DX, R14
JGE varTable
SUBQ $1, CX
SHLQ $1, DX
JMP calcShift
varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
// 2048 writes that would zero-initialize all of table's 32768 bytes.
SHRQ $3, DX
LEAQ table-32768(SP), BX
PXOR X0, X0
memclr:
MOVOU X0, 0(BX)
ADDQ $16, BX
SUBQ $1, DX
JNZ memclr
// !!! DX = &src[0]
MOVQ SI, DX
// sLimit := len(src) - inputMargin
MOVQ R14, R9
SUBQ $15, R9
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
// change for the rest of the function.
MOVQ CX, 56(SP)
MOVQ DX, 64(SP)
MOVQ R9, 88(SP)
// nextEmit := 0
MOVQ DX, R10
// s := 1
ADDQ $1, SI
// nextHash := hash(load32(src, s), shift)
MOVL 0(SI), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
outer:
// for { etc }
// skip := 32
MOVQ $32, R12
// nextS := s
MOVQ SI, R13
// candidate := 0
MOVQ $0, R15
inner0:
// for { etc }
// s := nextS
MOVQ R13, SI
// bytesBetweenHashLookups := skip >> 5
MOVQ R12, R14
SHRQ $5, R14
// nextS = s + bytesBetweenHashLookups
ADDQ R14, R13
// skip += bytesBetweenHashLookups
ADDQ R14, R12
// if nextS > sLimit { goto emitRemainder }
MOVQ R13, AX
SUBQ DX, AX
CMPQ AX, R9
JA emitRemainder
// candidate = int(table[nextHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[nextHash] = uint16(s)
MOVQ SI, AX
SUBQ DX, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// nextHash = hash(load32(src, nextS), shift)
MOVL 0(R13), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// if load32(src, s) != load32(src, candidate) { continue } break
MOVL 0(SI), AX
MOVL (DX)(R15*1), BX
CMPL AX, BX
JNE inner0
fourByteMatch:
// As per the encode_other.go code:
//
// A 4-byte match has been found. We'll later see etc.
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
// on inputMargin in encode.go.
MOVQ SI, AX
SUBQ R10, AX
CMPQ AX, $16
JLE emitLiteralFastPath
// ----------------------------------------
// Begin inline of the emitLiteral call.
//
// d += emitLiteral(dst[d:], src[nextEmit:s])
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT inlineEmitLiteralOneByte
CMPL BX, $256
JLT inlineEmitLiteralTwoBytes
inlineEmitLiteralThreeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralTwoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralOneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
inlineEmitLiteralMemmove:
// Spill local variables (registers) onto the stack; call; unspill.
//
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
MOVQ R15, 112(SP)
CALL runtime·memmove(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
MOVQ 112(SP), R15
JMP inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
// ----------------------------------------
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB AX, BX
SUBB $1, BX
SHLB $2, BX
MOVB BX, (DI)
ADDQ $1, DI
// !!! Implement the copy from lit to dst as a 16-byte load and store.
// (Encode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
// OK. Subsequent iterations will fix up the overrun.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(R10), X0
MOVOU X0, 0(DI)
ADDQ AX, DI
inner1:
// for { etc }
// base := s
MOVQ SI, R12
// !!! offset := base - candidate
MOVQ R12, R11
SUBQ R15, R11
SUBQ DX, R11
// ----------------------------------------
// Begin inline of the extendMatch call.
//
// s = extendMatch(src, candidate+4, s+4)
// !!! R14 = &src[len(src)]
MOVQ src_len+32(FP), R14
ADDQ DX, R14
// !!! R13 = &src[len(src) - 8]
MOVQ R14, R13
SUBQ $8, R13
// !!! R15 = &src[candidate + 4]
ADDQ $4, R15
ADDQ DX, R15
// !!! s += 4
ADDQ $4, SI
inlineExtendMatchCmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA inlineExtendMatchCmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE inlineExtendMatchBSF
ADDQ $8, R15
ADDQ $8, SI
JMP inlineExtendMatchCmp8
inlineExtendMatchBSF:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
JMP inlineExtendMatchEnd
inlineExtendMatchCmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE inlineExtendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE inlineExtendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP inlineExtendMatchCmp1
inlineExtendMatchEnd:
// End inline of the extendMatch call.
// ----------------------------------------
// ----------------------------------------
// Begin inline of the emitCopy call.
//
// d += emitCopy(dst[d:], base-candidate, s-base)
// !!! length := s - base
MOVQ SI, AX
SUBQ R12, AX
inlineEmitCopyLoop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT inlineEmitCopyStep1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP inlineEmitCopyLoop0
inlineEmitCopyStep1:
// if length > 64 { etc }
CMPL AX, $64
JLE inlineEmitCopyStep2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
inlineEmitCopyStep2:
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
CMPL AX, $12
JGE inlineEmitCopyStep3
CMPL R11, $2048
JGE inlineEmitCopyStep3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
JMP inlineEmitCopyEnd
inlineEmitCopyStep3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
inlineEmitCopyEnd:
// End inline of the emitCopy call.
// ----------------------------------------
// nextEmit = s
MOVQ SI, R10
// if s >= sLimit { goto emitRemainder }
MOVQ SI, AX
SUBQ DX, AX
CMPQ AX, R9
JAE emitRemainder
// As per the encode_other.go code:
//
// We could immediately etc.
// x := load64(src, s-1)
MOVQ -1(SI), R14
// prevHash := hash(uint32(x>>0), shift)
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// table[prevHash] = uint16(s-1)
MOVQ SI, AX
SUBQ DX, AX
SUBQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// currHash := hash(uint32(x>>8), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// candidate = int(table[currHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[currHash] = uint16(s)
ADDQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// if uint32(x>>8) == load32(src, candidate) { continue }
MOVL (DX)(R15*1), BX
CMPL R14, BX
JEQ inner1
// nextHash = hash(uint32(x>>16), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// s++
ADDQ $1, SI
// break out of the inner1 for loop, i.e. continue the outer loop.
JMP outer
emitRemainder:
// if nextEmit < len(src) { etc }
MOVQ src_len+32(FP), AX
ADDQ DX, AX
CMPQ R10, AX
JEQ encodeBlockEnd
// d += emitLiteral(dst[d:], src[nextEmit:])
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R10, 24(SP)
SUBQ R10, AX
MOVQ AX, 32(SP)
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ DI, 80(SP)
CALL ·emitLiteral(SB)
MOVQ 80(SP), DI
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADDQ 48(SP), DI
encodeBlockEnd:
MOVQ dst_base+0(FP), AX
SUBQ AX, DI
MOVQ DI, d+48(FP)
RET


@ -1,722 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
// ----------------------------------------------------------------------------
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - R3 len(lit)
// - R4 n
// - R6 return value
// - R8 &dst[i]
// - R10 &lit[0]
//
// The 32 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $32-56
MOVD dst_base+0(FP), R8
MOVD lit_base+24(FP), R10
MOVD lit_len+32(FP), R3
MOVD R3, R6
MOVW R3, R4
SUBW $1, R4, R4
CMPW $60, R4
BLT oneByte
CMPW $256, R4
BLT twoBytes
threeBytes:
MOVD $0xf4, R2
MOVB R2, 0(R8)
MOVW R4, 1(R8)
ADD $3, R8, R8
ADD $3, R6, R6
B memmove
twoBytes:
MOVD $0xf0, R2
MOVB R2, 0(R8)
MOVB R4, 1(R8)
ADD $2, R8, R8
ADD $2, R6, R6
B memmove
oneByte:
LSLW $2, R4, R4
MOVB R4, 0(R8)
ADD $1, R8, R8
ADD $1, R6, R6
memmove:
MOVD R6, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// R8, R10 and R3 as arguments.
MOVD R8, 8(RSP)
MOVD R10, 16(RSP)
MOVD R3, 24(RSP)
CALL runtime·memmove(SB)
RET
// ----------------------------------------------------------------------------
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - R3 length
// - R7 &dst[0]
// - R8 &dst[i]
// - R11 offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVD dst_base+0(FP), R8
MOVD R8, R7
MOVD offset+24(FP), R11
MOVD length+32(FP), R3
loop0:
// for length >= 68 { etc }
CMPW $68, R3
BLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVD $0xfe, R2
MOVB R2, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
SUB $64, R3, R3
B loop0
step1:
// if length > 64 { etc }
CMP $64, R3
BLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVD $0xee, R2
MOVB R2, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
SUB $60, R3, R3
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMP $12, R3
BGE step3
CMPW $2048, R11
BGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(R8)
LSRW $3, R11, R11
AND $0xe0, R11, R11
SUB $4, R3, R3
LSLW $2, R3
AND $0xff, R3, R3
ORRW R3, R11, R11
ORRW $1, R11, R11
MOVB R11, 0(R8)
ADD $2, R8, R8
// Return the number of bytes written.
SUB R7, R8, R8
MOVD R8, ret+40(FP)
RET
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUB $1, R3, R3
AND $0xff, R3, R3
LSLW $2, R3, R3
ORRW $2, R3, R3
MOVB R3, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
// Return the number of bytes written.
SUB R7, R8, R8
MOVD R8, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - R6 &src[0]
// - R7 &src[j]
// - R13 &src[len(src) - 8]
// - R14 &src[len(src)]
// - R15 &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVD src_base+0(FP), R6
MOVD src_len+8(FP), R14
MOVD i+24(FP), R15
MOVD j+32(FP), R7
ADD R6, R14, R14
ADD R6, R15, R15
ADD R6, R7, R7
MOVD R14, R13
SUB $8, R13, R13
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMP R13, R7
BHI cmp1
MOVD (R15), R3
MOVD (R7), R4
CMP R4, R3
BNE bsf
ADD $8, R15, R15
ADD $8, R7, R7
B cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs.
// RBIT reverses the bit order, then CLZ counts the leading zeros, the
// combination of which finds the least significant bit which is set.
// The arm64 architecture is little-endian, and the shift by 3 converts
// a bit index to a byte index.
EOR R3, R4, R4
RBIT R4, R4
CLZ R4, R4
ADD R4>>3, R7, R7
// Convert from &src[ret] to ret.
SUB R6, R7, R7
MOVD R7, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMP R7, R14
BLS extendMatchEnd
MOVB (R15), R3
MOVB (R7), R4
CMP R4, R3
BNE extendMatchEnd
ADD $1, R15, R15
ADD $1, R7, R7
B cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUB R6, R7, R7
MOVD R7, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
// - R3 . .
// - R4 . .
// - R5 64 shift
// - R6 72 &src[0], tableSize
// - R7 80 &src[s]
// - R8 88 &dst[d]
// - R9 96 sLimit
// - R10 . &src[nextEmit]
// - R11 104 prevHash, currHash, nextHash, offset
// - R12 112 &src[base], skip
// - R13 . &src[nextS], &src[len(src) - 8]
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
// - R15 120 candidate
// - R16 . hash constant, 0x1e35a7bd
// - R17 . &table
// - . 128 table
//
// The second column (64, 72, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
TEXT ·encodeBlock(SB), 0, $32896-56
MOVD dst_base+0(FP), R8
MOVD src_base+24(FP), R7
MOVD src_len+32(FP), R14
// shift, tableSize := uint32(32-8), 1<<8
MOVD $24, R5
MOVD $256, R6
MOVW $0xa7bd, R16
MOVKW $(0x1e35<<16), R16
calcShift:
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
// shift--
// }
MOVD $16384, R2
CMP R2, R6
BGE varTable
CMP R14, R6
BGE varTable
SUB $1, R5, R5
LSL $1, R6, R6
B calcShift
varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
// first tableSize elements. Each uint16 element is 2 bytes and each
// iteration writes 64 bytes, so we can do only tableSize/32 writes
// instead of the 2048 writes that would zero-initialize all of table's
// 32768 bytes. This clear could overrun the first tableSize elements, but
// it won't overrun the allocated stack size.
ADD $128, RSP, R17
MOVD R17, R4
// !!! R6 = &src[tableSize]
ADD R6<<1, R17, R6
memclr:
STP.P (ZR, ZR), 64(R4)
STP (ZR, ZR), -48(R4)
STP (ZR, ZR), -32(R4)
STP (ZR, ZR), -16(R4)
CMP R4, R6
BHI memclr
// !!! R6 = &src[0]
MOVD R7, R6
// sLimit := len(src) - inputMargin
MOVD R14, R9
SUB $15, R9, R9
// !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't
// change for the rest of the function.
MOVD R5, 64(RSP)
MOVD R6, 72(RSP)
MOVD R9, 96(RSP)
// nextEmit := 0
MOVD R6, R10
// s := 1
ADD $1, R7, R7
// nextHash := hash(load32(src, s), shift)
MOVW 0(R7), R11
MULW R16, R11, R11
LSRW R5, R11, R11
outer:
// for { etc }
// skip := 32
MOVD $32, R12
// nextS := s
MOVD R7, R13
// candidate := 0
MOVD $0, R15
inner0:
// for { etc }
// s := nextS
MOVD R13, R7
// bytesBetweenHashLookups := skip >> 5
MOVD R12, R14
LSR $5, R14, R14
// nextS = s + bytesBetweenHashLookups
ADD R14, R13, R13
// skip += bytesBetweenHashLookups
ADD R14, R12, R12
// if nextS > sLimit { goto emitRemainder }
MOVD R13, R3
SUB R6, R3, R3
CMP R9, R3
BHI emitRemainder
// candidate = int(table[nextHash])
MOVHU 0(R17)(R11<<1), R15
// table[nextHash] = uint16(s)
MOVD R7, R3
SUB R6, R3, R3
MOVH R3, 0(R17)(R11<<1)
// nextHash = hash(load32(src, nextS), shift)
MOVW 0(R13), R11
MULW R16, R11
LSRW R5, R11, R11
// if load32(src, s) != load32(src, candidate) { continue } break
MOVW 0(R7), R3
MOVW (R6)(R15), R4
CMPW R4, R3
BNE inner0
fourByteMatch:
// As per the encode_other.go code:
//
// A 4-byte match has been found. We'll later see etc.
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
// on inputMargin in encode.go.
MOVD R7, R3
SUB R10, R3, R3
CMP $16, R3
BLE emitLiteralFastPath
// ----------------------------------------
// Begin inline of the emitLiteral call.
//
// d += emitLiteral(dst[d:], src[nextEmit:s])
MOVW R3, R4
SUBW $1, R4, R4
MOVW $60, R2
CMPW R2, R4
BLT inlineEmitLiteralOneByte
MOVW $256, R2
CMPW R2, R4
BLT inlineEmitLiteralTwoBytes
inlineEmitLiteralThreeBytes:
MOVD $0xf4, R1
MOVB R1, 0(R8)
MOVW R4, 1(R8)
ADD $3, R8, R8
B inlineEmitLiteralMemmove
inlineEmitLiteralTwoBytes:
MOVD $0xf0, R1
MOVB R1, 0(R8)
MOVB R4, 1(R8)
ADD $2, R8, R8
B inlineEmitLiteralMemmove
inlineEmitLiteralOneByte:
LSLW $2, R4, R4
MOVB R4, 0(R8)
ADD $1, R8, R8
inlineEmitLiteralMemmove:
// Spill local variables (registers) onto the stack; call; unspill.
//
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// R8, R10 and R3 as arguments.
MOVD R8, 8(RSP)
MOVD R10, 16(RSP)
MOVD R3, 24(RSP)
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADD R3, R8, R8
MOVD R7, 80(RSP)
MOVD R8, 88(RSP)
MOVD R15, 120(RSP)
CALL runtime·memmove(SB)
MOVD 64(RSP), R5
MOVD 72(RSP), R6
MOVD 80(RSP), R7
MOVD 88(RSP), R8
MOVD 96(RSP), R9
MOVD 120(RSP), R15
ADD $128, RSP, R17
MOVW $0xa7bd, R16
MOVKW $(0x1e35<<16), R16
B inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
// ----------------------------------------
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB R3, R4
SUBW $1, R4, R4
AND $0xff, R4, R4
LSLW $2, R4, R4
MOVB R4, (R8)
ADD $1, R8, R8
// !!! Implement the copy from lit to dst as a 16-byte load and store.
// (Encode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
// OK. Subsequent iterations will fix up the overrun.
//
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
LDP 0(R10), (R0, R1)
STP (R0, R1), 0(R8)
ADD R3, R8, R8
inner1:
// for { etc }
// base := s
MOVD R7, R12
// !!! offset := base - candidate
MOVD R12, R11
SUB R15, R11, R11
SUB R6, R11, R11
// ----------------------------------------
// Begin inline of the extendMatch call.
//
// s = extendMatch(src, candidate+4, s+4)
// !!! R14 = &src[len(src)]
MOVD src_len+32(FP), R14
ADD R6, R14, R14
// !!! R13 = &src[len(src) - 8]
MOVD R14, R13
SUB $8, R13, R13
// !!! R15 = &src[candidate + 4]
ADD $4, R15, R15
ADD R6, R15, R15
// !!! s += 4
ADD $4, R7, R7
inlineExtendMatchCmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMP R13, R7
BHI inlineExtendMatchCmp1
MOVD (R15), R3
MOVD (R7), R4
CMP R4, R3
BNE inlineExtendMatchBSF
ADD $8, R15, R15
ADD $8, R7, R7
B inlineExtendMatchCmp8
inlineExtendMatchBSF:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs.
// RBIT reverses the bit order, then CLZ counts the leading zeros, the
// combination of which finds the least significant bit which is set.
// The arm64 architecture is little-endian, and the shift by 3 converts
// a bit index to a byte index.
EOR R3, R4, R4
RBIT R4, R4
CLZ R4, R4
ADD R4>>3, R7, R7
B inlineExtendMatchEnd
inlineExtendMatchCmp1:
// In src's tail, compare 1 byte at a time.
CMP R7, R14
BLS inlineExtendMatchEnd
MOVB (R15), R3
MOVB (R7), R4
CMP R4, R3
BNE inlineExtendMatchEnd
ADD $1, R15, R15
ADD $1, R7, R7
B inlineExtendMatchCmp1
inlineExtendMatchEnd:
// End inline of the extendMatch call.
// ----------------------------------------
// ----------------------------------------
// Begin inline of the emitCopy call.
//
// d += emitCopy(dst[d:], base-candidate, s-base)
// !!! length := s - base
MOVD R7, R3
SUB R12, R3, R3
inlineEmitCopyLoop0:
// for length >= 68 { etc }
MOVW $68, R2
CMPW R2, R3
BLT inlineEmitCopyStep1
// Emit a length 64 copy, encoded as 3 bytes.
MOVD $0xfe, R1
MOVB R1, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
SUBW $64, R3, R3
B inlineEmitCopyLoop0
inlineEmitCopyStep1:
// if length > 64 { etc }
MOVW $64, R2
CMPW R2, R3
BLE inlineEmitCopyStep2
// Emit a length 60 copy, encoded as 3 bytes.
MOVD $0xee, R1
MOVB R1, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
SUBW $60, R3, R3
inlineEmitCopyStep2:
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
MOVW $12, R2
CMPW R2, R3
BGE inlineEmitCopyStep3
MOVW $2048, R2
CMPW R2, R11
BGE inlineEmitCopyStep3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(R8)
LSRW $8, R11, R11
LSLW $5, R11, R11
SUBW $4, R3, R3
AND $0xff, R3, R3
LSLW $2, R3, R3
ORRW R3, R11, R11
ORRW $1, R11, R11
MOVB R11, 0(R8)
ADD $2, R8, R8
B inlineEmitCopyEnd
inlineEmitCopyStep3:
// Emit the remaining copy, encoded as 3 bytes.
SUBW $1, R3, R3
LSLW $2, R3, R3
ORRW $2, R3, R3
MOVB R3, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
inlineEmitCopyEnd:
// End inline of the emitCopy call.
// ----------------------------------------
// nextEmit = s
MOVD R7, R10
// if s >= sLimit { goto emitRemainder }
MOVD R7, R3
SUB R6, R3, R3
CMP R3, R9
BLS emitRemainder
// As per the encode_other.go code:
//
// We could immediately etc.
// x := load64(src, s-1)
MOVD -1(R7), R14
// prevHash := hash(uint32(x>>0), shift)
MOVW R14, R11
MULW R16, R11, R11
LSRW R5, R11, R11
// table[prevHash] = uint16(s-1)
MOVD R7, R3
SUB R6, R3, R3
SUB $1, R3, R3
MOVHU R3, 0(R17)(R11<<1)
// currHash := hash(uint32(x>>8), shift)
LSR $8, R14, R14
MOVW R14, R11
MULW R16, R11, R11
LSRW R5, R11, R11
// candidate = int(table[currHash])
MOVHU 0(R17)(R11<<1), R15
// table[currHash] = uint16(s)
ADD $1, R3, R3
MOVHU R3, 0(R17)(R11<<1)
// if uint32(x>>8) == load32(src, candidate) { continue }
MOVW (R6)(R15), R4
CMPW R4, R14
BEQ inner1
// nextHash = hash(uint32(x>>16), shift)
LSR $8, R14, R14
MOVW R14, R11
MULW R16, R11, R11
LSRW R5, R11, R11
// s++
ADD $1, R7, R7
// break out of the inner1 for loop, i.e. continue the outer loop.
B outer
emitRemainder:
// if nextEmit < len(src) { etc }
MOVD src_len+32(FP), R3
ADD R6, R3, R3
CMP R3, R10
BEQ encodeBlockEnd
// d += emitLiteral(dst[d:], src[nextEmit:])
//
// Push args.
MOVD R8, 8(RSP)
MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative.
MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative.
MOVD R10, 32(RSP)
SUB R10, R3, R3
MOVD R3, 40(RSP)
MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVD R8, 88(RSP)
CALL ·emitLiteral(SB)
MOVD 88(RSP), R8
// Finish the "d +=" part of "d += emitLiteral(etc)".
MOVD 56(RSP), R1
ADD R1, R8, R8
encodeBlockEnd:
MOVD dst_base+0(FP), R3
SUB R3, R8, R8
MOVD R8, d+48(FP)
RET


@ -1,30 +0,0 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
// +build amd64 arm64
package snappy
// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int
// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int
// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int
// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)


@ -1,238 +0,0 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!arm64 appengine !gc noasm
package snappy
func load32(b []byte, i int) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load64(b []byte, i int) uint64 {
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
default:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
}
return i + copy(dst[i:], lit)
}
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
func emitCopy(dst []byte, offset, length int) int {
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
// length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
for length >= 68 {
// Emit a length 64 copy, encoded as 3 bytes.
dst[i+0] = 63<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 64
}
if length > 64 {
// Emit a length 60 copy, encoded as 3 bytes.
dst[i+0] = 59<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 60
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[i+0] = uint8(length-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
return i + 3
}
// Emit the remaining copy, encoded as 2 bytes.
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
dst[i+1] = uint8(offset)
return i + 2
}
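// Worked example (added for clarity, not in the upstream source):
// emitCopy(dst, 10, 5) takes the 2-byte branch above, writing
// dst[0] = 0<<5 | (5-4)<<2 | tagCopy1 = 0x05 and dst[1] = 0x0a.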
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
}
return j
}
func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
// and len(src) <= maxBlockSize and maxBlockSize == 65536.
const (
maxTableSize = 1 << 14
// tableMask is redundant, but helps the compiler eliminate bounds
// checks.
tableMask = maxTableSize - 1
)
shift := uint32(32 - 8)
for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
shift--
}
// In Go, all array elements are zero-initialized, so there is no advantage
// to a smaller tableSize per se. However, it matches the C++ algorithm,
// and in the asm versions of this code, we can get away with zeroing only
// the first tableSize elements.
var table [maxTableSize]uint16
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
nextHash := hash(load32(src, s), shift)
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
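// For example (added for clarity, not in the upstream source): after 32
// consecutive misses skip reaches 64, bytesBetweenHashLookups (skip >> 5)
// becomes 2, and the scan starts probing only every other byte.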
skip := 32
nextS := s
candidate := 0
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidate = int(table[nextHash&tableMask])
table[nextHash&tableMask] = uint16(s)
nextHash = hash(load32(src, nextS), shift)
if load32(src, s) == load32(src, candidate) {
break
}
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
// Extend the 4-byte match as long as possible.
//
// This is an inlined version of:
// s = extendMatch(src, candidate+4, s+4)
s += 4
for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
}
d += emitCopy(dst[d:], base-candidate, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load64(src, s-1)
prevHash := hash(uint32(x>>0), shift)
table[prevHash&tableMask] = uint16(s - 1)
currHash := hash(uint32(x>>8), shift)
candidate = int(table[currHash&tableMask])
table[currHash&tableMask] = uint16(s)
if uint32(x>>8) != load32(src, candidate) {
nextHash = hash(uint32(x>>16), shift)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
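A block-format round trip through the encoder above, as a minimal sketch (assuming the upstream import path github.com/golang/snappy; Decode's ([]byte, error) signature comes from the package's decode side, not shown in this hunk):
```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy" // upstream path; the vendored copy mirrors it
)

func main() {
	src := bytes.Repeat([]byte("hello "), 20) // 120 repetitive bytes
	enc := snappy.Encode(nil, src)
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(enc) < len(src), bytes.Equal(dec, src)) // true true
}
```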


@ -1,98 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the Snappy compression format. It aims for very
// high speeds and reasonable compression.
//
// There are actually two Snappy formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as a Snappy stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// The canonical, C++ implementation is at https://github.com/google/snappy and
// it only implements the block format.
package snappy // import "github.com/golang/snappy"
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, this tag is a legacy format that is no longer issued by most
encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
[1, 65). The length is 1 + m. The offset is the little-endian unsigned
integer denoted by the next 4 bytes.
*/
const (
tagLiteral = 0x00
tagCopy1 = 0x01
tagCopy2 = 0x02
tagCopy4 = 0x03
)
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// maxBlockSize is the maximum size of the input to encodeBlock. It is not
// part of the wire format per se, but some parts of the encoder assume
// that an offset fits into a uint16.
//
// Also, for the framing format (Writer type instead of Encode function),
// https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536
// bytes".
maxBlockSize = 65536
// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
// hard coded to be a const instead of a variable, so that obufLen can also
// be a const. Their equivalence is confirmed by
// TestMaxEncodedLenOfMaxBlockSize.
maxEncodedLenOfMaxBlockSize = 76490
obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
}
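To make the tag layout described above concrete, here is a small, self-contained sketch (not part of the snappy package; the byte value is hypothetical) that splits a chunk's first byte into the 2-bit tag l and the 6-bit value m:

```go
package main

import "fmt"

func main() {
	// Hypothetical first byte of a chunk: 0xF1 = 0b11110001.
	first := byte(0xF1)
	l := first & 0x03 // 2 least significant bits: the chunk tag (tagCopy1 here)
	m := first >> 2   // 6 most significant bits
	// Per the format comment, for l == 1 the length is 4 + the low 3 bits of m.
	length := 4 + int(m&0x07)
	fmt.Printf("tag=%d m=%d length=%d\n", l, m, length) // tag=1 m=60 length=8
}
```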

View File

@ -1 +0,0 @@
.idea*

View File

@ -1,148 +0,0 @@
# go-hclog
[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
`go-hclog` is a package for Go that provides a simple key/value logging
interface for use in development and production environments.
It provides logging levels that allow you to decrease the amount of output,
unlike the standard library `log` package.
It provides `Printf` style logging of values via `hclog.Fmt()`.
It provides a human readable output mode for use in development as well as
JSON output mode for production.
## Stability Note
This library has reached 1.0 stability. Its API can be considered solidified
and promised through future versions.
## Installation and Docs
Install using `go get github.com/hashicorp/go-hclog`.
Full documentation is available at
http://godoc.org/github.com/hashicorp/go-hclog
## Usage
### Use the global logger
```go
hclog.Default().Info("hello world")
```
```text
2017-07-05T16:15:55.167-0700 [INFO ] hello world
```
(Note timestamps are removed in future examples for brevity.)
### Create a new logger
```go
appLogger := hclog.New(&hclog.LoggerOptions{
Name: "my-app",
Level: hclog.LevelFromString("DEBUG"),
})
```
### Emit an Info level message with 2 key/value pairs
```go
input := "5.5"
_, err := strconv.ParseInt(input, 10, 32)
if err != nil {
appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
}
```
```text
... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
```
### Create a new Logger for a major subsystem
```go
subsystemLogger := appLogger.Named("transport")
subsystemLogger.Info("we are transporting something")
```
```text
... [INFO ] my-app.transport: we are transporting something
```
Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
reflecting both the application and subsystem names.
### Create a new Logger with fixed key/value pairs
Using `With()` will include a specific key-value pair in all messages emitted
by that logger.
```go
requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
requestLogger := subsystemLogger.With("request", requestID)
requestLogger.Info("we are transporting a request")
```
```text
... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
```
This allows sub-loggers to be context specific without having to thread that
through all the callers.
### Using `hclog.Fmt()`
```go
totalBandwidth := 200
appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
```
```text
... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s"
```
### Use this with code that uses the standard library logger
If you want to use the standard library's `log.Logger` interface you can wrap
`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
it with the familiar `Println()`, `Printf()`, etc. For example:
```go
stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
InferLevels: true,
})
// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
stdLogger.Printf("[DEBUG] %+v", stdLogger)
```
```text
... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
```
Alternatively, you may configure the system-wide logger:
```go
// log the standard logger from 'import "log"'
log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))
log.SetPrefix("")
log.SetFlags(0)
log.Printf("[DEBUG] %d", 42)
```
```text
... [DEBUG] my-app: 42
```
Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
specify `InferLevels: true`, you will not see any output here. You must change
`appLogger` to `DEBUG` to see output. See the docs for more information.
If the log lines start with a timestamp, you can use the
`InferLevelsWithTimestamp` option to try to ignore them.
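A minimal sketch (not from the original README) combining `InferLevels` with `InferLevelsWithTimestamp`, assuming the `appLogger` from the earlier examples:

```go
stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
	InferLevels:              true,
	InferLevelsWithTimestamp: true,
})
// The leading timestamp is trimmed before the [WARN] prefix is detected,
// so this line is re-emitted at the WARN level.
stdLogger.Printf("2023/10/17 10:29:04 [WARN] disk usage high")
```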

View File

@ -1,29 +0,0 @@
//go:build !windows
// +build !windows
package hclog
import (
"github.com/mattn/go-isatty"
)
// setColorization will mutate the values of this logger
// to appropriately configure colorization options. It provides
// a wrapper to the output stream on Windows systems.
func (l *intLogger) setColorization(opts *LoggerOptions) {
switch opts.Color {
case ColorOff:
fallthrough
case ForceColor:
return
case AutoColor:
fi := l.checkWriterIsFile()
isUnixTerm := isatty.IsTerminal(fi.Fd())
isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd())
isTerm := isUnixTerm || isCygwinTerm
if !isTerm {
l.headerColor = ColorOff
l.writer.color = ColorOff
}
}
}

View File

@ -1,38 +0,0 @@
//go:build windows
// +build windows
package hclog
import (
"os"
colorable "github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
// setColorization will mutate the values of this logger
// to appropriately configure colorization options. It provides
// a wrapper to the output stream on Windows systems.
func (l *intLogger) setColorization(opts *LoggerOptions) {
switch opts.Color {
case ColorOff:
return
case ForceColor:
fi := l.checkWriterIsFile()
l.writer.w = colorable.NewColorable(fi)
case AutoColor:
fi := l.checkWriterIsFile()
isUnixTerm := isatty.IsTerminal(os.Stdout.Fd())
isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd())
isTerm := isUnixTerm || isCygwinTerm
if !isTerm {
l.writer.color = ColorOff
l.headerColor = ColorOff
return
}
if l.headerColor == ColorOff {
l.writer.w = colorable.NewColorable(fi)
}
}
}

View File

@ -1,38 +0,0 @@
package hclog
import (
"context"
)
// WithContext inserts a logger into the context and is retrievable
// with FromContext. The optional args can be set with the same syntax as
// Logger.With to set fields on the inserted logger. This will not modify
// the logger argument in-place.
func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context {
// While we could call logger.With even with zero args, we have this
// check to avoid unnecessary allocations around creating a copy of a
// logger.
if len(args) > 0 {
logger = logger.With(args...)
}
return context.WithValue(ctx, contextKey, logger)
}
// FromContext returns a logger from the context. This will return L()
// (the default logger) if no logger is found in the context. Therefore,
// this will never return a nil value.
func FromContext(ctx context.Context) Logger {
logger, _ := ctx.Value(contextKey).(Logger)
if logger == nil {
return L()
}
return logger
}
// Unexported new type so that our context key never collides with another.
type contextKeyType struct{}
// contextKey is the key used for the context to store the logger.
var contextKey = contextKeyType{}
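A brief usage sketch for the two functions above, assuming an `appLogger` created with `hclog.New` and the standard `context` import (the key/value pair is illustrative):

```go
// Stash a request-scoped sub-logger in the context...
ctx := hclog.WithContext(context.Background(), appLogger, "request_id", "42")

// ...and retrieve it downstream; FromContext never returns nil.
hclog.FromContext(ctx).Info("handling request")
```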

View File

@ -1,71 +0,0 @@
package hclog
import (
"regexp"
"strings"
)
// ExcludeByMessage provides a simple way to build a list of log messages that
// can be queried and matched. This is meant to be used with the Exclude
// option on LoggerOptions to suppress log messages. It does not hold any
// mutexes internally, so normal usage is to Add entries at setup time and
// none once Exclude is in use. Exclude is called with a mutex held within
// the Logger, so it does not need its own locking. Example usage:
//
// f := new(ExcludeByMessage)
// f.Add("Noisy log message text")
// appLogger.Exclude = f.Exclude
type ExcludeByMessage struct {
messages map[string]struct{}
}
// Add registers a message to be filtered. Do not call this once Exclude is
// in use, due to concurrency issues.
func (f *ExcludeByMessage) Add(msg string) {
if f.messages == nil {
f.messages = make(map[string]struct{})
}
f.messages[msg] = struct{}{}
}
// Exclude returns true if the given message should be excluded.
func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool {
_, ok := f.messages[msg]
return ok
}
// ExcludeByPrefix is a simple type to match a message string that has a common prefix.
type ExcludeByPrefix string
// Exclude matches any message that starts with the prefix.
func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool {
return strings.HasPrefix(msg, string(p))
}
// ExcludeByRegexp takes a regexp and uses it to match a log message string. If
// it matches, the log entry is excluded.
type ExcludeByRegexp struct {
Regexp *regexp.Regexp
}
// Exclude the log message if the message string matches the regexp
func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool {
return e.Regexp.MatchString(msg)
}
// ExcludeFuncs is a slice of functions that will be called to see if a log
// entry should be filtered or not. It stops calling functions once at least
// one returns true.
type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool
// Calls each function until one of them returns true
func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool {
for _, f := range ff {
if f(level, msg, args...) {
return true
}
}
return false
}
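A short sketch (names are illustrative) wiring one of these filters into the `Exclude` option on `LoggerOptions`:

```go
logger := hclog.New(&hclog.LoggerOptions{
	Name:    "my-app",
	Exclude: hclog.ExcludeByPrefix("health check").Exclude,
})
logger.Info("health check ok") // suppressed by the prefix filter
logger.Info("serving traffic") // emitted normally
```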

View File

@ -1,64 +0,0 @@
package hclog
import (
"sync"
"time"
)
var (
protect sync.Once
def Logger
// DefaultOptions is used to create the Default logger. These are read
// only when the Default logger is created, so set them as soon as the
// process starts.
DefaultOptions = &LoggerOptions{
Level: DefaultLevel,
Output: DefaultOutput,
TimeFn: time.Now,
}
)
// Default returns a globally held logger. This can be a good starting
// place, and then you can use .With() and .Named() to create sub-loggers
// to be used in more specific contexts.
// The value of the Default logger can be set via SetDefault() or by
// changing the options in DefaultOptions.
//
// This method is goroutine safe, returning a global from memory, but
// care should be used if SetDefault() is called at random times
// in the program as that may result in race conditions and an unexpected
// Logger being returned.
func Default() Logger {
protect.Do(func() {
// If SetDefault was used before Default() was called, we need to
// detect that here.
if def == nil {
def = New(DefaultOptions)
}
})
return def
}
// L is a short alias for Default().
func L() Logger {
return Default()
}
// SetDefault changes the logger to be returned by Default() and L()
// to the one given. This allows packages to use the default logger
// and have higher level packages change it to match the execution
// environment. It returns any old default if there is one.
//
// NOTE: This is expected to be called early in the program to set up
// a default logger. As such, it does not attempt to make itself
// not racy with regard to the value of the default logger. Ergo
// if it is called in goroutines, you may experience race conditions
// with other goroutines retrieving the default logger. Basically,
// don't do that.
func SetDefault(log Logger) Logger {
old := def
def = log
return old
}
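A minimal sketch of the intended call pattern, assuming the hclog import: set the default once at startup, then use `L()` everywhere else:

```go
func main() {
	// Called early, before any goroutines fetch the default logger.
	hclog.SetDefault(hclog.New(&hclog.LoggerOptions{Name: "my-app"}))
	hclog.L().Info("service starting")
}
```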

View File

@ -1,204 +0,0 @@
package hclog
import (
"io"
"log"
"sync"
"sync/atomic"
)
var _ Logger = &interceptLogger{}
type interceptLogger struct {
Logger
mu *sync.Mutex
sinkCount *int32
Sinks map[SinkAdapter]struct{}
}
func NewInterceptLogger(opts *LoggerOptions) InterceptLogger {
l := newLogger(opts)
if l.callerOffset > 0 {
// extra frames for interceptLogger.{Warn,Info,Log,etc...}, and interceptLogger.log
l.callerOffset += 2
}
intercept := &interceptLogger{
Logger: l,
mu: new(sync.Mutex),
sinkCount: new(int32),
Sinks: make(map[SinkAdapter]struct{}),
}
atomic.StoreInt32(intercept.sinkCount, 0)
return intercept
}
func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) {
i.log(level, msg, args...)
}
// log is used to make the caller stack frame lookup consistent. If Warn, Info, etc.
// all called Log then direct calls to Log would have a different stack frame
// depth. By having all the methods call the same helper we ensure the stack
// frame depth is the same.
func (i *interceptLogger) log(level Level, msg string, args ...interface{}) {
i.Logger.Log(level, msg, args...)
if atomic.LoadInt32(i.sinkCount) == 0 {
return
}
i.mu.Lock()
defer i.mu.Unlock()
for s := range i.Sinks {
s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...)
}
}
// Emit the message and args at TRACE level to log and sinks
func (i *interceptLogger) Trace(msg string, args ...interface{}) {
i.log(Trace, msg, args...)
}
// Emit the message and args at DEBUG level to log and sinks
func (i *interceptLogger) Debug(msg string, args ...interface{}) {
i.log(Debug, msg, args...)
}
// Emit the message and args at INFO level to log and sinks
func (i *interceptLogger) Info(msg string, args ...interface{}) {
i.log(Info, msg, args...)
}
// Emit the message and args at WARN level to log and sinks
func (i *interceptLogger) Warn(msg string, args ...interface{}) {
i.log(Warn, msg, args...)
}
// Emit the message and args at ERROR level to log and sinks
func (i *interceptLogger) Error(msg string, args ...interface{}) {
i.log(Error, msg, args...)
}
func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} {
top := i.Logger.ImpliedArgs()
cp := make([]interface{}, len(top)+len(args))
copy(cp, top)
copy(cp[len(top):], args)
return cp
}
// Create a new sub-Logger with a name descending from the current name.
// This is used to create a subsystem specific Logger.
// Registered sinks will subscribe to these messages as well.
func (i *interceptLogger) Named(name string) Logger {
return i.NamedIntercept(name)
}
// Create a new sub-Logger with an explicit name. This ignores the current
// name. This is used to create a standalone logger that doesn't fall
// within the normal hierarchy. Registered sinks will subscribe
// to these messages as well.
func (i *interceptLogger) ResetNamed(name string) Logger {
return i.ResetNamedIntercept(name)
}
// Create a new sub-Logger with a name descending from the current name.
// This is used to create a subsystem specific Logger.
// Registered sinks will subscribe to these messages as well.
func (i *interceptLogger) NamedIntercept(name string) InterceptLogger {
var sub interceptLogger
sub = *i
sub.Logger = i.Logger.Named(name)
return &sub
}
// Create a new sub-Logger with an explicit name. This ignores the current
// name. This is used to create a standalone logger that doesn't fall
// within the normal hierarchy. Registered sinks will subscribe
// to these messages as well.
func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger {
var sub interceptLogger
sub = *i
sub.Logger = i.Logger.ResetNamed(name)
return &sub
}
// Return a sub-Logger for which every emitted log message will contain
// the given key/value pairs. This is used to create a context specific
// Logger.
func (i *interceptLogger) With(args ...interface{}) Logger {
var sub interceptLogger
sub = *i
sub.Logger = i.Logger.With(args...)
return &sub
}
// RegisterSink attaches a SinkAdapter to the interceptLogger's sinks.
func (i *interceptLogger) RegisterSink(sink SinkAdapter) {
i.mu.Lock()
defer i.mu.Unlock()
i.Sinks[sink] = struct{}{}
atomic.AddInt32(i.sinkCount, 1)
}
// DeregisterSink removes a SinkAdapter from the interceptLogger's sinks.
func (i *interceptLogger) DeregisterSink(sink SinkAdapter) {
i.mu.Lock()
defer i.mu.Unlock()
delete(i.Sinks, sink)
atomic.AddInt32(i.sinkCount, -1)
}
func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger {
return i.StandardLogger(opts)
}
func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
if opts == nil {
opts = &StandardLoggerOptions{}
}
return log.New(i.StandardWriter(opts), "", 0)
}
func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer {
return i.StandardWriter(opts)
}
func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
return &stdlogAdapter{
log: i,
inferLevels: opts.InferLevels,
inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
forceLevel: opts.ForceLevel,
}
}
func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error {
if or, ok := i.Logger.(OutputResettable); ok {
return or.ResetOutput(opts)
} else {
return nil
}
}
func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
if or, ok := i.Logger.(OutputResettable); ok {
return or.ResetOutputWithFlush(opts, flushable)
} else {
return nil
}
}
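A compact sketch of the sink mechanics defined above: a root `InterceptLogger` mirrors messages to a registered `SinkAdapter`, which applies its own level (the output targets are illustrative, and the `os` import is assumed):

```go
root := hclog.NewInterceptLogger(&hclog.LoggerOptions{
	Name:   "my-app",
	Output: os.Stdout,
})
sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
	Level:  hclog.Warn,
	Output: os.Stderr, // e.g. a side channel for warnings and errors
})
root.RegisterSink(sink)
root.Warn("reaches both the root logger and the sink")
root.DeregisterSink(sink)
```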

View File

@ -1,776 +0,0 @@
package hclog
import (
"bytes"
"encoding"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/fatih/color"
)
// TimeFormat is the time format to use for plain (non-JSON) output.
// This is a version of RFC3339 that contains millisecond precision.
const TimeFormat = "2006-01-02T15:04:05.000Z0700"
// TimeFormatJSON is the time format to use for JSON output.
// This is a version of RFC3339 that contains microsecond precision.
const TimeFormatJSON = "2006-01-02T15:04:05.000000Z07:00"
// errJsonUnsupportedTypeMsg is included in log JSON entries if an arg cannot be serialized to JSON
const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json"
var (
_levelToBracket = map[Level]string{
Debug: "[DEBUG]",
Trace: "[TRACE]",
Info: "[INFO] ",
Warn: "[WARN] ",
Error: "[ERROR]",
}
_levelToColor = map[Level]*color.Color{
Debug: color.New(color.FgHiWhite),
Trace: color.New(color.FgHiGreen),
Info: color.New(color.FgHiBlue),
Warn: color.New(color.FgHiYellow),
Error: color.New(color.FgHiRed),
}
)
// Make sure that intLogger is a Logger
var _ Logger = &intLogger{}
// intLogger is an internal logger implementation. Internal in that it is
// defined entirely by this package.
type intLogger struct {
json bool
callerOffset int
name string
timeFormat string
timeFn TimeFunction
disableTime bool
// This is an interface so that it's shared by any derived loggers, since
// those derived loggers share the bufio.Writer as well.
mutex Locker
writer *writer
level *int32
headerColor ColorOption
implied []interface{}
exclude func(level Level, msg string, args ...interface{}) bool
// create subloggers with their own level setting
independentLevels bool
}
// New returns a configured logger.
func New(opts *LoggerOptions) Logger {
return newLogger(opts)
}
// NewSinkAdapter returns a SinkAdapter with configured settings
// defined by LoggerOptions
func NewSinkAdapter(opts *LoggerOptions) SinkAdapter {
l := newLogger(opts)
if l.callerOffset > 0 {
// extra frames for interceptLogger.{Warn,Info,Log,etc...}, and SinkAdapter.Accept
l.callerOffset += 2
}
return l
}
func newLogger(opts *LoggerOptions) *intLogger {
if opts == nil {
opts = &LoggerOptions{}
}
output := opts.Output
if output == nil {
output = DefaultOutput
}
level := opts.Level
if level == NoLevel {
level = DefaultLevel
}
mutex := opts.Mutex
if mutex == nil {
mutex = new(sync.Mutex)
}
var primaryColor, headerColor ColorOption
if opts.ColorHeaderOnly {
primaryColor = ColorOff
headerColor = opts.Color
} else {
primaryColor = opts.Color
headerColor = ColorOff
}
l := &intLogger{
json: opts.JSONFormat,
name: opts.Name,
timeFormat: TimeFormat,
timeFn: time.Now,
disableTime: opts.DisableTime,
mutex: mutex,
writer: newWriter(output, primaryColor),
level: new(int32),
exclude: opts.Exclude,
independentLevels: opts.IndependentLevels,
headerColor: headerColor,
}
if opts.IncludeLocation {
l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset
}
if l.json {
l.timeFormat = TimeFormatJSON
}
if opts.TimeFn != nil {
l.timeFn = opts.TimeFn
}
if opts.TimeFormat != "" {
l.timeFormat = opts.TimeFormat
}
l.setColorization(opts)
atomic.StoreInt32(l.level, int32(level))
return l
}
// offsetIntLogger is the stack frame offset in the call stack for the caller to
// one of the Warn, Info, Log, etc methods.
const offsetIntLogger = 3
// Log a message and a set of key/value pairs if the given level is at
// or more severe than the threshold configured in the Logger.
func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) {
if level < Level(atomic.LoadInt32(l.level)) {
return
}
t := l.timeFn()
l.mutex.Lock()
defer l.mutex.Unlock()
if l.exclude != nil && l.exclude(level, msg, args...) {
return
}
if l.json {
l.logJSON(t, name, level, msg, args...)
} else {
l.logPlain(t, name, level, msg, args...)
}
l.writer.Flush(level)
}
// Clean up a path by returning only the last 2 segments of the path.
func trimCallerPath(path string) string {
// lovely borrowed from zap
// nb. To make sure we trim the path correctly on Windows too, we
// counter-intuitively need to use '/' and *not* os.PathSeparator here,
// because the path given originates from Go stdlib, specifically
// runtime.Caller() which (as of Mar/17) returns forward slashes even on
// Windows.
//
// See https://github.com/golang/go/issues/3335
// and https://github.com/golang/go/issues/18151
//
// for discussion on the issue on Go side.
// Find the last separator.
idx := strings.LastIndexByte(path, '/')
if idx == -1 {
return path
}
// Find the penultimate separator.
idx = strings.LastIndexByte(path[:idx], '/')
if idx == -1 {
return path
}
return path[idx+1:]
}
// isNormal indicates if the rune is one allowed to exist as an unquoted
// string value. This is a subset of ASCII, `-` through `~`.
func isNormal(r rune) bool {
return 0x2D <= r && r <= 0x7E // - through ~
}
// needsQuoting returns false if all the runes in string are normal, according
// to isNormal
func needsQuoting(str string) bool {
for _, r := range str {
if !isNormal(r) {
return true
}
}
return false
}
// Non-JSON logging format function
func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) {
if !l.disableTime {
l.writer.WriteString(t.Format(l.timeFormat))
l.writer.WriteByte(' ')
}
s, ok := _levelToBracket[level]
if ok {
if l.headerColor != ColorOff {
color := _levelToColor[level]
color.Fprint(l.writer, s)
} else {
l.writer.WriteString(s)
}
} else {
l.writer.WriteString("[?????]")
}
if l.callerOffset > 0 {
if _, file, line, ok := runtime.Caller(l.callerOffset); ok {
l.writer.WriteByte(' ')
l.writer.WriteString(trimCallerPath(file))
l.writer.WriteByte(':')
l.writer.WriteString(strconv.Itoa(line))
l.writer.WriteByte(':')
}
}
l.writer.WriteByte(' ')
if name != "" {
l.writer.WriteString(name)
if msg != "" {
l.writer.WriteString(": ")
l.writer.WriteString(msg)
}
} else if msg != "" {
l.writer.WriteString(msg)
}
args = append(l.implied, args...)
var stacktrace CapturedStacktrace
if len(args) > 0 {
if len(args)%2 != 0 {
cs, ok := args[len(args)-1].(CapturedStacktrace)
if ok {
args = args[:len(args)-1]
stacktrace = cs
} else {
extra := args[len(args)-1]
args = append(args[:len(args)-1], MissingKey, extra)
}
}
l.writer.WriteByte(':')
FOR:
for i := 0; i < len(args); i = i + 2 {
var (
val string
raw bool
)
switch st := args[i+1].(type) {
case string:
val = st
if st == "" {
val = `""`
raw = true
}
case int:
val = strconv.FormatInt(int64(st), 10)
case int64:
val = strconv.FormatInt(int64(st), 10)
case int32:
val = strconv.FormatInt(int64(st), 10)
case int16:
val = strconv.FormatInt(int64(st), 10)
case int8:
val = strconv.FormatInt(int64(st), 10)
case uint:
val = strconv.FormatUint(uint64(st), 10)
case uint64:
val = strconv.FormatUint(uint64(st), 10)
case uint32:
val = strconv.FormatUint(uint64(st), 10)
case uint16:
val = strconv.FormatUint(uint64(st), 10)
case uint8:
val = strconv.FormatUint(uint64(st), 10)
case Hex:
val = "0x" + strconv.FormatUint(uint64(st), 16)
case Octal:
val = "0" + strconv.FormatUint(uint64(st), 8)
case Binary:
val = "0b" + strconv.FormatUint(uint64(st), 2)
case CapturedStacktrace:
stacktrace = st
continue FOR
case Format:
val = fmt.Sprintf(st[0].(string), st[1:]...)
case Quote:
raw = true
val = strconv.Quote(string(st))
default:
v := reflect.ValueOf(st)
if v.Kind() == reflect.Slice {
val = l.renderSlice(v)
raw = true
} else {
val = fmt.Sprintf("%v", st)
}
}
var key string
switch st := args[i].(type) {
case string:
key = st
default:
key = fmt.Sprintf("%s", st)
}
if strings.Contains(val, "\n") {
l.writer.WriteString("\n ")
l.writer.WriteString(key)
l.writer.WriteString("=\n")
writeIndent(l.writer, val, " | ")
l.writer.WriteString(" ")
} else if !raw && needsQuoting(val) {
l.writer.WriteByte(' ')
l.writer.WriteString(key)
l.writer.WriteByte('=')
l.writer.WriteString(strconv.Quote(val))
} else {
l.writer.WriteByte(' ')
l.writer.WriteString(key)
l.writer.WriteByte('=')
l.writer.WriteString(val)
}
}
}
l.writer.WriteString("\n")
if stacktrace != "" {
l.writer.WriteString(string(stacktrace))
l.writer.WriteString("\n")
}
}
func writeIndent(w *writer, str string, indent string) {
for {
nl := strings.IndexByte(str, "\n"[0])
if nl == -1 {
if str != "" {
w.WriteString(indent)
w.WriteString(str)
w.WriteString("\n")
}
return
}
w.WriteString(indent)
w.WriteString(str[:nl])
w.WriteString("\n")
str = str[nl+1:]
}
}
func (l *intLogger) renderSlice(v reflect.Value) string {
var buf bytes.Buffer
buf.WriteRune('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteString(", ")
}
sv := v.Index(i)
var val string
switch sv.Kind() {
case reflect.String:
val = strconv.Quote(sv.String())
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64:
val = strconv.FormatInt(sv.Int(), 10)
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val = strconv.FormatUint(sv.Uint(), 10)
default:
val = fmt.Sprintf("%v", sv.Interface())
if strings.ContainsAny(val, " \t\n\r") {
val = strconv.Quote(val)
}
}
buf.WriteString(val)
}
buf.WriteRune(']')
return buf.String()
}
// JSON logging function
func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) {
vals := l.jsonMapEntry(t, name, level, msg)
args = append(l.implied, args...)
if len(args) > 0 {
if len(args)%2 != 0 {
cs, ok := args[len(args)-1].(CapturedStacktrace)
if ok {
args = args[:len(args)-1]
vals["stacktrace"] = cs
} else {
extra := args[len(args)-1]
args = append(args[:len(args)-1], MissingKey, extra)
}
}
for i := 0; i < len(args); i = i + 2 {
val := args[i+1]
switch sv := val.(type) {
case error:
// Check if val is of type error. If error type doesn't
// implement json.Marshaler or encoding.TextMarshaler
// then set val to err.Error() so that it gets marshaled
switch sv.(type) {
case json.Marshaler, encoding.TextMarshaler:
default:
val = sv.Error()
}
case Format:
val = fmt.Sprintf(sv[0].(string), sv[1:]...)
}
var key string
switch st := args[i].(type) {
case string:
key = st
default:
key = fmt.Sprintf("%s", st)
}
vals[key] = val
}
}
err := json.NewEncoder(l.writer).Encode(vals)
if err != nil {
if _, ok := err.(*json.UnsupportedTypeError); ok {
plainVal := l.jsonMapEntry(t, name, level, msg)
plainVal["@warn"] = errJsonUnsupportedTypeMsg
json.NewEncoder(l.writer).Encode(plainVal)
}
}
}
func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} {
vals := map[string]interface{}{
"@message": msg,
}
if !l.disableTime {
vals["@timestamp"] = t.Format(l.timeFormat)
}
var levelStr string
switch level {
case Error:
levelStr = "error"
case Warn:
levelStr = "warn"
case Info:
levelStr = "info"
case Debug:
levelStr = "debug"
case Trace:
levelStr = "trace"
default:
levelStr = "all"
}
vals["@level"] = levelStr
if name != "" {
vals["@module"] = name
}
if l.callerOffset > 0 {
if _, file, line, ok := runtime.Caller(l.callerOffset + 1); ok {
vals["@caller"] = fmt.Sprintf("%s:%d", file, line)
}
}
return vals
}
// Emit the message and args at the provided level
func (l *intLogger) Log(level Level, msg string, args ...interface{}) {
l.log(l.Name(), level, msg, args...)
}
// Emit the message and args at DEBUG level
func (l *intLogger) Debug(msg string, args ...interface{}) {
l.log(l.Name(), Debug, msg, args...)
}
// Emit the message and args at TRACE level
func (l *intLogger) Trace(msg string, args ...interface{}) {
l.log(l.Name(), Trace, msg, args...)
}
// Emit the message and args at INFO level
func (l *intLogger) Info(msg string, args ...interface{}) {
l.log(l.Name(), Info, msg, args...)
}
// Emit the message and args at WARN level
func (l *intLogger) Warn(msg string, args ...interface{}) {
l.log(l.Name(), Warn, msg, args...)
}
// Emit the message and args at ERROR level
func (l *intLogger) Error(msg string, args ...interface{}) {
l.log(l.Name(), Error, msg, args...)
}
// Indicate that the logger would emit TRACE level logs
func (l *intLogger) IsTrace() bool {
return Level(atomic.LoadInt32(l.level)) == Trace
}
// Indicate that the logger would emit DEBUG level logs
func (l *intLogger) IsDebug() bool {
return Level(atomic.LoadInt32(l.level)) <= Debug
}
// Indicate that the logger would emit INFO level logs
func (l *intLogger) IsInfo() bool {
return Level(atomic.LoadInt32(l.level)) <= Info
}
// Indicate that the logger would emit WARN level logs
func (l *intLogger) IsWarn() bool {
return Level(atomic.LoadInt32(l.level)) <= Warn
}
// Indicate that the logger would emit ERROR level logs
func (l *intLogger) IsError() bool {
return Level(atomic.LoadInt32(l.level)) <= Error
}
const MissingKey = "EXTRA_VALUE_AT_END"
// Return a sub-Logger for which every emitted log message will contain
// the given key/value pairs. This is used to create a context specific
// Logger.
func (l *intLogger) With(args ...interface{}) Logger {
var extra interface{}
if len(args)%2 != 0 {
extra = args[len(args)-1]
args = args[:len(args)-1]
}
sl := l.copy()
result := make(map[string]interface{}, len(l.implied)+len(args))
keys := make([]string, 0, len(l.implied)+len(args))
// Read existing args, store map and key for consistent sorting
for i := 0; i < len(l.implied); i += 2 {
key := l.implied[i].(string)
keys = append(keys, key)
result[key] = l.implied[i+1]
}
// Read new args, store map and key for consistent sorting
for i := 0; i < len(args); i += 2 {
key := args[i].(string)
_, exists := result[key]
if !exists {
keys = append(keys, key)
}
result[key] = args[i+1]
}
// Sort keys to be consistent
sort.Strings(keys)
sl.implied = make([]interface{}, 0, len(l.implied)+len(args))
for _, k := range keys {
sl.implied = append(sl.implied, k)
sl.implied = append(sl.implied, result[k])
}
if extra != nil {
sl.implied = append(sl.implied, MissingKey, extra)
}
return sl
}
// Create a new sub-Logger with a name descending from the current name.
// This is used to create a subsystem specific Logger.
func (l *intLogger) Named(name string) Logger {
sl := l.copy()
if sl.name != "" {
sl.name = sl.name + "." + name
} else {
sl.name = name
}
return sl
}
// Create a new sub-Logger with an explicit name. This ignores the current
// name. This is used to create a standalone logger that doesn't fall
// within the normal hierarchy.
func (l *intLogger) ResetNamed(name string) Logger {
sl := l.copy()
sl.name = name
return sl
}
func (l *intLogger) ResetOutput(opts *LoggerOptions) error {
if opts.Output == nil {
return errors.New("given output is nil")
}
l.mutex.Lock()
defer l.mutex.Unlock()
return l.resetOutput(opts)
}
func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
if opts.Output == nil {
return errors.New("given output is nil")
}
if flushable == nil {
return errors.New("flushable is nil")
}
l.mutex.Lock()
defer l.mutex.Unlock()
if err := flushable.Flush(); err != nil {
return err
}
return l.resetOutput(opts)
}
func (l *intLogger) resetOutput(opts *LoggerOptions) error {
l.writer = newWriter(opts.Output, opts.Color)
l.setColorization(opts)
return nil
}
// Update the logging level on-the-fly. This will affect all subloggers as
// well.
func (l *intLogger) SetLevel(level Level) {
atomic.StoreInt32(l.level, int32(level))
}
// Create a *log.Logger that will send its data through this Logger. This
// allows packages that expect to be using the standard library log to actually
// use this logger.
func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
if opts == nil {
opts = &StandardLoggerOptions{}
}
return log.New(l.StandardWriter(opts), "", 0)
}
func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
newLog := *l
if l.callerOffset > 0 {
// the stack is
// logger.printf() -> l.Output() ->l.out.writer(hclog:stdlogAdaptor.write) -> hclog:stdlogAdaptor.dispatch()
// So plus 4.
newLog.callerOffset = l.callerOffset + 4
}
return &stdlogAdapter{
log: &newLog,
inferLevels: opts.InferLevels,
inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
forceLevel: opts.ForceLevel,
}
}
// checks if the underlying io.Writer is a file, and
// panics if not. For use by colorization.
func (l *intLogger) checkWriterIsFile() *os.File {
fi, ok := l.writer.w.(*os.File)
if !ok {
panic("Cannot enable coloring of non-file Writers")
}
return fi
}
// Accept implements the SinkAdapter interface
func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) {
i.log(name, level, msg, args...)
}
// ImpliedArgs returns the logger's implied args
func (i *intLogger) ImpliedArgs() []interface{} {
return i.implied
}
// Name returns the logger's name
func (i *intLogger) Name() string {
return i.name
}
// copy returns a shallow copy of the intLogger, replacing the level pointer
// when necessary
func (l *intLogger) copy() *intLogger {
sl := *l
if l.independentLevels {
sl.level = new(int32)
*sl.level = *l.level
}
return &sl
}

View File

@ -1,369 +0,0 @@
package hclog
import (
"io"
"log"
"os"
"strings"
"time"
)
var (
// DefaultOutput is used as the default log output.
DefaultOutput io.Writer = os.Stderr
// DefaultLevel is used as the default log level.
DefaultLevel = Info
)
// Level represents a log level.
type Level int32
const (
// NoLevel is a special level used to indicate that no level has been
// set and allow for a default to be used.
NoLevel Level = 0
// Trace is the most verbose level. Intended to be used for the tracing
// of actions in code, such as function enters/exits, etc.
Trace Level = 1
// Debug information for programmer low-level analysis.
Debug Level = 2
// Info information about steady state operations.
Info Level = 3
// Warn information about rare but handled events.
Warn Level = 4
// Error information about unrecoverable events.
Error Level = 5
// Off disables all logging output.
Off Level = 6
)
// Format is a simple convenience type for when formatting is required. When
// processing a value of this type, the logger automatically treats the first
// argument as a Printf formatting string and passes the rest as the values
// to be formatted. For example: L.Info("beans", "count", Fmt("%d beans/day", beans)).
type Format []interface{}
// Fmt returns a Format type. This is a convenience function for creating a Format
// type.
func Fmt(str string, args ...interface{}) Format {
return append(Format{str}, args...)
}
// A simple shortcut to format numbers in hex when displayed with the normal
// text output. For example: L.Info("header value", Hex(17))
type Hex int
// A simple shortcut to format numbers in octal when displayed with the normal
// text output. For example: L.Info("perms", Octal(17))
type Octal int
// A simple shortcut to format numbers in binary when displayed with the normal
// text output. For example: L.Info("bits", Binary(17))
type Binary int
// A simple shortcut to format strings with Go quoting. Control and
// non-printable characters will be escaped with their backslash equivalents in
// output. Intended for untrusted or multiline strings which should be logged
// as concisely as possible.
type Quote string
// ColorOption expresses how the output should be colored, if at all.
type ColorOption uint8
const (
// ColorOff is the default coloration, and does not
// inject color codes into the io.Writer.
ColorOff ColorOption = iota
// AutoColor checks if the io.Writer is a tty,
// and if so enables coloring.
AutoColor
// ForceColor will enable coloring, regardless of whether
// the io.Writer is a tty or not.
ForceColor
)
// LevelFromString returns a Level type for the named log level, or "NoLevel" if
// the level string is invalid. This facilitates setting the log level via
// config or environment variable by name in a predictable way.
func LevelFromString(levelStr string) Level {
// We don't care about case. Accept both "INFO" and "info".
levelStr = strings.ToLower(strings.TrimSpace(levelStr))
switch levelStr {
case "trace":
return Trace
case "debug":
return Debug
case "info":
return Info
case "warn":
return Warn
case "error":
return Error
case "off":
return Off
default:
return NoLevel
}
}
func (l Level) String() string {
switch l {
case Trace:
return "trace"
case Debug:
return "debug"
case Info:
return "info"
case Warn:
return "warn"
case Error:
return "error"
case NoLevel:
return "none"
case Off:
return "off"
default:
return "unknown"
}
}
// Logger describes the interface that must be implemented by all loggers.
type Logger interface {
// Args are alternating key, val pairs
// keys must be strings
// vals can be any type, but display is implementation specific
// Emit a message and key/value pairs at a provided log level
Log(level Level, msg string, args ...interface{})
// Emit a message and key/value pairs at the TRACE level
Trace(msg string, args ...interface{})
// Emit a message and key/value pairs at the DEBUG level
Debug(msg string, args ...interface{})
// Emit a message and key/value pairs at the INFO level
Info(msg string, args ...interface{})
// Emit a message and key/value pairs at the WARN level
Warn(msg string, args ...interface{})
// Emit a message and key/value pairs at the ERROR level
Error(msg string, args ...interface{})
// Indicate if TRACE logs would be emitted. This and the other Is* guards
// are used to elide expensive logging code based on the current level.
IsTrace() bool
// Indicate if DEBUG logs would be emitted. This and the other Is* guards
IsDebug() bool
// Indicate if INFO logs would be emitted. This and the other Is* guards
IsInfo() bool
// Indicate if WARN logs would be emitted. This and the other Is* guards
IsWarn() bool
// Indicate if ERROR logs would be emitted. This and the other Is* guards
IsError() bool
// ImpliedArgs returns the key/value pairs set via With
ImpliedArgs() []interface{}
// Creates a sublogger that will always have the given key/value pairs
With(args ...interface{}) Logger
// Returns the Name of the logger
Name() string
// Create a logger that will prepend the name string on the front of all messages.
// If the logger already has a name, the new value will be appended to the current
// name. That way, a major subsystem can use this to decorate all its own logs
// without losing context.
Named(name string) Logger
// Create a logger that will prepend the name string on the front of all messages.
// This sets the name of the logger to the value directly, unlike Named which honors
// the current name as well.
ResetNamed(name string) Logger
// Updates the level. This should affect all related loggers as well,
// unless they were created with IndependentLevels. If an
// implementation cannot update the level on the fly, it should no-op.
SetLevel(level Level)
// Return a value that conforms to the stdlib log.Logger interface
StandardLogger(opts *StandardLoggerOptions) *log.Logger
// Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
StandardWriter(opts *StandardLoggerOptions) io.Writer
}
// StandardLoggerOptions can be used to configure a new standard logger.
type StandardLoggerOptions struct {
// Indicate that some minimal parsing should be done on strings to try
// and detect their level and re-emit them.
// This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO], and
// [DEBUG], and strips the prefix off before reapplying it.
InferLevels bool
// Indicate that some minimal parsing should be done on strings to try
// and detect their level and re-emit them while ignoring possible
// timestamp values in the beginning of the string.
// This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO], and
// [DEBUG], and strips the prefix off before reapplying it.
// The timestamp detection may result in false positives and incomplete
// string outputs.
InferLevelsWithTimestamp bool
// ForceLevel is used to force all output from the standard logger to be at
// the specified level. Similar to InferLevels, this will strip any level
// prefix contained in the logged string before applying the forced level.
// If set, this overrides InferLevels.
ForceLevel Level
}
type TimeFunction = func() time.Time
// LoggerOptions can be used to configure a new logger.
type LoggerOptions struct {
// Name of the subsystem to prefix logs with
Name string
// The threshold for the logger. Anything less severe is suppressed
Level Level
// Where to write the logs to. Defaults to os.Stderr if nil
Output io.Writer
// An optional Locker in case Output is shared. This can be a sync.Mutex or
// a NoopLocker if the caller wants control over output, e.g. for batching
// log lines.
Mutex Locker
// Control if the output should be in JSON.
JSONFormat bool
// Include file and line information in each log line
IncludeLocation bool
// AdditionalLocationOffset is the number of additional stack levels to skip
// when finding the file and line information for the log line
AdditionalLocationOffset int
// The time format to use instead of the default
TimeFormat string
// A function which is called to get the time object that is formatted using `TimeFormat`
TimeFn TimeFunction
// Control whether or not to display the time at all. This is required
// because setting TimeFormat to empty assumes the default format.
DisableTime bool
// Color the output. On Windows, colored logs are only available for io.Writers that
// are concretely instances of *os.File.
Color ColorOption
// Only color the header, not the body. This can help with readability of long messages.
ColorHeaderOnly bool
// A function which is called with the log information; if it returns true, the
// value should not be logged.
// This is useful when interacting with a system whose log messages you wish to
// suppress (because they're too noisy, etc.)
Exclude func(level Level, msg string, args ...interface{}) bool
// IndependentLevels causes subloggers to be created with an independent
// copy of this logger's level. This means that using SetLevel on this
// logger will not affect any subloggers, and SetLevel on any subloggers
// will not affect the parent or sibling loggers.
IndependentLevels bool
}
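To tie the options above together, a brief sketch constructing a JSON logger with caller locations and a sublogger whose level is independent of its parent:

```go
logger := hclog.New(&hclog.LoggerOptions{
	Name:              "my-app",
	Level:             hclog.Debug,
	JSONFormat:        true,
	IncludeLocation:   true,
	IndependentLevels: true,
})
sub := logger.Named("cache")
sub.SetLevel(hclog.Error) // does not affect the parent logger
logger.Debug("still emitted at DEBUG")
```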
// InterceptLogger describes the interface for using a logger
// that can register different output sinks.
// This is useful for sending lower level log messages
// to a different output while keeping the root logger
// at a higher one.
type InterceptLogger interface {
// Logger is the root logger for an InterceptLogger
Logger
// RegisterSink adds a SinkAdapter to the InterceptLogger
RegisterSink(sink SinkAdapter)
// DeregisterSink removes a SinkAdapter from the InterceptLogger
DeregisterSink(sink SinkAdapter)
// Create an InterceptLogger that will prepend the name string on the front of all messages.
// If the logger already has a name, the new value will be appended to the current
// name. That way, a major subsystem can use this to decorate all its own logs
// without losing context.
NamedIntercept(name string) InterceptLogger
// Create an InterceptLogger that will prepend the name string on the front of all messages.
// This sets the name of the logger to the value directly, unlike Named which honors
// the current name as well.
ResetNamedIntercept(name string) InterceptLogger
// Deprecated: use StandardLogger
StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger
// Deprecated: use StandardWriter
StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer
}
// SinkAdapter describes the interface that must be implemented
// in order to Register a new sink to an InterceptLogger
type SinkAdapter interface {
Accept(name string, level Level, msg string, args ...interface{})
}
// Flushable represents a method for flushing an output buffer. It can be used
// when resetting the log to use a new output, in order to flush the writes to
// the existing output beforehand.
type Flushable interface {
Flush() error
}
// OutputResettable provides ways to swap the output in use at runtime
type OutputResettable interface {
// ResetOutput swaps the current output writer with the one given in the
// opts. Color options given in opts will be used for the new output.
ResetOutput(opts *LoggerOptions) error
// ResetOutputWithFlush swaps the current output writer with the one given
// in the opts, first calling Flush on the given Flushable. Color options
// given in opts will be used for the new output.
ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error
}
// Locker is used for locking output. If not set when creating a logger, a
// sync.Mutex will be used internally.
type Locker interface {
// Lock is called when the output is going to be changed or written to
Lock()
// Unlock is called when the operation that called Lock() completes
Unlock()
}
// NoopLocker implements locker but does nothing. This is useful if the client
// wants tight control over locking, in order to provide grouping of log
// entries or other functionality.
type NoopLocker struct{}
// Lock does nothing
func (n NoopLocker) Lock() {}
// Unlock does nothing
func (n NoopLocker) Unlock() {}
var _ Locker = (*NoopLocker)(nil)

View File

@ -1,58 +0,0 @@
package hclog
import (
"io"
"io/ioutil"
"log"
)
// NewNullLogger instantiates a Logger for which all calls
// will succeed without doing anything.
// Useful for testing purposes.
func NewNullLogger() Logger {
return &nullLogger{}
}
type nullLogger struct{}
func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {}
func (l *nullLogger) Trace(msg string, args ...interface{}) {}
func (l *nullLogger) Debug(msg string, args ...interface{}) {}
func (l *nullLogger) Info(msg string, args ...interface{}) {}
func (l *nullLogger) Warn(msg string, args ...interface{}) {}
func (l *nullLogger) Error(msg string, args ...interface{}) {}
func (l *nullLogger) IsTrace() bool { return false }
func (l *nullLogger) IsDebug() bool { return false }
func (l *nullLogger) IsInfo() bool { return false }
func (l *nullLogger) IsWarn() bool { return false }
func (l *nullLogger) IsError() bool { return false }
func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} }
func (l *nullLogger) With(args ...interface{}) Logger { return l }
func (l *nullLogger) Name() string { return "" }
func (l *nullLogger) Named(name string) Logger { return l }
func (l *nullLogger) ResetNamed(name string) Logger { return l }
func (l *nullLogger) SetLevel(level Level) {}
func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
return log.New(l.StandardWriter(opts), "", log.LstdFlags)
}
func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
return ioutil.Discard
}

View File

@ -1,109 +0,0 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package hclog
import (
"bytes"
"runtime"
"strconv"
"strings"
"sync"
)
var (
_stacktraceIgnorePrefixes = []string{
"runtime.goexit",
"runtime.main",
}
_stacktracePool = sync.Pool{
New: func() interface{} {
return newProgramCounters(64)
},
}
)
// CapturedStacktrace represents a stacktrace captured by a previous call
// to log.Stacktrace. If passed to a logging function, the stacktrace
// will be appended.
type CapturedStacktrace string
// Stacktrace captures a stacktrace of the current goroutine and returns
// it to be passed to a logging function.
func Stacktrace() CapturedStacktrace {
return CapturedStacktrace(takeStacktrace())
}
func takeStacktrace() string {
programCounters := _stacktracePool.Get().(*programCounters)
defer _stacktracePool.Put(programCounters)
var buffer bytes.Buffer
for {
// Skip the call to runtime.Counters and takeStacktrace so that the
// program counters start at the caller of takeStacktrace.
n := runtime.Callers(2, programCounters.pcs)
if n < cap(programCounters.pcs) {
programCounters.pcs = programCounters.pcs[:n]
break
}
// Don't put the too-short counter slice back into the pool; this lets
// the pool adjust if we consistently take deep stacktraces.
programCounters = newProgramCounters(len(programCounters.pcs) * 2)
}
i := 0
frames := runtime.CallersFrames(programCounters.pcs)
for frame, more := frames.Next(); more; frame, more = frames.Next() {
if shouldIgnoreStacktraceFunction(frame.Function) {
continue
}
if i != 0 {
buffer.WriteByte('\n')
}
i++
buffer.WriteString(frame.Function)
buffer.WriteByte('\n')
buffer.WriteByte('\t')
buffer.WriteString(frame.File)
buffer.WriteByte(':')
buffer.WriteString(strconv.Itoa(int(frame.Line)))
}
return buffer.String()
}
func shouldIgnoreStacktraceFunction(function string) bool {
for _, prefix := range _stacktraceIgnorePrefixes {
if strings.HasPrefix(function, prefix) {
return true
}
}
return false
}
type programCounters struct {
pcs []uintptr
}
func newProgramCounters(size int) *programCounters {
return &programCounters{make([]uintptr, size)}
}
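A one-line usage sketch, assuming a `logger` and an `err` are in scope; because the logging functions detect a CapturedStacktrace value, the trace is appended after the log line:

```go
logger.Error("request failed", "error", err, "stacktrace", hclog.Stacktrace())
```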

View File

@ -1,110 +0,0 @@
package hclog
import (
"bytes"
"log"
"regexp"
"strings"
)
// Regex to ignore characters commonly found in timestamp formats from the
// beginning of inputs.
var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`)
// Provides an io.Writer to shim the data out of *log.Logger
// and back into our Logger. This is basically the only way to
// build upon *log.Logger.
type stdlogAdapter struct {
log Logger
inferLevels bool
inferLevelsWithTimestamp bool
forceLevel Level
}
// Take the data, infer the levels if configured, and send it through
// a regular Logger.
func (s *stdlogAdapter) Write(data []byte) (int, error) {
str := string(bytes.TrimRight(data, " \t\n"))
if s.forceLevel != NoLevel {
// Use pickLevel to strip log levels included in the line since we are
// forcing the level
_, str := s.pickLevel(str)
// Log at the forced level
s.dispatch(str, s.forceLevel)
} else if s.inferLevels {
if s.inferLevelsWithTimestamp {
str = s.trimTimestamp(str)
}
level, str := s.pickLevel(str)
s.dispatch(str, level)
} else {
s.log.Info(str)
}
return len(data), nil
}
func (s *stdlogAdapter) dispatch(str string, level Level) {
switch level {
case Trace:
s.log.Trace(str)
case Debug:
s.log.Debug(str)
case Info:
s.log.Info(str)
case Warn:
s.log.Warn(str)
case Error:
s.log.Error(str)
default:
s.log.Info(str)
}
}
// Detect, based on conventions, what log level this is.
func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
switch {
case strings.HasPrefix(str, "[DEBUG]"):
return Debug, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[TRACE]"):
return Trace, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[INFO]"):
return Info, strings.TrimSpace(str[6:])
case strings.HasPrefix(str, "[WARN]"):
return Warn, strings.TrimSpace(str[6:])
case strings.HasPrefix(str, "[ERROR]"):
return Error, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[ERR]"):
return Error, strings.TrimSpace(str[5:])
default:
return Info, str
}
}
func (s *stdlogAdapter) trimTimestamp(str string) string {
idx := logTimestampRegexp.FindStringIndex(str)
return str[idx[1]:]
}
type logWriter struct {
l *log.Logger
}
func (l *logWriter) Write(b []byte) (int, error) {
l.l.Println(string(bytes.TrimRight(b, " \n\t")))
return len(b), nil
}
// Takes a standard library logger and returns a Logger that will write to it
func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger {
var dl LoggerOptions = *opts
// Use the time format that log.Logger uses
dl.DisableTime = true
dl.Output = &logWriter{l}
return New(&dl)
}
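A short sketch adapting a standard library logger (created here for illustration; the `log` and `os` imports are assumed) into an `hclog.Logger`:

```go
std := log.New(os.Stderr, "", log.LstdFlags)
wrapped := hclog.FromStandardLogger(std, &hclog.LoggerOptions{Name: "legacy"})
wrapped.Info("now emitting through hclog", "path", "/healthz")
```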

View File

@ -1,82 +0,0 @@
package hclog
import (
"bytes"
"io"
)
type writer struct {
b bytes.Buffer
w io.Writer
color ColorOption
}
func newWriter(w io.Writer, color ColorOption) *writer {
return &writer{w: w, color: color}
}
func (w *writer) Flush(level Level) (err error) {
var unwritten = w.b.Bytes()
if w.color != ColorOff {
color := _levelToColor[level]
unwritten = []byte(color.Sprintf("%s", unwritten))
}
if lw, ok := w.w.(LevelWriter); ok {
_, err = lw.LevelWrite(level, unwritten)
} else {
_, err = w.w.Write(unwritten)
}
w.b.Reset()
return err
}
func (w *writer) Write(p []byte) (int, error) {
return w.b.Write(p)
}
func (w *writer) WriteByte(c byte) error {
return w.b.WriteByte(c)
}
func (w *writer) WriteString(s string) (int, error) {
return w.b.WriteString(s)
}
// LevelWriter is the interface that wraps the LevelWrite method.
type LevelWriter interface {
LevelWrite(level Level, p []byte) (n int, err error)
}
// LeveledWriter writes all log messages to the standard writer,
// except for log levels that are defined in the overrides map.
type LeveledWriter struct {
standard io.Writer
overrides map[Level]io.Writer
}
// NewLeveledWriter returns an initialized LeveledWriter.
//
// standard will be used as the default writer for all log levels,
// except for log levels that are defined in the overrides map.
func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter {
return &LeveledWriter{
standard: standard,
overrides: overrides,
}
}
// Write implements io.Writer.
func (lw *LeveledWriter) Write(p []byte) (int, error) {
return lw.standard.Write(p)
}
// LevelWrite implements LevelWriter.
func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) {
w, ok := lw.overrides[level]
if !ok {
w = lw.standard
}
return w.Write(p)
}
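A minimal sketch routing ERROR output to a separate writer while every other level uses the standard one (`errorLog` is a hypothetical `io.Writer` opened elsewhere; the `io` and `os` imports are assumed):

```go
lw := hclog.NewLeveledWriter(os.Stderr, map[hclog.Level]io.Writer{
	hclog.Error: errorLog, // hypothetical destination for errors only
})
logger := hclog.New(&hclog.LoggerOptions{Name: "my-app", Output: lw})
logger.Error("goes to errorLog")
logger.Info("goes to os.Stderr")
```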

View File

@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,23 +0,0 @@
# UNRELEASED
# 1.3.0 (September 17th, 2020)
FEATURES
* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)]
# 1.2.0 (March 18th, 2020)
FEATURES
* Adds a `Clone` method to `Txn` allowing transactions to be split into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
# 1.1.0 (May 22nd, 2019)
FEATURES
* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
# 1.0.0 (August 30th, 2018)
* go mod adopted

View File

@ -1,363 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

View File

@ -1,66 +0,0 @@
go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
=========
Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
The package only provides a single `Tree` implementation, optimized for sparse nodes.
As a radix tree, it provides the following:
* O(k) operations. In many cases, this can be faster than a hash table since
the hash function is an O(k) operation, and hash tables have very poor cache locality.
* Minimum / Maximum value lookups
* Ordered iteration
A tree supports using a transaction to batch multiple updates (insert, delete)
in a more efficient manner than performing each operation one at a time.
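For illustration, a minimal transaction sketch (batching two inserts and a delete into a single new tree; the ignored return values carry the previous value and an update flag):

```go
r := iradix.New()
txn := r.Txn()
txn.Insert([]byte("a"), 1)
txn.Insert([]byte("b"), 2)
txn.Delete([]byte("a"))
r = txn.Commit()
// r.Len() == 1
```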
For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
Documentation
=============
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
Example
=======
Below is a simple example of usage
```go
// Create a tree
r := iradix.New()
r, _, _ = r.Insert([]byte("foo"), 1)
r, _, _ = r.Insert([]byte("bar"), 2)
r, _, _ = r.Insert([]byte("foobar"), 2)
// Find the longest prefix match
m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
if string(m) != "foo" {
panic("should be foo")
}
```
Here is an example of performing a range scan of the keys.
```go
// Create a tree
r := iradix.New()
r, _, _ = r.Insert([]byte("001"), 1)
r, _, _ = r.Insert([]byte("002"), 2)
r, _, _ = r.Insert([]byte("005"), 5)
r, _, _ = r.Insert([]byte("010"), 10)
r, _, _ = r.Insert([]byte("100"), 10)
// Range scan over the keys that sort lexicographically between [003, 050)
it := r.Root().Iterator()
it.SeekLowerBound([]byte("003"))
for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
if string(key) >= "050" {
break
}
fmt.Println(string(key))
}
// Output:
// 005
// 010
```

View File

@ -1,21 +0,0 @@
package iradix
import "sort"
type edges []edge
func (e edges) Len() int {
return len(e)
}
func (e edges) Less(i, j int) bool {
return e[i].label < e[j].label
}
func (e edges) Swap(i, j int) {
e[i], e[j] = e[j], e[i]
}
func (e edges) Sort() {
sort.Sort(e)
}

View File

@ -1,676 +0,0 @@
package iradix
import (
"bytes"
"strings"
"github.com/hashicorp/golang-lru/simplelru"
)
const (
// defaultModifiedCache is the default size of the modified node
// cache used per transaction. This is used to cache the updates
// to the nodes near the root, while the leaves do not need to be
// cached. This is important for very large transactions to prevent
// the modified cache from growing to be enormous. This is also used
// to set the max size of the mutation notify maps since those should
// also be bounded in a similar way.
defaultModifiedCache = 8192
)
// Tree implements an immutable radix tree. This can be treated as a
// Dictionary abstract data type. The main advantage over a standard
// hash map is prefix-based lookups and ordered iteration. The immutability
// means that it is safe to concurrently read from a Tree without any
// coordination.
type Tree struct {
root *Node
size int
}
// New returns an empty Tree
func New() *Tree {
t := &Tree{
root: &Node{
mutateCh: make(chan struct{}),
},
}
return t
}
// Len is used to return the number of elements in the tree
func (t *Tree) Len() int {
return t.size
}
// Txn is a transaction on the tree. This transaction is applied
// atomically and returns a new tree when committed. A transaction
// is not thread safe, and should only be used by a single goroutine.
type Txn struct {
// root is the modified root for the transaction.
root *Node
// snap is a snapshot of the root node for use if we have to run the
// slow notify algorithm.
snap *Node
// size tracks the size of the tree as it is modified during the
// transaction.
size int
// writable is a cache of writable nodes that have been created during
// the course of the transaction. This allows us to re-use the same
// nodes for further writes and avoid unnecessary copies of nodes that
// have never been exposed outside the transaction. This will only hold
// up to defaultModifiedCache number of entries.
writable *simplelru.LRU
// trackChannels is used to hold channels that need to be notified to
// signal mutation of the tree. This will only hold up to
// defaultModifiedCache number of entries, after which we will set the
// trackOverflow flag, which will cause us to use a more expensive
// algorithm to perform the notifications. Mutation tracking is only
// performed if trackMutate is true.
trackChannels map[chan struct{}]struct{}
trackOverflow bool
trackMutate bool
}
// Txn starts a new transaction that can be used to mutate the tree
func (t *Tree) Txn() *Txn {
txn := &Txn{
root: t.root,
snap: t.root,
size: t.size,
}
return txn
}
// Clone makes an independent copy of the transaction. The new transaction
// does not track any nodes and has TrackMutate turned off. The cloned
// transaction will contain any uncommitted writes in the original
// transaction, but further mutations to either will be independent and
// result in different radix trees on Commit. A cloned transaction may be
// passed to another goroutine and mutated there independently; however,
// each transaction may only be mutated in a single thread.
func (t *Txn) Clone() *Txn {
// reset the writable node cache to avoid leaking future writes into the clone
t.writable = nil
txn := &Txn{
root: t.root,
snap: t.snap,
size: t.size,
}
return txn
}
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
// then notifications will be issued for affected internal nodes and leaves when
// the transaction is committed.
func (t *Txn) TrackMutate(track bool) {
t.trackMutate = track
}
// trackChannel safely attempts to track the given mutation channel, setting the
// overflow flag if we can no longer track any more. This limits the amount of
// state that will accumulate during a transaction and we have a slower algorithm
// to switch to if we overflow.
func (t *Txn) trackChannel(ch chan struct{}) {
// In overflow, make sure we don't store any more objects.
if t.trackOverflow {
return
}
// If this would overflow the state we reject it and set the flag (since
// we aren't tracking everything that's required any longer).
if len(t.trackChannels) >= defaultModifiedCache {
// Mark that we are in the overflow state
t.trackOverflow = true
// Clear the map so that the channels can be garbage collected. It is
// safe to do this since we have already overflowed and will be using
// the slow notify algorithm.
t.trackChannels = nil
return
}
// Create the map on the fly when we need it.
if t.trackChannels == nil {
t.trackChannels = make(map[chan struct{}]struct{})
}
// Otherwise we are good to track it.
t.trackChannels[ch] = struct{}{}
}
// writeNode returns a node to be modified. If the current node has already been
// modified during the course of the transaction, it is used in-place. Set
// forLeafUpdate to true if you are getting a write node to update the leaf,
// which will set leaf mutation tracking appropriately as well.
func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
// Ensure the writable set exists.
if t.writable == nil {
lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
if err != nil {
panic(err)
}
t.writable = lru
}
// If this node has already been modified, we can continue to use it
// during this transaction. We know that we don't need to track it for
// a node update since the node is writable, but if this is for a leaf
// update we track it, in case the initial write to this node didn't
// update the leaf.
if _, ok := t.writable.Get(n); ok {
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
return n
}
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Copy the existing node. If you have set forLeafUpdate it will be
// safe to replace this leaf with another after you get your node for
// writing. You MUST replace it, because the channel associated with
// this leaf will be closed when this transaction is committed.
nc := &Node{
mutateCh: make(chan struct{}),
leaf: n.leaf,
}
if n.prefix != nil {
nc.prefix = make([]byte, len(n.prefix))
copy(nc.prefix, n.prefix)
}
if len(n.edges) != 0 {
nc.edges = make([]edge, len(n.edges))
copy(nc.edges, n.edges)
}
// Mark this node as writable.
t.writable.Add(nc, nil)
return nc
}
// trackChannelsAndCount visits all the nodes in the tree under n, adding
// their mutate channels to the transaction. It returns the number of leaf
// nodes in the subtree visited.
func (t *Txn) trackChannelsAndCount(n *Node) int {
// Count only leaf nodes
leaves := 0
if n.leaf != nil {
leaves = 1
}
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Recurse on the children
for _, e := range n.edges {
leaves += t.trackChannelsAndCount(e.node)
}
return leaves
}
// mergeChild is called to collapse the given node with its child. This is only
// called when the given node is not a leaf and has a single edge.
func (t *Txn) mergeChild(n *Node) {
// Mark the child node as being mutated since we are about to abandon
// it. We don't need to mark the leaf since we are retaining it if it
// is there.
e := n.edges[0]
child := e.node
if t.trackMutate {
t.trackChannel(child.mutateCh)
}
// Merge the nodes.
n.prefix = concat(n.prefix, child.prefix)
n.leaf = child.leaf
if len(child.edges) != 0 {
n.edges = make([]edge, len(child.edges))
copy(n.edges, child.edges)
} else {
n.edges = nil
}
}
// insert does a recursive insertion
func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
// Handle key exhaustion
if len(search) == 0 {
var oldVal interface{}
didUpdate := false
if n.isLeaf() {
oldVal = n.leaf.val
didUpdate = true
}
nc := t.writeNode(n, true)
nc.leaf = &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
}
return nc, oldVal, didUpdate
}
// Look for the edge
idx, child := n.getEdge(search[0])
// No edge, create one
if child == nil {
e := edge{
label: search[0],
node: &Node{
mutateCh: make(chan struct{}),
leaf: &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
},
prefix: search,
},
}
nc := t.writeNode(n, false)
nc.addEdge(e)
return nc, nil, false
}
// Determine longest prefix of the search key on match
commonPrefix := longestPrefix(search, child.prefix)
if commonPrefix == len(child.prefix) {
search = search[commonPrefix:]
newChild, oldVal, didUpdate := t.insert(child, k, search, v)
if newChild != nil {
nc := t.writeNode(n, false)
nc.edges[idx].node = newChild
return nc, oldVal, didUpdate
}
return nil, oldVal, didUpdate
}
// Split the node
nc := t.writeNode(n, false)
splitNode := &Node{
mutateCh: make(chan struct{}),
prefix: search[:commonPrefix],
}
nc.replaceEdge(edge{
label: search[0],
node: splitNode,
})
// Restore the existing child node
modChild := t.writeNode(child, false)
splitNode.addEdge(edge{
label: modChild.prefix[commonPrefix],
node: modChild,
})
modChild.prefix = modChild.prefix[commonPrefix:]
// Create a new leaf node
leaf := &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
}
// If the new key is a subset, add it to this node
search = search[commonPrefix:]
if len(search) == 0 {
splitNode.leaf = leaf
return nc, nil, false
}
// Create a new edge for the node
splitNode.addEdge(edge{
label: search[0],
node: &Node{
mutateCh: make(chan struct{}),
leaf: leaf,
prefix: search,
},
})
return nc, nil, false
}
// delete does a recursive deletion
func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
// Check for key exhaustion
if len(search) == 0 {
if !n.isLeaf() {
return nil, nil
}
// Copy the pointer in case we are in a transaction that already
// modified this node since the node will be reused. Any changes
// made to the node will not affect returning the original leaf
// value.
oldLeaf := n.leaf
// Remove the leaf node
nc := t.writeNode(n, true)
nc.leaf = nil
// Check if this node should be merged
if n != t.root && len(nc.edges) == 1 {
t.mergeChild(nc)
}
return nc, oldLeaf
}
// Look for an edge
label := search[0]
idx, child := n.getEdge(label)
if child == nil || !bytes.HasPrefix(search, child.prefix) {
return nil, nil
}
// Consume the search prefix
search = search[len(child.prefix):]
newChild, leaf := t.delete(n, child, search)
if newChild == nil {
return nil, nil
}
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
}
return nc, leaf
}
// deletePrefix does a recursive deletion of all keys under the given prefix
func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
// Check for key exhaustion
if len(search) == 0 {
nc := t.writeNode(n, true)
if n.isLeaf() {
nc.leaf = nil
}
nc.edges = nil
return nc, t.trackChannelsAndCount(n)
}
// Look for an edge
label := search[0]
idx, child := n.getEdge(label)
// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
return nil, 0
}
// Consume the search prefix
if len(child.prefix) > len(search) {
search = []byte("")
} else {
search = search[len(child.prefix):]
}
newChild, numDeletions := t.deletePrefix(n, child, search)
if newChild == nil {
return nil, 0
}
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
}
return nc, numDeletions
}
// Insert is used to add or update a given key. The return provides
// the previous value and a bool indicating if any was set.
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
if newRoot != nil {
t.root = newRoot
}
if !didUpdate {
t.size++
}
return oldVal, didUpdate
}
// Delete is used to delete a given key. Returns the old value if any,
// and a bool indicating if the key was set.
func (t *Txn) Delete(k []byte) (interface{}, bool) {
newRoot, leaf := t.delete(nil, t.root, k)
if newRoot != nil {
t.root = newRoot
}
if leaf != nil {
t.size--
return leaf.val, true
}
return nil, false
}
// DeletePrefix is used to delete an entire subtree that matches the prefix
// This will delete all nodes under that prefix
func (t *Txn) DeletePrefix(prefix []byte) bool {
newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
if newRoot != nil {
t.root = newRoot
t.size = t.size - numDeletions
return true
}
return false
}
// Root returns the current root of the radix tree within this
// transaction. The root is not safe across insert and delete operations,
// but can be used to read the current state during a transaction.
func (t *Txn) Root() *Node {
return t.root
}
// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Txn) Get(k []byte) (interface{}, bool) {
return t.root.Get(k)
}
// GetWatch is used to lookup a specific key, returning
// the watch channel, value and if it was found
func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
return t.root.GetWatch(k)
}
// Commit is used to finalize the transaction and return a new tree. If mutation
// tracking is turned on then notifications will also be issued.
func (t *Txn) Commit() *Tree {
nt := t.CommitOnly()
if t.trackMutate {
t.Notify()
}
return nt
}
// CommitOnly is used to finalize the transaction and return a new tree, but
// does not issue any notifications until Notify is called.
func (t *Txn) CommitOnly() *Tree {
nt := &Tree{t.root, t.size}
t.writable = nil
return nt
}
// slowNotify does a complete comparison of the before and after trees in order
// to trigger notifications. This doesn't require any additional state but it
// is very expensive to compute.
func (t *Txn) slowNotify() {
snapIter := t.snap.rawIterator()
rootIter := t.root.rawIterator()
for snapIter.Front() != nil || rootIter.Front() != nil {
// If we've exhausted the nodes in the old snapshot, we know
// there's nothing remaining to notify.
if snapIter.Front() == nil {
return
}
snapElem := snapIter.Front()
// If we've exhausted the nodes in the new root, we know we need
// to invalidate everything that remains in the old snapshot. We
// know from the loop condition there's something in the old
// snapshot.
if rootIter.Front() == nil {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// Do one string compare so we can check the various conditions
// below without repeating the compare.
cmp := strings.Compare(snapIter.Path(), rootIter.Path())
// If the snapshot is behind the root, then we must have deleted
// this node during the transaction.
if cmp < 0 {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// If the snapshot is ahead of the root, then we must have added
// this node during the transaction.
if cmp > 0 {
rootIter.Next()
continue
}
// If we have the same path, then we need to see if we mutated a
// node and possibly the leaf.
rootElem := rootIter.Front()
if snapElem != rootElem {
close(snapElem.mutateCh)
if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
close(snapElem.leaf.mutateCh)
}
}
snapIter.Next()
rootIter.Next()
}
}
// Notify is used along with TrackMutate to trigger notifications. This must
// only be done once a transaction is committed via CommitOnly, and it is called
// automatically by Commit.
func (t *Txn) Notify() {
if !t.trackMutate {
return
}
// If we've overflowed the tracking state we can't use it in any way and
// need to do a full tree compare.
if t.trackOverflow {
t.slowNotify()
} else {
for ch := range t.trackChannels {
close(ch)
}
}
// Clean up the tracking state so that a re-notify is safe (will trigger
// the else clause above which will be a no-op).
t.trackChannels = nil
t.trackOverflow = false
}
// Insert is used to add or update a given key. The return provides
// the new tree, previous value and a bool indicating if any was set.
func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
txn := t.Txn()
old, ok := txn.Insert(k, v)
return txn.Commit(), old, ok
}
// Delete is used to delete a given key. Returns the new tree,
// old value if any, and a bool indicating if the key was set.
func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
txn := t.Txn()
old, ok := txn.Delete(k)
return txn.Commit(), old, ok
}
// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
// and a bool indicating if the prefix matched any nodes
func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
txn := t.Txn()
ok := txn.DeletePrefix(k)
return txn.Commit(), ok
}
// Root returns the root node of the tree which can be used for richer
// query operations.
func (t *Tree) Root() *Node {
return t.root
}
// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Tree) Get(k []byte) (interface{}, bool) {
return t.root.Get(k)
}
// longestPrefix finds the length of the shared prefix
// of two strings
func longestPrefix(k1, k2 []byte) int {
max := len(k1)
if l := len(k2); l < max {
max = l
}
var i int
for i = 0; i < max; i++ {
if k1[i] != k2[i] {
break
}
}
return i
}
// concat two byte slices, returning a third new copy
func concat(a, b []byte) []byte {
c := make([]byte, len(a)+len(b))
copy(c, a)
copy(c[len(a):], b)
return c
}
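
The TrackMutate/Notify machinery above closes the mutate channels of nodes touched by a committed transaction. A hedged sketch (not part of the vendored file) of observing a mutation through GetWatch:

```go
r := iradix.New()
r, _, _ = r.Insert([]byte("key"), 1)

// Obtain the watch channel for the leaf storing "key".
watch, _, _ := r.Root().GetWatch([]byte("key"))

// Mutate through a tracking transaction; Commit calls Notify,
// which closes the channels of the nodes that changed.
txn := r.Txn()
txn.TrackMutate(true)
txn.Insert([]byte("key"), 2)
txn.Commit()

<-watch // returns immediately: the channel was closed by Notify
```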

View File

@ -1,205 +0,0 @@
package iradix
import (
"bytes"
)
// Iterator is used to iterate over a set of nodes
// in pre-order
type Iterator struct {
node *Node
stack []edges
}
// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
// Wipe the stack
i.stack = nil
n := i.node
watch = n.mutateCh
search := prefix
for {
// Check for key exhaustion
if len(search) == 0 {
i.node = n
return
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
i.node = nil
return
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else if bytes.HasPrefix(n.prefix, search) {
i.node = n
return
} else {
i.node = nil
return
}
}
}
// SeekPrefix is used to seek the iterator to a given prefix
func (i *Iterator) SeekPrefix(prefix []byte) {
i.SeekPrefixWatch(prefix)
}
func (i *Iterator) recurseMin(n *Node) *Node {
// Traverse to the minimum child
if n.leaf != nil {
return n
}
nEdges := len(n.edges)
if nEdges > 1 {
// Add all the other edges to the stack (the min node will be added as
// we recurse)
i.stack = append(i.stack, n.edges[1:])
}
if nEdges > 0 {
return i.recurseMin(n.edges[0].node)
}
// Shouldn't be possible
return nil
}
// SeekLowerBound is used to seek the iterator to the smallest key that is
// greater than or equal to the given key. There is no watch variant, as it's
// hard to predict, based on the radix structure, which nodes' changes might
// affect the result.
func (i *Iterator) SeekLowerBound(key []byte) {
// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
// go because we need only a subset of edges of many nodes in the path to the
// leaf with the lower bound. Note that the iterator will still recurse into
// children that we don't traverse on the way to the lower bound as it
// walks the stack.
i.stack = []edges{}
// i.node starts off in the common case as pointing to the root node of the
// tree. By the time we return we have either found a lower bound and set up
// the stack to traverse all larger keys, or we have not and the stack and
// node should both be nil to prevent the iterator from assuming it is just
// iterating the whole tree from the root node. Either way this needs to end
// up as nil so just set it here.
n := i.node
i.node = nil
search := key
found := func(n *Node) {
i.stack = append(i.stack, edges{edge{node: n}})
}
findMin := func(n *Node) {
n = i.recurseMin(n)
if n != nil {
found(n)
return
}
}
for {
// Compare current prefix with the search key's same-length prefix.
var prefixCmp int
if len(n.prefix) < len(search) {
prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
} else {
prefixCmp = bytes.Compare(n.prefix, search)
}
if prefixCmp > 0 {
// Prefix is larger, that means the lower bound is greater than the search
// and from now on we need to follow the minimum path to the smallest
// leaf under this subtree.
findMin(n)
return
}
if prefixCmp < 0 {
// Prefix is smaller than search prefix, that means there is no lower
// bound
i.node = nil
return
}
// Prefix is equal, we are still heading for an exact match. If this is a
// leaf and an exact match we're done.
if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
found(n)
return
}
// Consume the search prefix if the current node has one. Note that this is
// safe because if n.prefix is longer than the search slice prefixCmp would
// have been > 0 above and the method would have already returned.
search = search[len(n.prefix):]
if len(search) == 0 {
// We've exhausted the search key, but the current node is not an exact
// match or not a leaf. That means that the leaf value (if it exists) and
// all child nodes must be strictly greater, so the smallest key in this
// subtree must be the lower bound.
findMin(n)
return
}
// Otherwise, take the lower bound next edge.
idx, lbNode := n.getLowerBoundEdge(search[0])
if lbNode == nil {
return
}
// Create stack edges for all strictly higher edges in this node.
if idx+1 < len(n.edges) {
i.stack = append(i.stack, n.edges[idx+1:])
}
// Recurse
n = lbNode
}
}
// Next returns the next node in order
func (i *Iterator) Next() ([]byte, interface{}, bool) {
// Initialize our stack if needed
if i.stack == nil && i.node != nil {
i.stack = []edges{
{
edge{node: i.node},
},
}
}
for len(i.stack) > 0 {
// Inspect the last element of the stack
n := len(i.stack)
last := i.stack[n-1]
elem := last[0].node
// Update the stack
if len(last) > 1 {
i.stack[n-1] = last[1:]
} else {
i.stack = i.stack[:n-1]
}
// Push the edges onto the frontier
if len(elem.edges) > 0 {
i.stack = append(i.stack, elem.edges)
}
// Return the leaf values if any
if elem.leaf != nil {
return elem.leaf.key, elem.leaf.val, true
}
}
return nil, nil, false
}
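
A small sketch of driving this iterator (assuming a tree `r` populated as in the README examples above):

```go
it := r.Root().Iterator()
it.SeekPrefix([]byte("00"))
for key, val, ok := it.Next(); ok; key, val, ok = it.Next() {
	fmt.Println(string(key), val) // keys under the "00" prefix, in order
}
```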

View File

@ -1,334 +0,0 @@
package iradix
import (
"bytes"
"sort"
)
// WalkFn is used when walking the tree. Takes a
// key and value, returning if iteration should
// be terminated.
type WalkFn func(k []byte, v interface{}) bool
// leafNode is used to represent a value
type leafNode struct {
mutateCh chan struct{}
key []byte
val interface{}
}
// edge is used to represent an edge node
type edge struct {
label byte
node *Node
}
// Node is an immutable node in the radix tree
type Node struct {
// mutateCh is closed if this node is modified
mutateCh chan struct{}
// leaf is used to store possible leaf
leaf *leafNode
// prefix is the common prefix we ignore
prefix []byte
// Edges should be stored in-order for iteration.
// We avoid a fully materialized slice to save memory,
// since in most cases we expect to be sparse
edges edges
}
func (n *Node) isLeaf() bool {
return n.leaf != nil
}
func (n *Node) addEdge(e edge) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= e.label
})
n.edges = append(n.edges, e)
if idx != num {
copy(n.edges[idx+1:], n.edges[idx:num])
n.edges[idx] = e
}
}
func (n *Node) replaceEdge(e edge) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= e.label
})
if idx < num && n.edges[idx].label == e.label {
n.edges[idx].node = e.node
return
}
panic("replacing missing edge")
}
func (n *Node) getEdge(label byte) (int, *Node) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == label {
return idx, n.edges[idx].node
}
return -1, nil
}
func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
// we want lower bound behavior so return even if it's not an exact match
if idx < num {
return idx, n.edges[idx].node
}
return -1, nil
}
func (n *Node) delEdge(label byte) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == label {
copy(n.edges[idx:], n.edges[idx+1:])
n.edges[len(n.edges)-1] = edge{}
n.edges = n.edges[:len(n.edges)-1]
}
}
func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
search := k
watch := n.mutateCh
for {
// Check for key exhaustion
if len(search) == 0 {
if n.isLeaf() {
return n.leaf.mutateCh, n.leaf.val, true
}
break
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
break
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
return watch, nil, false
}
func (n *Node) Get(k []byte) (interface{}, bool) {
_, val, ok := n.GetWatch(k)
return val, ok
}
// LongestPrefix is like Get, but instead of an
// exact match, it will return the longest prefix match.
func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
var last *leafNode
search := k
for {
// Look for a leaf node
if n.isLeaf() {
last = n.leaf
}
// Check for key exhaustion
if len(search) == 0 {
break
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
break
}
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
if last != nil {
return last.key, last.val, true
}
return nil, nil, false
}
// Minimum is used to return the minimum value in the tree
func (n *Node) Minimum() ([]byte, interface{}, bool) {
for {
if n.isLeaf() {
return n.leaf.key, n.leaf.val, true
}
if len(n.edges) > 0 {
n = n.edges[0].node
} else {
break
}
}
return nil, nil, false
}
// Maximum is used to return the maximum value in the tree
func (n *Node) Maximum() ([]byte, interface{}, bool) {
for {
if num := len(n.edges); num > 0 {
n = n.edges[num-1].node
continue
}
if n.isLeaf() {
return n.leaf.key, n.leaf.val, true
} else {
break
}
}
return nil, nil, false
}
// Iterator is used to return an iterator at
// the given node to walk the tree
func (n *Node) Iterator() *Iterator {
return &Iterator{node: n}
}
// ReverseIterator is used to return an iterator at
// the given node to walk the tree backwards
func (n *Node) ReverseIterator() *ReverseIterator {
return NewReverseIterator(n)
}
// rawIterator is used to return a raw iterator at the given node to walk the
// tree.
func (n *Node) rawIterator() *rawIterator {
iter := &rawIterator{node: n}
iter.Next()
return iter
}
// Walk is used to walk the tree
func (n *Node) Walk(fn WalkFn) {
recursiveWalk(n, fn)
}
// WalkBackwards is used to walk the tree in reverse order
func (n *Node) WalkBackwards(fn WalkFn) {
reverseRecursiveWalk(n, fn)
}
// WalkPrefix is used to walk the tree under a prefix
func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
search := prefix
for {
// Check for key exhaustion
if len(search) == 0 {
recursiveWalk(n, fn)
return
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
break
}
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else if bytes.HasPrefix(n.prefix, search) {
// Child may be under our search prefix
recursiveWalk(n, fn)
return
} else {
break
}
}
}
// WalkPath is used to walk the tree, but only visiting nodes
// from the root down to a given leaf. Where WalkPrefix walks
// all the entries *under* the given prefix, this walks the
// entries *above* the given prefix.
func (n *Node) WalkPath(path []byte, fn WalkFn) {
search := path
for {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return
}
// Check for key exhaustion
if len(search) == 0 {
return
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
return
}
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
}
// recursiveWalk is used to do a pre-order walk of a node
// recursively. Returns true if the walk should be aborted
func recursiveWalk(n *Node, fn WalkFn) bool {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return true
}
// Recurse on the children
for _, e := range n.edges {
if recursiveWalk(e.node, fn) {
return true
}
}
return false
}
// reverseRecursiveWalk is used to do a reverse pre-order
// walk of a node recursively. Returns true if the walk
// should be aborted
func reverseRecursiveWalk(n *Node, fn WalkFn) bool {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return true
}
// Recurse on the children in reverse order
for i := len(n.edges) - 1; i >= 0; i-- {
e := n.edges[i]
if reverseRecursiveWalk(e.node, fn) {
return true
}
}
return false
}
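
For reference, a brief sketch of the walk helpers above (assuming a tree `r` as in the README examples; returning true from the callback aborts the walk):

```go
r.Root().WalkPrefix([]byte("00"), func(k []byte, v interface{}) bool {
	fmt.Println(string(k), v)
	return false // keep walking
})
```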

View File

@ -1,78 +0,0 @@
package iradix
// rawIterator visits each of the nodes in the tree, even the ones that are not
// leaves. It keeps track of the effective path (what a leaf at a given node
// would be called), which is useful for comparing trees.
type rawIterator struct {
// node is the starting node in the tree for the iterator.
node *Node
// stack keeps track of edges in the frontier.
stack []rawStackEntry
// pos is the current position of the iterator.
pos *Node
// path is the effective path of the current iterator position,
// regardless of whether the current node is a leaf.
path string
}
// rawStackEntry is used to keep track of the cumulative common path as well as
// its associated edges in the frontier.
type rawStackEntry struct {
path string
edges edges
}
// Front returns the current node that has been iterated to.
func (i *rawIterator) Front() *Node {
return i.pos
}
// Path returns the effective path of the current node, even if it's not actually
// a leaf.
func (i *rawIterator) Path() string {
return i.path
}
// Next advances the iterator to the next node.
func (i *rawIterator) Next() {
// Initialize our stack if needed.
if i.stack == nil && i.node != nil {
i.stack = []rawStackEntry{
{
edges: edges{
edge{node: i.node},
},
},
}
}
for len(i.stack) > 0 {
// Inspect the last element of the stack.
n := len(i.stack)
last := i.stack[n-1]
elem := last.edges[0].node
// Update the stack.
if len(last.edges) > 1 {
i.stack[n-1].edges = last.edges[1:]
} else {
i.stack = i.stack[:n-1]
}
// Push the edges onto the frontier.
if len(elem.edges) > 0 {
path := last.path + string(elem.prefix)
i.stack = append(i.stack, rawStackEntry{path, elem.edges})
}
i.pos = elem
i.path = last.path + string(elem.prefix)
return
}
i.pos = nil
i.path = ""
}

View File

@ -1,239 +0,0 @@
package iradix
import (
"bytes"
)
// ReverseIterator is used to iterate over a set of nodes
// in reverse in-order
type ReverseIterator struct {
i *Iterator
// expandedParents stores the set of parent nodes whose relevant children have
// already been pushed into the stack. This can happen during seek or during
// iteration.
//
// Unlike forward iteration we need to recurse into children before we can
// output the value stored in an internal leaf since all children are greater.
// We use this to track whether we have already ensured all the children are
// in the stack.
expandedParents map[*Node]struct{}
}
// NewReverseIterator returns a new ReverseIterator at a node
func NewReverseIterator(n *Node) *ReverseIterator {
return &ReverseIterator{
i: &Iterator{node: n},
}
}
// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
return ri.i.SeekPrefixWatch(prefix)
}
// SeekPrefix is used to seek the iterator to a given prefix
func (ri *ReverseIterator) SeekPrefix(prefix []byte) {
ri.i.SeekPrefixWatch(prefix)
}
// SeekReverseLowerBound is used to seek the iterator to the largest key that is
// less than or equal to the given key. There is no watch variant, as it's
// hard to predict, based on the radix structure, which nodes' changes might
// affect the result.
func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) {
// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
// go because we need only a subset of edges of many nodes in the path to the
// leaf with the lower bound. Note that the iterator will still recurse into
// children that we don't traverse on the way to the reverse lower bound as it
// walks the stack.
ri.i.stack = []edges{}
// ri.i.node starts off in the common case as pointing to the root node of the
// tree. By the time we return we have either found a lower bound and set up
// the stack to traverse all larger keys, or we have not and the stack and
// node should both be nil to prevent the iterator from assuming it is just
// iterating the whole tree from the root node. Either way this needs to end
// up as nil so just set it here.
n := ri.i.node
ri.i.node = nil
search := key
if ri.expandedParents == nil {
ri.expandedParents = make(map[*Node]struct{})
}
found := func(n *Node) {
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
// We need to mark this node as expanded in advance too otherwise the
// iterator will attempt to walk all of its children even though they are
// greater than the lower bound we have found. We've expanded it in the
// sense that all of its children that we want to walk are already in the
// stack (i.e. none of them).
ri.expandedParents[n] = struct{}{}
}
for {
// Compare current prefix with the search key's same-length prefix.
var prefixCmp int
if len(n.prefix) < len(search) {
prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
} else {
prefixCmp = bytes.Compare(n.prefix, search)
}
if prefixCmp < 0 {
// Prefix is smaller than search prefix, that means there is no exact
// match for the search key. But we are looking in reverse, so the reverse
// lower bound will be the largest leaf under this subtree, since it is
// the value that would come right before the current search key if it
// were in the tree. So we need to follow the maximum path in this subtree
// to find it. Note that this is exactly what the iterator will already do
// if it finds a node in the stack that has _not_ been marked as expanded
// so in this one case we don't call `found` and instead let the iterator
// do the expansion and recursion through all the children.
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
return
}
if prefixCmp > 0 {
// Prefix is larger than search prefix, or there is no prefix but we've
// also exhausted the search key. Either way, that means there is no
// reverse lower bound since nothing comes before our current search
// prefix.
return
}
// If this is a leaf, something needs to happen! Note that if it's a leaf
// and prefixCmp was zero (which it must be to get here) then the leaf value
// is either an exact match for the search, or it's lower. It can't be
// greater.
if n.isLeaf() {
// Firstly, if it's an exact match, we're done!
if bytes.Equal(n.leaf.key, key) {
found(n)
return
}
// It's not so this node's leaf value must be lower and could still be a
// valid contender for reverse lower bound.
// If it has no children then we are also done.
if len(n.edges) == 0 {
// This leaf is the lower bound.
found(n)
return
}
// Finally, this leaf is internal (has children) so we'll keep searching,
// but we need to add it to the iterator's stack since it has a leaf value
// that needs to be iterated over. It needs to be added to the stack
// before its children below as it comes first.
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
// We also need to mark it as expanded since we'll be adding any of its
// relevant children below and so don't want the iterator to re-add them
// on its way back up the stack.
ri.expandedParents[n] = struct{}{}
}
// Consume the search prefix. Note that this is safe because if n.prefix is
// longer than the search slice prefixCmp would have been > 0 above and the
// method would have already returned.
search = search[len(n.prefix):]
if len(search) == 0 {
// We've exhausted the search key but we are not at a leaf. That means all
// children are greater than the search key so a reverse lower bound
// doesn't exist in this subtree. Note that there might still be one in
// the whole radix tree by following a different path somewhere further
// up. If that's the case then the iterator's stack will contain all the
// smaller nodes already and Previous will walk through them correctly.
return
}
// Otherwise, take the lower bound next edge.
idx, lbNode := n.getLowerBoundEdge(search[0])
// From here, we need to update the stack with all values lower than
// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
// search prefix is larger than all edges, we need to place idx at the
// last edge index so they can all be placed in the stack, since they
// come before our search prefix.
if idx == -1 {
idx = len(n.edges)
}
// Create stack edges for all strictly lower edges in this node.
if len(n.edges[:idx]) > 0 {
ri.i.stack = append(ri.i.stack, n.edges[:idx])
}
// Exit if there's no lower bound edge. The stack will have the previous
// nodes already.
if lbNode == nil {
return
}
// Recurse
n = lbNode
}
}
// Previous returns the previous node in reverse order
func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
// Initialize our stack if needed
if ri.i.stack == nil && ri.i.node != nil {
ri.i.stack = []edges{
{
edge{node: ri.i.node},
},
}
}
if ri.expandedParents == nil {
ri.expandedParents = make(map[*Node]struct{})
}
for len(ri.i.stack) > 0 {
// Inspect the last element of the stack
n := len(ri.i.stack)
last := ri.i.stack[n-1]
m := len(last)
elem := last[m-1].node
_, alreadyExpanded := ri.expandedParents[elem]
// If this is an internal node and we've not seen it already, we need to
// leave it in the stack so we can return its possible leaf value _after_
// we've recursed through all its children.
if len(elem.edges) > 0 && !alreadyExpanded {
// record that we've seen this node!
ri.expandedParents[elem] = struct{}{}
// push child edges onto stack and skip the rest of the loop to recurse
// into the largest one.
ri.i.stack = append(ri.i.stack, elem.edges)
continue
}
// Remove the node from the stack
if m > 1 {
ri.i.stack[n-1] = last[:m-1]
} else {
ri.i.stack = ri.i.stack[:n-1]
}
// We don't need this state any more as it's no longer in the stack so we
// won't visit it again
if alreadyExpanded {
delete(ri.expandedParents, elem)
}
// If this is a leaf, return it
if elem.leaf != nil {
return elem.leaf.key, elem.leaf.val, true
}
// it's not a leaf so keep walking the stack to find the previous leaf
}
return nil, nil, false
}
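
A hedged sketch of reverse iteration with the seek above (assuming the tree `r` from the README's range-scan example):

```go
it := r.Root().ReverseIterator()
it.SeekReverseLowerBound([]byte("005"))
for key, _, ok := it.Previous(); ok; key, _, ok = it.Previous() {
	fmt.Println(string(key)) // keys <= "005", in descending order
}
```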

View File

@ -45,6 +45,25 @@ The returned response object is an `*http.Response`, the same thing you would
usually get from `net/http`. Had the request failed one or more times, the above
call would block and retry with exponential backoff.
## Retrying cases that fail after a seeming success
It's possible for a request to succeed in the sense that the expected response headers are received, but then to encounter network-level errors while reading the response body. In go-retryablehttp's most basic usage, this error would not be retryable, due to the out-of-band handling of the response body. In some cases it may be desirable to handle the response body as part of the retryable operation.
A toy example (which will retry the full request and succeed on the second attempt) is shown below:
```go
c := retryablehttp.NewClient()

// "http://example.com" is a placeholder endpoint.
r, err := retryablehttp.NewRequest("GET", "http://example.com", nil)
if err != nil {
	panic(err)
}

handlerShouldRetry := true
r.SetResponseHandler(func(*http.Response) error {
	if !handlerShouldRetry {
		return nil
	}
	handlerShouldRetry = false
	return errors.New("retryable error")
})

resp, err := c.Do(r) // the handler fails the first attempt; the retry succeeds
```
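Building on `r` from the toy example, a more realistic handler (a sketch, not taken from the upstream README) moves the body read inside the retryable operation, so a mid-body network error is fed back through the retry policy; the bytes are buffered so the caller can still use them afterwards:

```go
var bodyBuf bytes.Buffer
r.SetResponseHandler(func(resp *http.Response) error {
	bodyBuf.Reset()
	// A read error here is returned to the client, which consults its
	// retry policy and may replay the whole request.
	_, err := bodyBuf.ReadFrom(resp.Body)
	return err
})
```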
## Getting a stdlib `*http.Client` with retries
It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`.
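The conversion goes through the client's StandardClient method, which wraps the retrying client in a RoundTripper; a minimal sketch:

```go
retryClient := retryablehttp.NewClient()
retryClient.RetryMax = 10

// standardClient is a plain *http.Client whose Transport performs the retries.
standardClient := retryClient.StandardClient()
resp, err := standardClient.Get("http://example.com")
```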

View File

@ -69,11 +69,21 @@ var (
// scheme specified in the URL is invalid. This error isn't typed
// specifically so we resort to matching on the error string.
schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`)
// A regular expression to match the error returned by net/http when the
// TLS certificate is not trusted. This error isn't typed
// specifically so we resort to matching on the error string.
notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`)
)
// ReaderFunc is the type of function that can be given natively to NewRequest
type ReaderFunc func() (io.Reader, error)
// ResponseHandlerFunc is a type of function that takes in a Response and
// processes it, for example reading the body as part of the retryable operation.
// It only runs if the initial part of the request was successful.
// If an error is returned, the client's retry policy will be used to determine whether to retry the whole request.
type ResponseHandlerFunc func(*http.Response) error
// LenReader is an interface implemented by many in-memory io.Readers. Used
// for automatically sending the right Content-Length header when possible.
type LenReader interface {
@ -86,6 +96,8 @@ type Request struct {
// used to rewind the request data in between retries.
body ReaderFunc
responseHandler ResponseHandlerFunc
// Embed an HTTP request directly. This makes a *Request act exactly
// like an *http.Request so that all meta methods are supported.
*http.Request
@ -94,8 +106,16 @@ type Request struct {
// WithContext returns wrapped Request with a shallow copy of underlying *http.Request
// with its context changed to ctx. The provided ctx must be non-nil.
func (r *Request) WithContext(ctx context.Context) *Request {
r.Request = r.Request.WithContext(ctx)
return r
return &Request{
body: r.body,
responseHandler: r.responseHandler,
Request: r.Request.WithContext(ctx),
}
}
// SetResponseHandler allows setting the response handler.
func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) {
r.responseHandler = fn
}
// BodyBytes allows accessing the request body. It is an analogue to
@ -252,23 +272,31 @@ func FromRequest(r *http.Request) (*Request, error) {
return nil, err
}
// Could assert contentLength == r.ContentLength
return &Request{bodyReader, r}, nil
return &Request{body: bodyReader, Request: r}, nil
}
// NewRequest creates a new wrapped request.
func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
return NewRequestWithContext(context.Background(), method, url, rawBody)
}
// NewRequestWithContext creates a new wrapped request with the provided context.
//
// The context controls the entire lifetime of a request and its response:
// obtaining a connection, sending the request, and reading the response headers and body.
func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) {
bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody)
if err != nil {
return nil, err
}
httpReq, err := http.NewRequest(method, url, nil)
httpReq, err := http.NewRequestWithContext(ctx, method, url, nil)
if err != nil {
return nil, err
}
httpReq.ContentLength = contentLength
return &Request{bodyReader, httpReq}, nil
return &Request{body: bodyReader, Request: httpReq}, nil
}
// Logger interface allows to use other loggers than
@ -435,6 +463,9 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
}
// Don't retry if the error was due to TLS cert verification failure.
if notTrustedErrorRe.MatchString(v.Error()) {
return false, v
}
if _, ok := v.Err.(x509.UnknownAuthorityError); ok {
return false, v
}
@ -455,7 +486,7 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
// the server time to recover, as 500's are typically not permanent
// errors and may relate to outages on the server side. This will catch
// invalid response codes as well, like 0 and 999.
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) {
return true, fmt.Errorf("unexpected HTTP status %s", resp.Status)
}
@ -555,13 +586,12 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
var resp *http.Response
var attempt int
var shouldRetry bool
var doErr, checkErr error
var doErr, respErr, checkErr error
for i := 0; ; i++ {
doErr, respErr = nil, nil
attempt++
var code int // HTTP response code
// Always rewind the request body when non-nil.
if req.body != nil {
body, err := req.body()
@ -589,19 +619,24 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
// Attempt the request
resp, doErr = c.HTTPClient.Do(req.Request)
if resp != nil {
code = resp.StatusCode
}
// Check if we should continue with retries.
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr)
if !shouldRetry && doErr == nil && req.responseHandler != nil {
respErr = req.responseHandler(resp)
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr)
}
if doErr != nil {
err := doErr
if respErr != nil {
err = respErr
}
if err != nil {
switch v := logger.(type) {
case LeveledLogger:
v.Error("request failed", "error", doErr, "method", req.Method, "url", req.URL)
v.Error("request failed", "error", err, "method", req.Method, "url", req.URL)
case Logger:
v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, doErr)
v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
}
} else {
// Call this here to maintain the behavior of logging all requests,
@ -636,11 +671,11 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
}
wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
if code > 0 {
desc = fmt.Sprintf("%s (status: %d)", desc, code)
}
if logger != nil {
desc := fmt.Sprintf("%s %s", req.Method, req.URL)
if resp != nil {
desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode)
}
switch v := logger.(type) {
case LeveledLogger:
v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain)
@ -648,11 +683,13 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
}
}
timer := time.NewTimer(wait)
select {
case <-req.Context().Done():
timer.Stop()
c.HTTPClient.CloseIdleConnections()
return nil, req.Context().Err()
case <-time.After(wait):
case <-timer.C:
}
// Make shallow copy of http Request so that we can modify its body
@ -662,15 +699,19 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
}
// this is the closest we have to success criteria
if doErr == nil && checkErr == nil && !shouldRetry {
if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry {
return resp, nil
}
defer c.HTTPClient.CloseIdleConnections()
err := doErr
var err error
if checkErr != nil {
err = checkErr
} else if respErr != nil {
err = respErr
} else {
err = doErr
}
if c.ErrorHandler != nil {

View File

@ -1,362 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

View File

@ -1,177 +0,0 @@
package simplelru
import (
"container/list"
"errors"
)
// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})
// LRU implements a non-thread safe fixed size LRU cache
type LRU struct {
size int
evictList *list.List
items map[interface{}]*list.Element
onEvict EvictCallback
}
// entry is used to hold a value in the evictList
type entry struct {
key interface{}
value interface{}
}
// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
if size <= 0 {
return nil, errors.New("Must provide a positive size")
}
c := &LRU{
size: size,
evictList: list.New(),
items: make(map[interface{}]*list.Element),
onEvict: onEvict,
}
return c, nil
}
// Purge is used to completely clear the cache.
func (c *LRU) Purge() {
for k, v := range c.items {
if c.onEvict != nil {
c.onEvict(k, v.Value.(*entry).value)
}
delete(c.items, k)
}
c.evictList.Init()
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) (evicted bool) {
// Check for existing item
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
ent.Value.(*entry).value = value
return false
}
// Add new item
ent := &entry{key, value}
entry := c.evictList.PushFront(ent)
c.items[key] = entry
evict := c.evictList.Len() > c.size
// Verify size not exceeded
if evict {
c.removeOldest()
}
return evict
}
// Get looks up a key's value from the cache.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
if ent.Value.(*entry) == nil {
return nil, false
}
return ent.Value.(*entry).value, true
}
return
}
// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
_, ok = c.items[key]
return ok
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
var ent *list.Element
if ent, ok = c.items[key]; ok {
return ent.Value.(*entry).value, true
}
return nil, ok
}
// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU) Remove(key interface{}) (present bool) {
if ent, ok := c.items[key]; ok {
c.removeElement(ent)
return true
}
return false
}
// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
c.removeElement(ent)
kv := ent.Value.(*entry)
return kv.key, kv.value, true
}
return nil, nil, false
}
// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
kv := ent.Value.(*entry)
return kv.key, kv.value, true
}
return nil, nil, false
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
keys := make([]interface{}, len(c.items))
i := 0
for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
keys[i] = ent.Value.(*entry).key
i++
}
return keys
}
// Len returns the number of items in the cache.
func (c *LRU) Len() int {
return c.evictList.Len()
}
// Resize changes the cache size.
func (c *LRU) Resize(size int) (evicted int) {
diff := c.Len() - size
if diff < 0 {
diff = 0
}
for i := 0; i < diff; i++ {
c.removeOldest()
}
c.size = size
return diff
}
// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
ent := c.evictList.Back()
if ent != nil {
c.removeElement(ent)
}
}
// removeElement is used to remove a given list element from the cache
func (c *LRU) removeElement(e *list.Element) {
c.evictList.Remove(e)
kv := e.Value.(*entry)
delete(c.items, kv.key)
if c.onEvict != nil {
c.onEvict(kv.key, kv.value)
}
}
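
A brief usage sketch of the simplelru API removed above, using only the functions defined in this file:

```go
onEvict := func(key, value interface{}) {
	fmt.Printf("evicted %v=%v\n", key, value)
}

cache, err := simplelru.NewLRU(2, onEvict)
if err != nil {
	panic(err)
}

cache.Add("a", 1)
cache.Add("b", 2)
cache.Add("c", 3) // capacity is 2, so "a" is evicted here

if _, ok := cache.Get("a"); !ok {
	fmt.Println(`"a" is no longer cached`)
}
```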

View File

@ -1,39 +0,0 @@
package simplelru
// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
// Adds a value to the cache, returns true if an eviction occurred and
// updates the "recently used"-ness of the key.
Add(key, value interface{}) bool
// Returns key's value from the cache and
// updates the "recently used"-ness of the key. #value, isFound
Get(key interface{}) (value interface{}, ok bool)
// Checks if a key exists in cache without updating the recent-ness.
Contains(key interface{}) (ok bool)
// Returns key's value without updating the "recently used"-ness of the key.
Peek(key interface{}) (value interface{}, ok bool)
// Removes a key from the cache.
Remove(key interface{}) bool
// Removes the oldest entry from cache.
RemoveOldest() (interface{}, interface{}, bool)
// Returns the oldest entry from the cache. #key, value, isFound
GetOldest() (interface{}, interface{}, bool)
// Returns a slice of the keys in the cache, from oldest to newest.
Keys() []interface{}
// Returns the number of items in the cache.
Len() int
// Clears all cache entries.
Purge()
// Resizes cache, returning number evicted
Resize(int) int
}

View File

@ -632,12 +632,16 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
// fill unusedNodeKeys with keys from the AST
// a slice because we have to do equals case fold to match Filter
unusedNodeKeys := make(map[string][]token.Pos, 0)
for _, item := range list.Items {
for _, k := range item.Keys{
if k.Token.JSON || k.Token.Type == token.IDENT {
for i, item := range list.Items {
for _, k := range item.Keys {
// isNestedJSON returns true for e.g. bar in
// { "foo": { "bar": {...} } }
// This isn't an unused node key, so we want to skip it
isNestedJSON := i > 0 && len(item.Keys) > 1
if !isNestedJSON && (k.Token.JSON || k.Token.Type == token.IDENT) {
fn := k.Token.Value().(string)
sl := unusedNodeKeys[fn]
unusedNodeKeys[fn] = append(sl, k.Token.Pos)
}
}
}
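
To make the isNestedJSON comment concrete, a hypothetical decode in which bar arrives as a nested JSON key rather than an unused one (hcl v1's Decode accepts JSON input):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	var out map[string]interface{}
	// "bar" is the second element of item.Keys for the nested object,
	// so the decoder must not record it as an unused key.
	if err := hcl.Decode(&out, `{ "foo": { "bar": { "baz": 1 } } }`); err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```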

View File

@ -1,363 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

View File

@ -0,0 +1,61 @@
License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
Parameters
Licensor: HashiCorp, Inc.
Licensed Work: Vault 1.15.0. The Licensed Work is (c) 2023 HashiCorp, Inc.
Additional Use Grant: You may make production use of the Licensed Work,
provided such use does not include offering the Licensed Work
to third parties on a hosted or embedded basis which is
competitive with HashiCorp's products.
Change Date: Four years from the date the Licensed Work is published.
Change License: MPL 2.0
For information about alternative licensing arrangements for the Licensed Work,
please contact licensing@hashicorp.com.
Notice
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.

View File

@ -0,0 +1,208 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package approle
import (
"context"
"fmt"
"io"
"os"
"strings"
"github.com/hashicorp/vault/api"
)
type AppRoleAuth struct {
mountPath string
roleID string
secretID string
secretIDFile string
secretIDEnv string
unwrap bool
}
var _ api.AuthMethod = (*AppRoleAuth)(nil)
// SecretID is a struct that allows you to specify where your application is
// storing the secret ID required for login to the AppRole auth method. The
// recommended secure pattern is to use response-wrapping tokens rather than
// a plaintext value, by passing WithWrappingToken() to NewAppRoleAuth.
// https://learn.hashicorp.com/tutorials/vault/approle-best-practices?in=vault/auth-methods#secretid-delivery-best-practices
type SecretID struct {
// Path on the file system where the secret ID can be found.
FromFile string
// The name of the environment variable containing the application's
// secret ID.
FromEnv string
// The secret ID as a plaintext string value.
FromString string
}
type LoginOption func(a *AppRoleAuth) error
const (
defaultMountPath = "approle"
)
// NewAppRoleAuth initializes a new AppRole auth method interface to be
// passed as a parameter to the client.Auth().Login method.
//
// For a secret ID, the recommended secure pattern is to unwrap a one-time-use
// response-wrapping token that was placed here by a trusted orchestrator
// (https://learn.hashicorp.com/tutorials/vault/approle-best-practices?in=vault/auth-methods#secretid-delivery-best-practices)
// To indicate that the filepath points to this wrapping token and not just
// a plaintext secret ID, initialize NewAppRoleAuth with the
// WithWrappingToken LoginOption.
//
// Supported options: WithMountPath, WithWrappingToken
func NewAppRoleAuth(roleID string, secretID *SecretID, opts ...LoginOption) (*AppRoleAuth, error) {
if roleID == "" {
return nil, fmt.Errorf("no role ID provided for login")
}
if secretID == nil {
return nil, fmt.Errorf("no secret ID provided for login")
}
err := secretID.validate()
if err != nil {
return nil, fmt.Errorf("invalid secret ID: %w", err)
}
a := &AppRoleAuth{
mountPath: defaultMountPath,
roleID: roleID,
}
// secret ID will be read in at login time if it comes from a file or environment variable, in case the underlying value changes
if secretID.FromFile != "" {
a.secretIDFile = secretID.FromFile
}
if secretID.FromEnv != "" {
a.secretIDEnv = secretID.FromEnv
}
if secretID.FromString != "" {
a.secretID = secretID.FromString
}
// Loop through each option
for _, opt := range opts {
// Call the option giving the instantiated
// *AppRoleAuth as the argument
err := opt(a)
if err != nil {
return nil, fmt.Errorf("error with login option: %w", err)
}
}
// return the modified auth struct instance
return a, nil
}
func (a *AppRoleAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
if ctx == nil {
ctx = context.Background()
}
loginData := map[string]interface{}{
"role_id": a.roleID,
}
var secretIDValue string
switch {
case a.secretIDFile != "":
s, err := a.readSecretIDFromFile()
if err != nil {
return nil, fmt.Errorf("error reading secret ID: %w", err)
}
secretIDValue = s
case a.secretIDEnv != "":
s := os.Getenv(a.secretIDEnv)
if s == "" {
return nil, fmt.Errorf("secret ID was specified with an environment variable %q with an empty value", a.secretIDEnv)
}
secretIDValue = s
default:
secretIDValue = a.secretID
}
// if the caller indicated that the value was actually a wrapping token, unwrap it first
if a.unwrap {
unwrappedToken, err := client.Logical().UnwrapWithContext(ctx, secretIDValue)
if err != nil {
return nil, fmt.Errorf("unable to unwrap response wrapping token: %w", err)
}
loginData["secret_id"] = unwrappedToken.Data["secret_id"]
} else {
loginData["secret_id"] = secretIDValue
}
path := fmt.Sprintf("auth/%s/login", a.mountPath)
resp, err := client.Logical().WriteWithContext(ctx, path, loginData)
if err != nil {
return nil, fmt.Errorf("unable to log in with app role auth: %w", err)
}
return resp, nil
}
func WithMountPath(mountPath string) LoginOption {
return func(a *AppRoleAuth) error {
a.mountPath = mountPath
return nil
}
}
func WithWrappingToken() LoginOption {
return func(a *AppRoleAuth) error {
a.unwrap = true
return nil
}
}
func (a *AppRoleAuth) readSecretIDFromFile() (string, error) {
secretIDFile, err := os.Open(a.secretIDFile)
if err != nil {
return "", fmt.Errorf("unable to open file containing secret ID: %w", err)
}
defer secretIDFile.Close()
limitedReader := io.LimitReader(secretIDFile, 1000)
secretIDBytes, err := io.ReadAll(limitedReader)
if err != nil {
return "", fmt.Errorf("unable to read secret ID: %w", err)
}
secretIDValue := strings.TrimSuffix(string(secretIDBytes), "\n")
return secretIDValue, nil
}
func (secretID *SecretID) validate() error {
if secretID.FromFile == "" && secretID.FromEnv == "" && secretID.FromString == "" {
return fmt.Errorf("secret ID for AppRole must be provided with a source file, environment variable, or plaintext string")
}
if secretID.FromFile != "" {
if secretID.FromEnv != "" || secretID.FromString != "" {
return fmt.Errorf("only one source for the secret ID should be specified")
}
}
if secretID.FromEnv != "" {
if secretID.FromFile != "" || secretID.FromString != "" {
return fmt.Errorf("only one source for the secret ID should be specified")
}
}
if secretID.FromString != "" {
if secretID.FromFile != "" || secretID.FromEnv != "" {
return fmt.Errorf("only one source for the secret ID should be specified")
}
}
return nil
}
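
A usage sketch for this approle package, assuming a reachable Vault server, a hypothetical role ID, and a secret-ID file delivered by a trusted orchestrator:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/api/auth/approle"
)

func main() {
	// Address and TLS settings come from the usual VAULT_* environment variables.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "my-role-id" and the file path are placeholders.
	appRoleAuth, err := approle.NewAppRoleAuth(
		"my-role-id",
		&approle.SecretID{FromFile: "/etc/vault/secret-id"},
		approle.WithWrappingToken(), // the file holds a response-wrapping token
	)
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Auth().Login(context.Background(), appRoleAuth)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client token:", secret.Auth.ClientToken)
}
```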

View File

@ -0,0 +1,61 @@
License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
Parameters
Licensor: HashiCorp, Inc.
Licensed Work: Vault 1.15.0. The Licensed Work is (c) 2023 HashiCorp, Inc.
Additional Use Grant: You may make production use of the Licensed Work,
provided such use does not include offering the Licensed Work
to third parties on a hosted or embedded basis which is
competitive with HashiCorp's products.
Change Date: Four years from the date the Licensed Work is published.
Change License: MPL 2.0
For information about alternative licensing arrangements for the Licensed Work,
please contact licensing@hashicorp.com.
Notice
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.

View File

@ -0,0 +1,136 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package kubernetes
import (
"context"
"fmt"
"os"
"github.com/hashicorp/vault/api"
)
type KubernetesAuth struct {
roleName string
mountPath string
serviceAccountToken string
}
var _ api.AuthMethod = (*KubernetesAuth)(nil)
type LoginOption func(a *KubernetesAuth) error
const (
defaultMountPath = "kubernetes"
defaultServiceAccountTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
)
// NewKubernetesAuth creates a KubernetesAuth struct which can be passed to
// the client.Auth().Login method to authenticate to Vault. The roleName
// parameter should be the name of the role in Vault that was created with
// this app's Kubernetes service account bound to it.
//
// The Kubernetes service account token JWT is retrieved from
// /var/run/secrets/kubernetes.io/serviceaccount/token by default. To change this
// path, pass the WithServiceAccountTokenPath option. To instead pass the
// JWT directly as a string, or to read the value from an environment
// variable, use WithServiceAccountToken and WithServiceAccountTokenEnv respectively.
//
// Supported options: WithMountPath, WithServiceAccountTokenPath, WithServiceAccountTokenEnv, WithServiceAccountToken
func NewKubernetesAuth(roleName string, opts ...LoginOption) (*KubernetesAuth, error) {
if roleName == "" {
return nil, fmt.Errorf("no role name was provided")
}
a := &KubernetesAuth{
roleName: roleName,
mountPath: defaultMountPath,
}
// Loop through each option
for _, opt := range opts {
// Call the option giving the instantiated
// *KubernetesAuth as the argument
err := opt(a)
if err != nil {
return nil, fmt.Errorf("error with login option: %w", err)
}
}
if a.serviceAccountToken == "" {
token, err := readTokenFromFile(defaultServiceAccountTokenPath)
if err != nil {
return nil, fmt.Errorf("error reading service account token from default location: %w", err)
}
a.serviceAccountToken = token
}
// return the modified auth struct instance
return a, nil
}
func (a *KubernetesAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
if ctx == nil {
ctx = context.Background()
}
loginData := map[string]interface{}{
"jwt": a.serviceAccountToken,
"role": a.roleName,
}
path := fmt.Sprintf("auth/%s/login", a.mountPath)
resp, err := client.Logical().WriteWithContext(ctx, path, loginData)
if err != nil {
return nil, fmt.Errorf("unable to log in with Kubernetes auth: %w", err)
}
return resp, nil
}
func WithMountPath(mountPath string) LoginOption {
return func(a *KubernetesAuth) error {
a.mountPath = mountPath
return nil
}
}
// WithServiceAccountTokenPath allows you to specify a different path to
// where your application's Kubernetes service account token is mounted,
// instead of the default of /var/run/secrets/kubernetes.io/serviceaccount/token
func WithServiceAccountTokenPath(pathToToken string) LoginOption {
return func(a *KubernetesAuth) error {
token, err := readTokenFromFile(pathToToken)
if err != nil {
return fmt.Errorf("unable to read service account token from file: %w", err)
}
a.serviceAccountToken = token
return nil
}
}
func WithServiceAccountToken(jwt string) LoginOption {
return func(a *KubernetesAuth) error {
a.serviceAccountToken = jwt
return nil
}
}
func WithServiceAccountTokenEnv(envVar string) LoginOption {
return func(a *KubernetesAuth) error {
token := os.Getenv(envVar)
if token == "" {
return fmt.Errorf("service account token was specified with an environment variable with an empty value")
}
a.serviceAccountToken = token
return nil
}
}
func readTokenFromFile(filepath string) (string, error) {
jwt, err := os.ReadFile(filepath)
if err != nil {
return "", fmt.Errorf("unable to read file containing service account token: %w", err)
}
return string(jwt), nil
}
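
For orientation, the vendored helper above is what this rebase switches the vault client code to. A minimal usage sketch, assuming VAULT_ADDR is set and the pod has a projected service account token; the role name is illustrative and not part of this diff:

```
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/api/auth/kubernetes"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	// "example-role" is an illustrative Vault role bound to the pod's service account.
	k8sAuth, err := kubernetes.NewKubernetesAuth("example-role",
		kubernetes.WithMountPath("kubernetes")) // the default mount, shown for clarity
	if err != nil {
		panic(err)
	}
	// Login reads the token file and writes it to auth/kubernetes/login.
	secret, err := client.Auth().Login(context.Background(), k8sAuth)
	if err != nil {
		panic(err)
	}
	fmt.Println("authenticated; token accessor:", secret.Auth.Accessor)
}
```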

View File

@ -1,445 +0,0 @@
package auth
import (
"context"
"encoding/json"
"errors"
"math/rand"
"net/http"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
)
const (
defaultMinBackoff = 1 * time.Second
defaultMaxBackoff = 5 * time.Minute
)
// AuthMethod is the interface that auto-auth methods implement for the agent
// to use.
type AuthMethod interface {
// Authenticate returns a mount path, header, request body, and error.
// The header may be nil if no special header is needed.
Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error)
NewCreds() chan struct{}
CredSuccess()
Shutdown()
}
// AuthMethodWithClient is an extended interface that can return an API client
// for use during the authentication call.
type AuthMethodWithClient interface {
AuthMethod
AuthClient(client *api.Client) (*api.Client, error)
}
type AuthConfig struct {
Logger hclog.Logger
MountPath string
WrapTTL time.Duration
Config map[string]interface{}
}
// AuthHandler is responsible for keeping a token alive and renewed and passing
// new tokens to the sink server
type AuthHandler struct {
OutputCh chan string
TemplateTokenCh chan string
token string
logger hclog.Logger
client *api.Client
random *rand.Rand
wrapTTL time.Duration
maxBackoff time.Duration
minBackoff time.Duration
enableReauthOnNewCredentials bool
enableTemplateTokenCh bool
exitOnError bool
}
type AuthHandlerConfig struct {
Logger hclog.Logger
Client *api.Client
WrapTTL time.Duration
MaxBackoff time.Duration
MinBackoff time.Duration
Token string
EnableReauthOnNewCredentials bool
EnableTemplateTokenCh bool
ExitOnError bool
}
func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler {
ah := &AuthHandler{
// This is buffered so that if we try to output after the sink server
// has been shut down, during agent shutdown, we won't block
OutputCh: make(chan string, 1),
TemplateTokenCh: make(chan string, 1),
token: conf.Token,
logger: conf.Logger,
client: conf.Client,
random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
wrapTTL: conf.WrapTTL,
minBackoff: conf.MinBackoff,
maxBackoff: conf.MaxBackoff,
enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials,
enableTemplateTokenCh: conf.EnableTemplateTokenCh,
exitOnError: conf.ExitOnError,
}
return ah
}
func backoff(ctx context.Context, backoff *agentBackoff) bool {
if backoff.exitOnErr {
return false
}
select {
case <-time.After(backoff.current):
case <-ctx.Done():
}
// Increase exponential backoff for the next time if we don't
// successfully auth/renew/etc.
backoff.next()
return true
}
func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
if am == nil {
return errors.New("auth handler: nil auth method")
}
if ah.minBackoff <= 0 {
ah.minBackoff = defaultMinBackoff
}
backoffCfg := newAgentBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError)
if backoffCfg.min >= backoffCfg.max {
return errors.New("auth handler: min_backoff cannot be greater than max_backoff")
}
ah.logger.Info("starting auth handler")
defer func() {
am.Shutdown()
close(ah.OutputCh)
close(ah.TemplateTokenCh)
ah.logger.Info("auth handler stopped")
}()
credCh := am.NewCreds()
if !ah.enableReauthOnNewCredentials {
realCredCh := credCh
credCh = nil
if realCredCh != nil {
go func() {
for {
select {
case <-ctx.Done():
return
case <-realCredCh:
}
}
}()
}
}
if credCh == nil {
credCh = make(chan struct{})
}
var watcher *api.LifetimeWatcher
first := true
for {
select {
case <-ctx.Done():
return nil
default:
}
var clientToUse *api.Client
var err error
var path string
var data map[string]interface{}
var header http.Header
switch am.(type) {
case AuthMethodWithClient:
clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client)
if err != nil {
ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoff)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
default:
clientToUse = ah.client
}
// Disable retry on the client to ensure our backoffOrQuit function is
// the only source of retry/backoff.
clientToUse.SetMaxRetries(0)
var secret *api.Secret = new(api.Secret)
if first && ah.token != "" {
ah.logger.Debug("using preloaded token")
first = false
ah.logger.Debug("lookup-self with preloaded token")
clientToUse.SetToken(ah.token)
secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx)
if err != nil {
ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
duration, _ := secret.Data["ttl"].(json.Number).Int64()
secret.Auth = &api.SecretAuth{
ClientToken: secret.Data["id"].(string),
LeaseDuration: int(duration),
Renewable: secret.Data["renewable"].(bool),
}
} else {
ah.logger.Info("authenticating")
path, header, data, err = am.Authenticate(ctx, ah.client)
if err != nil {
ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
}
if ah.wrapTTL > 0 {
wrapClient, err := clientToUse.Clone()
if err != nil {
ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
wrapClient.SetWrappingLookupFunc(func(string, string) string {
return ah.wrapTTL.String()
})
clientToUse = wrapClient
}
for key, values := range header {
for _, value := range values {
clientToUse.AddHeader(key, value)
}
}
// This should only happen if there's no preloaded token (regular auto-auth login)
// or if a preloaded token has expired and is now switching to auto-auth.
if secret.Auth == nil {
secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data)
// Check errors/sanity
if err != nil {
ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
}
switch {
case ah.wrapTTL > 0:
if secret.WrapInfo == nil {
ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
if secret.WrapInfo.Token == "" {
ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo)
if err != nil {
ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing")
ah.OutputCh <- string(wrappedResp)
if ah.enableTemplateTokenCh {
ah.TemplateTokenCh <- string(wrappedResp)
}
am.CredSuccess()
backoffCfg.reset()
select {
case <-ctx.Done():
ah.logger.Info("shutdown triggered")
continue
case <-credCh:
ah.logger.Info("auth method found new credentials, re-authenticating")
continue
}
default:
if secret == nil || secret.Auth == nil {
ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
if secret.Auth.ClientToken == "" {
ah.logger.Error("authentication returned empty client token", "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
ah.logger.Info("authentication successful, sending token to sinks")
ah.OutputCh <- secret.Auth.ClientToken
if ah.enableTemplateTokenCh {
ah.TemplateTokenCh <- secret.Auth.ClientToken
}
am.CredSuccess()
backoffCfg.reset()
}
if watcher != nil {
watcher.Stop()
}
watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{
Secret: secret,
})
if err != nil {
ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg)
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
if backoff(ctx, backoffCfg) {
continue
}
return err
}
// Start the renewal process
ah.logger.Info("starting renewal process")
metrics.IncrCounter([]string{"agent", "auth", "success"}, 1)
go watcher.Renew()
LifetimeWatcherLoop:
for {
select {
case <-ctx.Done():
ah.logger.Info("shutdown triggered, stopping lifetime watcher")
watcher.Stop()
break LifetimeWatcherLoop
case err := <-watcher.DoneCh():
ah.logger.Info("lifetime watcher done channel triggered")
if err != nil {
metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1)
ah.logger.Error("error renewing token", "error", err)
}
break LifetimeWatcherLoop
case <-watcher.RenewCh():
metrics.IncrCounter([]string{"agent", "auth", "success"}, 1)
ah.logger.Info("renewed auth token")
case <-credCh:
ah.logger.Info("auth method found new credentials, re-authenticating")
break LifetimeWatcherLoop
}
}
}
}
// agentBackoff tracks exponential backoff state.
type agentBackoff struct {
min time.Duration
max time.Duration
current time.Duration
exitOnErr bool
}
func newAgentBackoff(min, max time.Duration, exitErr bool) *agentBackoff {
if max <= 0 {
max = defaultMaxBackoff
}
if min <= 0 {
min = defaultMinBackoff
}
return &agentBackoff{
current: min,
max: max,
min: min,
exitOnErr: exitErr,
}
}
// next determines the next backoff duration that is roughly twice
// the current value, capped to a max value, with a measure of randomness.
func (b *agentBackoff) next() {
maxBackoff := 2 * b.current
if maxBackoff > b.max {
maxBackoff = b.max
}
// Trim a random amount (0-25%) off the doubled duration
trim := rand.Int63n(int64(maxBackoff) / 4)
b.current = maxBackoff - time.Duration(trim)
}
func (b *agentBackoff) reset() {
b.current = b.min
}
func (b agentBackoff) String() string {
return b.current.Truncate(10 * time.Millisecond).String()
}
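
The agentBackoff helper above doubles the wait, caps it at a maximum, and trims up to 25% of it as jitter. A standalone sketch of the same arithmetic (durations and iteration count are illustrative):

```
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Mirrors the removed agentBackoff.next(): double, cap at max, trim 0-25%.
	cur, max := 1*time.Second, 5*time.Minute
	for i := 0; i < 5; i++ {
		next := 2 * cur
		if next > max {
			next = max
		}
		next -= time.Duration(rand.Int63n(int64(next) / 4))
		cur = next
		fmt.Println(cur.Truncate(10 * time.Millisecond))
	}
}
```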

View File

@ -1,129 +0,0 @@
package kubernetes
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agent/auth"
)
const (
serviceAccountFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
)
type kubernetesMethod struct {
logger hclog.Logger
mountPath string
role string
// tokenPath is an optional path to a projected service account token inside
// the pod, for use instead of the default service account token.
tokenPath string
// jwtData is a ReadCloser used to inject a ReadCloser for mocking tests.
jwtData io.ReadCloser
}
// NewKubernetesAuthMethod reads the user configuration and returns a configured
// AuthMethod
func NewKubernetesAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
if conf == nil {
return nil, errors.New("empty config")
}
if conf.Config == nil {
return nil, errors.New("empty config data")
}
k := &kubernetesMethod{
logger: conf.Logger,
mountPath: conf.MountPath,
}
roleRaw, ok := conf.Config["role"]
if !ok {
return nil, errors.New("missing 'role' value")
}
k.role, ok = roleRaw.(string)
if !ok {
return nil, errors.New("could not convert 'role' config value to string")
}
tokenPathRaw, ok := conf.Config["token_path"]
if ok {
k.tokenPath, ok = tokenPathRaw.(string)
if !ok {
return nil, errors.New("could not convert 'token_path' config value to string")
}
}
if k.role == "" {
return nil, errors.New("'role' value is empty")
}
return k, nil
}
func (k *kubernetesMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) {
k.logger.Trace("beginning authentication")
jwtString, err := k.readJWT()
if err != nil {
return "", nil, nil, fmt.Errorf("error reading JWT with Kubernetes Auth: %w", err)
}
return fmt.Sprintf("%s/login", k.mountPath), nil, map[string]interface{}{
"role": k.role,
"jwt": jwtString,
}, nil
}
func (k *kubernetesMethod) NewCreds() chan struct{} {
return nil
}
func (k *kubernetesMethod) CredSuccess() {
}
func (k *kubernetesMethod) Shutdown() {
}
// readJWT reads the JWT data for the Agent to submit to Vault. The default is
// to read the JWT from the default service account location, defined by the
// constant serviceAccountFile. In normal use k.jwtData is nil at invocation and
// the method falls back to reading the token path with os.Open, opening a file
// from either the default location or from the token_path specified in
// configuration.
func (k *kubernetesMethod) readJWT() (string, error) {
// load configured token path if set, default to serviceAccountFile
tokenFilePath := serviceAccountFile
if k.tokenPath != "" {
tokenFilePath = k.tokenPath
}
data := k.jwtData
// k.jwtData should only be non-nil in tests
if data == nil {
f, err := os.Open(tokenFilePath)
if err != nil {
return "", err
}
data = f
}
defer data.Close()
contentBytes, err := ioutil.ReadAll(data)
if err != nil {
return "", err
}
return strings.TrimSpace(string(contentBytes)), nil
}

View File

@ -1,365 +0,0 @@
Copyright (c) 2015 HashiCorp, Inc.
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

View File

@ -1,222 +0,0 @@
package compressutil
import (
"bytes"
"compress/gzip"
"compress/lzw"
"fmt"
"io"
"github.com/golang/snappy"
"github.com/hashicorp/errwrap"
"github.com/pierrec/lz4"
)
const (
// A byte value used as a canary prefix for the compressed information
// which is used to distinguish if a JSON input is compressed or not.
// The value of this constant should not be a first character of any
// valid JSON string.
CompressionTypeGzip = "gzip"
CompressionCanaryGzip byte = 'G'
CompressionTypeLZW = "lzw"
CompressionCanaryLZW byte = 'L'
CompressionTypeSnappy = "snappy"
CompressionCanarySnappy byte = 'S'
CompressionTypeLZ4 = "lz4"
CompressionCanaryLZ4 byte = '4'
)
// CompressUtilReadCloser wraps a reader, such as the snappy reader, that
// implements only the io.Reader interface. The decompress procedure in this
// utility expects an io.ReadCloser, so this type adds a no-op Close method to
// retain the generic way of decompression.
type CompressUtilReadCloser struct {
io.Reader
}
// Close is a noop method implemented only to satisfy the io.Closer interface
func (c *CompressUtilReadCloser) Close() error {
return nil
}
// CompressionConfig is used to select a compression type to be performed by
// Compress and Decompress utilities.
// Supported types are:
// * CompressionTypeLZW
// * CompressionTypeGzip
// * CompressionTypeSnappy
// * CompressionTypeLZ4
//
// When using CompressionTypeGzip, the compression levels can also be chosen:
// * gzip.DefaultCompression
// * gzip.BestSpeed
// * gzip.BestCompression
type CompressionConfig struct {
// Type of the compression algorithm to be used
Type string
// When using Gzip format, the compression level to employ
GzipCompressionLevel int
}
// Compress places the canary byte in a buffer and uses the same buffer to fill
// in the compressed information of the given input. The configuration supports
// four compression types: LZW, Gzip, Snappy, and LZ4. When using the Gzip
// compression format, if GzipCompressionLevel is not specified,
// 'gzip.DefaultCompression' will be assumed.
func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
var buf bytes.Buffer
var writer io.WriteCloser
var err error
if config == nil {
return nil, fmt.Errorf("config is nil")
}
// Write the canary into the buffer and create writer to compress the
// input data based on the configured type
switch config.Type {
case CompressionTypeLZW:
buf.Write([]byte{CompressionCanaryLZW})
writer = lzw.NewWriter(&buf, lzw.LSB, 8)
case CompressionTypeGzip:
buf.Write([]byte{CompressionCanaryGzip})
switch {
case config.GzipCompressionLevel == gzip.BestCompression,
config.GzipCompressionLevel == gzip.BestSpeed,
config.GzipCompressionLevel == gzip.DefaultCompression:
// These are valid compression levels
default:
// If the compression level is set to NoCompression or to
// any invalid value, fall back to DefaultCompression
config.GzipCompressionLevel = gzip.DefaultCompression
}
writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
case CompressionTypeSnappy:
buf.Write([]byte{CompressionCanarySnappy})
writer = snappy.NewBufferedWriter(&buf)
case CompressionTypeLZ4:
buf.Write([]byte{CompressionCanaryLZ4})
writer = lz4.NewWriter(&buf)
default:
return nil, fmt.Errorf("unsupported compression type")
}
if err != nil {
return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err)
}
if writer == nil {
return nil, fmt.Errorf("failed to create a compression writer")
}
// Compress the input and place it in the same buffer containing the
// canary byte.
if _, err = writer.Write(data); err != nil {
return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err)
}
// Close the io.WriteCloser
if err = writer.Close(); err != nil {
return nil, err
}
// Return the compressed bytes with canary byte at the start
return buf.Bytes(), nil
}
// Decompress checks if the first byte in the input matches the canary byte.
// If the first byte is a canary byte, then the input past the canary byte
// will be decompressed using the method specified in the given configuration.
// If the first byte isn't a canary byte, then the utility returns a boolean
// value indicating that the input was not compressed.
func Decompress(data []byte) ([]byte, bool, error) {
bytes, _, notCompressed, err := DecompressWithCanary(data)
return bytes, notCompressed, err
}
// DecompressWithCanary checks if the first byte in the input matches the canary byte.
// If the first byte is a canary byte, then the input past the canary byte
// will be decompressed using the method specified in the given configuration. The type of compression used is also
// returned. If the first byte isn't a canary byte, then the utility returns a boolean
// value indicating that the input was not compressed.
func DecompressWithCanary(data []byte) ([]byte, string, bool, error) {
var err error
var reader io.ReadCloser
var compressionType string
if data == nil || len(data) == 0 {
return nil, "", false, fmt.Errorf("'data' being decompressed is empty")
}
canary := data[0]
cData := data[1:]
switch canary {
// If the first byte matches the canary byte, remove the canary
// byte and try to decompress the data that is after the canary.
case CompressionCanaryGzip:
if len(data) < 2 {
return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
}
reader, err = gzip.NewReader(bytes.NewReader(cData))
compressionType = CompressionTypeGzip
case CompressionCanaryLZW:
if len(data) < 2 {
return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
}
reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8)
compressionType = CompressionTypeLZW
case CompressionCanarySnappy:
if len(data) < 2 {
return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
}
reader = &CompressUtilReadCloser{
Reader: snappy.NewReader(bytes.NewReader(cData)),
}
compressionType = CompressionTypeSnappy
case CompressionCanaryLZ4:
if len(data) < 2 {
return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
}
reader = &CompressUtilReadCloser{
Reader: lz4.NewReader(bytes.NewReader(cData)),
}
compressionType = CompressionTypeLZ4
default:
// If the first byte doesn't match the canary byte, it means
// that the content was not compressed at all. Indicate the
// caller that the input was not compressed.
return nil, "", true, nil
}
if err != nil {
return nil, "", false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
}
if reader == nil {
return nil, "", false, fmt.Errorf("failed to create a compression reader")
}
// Close the io.ReadCloser
defer reader.Close()
// Read all the compressed data into a buffer
var buf bytes.Buffer
if _, err = io.Copy(&buf, reader); err != nil {
return nil, "", false, err
}
return buf.Bytes(), compressionType, false, nil
}
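
A round-trip sketch of the helpers removed above; the compression type choice is arbitrary and the import path matches the vendored location being dropped:

```
package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/compressutil"
)

func main() {
	in := []byte(`{"key":"value"}`)
	// Compress prepends the Snappy canary byte 'S' to the compressed output.
	out, err := compressutil.Compress(in, &compressutil.CompressionConfig{
		Type: compressutil.CompressionTypeSnappy,
	})
	if err != nil {
		panic(err)
	}
	// Decompress detects the canary and restores the original bytes.
	plain, notCompressed, err := compressutil.Decompress(out)
	if err != nil || notCompressed {
		panic("round trip failed")
	}
	fmt.Println(bytes.Equal(plain, in)) // true
}
```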

View File

@ -1,100 +0,0 @@
package jsonutil
import (
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/sdk/helper/compressutil"
)
// Encodes/Marshals the given object into JSON
func EncodeJSON(in interface{}) ([]byte, error) {
if in == nil {
return nil, fmt.Errorf("input for encoding is nil")
}
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
if err := enc.Encode(in); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// EncodeJSONAndCompress encodes the given input into JSON and compresses the
// encoded value (using Gzip format BestCompression level, by default). A
// canary byte is placed at the beginning of the returned bytes for the logic
// in decompression method to identify compressed input.
func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) {
if in == nil {
return nil, fmt.Errorf("input for encoding is nil")
}
// First JSON encode the given input
encodedBytes, err := EncodeJSON(in)
if err != nil {
return nil, err
}
if config == nil {
config = &compressutil.CompressionConfig{
Type: compressutil.CompressionTypeGzip,
GzipCompressionLevel: gzip.BestCompression,
}
}
return compressutil.Compress(encodedBytes, config)
}
// DecodeJSON tries to decompress the given data. The call to decompress fails
// if the content was not compressed in the first place, which is identified by
// a canary byte before the compressed data. If the data is not compressed, it
// is JSON decoded directly. Otherwise the decompressed data will be JSON
// decoded.
func DecodeJSON(data []byte, out interface{}) error {
if data == nil || len(data) == 0 {
return fmt.Errorf("'data' being decoded is nil")
}
if out == nil {
return fmt.Errorf("output parameter 'out' is nil")
}
// Decompress the data if it was compressed in the first place
decompressedBytes, uncompressed, err := compressutil.Decompress(data)
if err != nil {
return errwrap.Wrapf("failed to decompress JSON: {{err}}", err)
}
if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) {
return fmt.Errorf("decompressed data being decoded is invalid")
}
// If the supplied input did not contain the compression canary, the
// compression utility will have reported it as uncompressed. Decode the
// (possibly decompressed) input.
if !uncompressed {
data = decompressedBytes
}
return DecodeJSONFromReader(bytes.NewReader(data), out)
}
// DecodeJSONFromReader decodes/unmarshals JSON from the given io.Reader into the desired object
func DecodeJSONFromReader(r io.Reader, out interface{}) error {
if r == nil {
return fmt.Errorf("'io.Reader' being decoded is nil")
}
if out == nil {
return fmt.Errorf("output parameter 'out' is nil")
}
dec := json.NewDecoder(r)
// While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`.
dec.UseNumber()
// Since 'out' is an interface representing a pointer, pass it to the decoder without an '&'
return dec.Decode(out)
}
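
And its JSON counterpart, also removed here, sketched end to end (the struct and values are illustrative):

```
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/jsonutil"
)

func main() {
	type payload struct {
		Name string `json:"name"`
	}
	// A nil config defaults to Gzip at BestCompression; a canary byte leads the output.
	blob, err := jsonutil.EncodeJSONAndCompress(payload{Name: "demo"}, nil)
	if err != nil {
		panic(err)
	}
	var out payload
	// DecodeJSON detects the canary, decompresses, then JSON-decodes.
	if err := jsonutil.DecodeJSON(blob, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // demo
}
```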

View File

@ -1,6 +1,6 @@
language: go
go:
- 1.14.6
- 1.19.x
script:
- |
make unit-test

View File

@ -6,3 +6,6 @@ unit-test:
ci-test:
go test -timeout 1800s -v github.com/libopenstorage/secrets/vault -tags ci
integration-test:
go test -timeout 1800s -v github.com/libopenstorage/secrets/vault -tags integration

View File

@ -42,16 +42,17 @@ const (
)
const (
TypeAWS = "aws-kms"
TypeAzure = "azure-kv"
TypeDCOS = "dcos"
TypeDocker = "docker"
TypeGCloud = "gcloud-kms"
TypeIBM = "ibm-kp"
TypeK8s = "k8s"
TypeKVDB = "kvdb"
TypeVault = "vault"
TypeVaultTransit = "vault-transit"
TypeAWSKMS = "aws-kms"
TypeAzure = "azure-kv"
TypeDCOS = "dcos"
TypeDocker = "docker"
TypeGCloud = "gcloud-kms"
TypeIBM = "ibm-kp"
TypeK8s = "k8s"
TypeKVDB = "kvdb"
TypeVault = "vault"
TypeVaultTransit = "vault-transit"
TypeAWSSecretsManager = "aws-secrets-manager"
)
const (
@ -64,6 +65,14 @@ const (
DestroySecret = "destroy-all-secret-versions"
)
// Version represents the unique identifier associated with the version of the new secret.
type Version string
const (
// NoVersion indicates that the provider does not support versions for secrets
NoVersion Version = "noversion"
)
// Secrets interface implemented by backend Key Management Systems (KMS)
type Secrets interface {
// String representation of the backend KMS
@ -76,15 +85,17 @@ type Secrets interface {
GetSecret(
secretId string,
keyContext map[string]string,
) (map[string]interface{}, error)
) (map[string]interface{}, Version, error)
// PutSecret will associate a secretId with the secret data
// provided in the arguments and store it in the secret backend.
// The caller should ensure they use unique secretIds so that they won't
// unknowingly overwrite an existing secret.
PutSecret(
secretId string,
plainText map[string]interface{},
keyContext map[string]string,
) error
) (Version, error)
// DeleteSecret deletes the secret data associated with the
// supplied secretId.
@ -141,6 +152,17 @@ func (e *ErrInvalidKeyContext) Error() string {
return fmt.Sprintf("invalid key context: %v", e.Reason)
}
// ErrProviderInternal is returned when the secrets provider returns an error
// that is not known to this library
type ErrProviderInternal struct {
Provider string
Reason string
}
func (e *ErrProviderInternal) Error() string {
return fmt.Sprintf("%v returned error: %v", e.Provider, e.Reason)
}
// KeyContextChecks performs a series of checks on the keys and values
// passed through the key context map
func KeyContextChecks(
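
The hunk above is the heart of this API bump: GetSecret and PutSecret now also return a secrets.Version. A sketch of a caller adapting to the widened signatures (function and variable names are illustrative):

```
// sm is any secrets.Secrets implementation.
func rewrite(sm secrets.Secrets, id string, kc map[string]string) error {
	value, version, err := sm.GetSecret(id, kc)
	if err != nil {
		return err
	}
	_ = version // providers without versioning return secrets.NoVersion
	if _, err := sm.PutSecret(id, value, kc); err != nil {
		return err
	}
	return nil
}
```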

View File

@ -0,0 +1,97 @@
package secrets
import (
"context"
"fmt"
"sync"
)
type ReaderInit func(map[string]interface{}) (SecretReader, error)
type StoreInit func(map[string]interface{}) (SecretStore, error)
var (
secretReaders = make(map[string]ReaderInit)
secretStores = make(map[string]StoreInit)
readersLock sync.RWMutex
storesLock sync.RWMutex
)
// NewReader returns a new instance of the SecretReader backend secrets
// manager identified by the supplied name. secretConfig is a map of key-value
// pairs which could be used for authenticating with the backend
func NewReader(name string, secretConfig map[string]interface{}) (SecretReader, error) {
readersLock.RLock()
defer readersLock.RUnlock()
if init, exists := secretReaders[name]; exists {
return init(secretConfig)
}
return nil, ErrNotSupported
}
// NewStore returns a new instance of the SecretStore backend secrets manager
// identified by the supplied name. secretConfig is a map of key-value pairs
// which could be used for authenticating with the backend
func NewStore(name string, secretConfig map[string]interface{}) (SecretStore, error) {
storesLock.RLock()
defer storesLock.RUnlock()
if init, exists := secretStores[name]; exists {
return init(secretConfig)
}
return nil, ErrNotSupported
}
// RegisterReader adds a new backend KMS that implements SecretReader
func RegisterReader(name string, init ReaderInit) error {
readersLock.Lock()
defer readersLock.Unlock()
if _, exists := secretReaders[name]; exists {
return fmt.Errorf("secrets reader %v is already registered", name)
}
secretReaders[name] = init
return nil
}
// RegisterStore adds a new backend KMS that implements SecretStore and SecretReader
func RegisterStore(name string, init StoreInit) error {
storesLock.Lock()
defer storesLock.Unlock()
if _, exists := secretStores[name]; exists {
return fmt.Errorf("secrets store %v is already registered", name)
}
secretStores[name] = init
return RegisterReader(name, func(m map[string]interface{}) (SecretReader, error) {
return init(m)
})
}
// A SecretKey identifies a secret
type SecretKey struct {
// Prefix is an optional part of the SecretKey.
Prefix string
// Name is a mandatory part of the SecretKey.
Name string
}
// SecretReader interface implemented by Secrets Managers to read secrets
type SecretReader interface {
// String representation of the backend.
String() string
// Get returns the secret associated with the supplied key.
Get(ctx context.Context, key SecretKey) (secret map[string]interface{}, err error)
}
// SecretStore interface implemented by Secrets Managers to set and delete secrets.
type SecretStore interface {
SecretReader
// Set stores the secret data identified by the key.
// The caller should ensure they use unique keys so that they won't
// unknowingly overwrite an existing secret.
Set(ctx context.Context, key SecretKey, secret map[string]interface{}) error
// Delete deletes the secret data associated with the supplied key.
Delete(ctx context.Context, key SecretKey) error
}
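
The registry above is how backends self-publish. A sketch of a hypothetical backend registering itself and being constructed by name (newExampleStore is illustrative, not part of this diff):

```
func init() {
	if err := secrets.RegisterStore("example-store", func(cfg map[string]interface{}) (secrets.SecretStore, error) {
		return newExampleStore(cfg) // hypothetical constructor
	}); err != nil {
		panic(err)
	}
}

func open() (secrets.SecretStore, error) {
	// RegisterStore also registered a reader under the same name,
	// so secrets.NewReader("example-store", cfg) works too.
	return secrets.NewStore("example-store", map[string]interface{}{"endpoint": "https://example.invalid"})
}
```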

View File

@ -3,24 +3,23 @@ package utils
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"path"
"strconv"
"strings"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agent/auth"
"github.com/hashicorp/vault/command/agent/auth/kubernetes"
"github.com/hashicorp/vault/api/auth/approle"
"github.com/hashicorp/vault/api/auth/kubernetes"
)
const (
vaultAddressPrefix = "http"
vaultAddressPrefix = "http"
// AuthMethodKubernetes is a named auth method.
AuthMethodKubernetes = "kubernetes"
// AuthMethodAppRole is a named auth method.
AuthMethodAppRole = "approle"
// AuthMethod is the vault authentication method to use.
// https://www.vaultproject.io/docs/auth#auth-methods
AuthMethod = "VAULT_AUTH_METHOD"
@ -33,6 +32,10 @@ const (
AuthKubernetesTokenPath = "VAULT_AUTH_KUBERNETES_TOKEN_PATH"
// AuthKubernetesMountPath
AuthKubernetesMountPath = "kubernetes"
// AuthAppRoleRoleID
AuthAppRoleRoleID = "VAULT_APPROLE_ROLE_ID"
// AuthAppRoleSecretID
AuthAppRoleSecretID = "VAULT_APPROLE_SECRET_ID"
)
var (
@ -40,6 +43,7 @@ var (
ErrVaultAddressNotSet = errors.New("VAULT_ADDR not set")
ErrInvalidVaultToken = errors.New("VAULT_TOKEN is invalid")
ErrInvalidSkipVerify = errors.New("VAULT_SKIP_VERIFY is invalid")
ErrAppRoleIDNotSet = errors.New("VAULT_APPROLE_ROLE_ID or VAULT_APPROLE_SECRET_ID not set")
ErrInvalidVaultAddress = errors.New("VAULT_ADDRESS is invalid. " +
"Should be of the form http(s)://<ip>:<port>")
@ -117,23 +121,29 @@ func Authenticate(client *api.Client, config map[string]interface{}) (token stri
return token, false, nil
}
// or try to use the kubernetes auth method
// or use another authentication method: kubernetes or approle
if GetVaultParam(config, AuthMethod) != "" {
token, err = GetAuthToken(client, config)
return token, true, err
}
return "", false, ErrVaultAuthParamsNotSet
}
}
// GetAuthToken tries to get the vault token for the provided authentication method.
func GetAuthToken(client *api.Client, config map[string]interface{}) (string, error) {
path, _, data, err := authenticate(client, config)
if err != nil {
return "", err
method := GetVaultParam(config, AuthMethod)
var secret *api.Secret
var err error
switch method {
case AuthMethodKubernetes:
secret, err = authKubernetes(client, config)
case AuthMethodAppRole:
secret, err = authAppRole(client, config)
default:
return "", ErrAuthMethodUnknown
}
secret, err := client.Logical().Write(path, data)
if err != nil {
return "", err
}
@ -144,49 +154,52 @@ func GetAuthToken(client *api.Client, config map[string]interface{}) (string, er
return "", errors.New("authentication returned empty client token")
}
return secret.Auth.ClientToken, err
return secret.Auth.ClientToken, nil
}
func authenticate(client *api.Client, config map[string]interface{}) (string, http.Header, map[string]interface{}, error) {
method := GetVaultParam(config, AuthMethod)
switch method {
case AuthMethodKubernetes:
return authKubernetes(client, config)
}
return "", nil, nil, fmt.Errorf("%s method: %s", method, ErrAuthMethodUnknown)
}
func authKubernetes(client *api.Client, config map[string]interface{}) (string, http.Header, map[string]interface{}, error) {
authConfig, err := buildAuthConfig(config)
if err != nil {
return "", nil, nil, err
}
method, err := kubernetes.NewKubernetesAuthMethod(authConfig)
if err != nil {
return "", nil, nil, err
}
return method.Authenticate(context.TODO(), client)
}
func buildAuthConfig(config map[string]interface{}) (*auth.AuthConfig, error) {
func authKubernetes(client *api.Client, config map[string]interface{}) (*api.Secret, error) {
role := GetVaultParam(config, AuthKubernetesRole)
if role == "" {
return nil, ErrKubernetesRole
}
loginOpts := []kubernetes.LoginOption{}
mountPath := GetVaultParam(config, AuthMountPath)
if mountPath == "" {
mountPath = AuthKubernetesMountPath
}
tokenPath := GetVaultParam(config, AuthKubernetesTokenPath)
loginOpts = append(loginOpts, kubernetes.WithMountPath(mountPath))
authMountPath := path.Join("auth", mountPath)
return &auth.AuthConfig{
Logger: hclog.NewNullLogger(),
MountPath: authMountPath,
Config: map[string]interface{}{
"role": role,
"token_path": tokenPath,
},
}, nil
tokenPath := GetVaultParam(config, AuthKubernetesTokenPath)
if tokenPath != "" {
loginOpts = append(loginOpts, kubernetes.WithServiceAccountTokenPath(tokenPath))
}
kAuth, err := kubernetes.NewKubernetesAuth(role, loginOpts...)
if err != nil {
return nil, err
}
return kAuth.Login(context.TODO(), client)
}
func authAppRole(client *api.Client, config map[string]interface{}) (*api.Secret, error) {
roleID := GetVaultParam(config, AuthAppRoleRoleID)
secretIDRaw := GetVaultParam(config, AuthAppRoleSecretID)
if roleID == "" || secretIDRaw == "" {
return nil, ErrAppRoleIDNotSet
}
secretID := &approle.SecretID{FromString: secretIDRaw}
appRoleAuth, err := approle.NewAppRoleAuth(
roleID,
secretID,
)
if err != nil {
return nil, err
}
return client.Auth().Login(context.TODO(), appRoleAuth)
}
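
With the switch to the upstream auth helpers, AppRole login is driven by config keys that mirror the environment variable names. A sketch, assuming GetVaultParam resolves these keys from the config map (the IDs are placeholders):

```
func appRoleToken(client *api.Client) (string, error) {
	config := map[string]interface{}{
		utils.AuthMethod:          utils.AuthMethodAppRole, // "approle"
		utils.AuthAppRoleRoleID:   "11111111-2222-3333-4444-555555555555",
		utils.AuthAppRoleSecretID: "66666666-7777-8888-9999-000000000000",
	}
	return utils.GetAuthToken(client, config)
}
```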

View File

@ -9,6 +9,7 @@ import (
"github.com/hashicorp/vault/api"
"github.com/libopenstorage/secrets"
"github.com/libopenstorage/secrets/vault/utils"
"github.com/sirupsen/logrus"
)
const (
@ -28,6 +29,8 @@ const (
AuthKubernetesRole = utils.AuthKubernetesRole
AuthKubernetesTokenPath = utils.AuthKubernetesTokenPath
AuthKubernetesMountPath = utils.AuthKubernetesMountPath
AuthAppRoleRoleID = utils.AuthAppRoleRoleID
AuthAppRoleSecretID = utils.AuthAppRoleSecretID
)
func init() {
@ -100,6 +103,14 @@ func New(
}
client.SetToken(token)
authMethod := "token"
method := utils.GetVaultParam(secretConfig, AuthMethod)
if method != "" && utils.GetVaultParam(secretConfig, api.EnvVaultToken) == "" {
authMethod = method
}
logrus.Infof("Authenticated to Vault with %v\n", authMethod)
backendPath := utils.GetVaultParam(secretConfig, VaultBackendPathKey)
if backendPath == "" {
backendPath = DefaultBackendPath
@ -164,25 +175,29 @@ func (v *vaultSecrets) keyPath(secretID string, keyContext map[string]string) ke
func (v *vaultSecrets) GetSecret(
secretID string,
keyContext map[string]string,
) (map[string]interface{}, error) {
) (map[string]interface{}, secrets.Version, error) {
key := v.keyPath(secretID, keyContext)
secretValue, err := v.read(key)
if err != nil {
return nil, fmt.Errorf("failed to get secret: %s: %s", key, err)
return nil, secrets.NoVersion, fmt.Errorf("failed to get secret: %s: %s", key, err)
}
if secretValue == nil {
return nil, secrets.ErrInvalidSecretId
return nil, secrets.NoVersion, secrets.ErrInvalidSecretId
}
if v.isKvBackendV2 {
if data, exists := secretValue.Data[kvDataKey]; exists && data != nil {
if data, ok := data.(map[string]interface{}); ok {
return data, nil
// TODO: Vault does support versioned secrets with KV Backend 2
// However it requires clients to invoke a different metadata API call
// to fetch the version. Once there is a need for versions from Vault
// we can add support for it.
return data, secrets.NoVersion, nil
}
}
return nil, secrets.ErrInvalidSecretId
return nil, secrets.NoVersion, secrets.ErrInvalidSecretId
} else {
return secretValue.Data, nil
return secretValue.Data, secrets.NoVersion, nil
}
}
@ -190,7 +205,7 @@ func (v *vaultSecrets) PutSecret(
secretID string,
secretData map[string]interface{},
keyContext map[string]string,
) error {
) (secrets.Version, error) {
if v.isKvBackendV2 {
secretData = map[string]interface{}{
kvDataKey: secretData,
@ -199,9 +214,9 @@ func (v *vaultSecrets) PutSecret(
key := v.keyPath(secretID, keyContext)
if _, err := v.write(key, secretData); err != nil {
return fmt.Errorf("failed to put secret: %s: %s", key, err)
return secrets.NoVersion, fmt.Errorf("failed to put secret: %s: %s", key, err)
}
return nil
return secrets.NoVersion, nil
}
func (v *vaultSecrets) DeleteSecret(

View File

@ -1,34 +0,0 @@
# Created by https://www.gitignore.io/api/macos
### macOS ###
*.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# End of https://www.gitignore.io/api/macos
cmd/*/*exe
.idea

View File

@ -1,24 +0,0 @@
language: go
env:
- GO111MODULE=off
go:
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- master
matrix:
fast_finish: true
allow_failures:
- go: master
sudo: false
script:
- go test -v -cpu=2
- go test -v -cpu=2 -race
- go test -v -cpu=2 -tags noasm
- go test -v -cpu=2 -race -tags noasm

View File

@ -1,28 +0,0 @@
Copyright (c) 2015, Pierre Curto
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of xxHash nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,90 +0,0 @@
# lz4 : LZ4 compression in pure Go
[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
## Overview
This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
The implementation is based on the reference C [one](https://github.com/lz4/lz4).
## Install
Assuming you have the go toolchain installed:
```
go get github.com/pierrec/lz4
```
There is a command line interface tool to compress and decompress LZ4 files.
```
go install github.com/pierrec/lz4/cmd/lz4c
```
Usage
```
Usage of lz4c:
-version
print the program version
Subcommands:
Compress the given files or from stdin to stdout.
compress [arguments] [<file name> ...]
-bc
enable block checksum
-l int
compression level (0=fastest)
-sc
disable stream checksum
-size string
block max size [64K,256K,1M,4M] (default "4M")
Uncompress the given files or from stdin to stdout.
uncompress [arguments] [<file name> ...]
```
## Example
```
// Compress and uncompress an input string.
s := "hello world"
r := strings.NewReader(s)
// The pipe will uncompress the data from the writer.
pr, pw := io.Pipe()
zw := lz4.NewWriter(pw)
zr := lz4.NewReader(pr)
go func() {
// Compress the input string.
_, _ = io.Copy(zw, r)
_ = zw.Close() // Make sure the writer is closed
_ = pw.Close() // Terminate the pipe
}()
_, _ = io.Copy(os.Stdout, zr)
// Output:
// hello world
```
## Contributing
Contributions are very welcome for bug fixing, performance improvements...!
- Open an issue with a proper description
- Send a pull request with appropriate test case(s)
## Contributors
Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.

View File

@ -1,413 +0,0 @@
package lz4
import (
"encoding/binary"
"math/bits"
"sync"
)
// blockHash hashes the lower 6 bytes into a value < htSize.
func blockHash(x uint64) uint32 {
const prime6bytes = 227718039650203
return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
}
// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
func CompressBlockBound(n int) int {
return n + n/255 + 16
}
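// Hedged worked example (added for illustration, not part of the upstream
// file): a 64KB input can grow to at most
// 65536 + 65536/255 + 16 = 65536 + 257 + 16 = 65809 bytes.
const compressBlockBound64K = 64<<10 + (64<<10)/255 + 16 // == CompressBlockBound(64 << 10)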
// UncompressBlock uncompresses the source buffer into the destination one,
// and returns the uncompressed size.
//
// The destination buffer must be sized appropriately.
//
// An error is returned if the source data is invalid or the destination buffer is too small.
func UncompressBlock(src, dst []byte) (int, error) {
if len(src) == 0 {
return 0, nil
}
if di := decodeBlock(dst, src); di >= 0 {
return di, nil
}
return 0, ErrInvalidSourceShortBuffer
}
// CompressBlock compresses the source buffer into the destination one.
// This is the fast version of LZ4 compression and also the default one.
//
// The argument hashTable is scratch space for a hash table used by the
// compressor. If provided, it should have length at least 1<<16. If it is
// shorter (or nil), CompressBlock allocates its own hash table.
//
// The size of the compressed data is returned.
//
// If the destination buffer is smaller than CompressBlockBound(len(src)) and
// the returned size is 0 with a nil error, then the data is incompressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
defer recoverBlock(&err)
// Return 0, nil only if the destination buffer size is < CompressBlockBound.
isNotCompressible := len(dst) < CompressBlockBound(len(src))
// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
// This significantly speeds up incompressible data and usually has very small impact on compression.
// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
const adaptSkipLog = 7
if len(hashTable) < htSize {
htIface := htPool.Get()
defer htPool.Put(htIface)
hashTable = (*(htIface).(*[htSize]int))[:]
}
// Prove to the compiler the table has at least htSize elements.
// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
hashTable = hashTable[:htSize]
// si: Current position of the search.
// anchor: Position of the current literals.
var si, di, anchor int
sn := len(src) - mfLimit
if sn <= 0 {
goto lastLiterals
}
// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
for si < sn {
// Hash the next 6 bytes (sequence)...
match := binary.LittleEndian.Uint64(src[si:])
h := blockHash(match)
h2 := blockHash(match >> 8)
// We check a match at s, s+1 and s+2 and pick the first one we get.
// Checking 3 positions only requires us to load the source once.
ref := hashTable[h]
ref2 := hashTable[h2]
hashTable[h] = si
hashTable[h2] = si + 1
offset := si - ref
// If offset <= 0 we got an old entry in the hash table.
if offset <= 0 || offset >= winSize || // Out of window.
uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
// No match. Start calculating another hash.
// The processor can usually do this out-of-order.
h = blockHash(match >> 16)
ref = hashTable[h]
// Check the second match at si+1
si += 1
offset = si - ref2
if offset <= 0 || offset >= winSize ||
uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
// No match. Check the third match at si+2
si += 1
offset = si - ref
hashTable[h] = si
if offset <= 0 || offset >= winSize ||
uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
// Skip one extra byte (at si+3) before we check 3 matches again.
si += 2 + (si-anchor)>>adaptSkipLog
continue
}
}
}
// Match found.
lLen := si - anchor // Literal length.
// We already matched 4 bytes.
mLen := 4
// Extend backwards if we can, reducing literals.
tOff := si - offset - 1
for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
si--
tOff--
lLen--
mLen++
}
// Add the match length, so we continue search at the end.
// Use mLen to store the offset base.
si, mLen = si+mLen, si+minMatch
// Find the longest match by looking in batches of 8 bytes.
for si+8 < sn {
x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
if x == 0 {
si += 8
} else {
// Stop at the first non-zero byte.
si += bits.TrailingZeros64(x) >> 3
break
}
}
mLen = si - mLen
if mLen < 0xF {
dst[di] = byte(mLen)
} else {
dst[di] = 0xF
}
// Encode literals length.
if lLen < 0xF {
dst[di] |= byte(lLen << 4)
} else {
dst[di] |= 0xF0
di++
l := lLen - 0xF
for ; l >= 0xFF; l -= 0xFF {
dst[di] = 0xFF
di++
}
dst[di] = byte(l)
}
di++
// Literals.
copy(dst[di:di+lLen], src[anchor:anchor+lLen])
di += lLen + 2
anchor = si
// Encode offset.
_ = dst[di] // Bound check elimination.
dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
// Encode match length part 2.
if mLen >= 0xF {
for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
dst[di] = 0xFF
di++
}
dst[di] = byte(mLen)
di++
}
// Check if we can load next values.
if si >= sn {
break
}
// Hash the position two bytes before the match end.
h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
hashTable[h] = si - 2
}
lastLiterals:
if isNotCompressible && anchor == 0 {
// Incompressible.
return 0, nil
}
// Last literals.
lLen := len(src) - anchor
if lLen < 0xF {
dst[di] = byte(lLen << 4)
} else {
dst[di] = 0xF0
di++
for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
dst[di] = 0xFF
di++
}
dst[di] = byte(lLen)
}
di++
// Write the last literals.
if isNotCompressible && di >= anchor {
// Incompressible.
return 0, nil
}
di += copy(dst[di:di+len(src)-anchor], src[anchor:])
return di, nil
}
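// compressRoundTripSketch is a hedged, hypothetical helper added for
// illustration (not part of the upstream file). It shows the intended
// calling pattern of the block API: a dst smaller than CompressBlockBound
// makes (0, nil) mean "incompressible", the hash table may be reused across
// calls, and the destination of UncompressBlock must be pre-sized because a
// raw block does not record its uncompressed length.
func compressRoundTripSketch(src []byte) ([]byte, error) {
	buf := make([]byte, len(src))
	ht := make([]int, htSize) // reusable scratch hash table
	n, err := CompressBlock(src, buf, ht)
	if err != nil {
		// A too-small dst surfaces as ErrInvalidSourceShortBuffer.
		return nil, err
	}
	if n == 0 {
		// Incompressible: callers typically store src uncompressed.
		return src, nil
	}
	dst := make([]byte, len(src))
	if _, err = UncompressBlock(buf[:n], dst); err != nil {
		return nil, err
	}
	return dst, nil
}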
// Pool of hash tables for CompressBlock.
var htPool = sync.Pool{
New: func() interface{} {
return new([htSize]int)
},
}
// blockHashHC hashes 4 bytes into a value < winSize.
func blockHashHC(x uint32) uint32 {
const hasher uint32 = 2654435761 // Knuth multiplicative hash.
return x * hasher >> (32 - winSizeLog)
}
// CompressBlockHC compresses the source buffer src into the destination dst
// with max search depth (use 0 or negative value for no max).
//
// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
//
// The size of the compressed data is returned.
//
// If the destination buffer is smaller than CompressBlockBound(len(src)) and
// the returned size is 0 with a nil error, then the data is incompressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
defer recoverBlock(&err)
// Return 0, nil only if the destination buffer size is < CompressBlockBound.
isNotCompressible := len(dst) < CompressBlockBound(len(src))
// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
// This significantly speeds up incompressible data and usually has very small impact on compression.
// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
const adaptSkipLog = 7
var si, di, anchor int
// hashTable: stores the last position found for a given hash
// chainTable: stores previous positions for a given hash
var hashTable, chainTable [winSize]int
if depth <= 0 {
depth = winSize
}
sn := len(src) - mfLimit
if sn <= 0 {
goto lastLiterals
}
for si < sn {
// Hash the next 4 bytes (sequence).
match := binary.LittleEndian.Uint32(src[si:])
h := blockHashHC(match)
// Follow the chain until out of window and give the longest match.
mLen := 0
offset := 0
for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
// must match to improve on the match length.
if src[next+mLen] != src[si+mLen] {
continue
}
ml := 0
// Compare the current position with a previous with the same hash.
for ml < sn-si {
x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
if x == 0 {
ml += 8
} else {
// Stop at the first non-zero byte.
ml += bits.TrailingZeros64(x) >> 3
break
}
}
if ml < minMatch || ml <= mLen {
// Match too small (<minMatch) or smaller than the current match.
continue
}
// Found a longer match, keep its position and length.
mLen = ml
offset = si - next
// Try another previous position with the same hash.
try--
}
chainTable[si&winMask] = hashTable[h]
hashTable[h] = si
// No match found.
if mLen == 0 {
si += 1 + (si-anchor)>>adaptSkipLog
continue
}
// Match found.
// Update hash/chain tables with overlapping bytes:
// si already hashed, add everything from si+1 up to the match length.
winStart := si + 1
if ws := si + mLen - winSize; ws > winStart {
winStart = ws
}
for si, ml := winStart, si+mLen; si < ml; {
match >>= 8
match |= uint32(src[si+3]) << 24
h := blockHashHC(match)
chainTable[si&winMask] = hashTable[h]
hashTable[h] = si
si++
}
lLen := si - anchor
si += mLen
mLen -= minMatch // Match length does not include minMatch.
if mLen < 0xF {
dst[di] = byte(mLen)
} else {
dst[di] = 0xF
}
// Encode literals length.
if lLen < 0xF {
dst[di] |= byte(lLen << 4)
} else {
dst[di] |= 0xF0
di++
l := lLen - 0xF
for ; l >= 0xFF; l -= 0xFF {
dst[di] = 0xFF
di++
}
dst[di] = byte(l)
}
di++
// Literals.
copy(dst[di:di+lLen], src[anchor:anchor+lLen])
di += lLen
anchor = si
// Encode offset.
di += 2
dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
// Encode match length part 2.
if mLen >= 0xF {
for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
dst[di] = 0xFF
di++
}
dst[di] = byte(mLen)
di++
}
}
if isNotCompressible && anchor == 0 {
// Incompressible.
return 0, nil
}
// Last literals.
lastLiterals:
lLen := len(src) - anchor
if lLen < 0xF {
dst[di] = byte(lLen << 4)
} else {
dst[di] = 0xF0
di++
lLen -= 0xF
for ; lLen >= 0xFF; lLen -= 0xFF {
dst[di] = 0xFF
di++
}
dst[di] = byte(lLen)
}
di++
// Write the last literals.
if isNotCompressible && di >= anchor {
// Incompressible.
return 0, nil
}
di += copy(dst[di:di+len(src)-anchor], src[anchor:])
return di, nil
}
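// compressHCSketch is a hedged, hypothetical helper added for illustration
// (not part of the upstream file). CompressBlockHC trades speed for a
// better ratio; depth bounds how many chain candidates are tried per
// position (0 or a negative value means the full window).
func compressHCSketch(src []byte) ([]byte, error) {
	buf := make([]byte, CompressBlockBound(len(src)))
	n, err := CompressBlockHC(src, buf, 16)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}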

View File

@ -1,23 +0,0 @@
// +build lz4debug
package lz4
import (
"fmt"
"os"
"path/filepath"
"runtime"
)
const debugFlag = true
func debug(args ...interface{}) {
_, file, line, _ := runtime.Caller(1)
file = filepath.Base(file)
f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
if f[len(f)-1] != '\n' {
f += "\n"
}
fmt.Fprintf(os.Stderr, f, args[1:]...)
}

View File

@ -1,7 +0,0 @@
// +build !lz4debug
package lz4
const debugFlag = false
func debug(args ...interface{}) {}

View File

@ -1,8 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
package lz4
//go:noescape
func decodeBlock(dst, src []byte) int

View File

@ -1,375 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// AX scratch
// BX scratch
// CX scratch
// DX token
//
// DI &dst
// SI &src
// R8 &dst + len(dst)
// R9 &src + len(src)
// R11 &dst
// R12 short output end
// R13 short input end
// func decodeBlock(dst, src []byte) int
// using 50 bytes of stack currently
TEXT ·decodeBlock(SB), NOSPLIT, $64-56
MOVQ dst_base+0(FP), DI
MOVQ DI, R11
MOVQ dst_len+8(FP), R8
ADDQ DI, R8
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R9
ADDQ SI, R9
// shortcut ends
// short output end
MOVQ R8, R12
SUBQ $32, R12
// short input end
MOVQ R9, R13
SUBQ $16, R13
loop:
// for si < len(src)
CMPQ SI, R9
JGE end
// token := uint32(src[si])
MOVBQZX (SI), DX
INCQ SI
// lit_len = token >> 4
// if lit_len > 0
// CX = lit_len
MOVQ DX, CX
SHRQ $4, CX
// if lit_len != 0xF
CMPQ CX, $0xF
JEQ lit_len_loop_pre
CMPQ DI, R12
JGE lit_len_loop_pre
CMPQ SI, R13
JGE lit_len_loop_pre
// copy shortcut
// A two-stage shortcut for the most common case:
// 1) If the literal length is 0..14, and there is enough space,
// enter the shortcut and copy 16 bytes on behalf of the literals
// (in the fast mode, only 8 bytes can be safely copied this way).
// 2) Further if the match length is 4..18, copy 18 bytes in a similar
// manner; but we ensure that there's enough space in the output for
// those 18 bytes earlier, upon entering the shortcut (in other words,
// there is a combined check for both stages).
// copy literal
MOVOU (SI), X0
MOVOU X0, (DI)
ADDQ CX, DI
ADDQ CX, SI
MOVQ DX, CX
ANDQ $0xF, CX
// The second stage: prepare for match copying, decode full info.
// If it doesn't work out, the info won't be wasted.
// offset := uint16(data[:2])
MOVWQZX (SI), DX
ADDQ $2, SI
MOVQ DI, AX
SUBQ DX, AX
CMPQ AX, DI
JGT err_short_buf
// if we can't do the second stage then jump straight to read the
// match length, we already have the offset.
CMPQ CX, $0xF
JEQ match_len_loop_pre
CMPQ DX, $8
JLT match_len_loop_pre
CMPQ AX, R11
JLT err_short_buf
// memcpy(op + 0, match + 0, 8);
MOVQ (AX), BX
MOVQ BX, (DI)
// memcpy(op + 8, match + 8, 8);
MOVQ 8(AX), BX
MOVQ BX, 8(DI)
// memcpy(op +16, match +16, 2);
MOVW 16(AX), BX
MOVW BX, 16(DI)
ADDQ $4, DI // minmatch
ADDQ CX, DI
// shortcut complete, load next token
JMP loop
lit_len_loop_pre:
// if lit_len > 0
CMPQ CX, $0
JEQ offset
CMPQ CX, $0xF
JNE copy_literal
lit_len_loop:
// for src[si] == 0xFF
CMPB (SI), $0xFF
JNE lit_len_finalise
// bounds check src[si+1]
MOVQ SI, AX
ADDQ $1, AX
CMPQ AX, R9
JGT err_short_buf
// lit_len += 0xFF
ADDQ $0xFF, CX
INCQ SI
JMP lit_len_loop
lit_len_finalise:
// lit_len += int(src[si])
// si++
MOVBQZX (SI), AX
ADDQ AX, CX
INCQ SI
copy_literal:
// bounds check src and dst
MOVQ SI, AX
ADDQ CX, AX
CMPQ AX, R9
JGT err_short_buf
MOVQ DI, AX
ADDQ CX, AX
CMPQ AX, R8
JGT err_short_buf
// what's a good cutoff to call memmove?
CMPQ CX, $16
JGT memmove_lit
// if len(dst[di:]) < 16
MOVQ R8, AX
SUBQ DI, AX
CMPQ AX, $16
JLT memmove_lit
// if len(src[si:]) < 16
MOVQ R9, AX
SUBQ SI, AX
CMPQ AX, $16
JLT memmove_lit
MOVOU (SI), X0
MOVOU X0, (DI)
JMP finish_lit_copy
memmove_lit:
// memmove(to, from, len)
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
// spill
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP) // need len to inc SI, DI after
MOVB DX, 48(SP)
CALL runtime·memmove(SB)
// restore registers
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVB 48(SP), DX
// recalc initial values
MOVQ dst_base+0(FP), R8
MOVQ R8, R11
ADDQ dst_len+8(FP), R8
MOVQ src_base+24(FP), R9
ADDQ src_len+32(FP), R9
MOVQ R8, R12
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
finish_lit_copy:
ADDQ CX, SI
ADDQ CX, DI
CMPQ SI, R9
JGE end
offset:
// CX := mLen
// free up DX to use for offset
MOVQ DX, CX
MOVQ SI, AX
ADDQ $2, AX
CMPQ AX, R9
JGT err_short_buf
// offset
// DX := int(src[si]) | int(src[si+1])<<8
MOVWQZX (SI), DX
ADDQ $2, SI
// 0 offset is invalid
CMPQ DX, $0
JEQ err_corrupt
ANDB $0xF, CX
match_len_loop_pre:
// if mlen != 0xF
CMPB CX, $0xF
JNE copy_match
match_len_loop:
// for src[si] == 0xFF
// match_len += 0xFF
CMPB (SI), $0xFF
JNE match_len_finalise
// bounds check src[si+1]
MOVQ SI, AX
ADDQ $1, AX
CMPQ AX, R9
JGT err_short_buf
ADDQ $0xFF, CX
INCQ SI
JMP match_len_loop
match_len_finalise:
// match_len += int(src[si])
// si++
MOVBQZX (SI), AX
ADDQ AX, CX
INCQ SI
copy_match:
// mLen += minMatch
ADDQ $4, CX
// check we have match_len bytes left in dst
// di+match_len < len(dst)
MOVQ DI, AX
ADDQ CX, AX
CMPQ AX, R8
JGT err_short_buf
// DX = offset
// CX = match_len
// BX = &dst + (di - offset)
MOVQ DI, BX
SUBQ DX, BX
// check BX is within dst
// if BX < &dst
CMPQ BX, R11
JLT err_short_buf
// if offset + match_len < di
MOVQ BX, AX
ADDQ CX, AX
CMPQ DI, AX
JGT copy_interior_match
// AX := len(dst[:di])
// MOVQ DI, AX
// SUBQ R11, AX
// copy 16 bytes at a time
// if di-offset < 16 copy 16-(di-offset) bytes to di
// then do the remaining
copy_match_loop:
// for match_len >= 0
// dst[di] = dst[i]
// di++
// i++
MOVB (BX), AX
MOVB AX, (DI)
INCQ DI
INCQ BX
DECQ CX
CMPQ CX, $0
JGT copy_match_loop
JMP loop
copy_interior_match:
CMPQ CX, $16
JGT memmove_match
// if len(dst[di:]) < 16
MOVQ R8, AX
SUBQ DI, AX
CMPQ AX, $16
JLT memmove_match
MOVOU (BX), X0
MOVOU X0, (DI)
ADDQ CX, DI
JMP loop
memmove_match:
// memmove(to, from, len)
MOVQ DI, 0(SP)
MOVQ BX, 8(SP)
MOVQ CX, 16(SP)
// spill
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP) // need len to inc SI, DI after
CALL runtime·memmove(SB)
// restore registers
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
// recalc initial values
MOVQ dst_base+0(FP), R8
MOVQ R8, R11 // TODO: make these sensible numbers
ADDQ dst_len+8(FP), R8
MOVQ src_base+24(FP), R9
ADDQ src_len+32(FP), R9
MOVQ R8, R12
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
ADDQ CX, DI
JMP loop
err_corrupt:
MOVQ $-1, ret+48(FP)
RET
err_short_buf:
MOVQ $-2, ret+48(FP)
RET
end:
SUBQ R11, DI
MOVQ DI, ret+48(FP)
RET

View File

@ -1,98 +0,0 @@
// +build !amd64 appengine !gc noasm
package lz4
func decodeBlock(dst, src []byte) (ret int) {
const hasError = -2
defer func() {
if recover() != nil {
ret = hasError
}
}()
var si, di int
for {
// Literals and match lengths (token).
b := int(src[si])
si++
// Literals.
if lLen := b >> 4; lLen > 0 {
switch {
case lLen < 0xF && si+16 < len(src):
// Shortcut 1
// if we have enough room in src and dst, and the literals length
// is small enough (0..14) then copy all 16 bytes, even if not all
// are part of the literals.
copy(dst[di:], src[si:si+16])
si += lLen
di += lLen
if mLen := b & 0xF; mLen < 0xF {
// Shortcut 2
// if the match length (4..18) fits within the literals, then copy
// all 18 bytes, even if not all are part of the literals.
mLen += 4
if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
i := di - offset
end := i + 18
if end > len(dst) {
// The remaining buffer may not hold 18 bytes.
// See https://github.com/pierrec/lz4/issues/51.
end = len(dst)
}
copy(dst[di:], dst[i:end])
si += 2
di += mLen
continue
}
}
case lLen == 0xF:
for src[si] == 0xFF {
lLen += 0xFF
si++
}
lLen += int(src[si])
si++
fallthrough
default:
copy(dst[di:di+lLen], src[si:si+lLen])
si += lLen
di += lLen
}
}
if si >= len(src) {
return di
}
offset := int(src[si]) | int(src[si+1])<<8
if offset == 0 {
return hasError
}
si += 2
// Match.
mLen := b & 0xF
if mLen == 0xF {
for src[si] == 0xFF {
mLen += 0xFF
si++
}
mLen += int(src[si])
si++
}
mLen += minMatch
// Copy the match.
expanded := dst[di-offset:]
if mLen > offset {
// Efficiently copy the match dst[di-offset:di] into the dst slice.
bytesToCopy := offset * (mLen / offset)
for n := offset; n <= bytesToCopy+offset; n *= 2 {
copy(expanded[n:], expanded[:n])
}
di += bytesToCopy
mLen -= bytesToCopy
}
di += copy(dst[di:di+mLen], expanded[:mLen])
}
}
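// expandMatchSketch is a hedged, hypothetical illustration of the doubling
// copy above (added for illustration, not part of the upstream file). With
// di=2, offset=2 and mLen=6 the match overlaps the bytes being written, so
// the copied region is doubled (2, then 4 bytes) instead of copying byte by
// byte, turning the seed "ab" into "abababab".
func expandMatchSketch() []byte {
	dst := make([]byte, 8)
	copy(dst, "ab")
	di, offset, mLen := 2, 2, 6
	expanded := dst[di-offset:]
	bytesToCopy := offset * (mLen / offset) // 6
	for n := offset; n <= bytesToCopy+offset; n *= 2 {
		copy(expanded[n:], expanded[:n])
	}
	di += bytesToCopy
	mLen -= bytesToCopy
	di += copy(dst[di:di+mLen], expanded[:mLen])
	return dst[:di] // "abababab"
}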

View File

@ -1,30 +0,0 @@
package lz4
import (
"errors"
"fmt"
"os"
rdebug "runtime/debug"
)
var (
// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
// block is corrupted or the destination buffer is not large enough for the uncompressed data.
ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
// ErrInvalid is returned when reading an invalid LZ4 archive.
ErrInvalid = errors.New("lz4: bad magic number")
// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
ErrBlockDependency = errors.New("lz4: block dependency not supported")
// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
)
func recoverBlock(e *error) {
if r := recover(); r != nil && *e == nil {
if debugFlag {
fmt.Fprintln(os.Stderr, r)
rdebug.PrintStack()
}
*e = ErrInvalidSourceShortBuffer
}
}
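// guardedWriteSketch is a hedged, hypothetical illustration (added, not
// part of the upstream file) of the recover-based error handling above: an
// out-of-bounds write inside a (de)compressor becomes
// ErrInvalidSourceShortBuffer instead of a panic reaching the caller.
func guardedWriteSketch(dst []byte) (_ int, err error) {
	defer recoverBlock(&err)
	dst[0] = 0xFF // panics for an empty dst; recovered as an error
	return 1, nil
}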

View File

@ -1,223 +0,0 @@
// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
// (https://github.com/Cyan4973/XXH/)
package xxh32
import (
"encoding/binary"
)
const (
prime1 uint32 = 2654435761
prime2 uint32 = 2246822519
prime3 uint32 = 3266489917
prime4 uint32 = 668265263
prime5 uint32 = 374761393
primeMask = 0xFFFFFFFF
prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535
)
// XXHZero represents an xxhash32 object with seed 0.
type XXHZero struct {
v1 uint32
v2 uint32
v3 uint32
v4 uint32
totalLen uint64
buf [16]byte
bufused int
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (xxh XXHZero) Sum(b []byte) []byte {
h32 := xxh.Sum32()
return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
}
// Reset resets the Hash to its initial state.
func (xxh *XXHZero) Reset() {
xxh.v1 = prime1plus2
xxh.v2 = prime2
xxh.v3 = 0
xxh.v4 = prime1minus
xxh.totalLen = 0
xxh.bufused = 0
}
// Size returns the number of bytes returned by Sum().
func (xxh *XXHZero) Size() int {
return 4
}
// BlockSize gives the minimum number of bytes accepted by Write().
func (xxh *XXHZero) BlockSize() int {
return 1
}
// Write adds input bytes to the Hash.
// It never returns an error.
func (xxh *XXHZero) Write(input []byte) (int, error) {
if xxh.totalLen == 0 {
xxh.Reset()
}
n := len(input)
m := xxh.bufused
xxh.totalLen += uint64(n)
r := len(xxh.buf) - m
if n < r {
copy(xxh.buf[m:], input)
xxh.bufused += len(input)
return n, nil
}
p := 0
// Causes compiler to work directly from registers instead of stack:
v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
if m > 0 {
// some data left from previous update
copy(xxh.buf[xxh.bufused:], input[:r])
xxh.bufused += len(input) - r
// fast rotl(13)
buf := xxh.buf[:16] // BCE hint.
v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
p = r
xxh.bufused = 0
}
for n := n - 16; p <= n; p += 16 {
sub := input[p:][:16] //BCE hint for compiler
v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
}
xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
copy(xxh.buf[xxh.bufused:], input[p:])
xxh.bufused += len(input) - p
return n, nil
}
// Sum32 returns the 32 bits Hash value.
func (xxh *XXHZero) Sum32() uint32 {
h32 := uint32(xxh.totalLen)
// Compare against the full length: uint32(totalLen) could wrap for very large inputs.
if xxh.totalLen >= 16 {
h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
} else {
h32 += prime5
}
p := 0
n := xxh.bufused
buf := xxh.buf
for n := n - 4; p <= n; p += 4 {
h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
h32 = rol17(h32) * prime4
}
for ; p < n; p++ {
h32 += uint32(buf[p]) * prime5
h32 = rol11(h32) * prime1
}
h32 ^= h32 >> 15
h32 *= prime2
h32 ^= h32 >> 13
h32 *= prime3
h32 ^= h32 >> 16
return h32
}
// ChecksumZero returns the 32 bits Hash value.
func ChecksumZero(input []byte) uint32 {
n := len(input)
h32 := uint32(n)
if n < 16 {
h32 += prime5
} else {
v1 := prime1plus2
v2 := prime2
v3 := uint32(0)
v4 := prime1minus
p := 0
for n := n - 16; p <= n; p += 16 {
sub := input[p:][:16] //BCE hint for compiler
v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
}
input = input[p:]
n -= p
h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
}
p := 0
for n := n - 4; p <= n; p += 4 {
h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
h32 = rol17(h32) * prime4
}
for p < n {
h32 += uint32(input[p]) * prime5
h32 = rol11(h32) * prime1
p++
}
h32 ^= h32 >> 15
h32 *= prime2
h32 ^= h32 >> 13
h32 *= prime3
h32 ^= h32 >> 16
return h32
}
// Uint32Zero hashes x with seed 0.
func Uint32Zero(x uint32) uint32 {
h := prime5 + 4 + x*prime3
h = rol17(h) * prime4
h ^= h >> 15
h *= prime2
h ^= h >> 13
h *= prime3
h ^= h >> 16
return h
}
func rol1(u uint32) uint32 {
return u<<1 | u>>31
}
func rol7(u uint32) uint32 {
return u<<7 | u>>25
}
func rol11(u uint32) uint32 {
return u<<11 | u>>21
}
func rol12(u uint32) uint32 {
return u<<12 | u>>20
}
func rol13(u uint32) uint32 {
return u<<13 | u>>19
}
func rol17(u uint32) uint32 {
return u<<17 | u>>15
}
func rol18(u uint32) uint32 {
return u<<18 | u>>14
}
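// checksumSketch is a hedged, hypothetical helper (added for illustration,
// not part of the upstream file): the streaming XXHZero API and the
// one-shot ChecksumZero must agree on the same input, whatever the Write
// chunking.
func checksumSketch(data []byte) bool {
	var h XXHZero
	for rest := data; len(rest) > 0; {
		n := 5 // arbitrary chunk size, exercises the internal 16 byte buffer
		if n > len(rest) {
			n = len(rest)
		}
		_, _ = h.Write(rest[:n]) // Write never returns an error
		rest = rest[n:]
	}
	return h.Sum32() == ChecksumZero(data)
}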

116
vendor/github.com/pierrec/lz4/lz4.go generated vendored
View File

@ -1,116 +0,0 @@
// Package lz4 implements reading and writing lz4 compressed data (a frame),
// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
//
// Although the block level compression and decompression functions are exposed and are fully compatible
// with the lz4 block format definition, they are low level and should not be used directly.
// For a complete description of an lz4 compressed block, see:
// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
//
// See https://github.com/Cyan4973/lz4 for the reference C implementation.
//
package lz4
import (
"math/bits"
"sync"
)
const (
// Extension is the LZ4 frame file name extension
Extension = ".lz4"
// Version is the LZ4 frame format version
Version = 1
frameMagic uint32 = 0x184D2204
frameSkipMagic uint32 = 0x184D2A50
frameMagicLegacy uint32 = 0x184C2102
// The following constants are used to setup the compression algorithm.
minMatch = 4 // the minimum size of a match sequence (4 bytes)
winSizeLog = 16 // LZ4 64Kb window size limit
winSize = 1 << winSizeLog
winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
compressedBlockFlag = 1 << 31
compressedBlockMask = compressedBlockFlag - 1
// hashLog determines the size of the hash table used to quickly find a previous match position.
// Its value influences the compression speed and memory usage, the lower the faster,
// but at the expense of the compression ratio.
// 16 seems to be the best compromise for fast compression.
hashLog = 16
htSize = 1 << hashLog
mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
)
// Map the block max size ID to its value in bytes: 64KB, 256KB, 1MB and 4MB.
const (
blockSize64K = 1 << (16 + 2*iota)
blockSize256K
blockSize1M
blockSize4M
)
var (
// Keep a pool of buffers for each valid block sizes.
bsMapValue = [...]*sync.Pool{
newBufferPool(2 * blockSize64K),
newBufferPool(2 * blockSize256K),
newBufferPool(2 * blockSize1M),
newBufferPool(2 * blockSize4M),
}
)
// newBufferPool returns a pool for buffers of the given size.
func newBufferPool(size int) *sync.Pool {
return &sync.Pool{
New: func() interface{} {
return make([]byte, size)
},
}
}
// getBuffer retrieves a buffer of the given size from its pool.
func getBuffer(size int) []byte {
idx := blockSizeValueToIndex(size) - 4
return bsMapValue[idx].Get().([]byte)
}
// putBuffer returns a buffer to its pool.
func putBuffer(size int, buf []byte) {
if cap(buf) > 0 {
idx := blockSizeValueToIndex(size) - 4
bsMapValue[idx].Put(buf[:cap(buf)])
}
}
func blockSizeIndexToValue(i byte) int {
return 1 << (16 + 2*uint(i))
}
func isValidBlockSize(size int) bool {
const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M
return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
}
func blockSizeValueToIndex(size int) byte {
return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
}
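// blockSizeSketch is a hedged, hypothetical helper (added for illustration,
// not part of the upstream file): frame descriptor block max size IDs 4..7
// map to 64KB..4MB and round-trip through the helpers above.
func blockSizeSketch() bool {
	for id := byte(4); id <= 7; id++ {
		size := blockSizeIndexToValue(id - 4)
		if !isValidBlockSize(size) || blockSizeValueToIndex(size) != id {
			return false
		}
	}
	return true
}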
// Header describes the various flags that can be set on a Writer or obtained from a Reader.
// The default values match those of the LZ4 frame format definition
// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
//
// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
// It is the caller's responsibility to check them if necessary.
type Header struct {
BlockChecksum bool // Compressed blocks checksum flag.
NoChecksum bool // Frame checksum flag.
BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
Size uint64 // Frame total size. It is _not_ computed by the Writer.
CompressionLevel int // Compression level (higher is better, use 0 for fastest compression).
done bool // Header processed flag (Read or Write and checked).
}
// Reset resets the Header's internal status.
func (h *Header) Reset() {
h.done = false
}
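// writerHeaderSketch is a hedged, hypothetical helper (added for
// illustration, not part of the upstream file). It assumes the package's
// Writer, defined in writer.go and not shown in this diff, embeds Header
// the same way Reader does, and that an io import is available; frame
// options are set on the Header before the first Write.
func writerHeaderSketch(w io.Writer) *Writer {
	zw := NewWriter(w)
	zw.Header = Header{
		BlockMaxSize:     256 << 10, // one of 64KB, 256KB, 1MB, 4MB
		BlockChecksum:    true,      // checksum every compressed block
		CompressionLevel: 4,         // 0 = fastest compression
	}
	return zw
}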

View File

@ -1,29 +0,0 @@
//+build go1.10
package lz4
import (
"fmt"
"strings"
)
func (h Header) String() string {
var s strings.Builder
s.WriteString(fmt.Sprintf("%T{", h))
if h.BlockChecksum {
s.WriteString("BlockChecksum: true ")
}
if h.NoChecksum {
s.WriteString("NoChecksum: true ")
}
if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
}
if l := h.CompressionLevel; l != 0 {
s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
}
s.WriteByte('}')
return s.String()
}

View File

@ -1,29 +0,0 @@
//+build !go1.10
package lz4
import (
"bytes"
"fmt"
)
func (h Header) String() string {
var s bytes.Buffer
s.WriteString(fmt.Sprintf("%T{", h))
if h.BlockChecksum {
s.WriteString("BlockChecksum: true ")
}
if h.NoChecksum {
s.WriteString("NoChecksum: true ")
}
if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
}
if l := h.CompressionLevel; l != 0 {
s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
}
s.WriteByte('}')
return s.String()
}

View File

@ -1,335 +0,0 @@
package lz4
import (
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"github.com/pierrec/lz4/internal/xxh32"
)
// Reader implements the LZ4 frame decoder.
// The Header is set after the first call to Read().
// The Header may change between Read() calls in case of concatenated frames.
type Reader struct {
Header
// Handler called when a block has been successfully read.
// It provides the number of bytes read.
OnBlockDone func(size int)
buf [8]byte // Scrap buffer.
pos int64 // Current position in src.
src io.Reader // Source.
zdata []byte // Compressed data.
data []byte // Uncompressed data.
idx int // Index of unread bytes into data.
checksum xxh32.XXHZero // Frame hash.
skip int64 // Bytes to skip before next read.
dpos int64 // Position in dest
}
// NewReader returns a new LZ4 frame decoder.
// No access to the underlying io.Reader is performed.
func NewReader(src io.Reader) *Reader {
r := &Reader{src: src}
return r
}
// readHeader checks the frame magic number and parses the frame descriptor.
// Skippable frames are supported even as a first frame, although the LZ4
// specification recommends that skippable frames not be used as first frames.
func (z *Reader) readHeader(first bool) error {
defer z.checksum.Reset()
buf := z.buf[:]
for {
magic, err := z.readUint32()
if err != nil {
z.pos += 4
if !first && err == io.ErrUnexpectedEOF {
return io.EOF
}
return err
}
if magic == frameMagic {
break
}
if magic>>8 != frameSkipMagic>>8 {
return ErrInvalid
}
skipSize, err := z.readUint32()
if err != nil {
return err
}
z.pos += 4
m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
if err != nil {
return err
}
z.pos += m
}
// Header.
if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
return err
}
z.pos += 8
b := buf[0]
if v := b >> 6; v != Version {
return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
}
if b>>5&1 == 0 {
return ErrBlockDependency
}
z.BlockChecksum = b>>4&1 > 0
frameSize := b>>3&1 > 0
z.NoChecksum = b>>2&1 == 0
bmsID := buf[1] >> 4 & 0x7
if bmsID < 4 || bmsID > 7 {
return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
}
bSize := blockSizeIndexToValue(bmsID - 4)
z.BlockMaxSize = bSize
// Allocate the compressed/uncompressed buffers.
// The compressed buffer cannot exceed the uncompressed one.
if n := 2 * bSize; cap(z.zdata) < n {
z.zdata = make([]byte, n, n)
}
if debugFlag {
debug("header block max size id=%d size=%d", bmsID, bSize)
}
z.zdata = z.zdata[:bSize]
z.data = z.zdata[:cap(z.zdata)][bSize:]
z.idx = len(z.data)
_, _ = z.checksum.Write(buf[0:2])
if frameSize {
buf := buf[:8]
if _, err := io.ReadFull(z.src, buf); err != nil {
return err
}
z.Size = binary.LittleEndian.Uint64(buf)
z.pos += 8
_, _ = z.checksum.Write(buf)
}
// Header checksum.
if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
return err
}
z.pos++
if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
}
z.Header.done = true
if debugFlag {
debug("header read: %v", z.Header)
}
return nil
}
// Read decompresses data from the underlying source into the supplied buffer.
//
// Since there can be multiple streams concatenated, Header values may
// change between calls to Read(). If that is the case, no data is actually read from
// the underlying io.Reader, to allow for potential input buffer resizing.
func (z *Reader) Read(buf []byte) (int, error) {
if debugFlag {
debug("Read buf len=%d", len(buf))
}
if !z.Header.done {
if err := z.readHeader(true); err != nil {
return 0, err
}
if debugFlag {
debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
}
}
if len(buf) == 0 {
return 0, nil
}
if z.idx == len(z.data) {
// No data ready for reading, process the next block.
if debugFlag {
debug("reading block from writer")
}
// Reset uncompressed buffer
z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
// Block length: 0 = end of frame, highest bit set: uncompressed.
bLen, err := z.readUint32()
if err != nil {
return 0, err
}
z.pos += 4
if bLen == 0 {
// End of frame reached.
if !z.NoChecksum {
// Validate the frame checksum.
checksum, err := z.readUint32()
if err != nil {
return 0, err
}
if debugFlag {
debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
}
z.pos += 4
if h := z.checksum.Sum32(); checksum != h {
return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
}
}
// Get ready for the next concatenated frame and keep the position.
pos := z.pos
z.Reset(z.src)
z.pos = pos
// Since multiple frames can be concatenated, check for more.
return 0, z.readHeader(false)
}
if debugFlag {
debug("raw block size %d", bLen)
}
if bLen&compressedBlockFlag > 0 {
// Uncompressed block.
bLen &= compressedBlockMask
if debugFlag {
debug("uncompressed block size %d", bLen)
}
if int(bLen) > cap(z.data) {
return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
}
z.data = z.data[:bLen]
if _, err := io.ReadFull(z.src, z.data); err != nil {
return 0, err
}
z.pos += int64(bLen)
if z.OnBlockDone != nil {
z.OnBlockDone(int(bLen))
}
if z.BlockChecksum {
checksum, err := z.readUint32()
if err != nil {
return 0, err
}
z.pos += 4
if h := xxh32.ChecksumZero(z.data); h != checksum {
return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
}
}
} else {
// Compressed block.
if debugFlag {
debug("compressed block size %d", bLen)
}
if int(bLen) > cap(z.data) {
return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
}
zdata := z.zdata[:bLen]
if _, err := io.ReadFull(z.src, zdata); err != nil {
return 0, err
}
z.pos += int64(bLen)
if z.BlockChecksum {
checksum, err := z.readUint32()
if err != nil {
return 0, err
}
z.pos += 4
if h := xxh32.ChecksumZero(zdata); h != checksum {
return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
}
}
n, err := UncompressBlock(zdata, z.data)
if err != nil {
return 0, err
}
z.data = z.data[:n]
if z.OnBlockDone != nil {
z.OnBlockDone(n)
}
}
if !z.NoChecksum {
_, _ = z.checksum.Write(z.data)
if debugFlag {
debug("current frame checksum %x", z.checksum.Sum32())
}
}
z.idx = 0
}
if z.skip > int64(len(z.data[z.idx:])) {
z.skip -= int64(len(z.data[z.idx:]))
z.dpos += int64(len(z.data[z.idx:]))
z.idx = len(z.data)
return 0, nil
}
z.idx += int(z.skip)
z.dpos += z.skip
z.skip = 0
n := copy(buf, z.data[z.idx:])
z.idx += n
z.dpos += int64(n)
if debugFlag {
debug("copied %d bytes to input", n)
}
return n, nil
}
// Seek implements io.Seeker, but supports seeking forward from the current
// position only. Any other seek will return an error. Allows skipping output
// bytes which aren't needed, which in some scenarios is faster than reading
// and discarding them.
// Note this may cause future calls to Read() to read 0 bytes if all of the
// data they would have returned is skipped.
func (z *Reader) Seek(offset int64, whence int) (int64, error) {
if offset < 0 || whence != io.SeekCurrent {
return z.dpos + z.skip, ErrUnsupportedSeek
}
z.skip += offset
return z.dpos + z.skip, nil
}
// Reset discards the Reader's state and makes it equivalent to the
// result of NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) {
z.Header = Header{}
z.pos = 0
z.src = r
z.zdata = z.zdata[:0]
z.data = z.data[:0]
z.idx = 0
z.checksum.Reset()
}
// readUint32 reads a uint32 using the Reader's scratch buffer.
// The idea is to reuse the already allocated buffer, avoiding additional allocations.
func (z *Reader) readUint32() (uint32, error) {
buf := z.buf[:4]
_, err := io.ReadFull(z.src, buf)
x := binary.LittleEndian.Uint32(buf)
return x, err
}
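// readerSkipSketch is a hedged, hypothetical helper (added for
// illustration, not part of the upstream file): Seek can only skip forward
// in the decompressed output, which is cheaper than reading and discarding,
// after which the rest of the frame is copied out.
func readerSkipSketch(src io.Reader, dst io.Writer, offset int64) error {
	zr := NewReader(src)
	if _, err := zr.Seek(offset, io.SeekCurrent); err != nil {
		return err
	}
	_, err := io.Copy(dst, zr)
	return err
}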

Some files were not shown because too many files have changed in this diff.