Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

rebase: vendor files required for kmip

Signed-off-by: Rakshith R <rar@redhat.com>
vendor/github.com/gemalto/flume/.gitignore (new file, generated, vendored, 2 lines)
@@ -0,0 +1,2 @@
vendor
build
vendor/github.com/gemalto/flume/.golangci.yml (new file, generated, vendored, 332 lines)
@ -0,0 +1,332 @@
|
||||
# This file contains all available configuration options
|
||||
# with their default values.
|
||||
|
||||
# options for analysis running
|
||||
run:
|
||||
# default concurrency is a available CPU number
|
||||
# concurrency: 4
|
||||
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
# deadline: 1m
|
||||
|
||||
# exit code when at least one issue was found, default is 1
|
||||
# issues-exit-code: 1
|
||||
|
||||
# include test files or not, default is true
|
||||
tests: true
|
||||
|
||||
# list of build tags, all linters use it. Default is empty list.
|
||||
# build-tags:
|
||||
# - mytag
|
||||
|
||||
# which dirs to skip: they won't be analyzed;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but next dirs are always skipped independently
|
||||
# from this option's value:
|
||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
# skip-dirs:
|
||||
# - src/external_libs
|
||||
# - autogenerated_by_my_lib
|
||||
|
||||
# which files to skip: they will be analyzed, but issues from them
|
||||
# won't be reported. Default value is empty list, but there is
|
||||
# no need to include all autogenerated files, we confidently recognize
|
||||
# autogenerated files. If it's not please let us know.
|
||||
# skip-files:
|
||||
# - ".*\\.my\\.go$"
|
||||
# - lib/bad.go
|
||||
|
||||
# by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
|
||||
# If invoked with -mod=readonly, the go command is disallowed from the implicit
|
||||
# automatic updating of go.mod described above. Instead, it fails when any changes
|
||||
# to go.mod are needed. This setting is most useful to check that go.mod does
|
||||
# not need updates, such as in a continuous integration and testing system.
|
||||
# If invoked with -mod=vendor, the go command assumes that the vendor
|
||||
# directory holds the correct copies of dependencies and ignores
|
||||
# the dependency descriptions in go.mod.
|
||||
# modules-download-mode:
|
||||
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||
format: colored-line-number
|
||||
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
|
||||
|
||||
# all available settings of specific linters
|
||||
linters-settings:
|
||||
errcheck:
|
||||
# report about not checking errors in type assertions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
# check-type-assertions: false
|
||||
|
||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
# check-blank: false
|
||||
|
||||
# [deprecated] comma-separated list of pairs of the form pkg:regex
|
||||
# the regex is used to ignore names within pkg. (default "fmt:.*").
|
||||
# see https://github.com/kisielk/errcheck#the-deprecated-method for details
|
||||
# ignore: fmt:.*,io/ioutil:^Read.*
|
||||
|
||||
# path to a file containing a list of functions to exclude from checking
|
||||
# see https://github.com/kisielk/errcheck#excluding-functions for details
|
||||
# exclude: /path/to/file.txt
|
||||
govet:
|
||||
# report about shadowed variables
|
||||
check-shadowing: false
|
||||
|
||||
# settings per analyzer
|
||||
# settings:
|
||||
# printf: # analyzer name, run `go tool vet help` to see all analyzers
|
||||
# funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer
|
||||
# - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
|
||||
# - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
|
||||
# - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
|
||||
# - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
# simplify: true
|
||||
goimports:
|
||||
# put imports beginning with prefix after 3rd-party packages;
|
||||
# it's a comma-separated list of prefixes
|
||||
# local-prefixes: github.com/org/project
|
||||
gocyclo:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
maligned:
|
||||
# print struct with more effective memory layout or not, false by default
|
||||
suggest-new: true
|
||||
dupl:
|
||||
# tokens count to trigger issue, 150 by default
|
||||
threshold: 100
|
||||
goconst:
|
||||
# minimal length of string constant, 3 by default
|
||||
min-len: 3
|
||||
# minimal occurrences count to trigger, 3 by default
|
||||
min-occurrences: 3
|
||||
depguard:
|
||||
list-type: blacklist
|
||||
include-go-root: false
|
||||
packages:
|
||||
- github.com/magiconair/properties/assert
|
||||
inTests:
|
||||
- github.com/davecgh/go-spew/spew
|
||||
- github.com/stretchr/testify
|
||||
gomodguard:
|
||||
blocked:
|
||||
modules:
|
||||
- gopkg.in/go-playground/assert.v1:
|
||||
recommendations:
|
||||
- github.com/stretchr/testify
|
||||
reason: "testify is the test assertion framework we use"
|
||||
- github.com/pborman/uuid:
|
||||
recommendations:
|
||||
- github.com/google/uuid
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: US
|
||||
# ignore-words:
|
||||
# - someword
|
||||
lll:
|
||||
# max line length, lines longer will be reported. Default is 120.
|
||||
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||
# line-length: 120
|
||||
# tab width in spaces. Default to 1.
|
||||
# tab-width: 1
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused exported identifiers; default is false.
|
||||
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
unparam:
|
||||
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
|
||||
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
nakedret:
|
||||
# make an issue if func has more lines of code than this setting and it has naked returns; default is 30
|
||||
# max-func-lines: 30
|
||||
prealloc:
|
||||
# XXX: we don't recommend using this linter before doing performance profiling.
|
||||
# For most programs usage of prealloc will be a premature optimization.
|
||||
|
||||
# Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
|
||||
# True by default.
|
||||
simple: true
|
||||
range-loops: true # Report preallocation suggestions on range loops, true by default
|
||||
for-loops: false # Report preallocation suggestions on for loops, false by default
|
||||
gocritic:
|
||||
# Which checks should be enabled; can't be combined with 'disabled-checks';
|
||||
# See https://go-critic.github.io/overview#checks-overview
|
||||
# To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
|
||||
# By default list of stable checks is used.
|
||||
# enabled-checks:
|
||||
# - rangeValCopy
|
||||
|
||||
# Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
|
||||
# disabled-checks:
|
||||
# - regexpMust
|
||||
|
||||
# Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks.
|
||||
# Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
|
||||
# enabled-tags:
|
||||
# - performance
|
||||
|
||||
# settings: # settings passed to gocritic
|
||||
# captLocal: # must be valid enabled check name
|
||||
# paramsOnly: true
|
||||
# rangeValCopy:
|
||||
# sizeThreshold: 32
|
||||
|
||||
linters:
|
||||
# to try out individual linters: golangci-lint run -E gocyclo,gosimple
|
||||
enable:
|
||||
- staticcheck
|
||||
- deadcode
|
||||
- errcheck
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- structcheck
|
||||
## - typecheck # redundant? compiler does this
|
||||
- unused
|
||||
- varcheck
|
||||
## - bodyclose # its all false positives with requester and sling, which both close the body already
|
||||
- depguard
|
||||
## - dogsled # checks for too many blank identifiers. don't care
|
||||
- dupl
|
||||
- errorlint
|
||||
# - exhaustive
|
||||
# - exhaustivestruct
|
||||
- exportloopref
|
||||
## - funlen # checks function length. don't care
|
||||
# - gci
|
||||
## - gochecknoglobals # too common
|
||||
- gochecknoinits
|
||||
- gocognit
|
||||
- goconst
|
||||
- gocritic
|
||||
## - gocyclo # checks cyclomatic complexity. don't care
|
||||
# - godot
|
||||
## - godox # checks for TODO comments. not standardized
|
||||
- goerr113
|
||||
## - gofmt # checks code is formatted, handled by make prep
|
||||
# - gofumpt
|
||||
# - goheader
|
||||
## - goimports # checks import order. We're not using goimports
|
||||
- revive
|
||||
# - gomnd
|
||||
- gomodguard
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
## - lll # checks line length. not enforced
|
||||
## - maligned # optimizies struct field order, but structs are usually ordered for legibility
|
||||
- misspell
|
||||
- nakedret
|
||||
- nestif
|
||||
# - nlreturn # don't really like how this looks in all cases. wsl covers similar ground anyway.
|
||||
- noctx
|
||||
- nolintlint
|
||||
# - prealloc # slice optimizations, but promotes too much premature optimization
|
||||
- rowserrcheck
|
||||
- exportloopref
|
||||
- stylecheck
|
||||
# - testpackage
|
||||
- tparallel
|
||||
- unconvert
|
||||
## - unparam # too many false positives
|
||||
## - whitespace # not enforced
|
||||
disable-all: true
|
||||
# presets:
|
||||
# - bugs
|
||||
# - unused
|
||||
# fast: false
|
||||
|
||||
|
||||
issues:
|
||||
# List of regexps of issue texts to exclude, empty list by default.
|
||||
# But independently from this option we use default exclude patterns,
|
||||
# it can be disabled by `exclude-use-default: false`. To list all
|
||||
# excluded by default patterns execute `golangci-lint run --help`
|
||||
exclude:
|
||||
- Error return value of .(.*\.Write). is not checked
|
||||
# we use merry errors a lot, and goerr113 doesn't recognize it as a valid sentinel error
|
||||
- use wrapped static errors instead
|
||||
|
||||
# Excluding configuration per-path, per-linter, per-text and per-source
|
||||
exclude-rules:
|
||||
# Exclude some linters from running on tests files.
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- gocyclo
|
||||
- errcheck
|
||||
- dupl
|
||||
- gosec
|
||||
- scopelint
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- wsl
|
||||
- goconst
|
||||
- path: cmd
|
||||
linters:
|
||||
# init() functions are pretty common in main packages
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
# exclude requiring comments on all exported stuff
|
||||
- linters:
|
||||
- revive
|
||||
text: "exported:"
|
||||
|
||||
# Exclude known linters from partially hard-vendored code,
|
||||
# which is impossible to exclude via "nolint" comments.
|
||||
# - path: internal/hmac/
|
||||
# text: "weak cryptographic primitive"
|
||||
# linters:
|
||||
# - gosec
|
||||
|
||||
# Exclude some staticcheck messages
|
||||
# - linters:
|
||||
# - staticcheck
|
||||
# text: "SA9003:"
|
||||
|
||||
# Exclude lll issues for long lines with go:generate
|
||||
- linters:
|
||||
- lll
|
||||
source: "^//go:generate "
|
||||
|
||||
# Independently from option `exclude` we use default exclude patterns,
|
||||
# it can be disabled by this option. To list all
|
||||
# excluded by default patterns execute `golangci-lint run --help`.
|
||||
# Default value for this option is true.
|
||||
# exclude-use-default: false
|
||||
|
||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||
# max-issues-per-linter: 0
|
||||
|
||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||
# max-same-issues: 0
|
||||
|
||||
# Show only new issues: if there are unstaged changes or untracked files,
|
||||
# only those changes are analyzed, else only changes in HEAD~ are analyzed.
|
||||
# It's a super-useful option for integration of golangci-lint into existing
|
||||
# large codebase. It's not practical to fix all existing issues at the moment
|
||||
# of integration: much better don't allow issues in new code.
|
||||
# Default is false.
|
||||
new: false
|
||||
|
||||
# Show only new issues created after git revision `REV`
|
||||
# new-from-rev: REV
|
||||
|
||||
# Show only new issues created in git patch with set file path.
|
||||
# new-from-patch: path/to/patch/file
|
vendor/github.com/gemalto/flume/Dockerfile (new file, generated, vendored, 12 lines)
@@ -0,0 +1,12 @@
FROM golang:1.14-alpine

RUN apk --no-cache add make bash fish build-base

WORKDIR /flume

COPY ./Makefile ./go.mod ./go.sum /flume/
RUN make tools

COPY ./ /flume

CMD make all
vendor/github.com/gemalto/flume/LICENSE (new file, generated, vendored, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2018 Russ Egan

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/gemalto/flume/Makefile (new file, generated, vendored, 54 lines)
@@ -0,0 +1,54 @@
SHELL = bash
BUILD_FLAGS =
TEST_FLAGS =

all: fmt build lint test

build:
	go build $(BUILD_FLAGS) ./...

builddir:
	mkdir -p -m 0777 build

lint:
	golangci-lint run

clean:
	rm -rf build/*

fmt:
	go fmt ./...

test:
	go test -race $(BUILD_FLAGS) $(TEST_FLAGS) ./...

# creates a test coverage report, and produces json test output. useful for ci.
cover: builddir
	go test $(TEST_FLAGS) -v -covermode=count -coverprofile=build/coverage.out -json ./...
	go tool cover -html=build/coverage.out -o build/coverage.html

builder:
	docker-compose build --pull builder

docker: builder
	docker-compose run --rm builder make all cover

fish: builder
	docker-compose run --rm builder fish

tidy:
	go mod tidy

update:
	go get -u ./...
	go mod tidy

### TOOLS

tools:
	# installs tools used during build
	go get -u golang.org/x/tools/cmd/cover
	sh -c "$$(wget -O - -q https://install.goreleaser.com/github.com/golangci/golangci-lint.sh || echo exit 2)" -- -b $(shell go env GOPATH)/bin $(GOLANGCI_LINT_VERSION)

.PHONY: all build builddir run artifacts vet lint clean fmt test testall testreport up down pull builder runc ci bash fish image prep vendor.update vendor.ensure tools buildtools migratetool db.migrate
vendor/github.com/gemalto/flume/README.md (new file, generated, vendored, 219 lines)
@@ -0,0 +1,219 @@
flume [GoDoc](https://godoc.org/github.com/gemalto/flume) [Go Report Card](https://goreportcard.com/report/gemalto/flume) [Build](https://github.com/gemalto/flume/actions?query=branch%3Amaster+workflow%3ABuild+)
=====

flume is a logging package, built on top of [zap](https://github.com/uber-go/zap). It provides structured, leveled logs, like zap/logrus/etc.
It adds a global registry of all loggers, allowing global re-configuration at runtime. Instantiating
new loggers automatically registers them: even loggers created in init() functions, package variable
initializers, or 3rd party code, can all be managed from the central registry.

Features

- Structured: Log entries have key/value attributes.
- Leveled:

    - Error: Something that would be reported up to an error reporting service
    - Info: High priority, low volume messages. Appropriate for production runtime use. Used for coarse-grained
      feedback
    - Debug: Slow, verbose logging, not appropriate for long-term production use

  Flume is a little opinionated about having only a few log levels. Warns should either be errors
  or infos, trace should just be debug, and a log package shouldn't be responsible for panics or exits.
- Named: Loggers have a name. Levels can be configured for each named logger. For example, a common usage
  pattern is to create a unique logger for each go package, then selectively turn on debug logging for
  specific packages.
- Built on top of zap, which is super fast.
- Supports JSON, LTSV, and colorized console output formats.
- Optional call site logging (file and line number of the log call)
- Output can be directed to any writer; defaults to stdout
- Helpers for managing application logs during tests
- Supports creating child loggers with pre-set context: `Logger.With()`
- Levels can be configured via a single string, which is convenient for configuration via env var, see `LevelsString()`
- All loggers can be reconfigured dynamically at runtime.
- Thoughtful handling of multi-line log output: Multi-line output is collapsed to a single line, or encoded,
  depending on the encoder. The terminal encoders, which are optimized for human viewing, retain multi-line
  formatting.
- By default, all logs are discarded. Flume is completely silent unless explicitly configured otherwise.
  This is ideal for logging inside libraries, where the log level and output will be managed by
  the code importing the library.

This package does not offer package-level log functions, so you need to create a logger instance first.
A common pattern is to create a single, package-wide logger, named after the package:

    var log = flume.New("mypkg")

Then, write some logs:

    log.Debug("created user", "username", "frank", "role", "admin")

Logs have a message, then matched pairs of key/value properties. Child loggers can be created
and pre-seeded with a set of properties:

    reqLogger := log.With("remoteAddr", req.RemoteAddr)

Expensive log events can be avoided by explicitly checking the level:

    if log.IsDebug() {
        log.Debug("created resource", "resource", resource.ExpensiveToString())
    }

Loggers can be bound to context.Context, which is convenient for carrying
per-transaction loggers (pre-seeded with transaction-specific context) through layers of request
processing code:

    ctx = flume.WithLogger(ctx, log.With("transactionID", tid))
    // ...later...
    flume.FromContext(ctx).Info("Request handled.")
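For instance, HTTP middleware can seed a request-scoped logger and pass it along in the request context. This is a minimal sketch, assuming the package-level `log` variable from above; the `X-Request-Id` header is only illustrative:

    // Sketch: install a per-request logger via middleware.
    func loggingMiddleware(next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
            reqLog := log.With(
                "remoteAddr", req.RemoteAddr,
                "requestID", req.Header.Get("X-Request-Id"), // illustrative header
            )
            // downstream handlers retrieve it with flume.FromContext(req.Context())
            next.ServeHTTP(w, req.WithContext(flume.WithLogger(req.Context(), reqLog)))
        })
    }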
By default, all these messages will simply be discarded. To enable output, flume needs to
be configured. Only entry-point code, like main() or test setup, should configure flume.

To configure logging settings from environment variables, call the configuration function from main():

    flume.ConfigFromEnv()

Other configuration methods are available: see `ConfigString()`, `LevelsString()`, and `Configure()`.

This reads the log configuration from the environment variable "FLUME" (the default, which can be
overridden). The value is JSON, e.g.:

    {"level":"INF","levels":"http=DBG","development":true}

The properties of the config string:

- "level": ERR, INF, or DBG. The default level for all loggers.
- "levels": A string configuring log levels for specific loggers, overriding the default level.
  See note below for syntax.
- "development": true or false. In development mode, the defaults for the other
  settings change to be more suitable for developers at a terminal (colorized, multiline, human
  readable, etc). See note below for exact defaults.
- "addCaller": true or false. Adds call site information to log entries (file and line).
- "encoding": json, ltsv, term, or term-color. Configures how log entries are encoded in the output.
  "term" and "term-color" are multi-line, human-friendly formats, intended for terminal output.
- "encoderConfig": a JSON object which configures advanced encoding settings, like how timestamps
  are formatted. See docs for go.uber.org/zap/zapcore/EncoderConfig

    - "messageKey": the label of the message property of the log entry. If empty, message is omitted.
    - "levelKey": the label of the level property of the log entry. If empty, level is omitted.
    - "timeKey": the label of the timestamp of the log entry. If empty, timestamp is omitted.
    - "nameKey": the label of the logger name in the log entry. If empty, logger name is omitted.
    - "callerKey": the label of the call site (file and line) in the log entry. If empty, the call site is omitted.
    - "stacktraceKey": the label of the stacktrace in the log entry. If empty, stacktrace is omitted.
    - "lineEnding": the end of each log output line.
    - "levelEncoder": capital, capitalColor, color, lower, or abbr. Controls how the log entry level
      is rendered. "abbr" renders 3-letter abbreviations, like ERR and INF.
    - "timeEncoder": iso8601, millis, nanos, unix, or justtime. Controls how timestamps are rendered.
      "millis", "nanos", and "unix" are since UNIX epoch. "unix" is in floating point seconds.
      "justtime" omits the date, and just prints the time in the format "15:04:05.000".
    - "durationEncoder": string, nanos, or seconds. Controls how time.Duration values are rendered.
    - "callerEncoder": full or short. Controls how the call site is rendered.
      "full" includes the entire package path, "short" only includes the last folder of the package.

Defaults:

    {
      "level":"INF",
      "levels":"",
      "development":false,
      "addCaller":false,
      "encoding":"term-color",
      "encoderConfig":null
    }

If "encoderConfig" is omitted, it defaults to:

    {
      "messageKey":"msg",
      "levelKey":"level",
      "timeKey":"time",
      "nameKey":"name",
      "callerKey":"caller",
      "stacktraceKey":"stacktrace",
      "lineEnding":"\n",
      "levelEncoder":"abbr",
      "timeEncoder":"iso8601",
      "durationEncoder":"seconds",
      "callerEncoder":"short"
    }

These defaults are only applied if one of the configuration functions is called, like ConfigFromEnv(), ConfigString(),
Configure(), or LevelsString(). Initially, all loggers are configured to discard everything, following
flume's opinion that log packages should be silent unless spoken to. Ancillary to this: library packages
should *not* call these functions, or configure logging levels or output in any way. Only program entry points,
like main() or test code, should configure logging. Libraries should just create loggers and log to them.

Development mode: if "development" is true, the defaults for the rest of the settings change, equivalent to:

    {
      "addCaller":true,
      "encoding":"term-color",
      "encoderConfig": {
        "timeEncoder":"justtime",
        "durationEncoder":"string"
      }
    }

The "levels" value is a list of key=value pairs, configuring the level of individual named loggers.
If the key is "*", it sets the default level. If "level" and "levels" both configure the default
level, "levels" wins.
Examples:

    *               // set the default level to ALL, equivalent to {"level":"ALL"}
    *=INF           // same, but set default level to INF
    *,sql=WRN       // set default to ALL, set "sql" logger to WRN
    *=INF,http=ALL  // set default to INF, set "http" to ALL
    *=INF,http      // same as above. If a name has no level, the level is set to ALL
    *=INF,-http     // set default to INF, set "http" to OFF
    http=INF        // leave the default setting unchanged.
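Putting the pieces together, a minimal main() might look like the sketch below; the "http" logger name and the error handling are only illustrative:

    func main() {
        // Reads configuration from the FLUME env var, e.g.:
        //   FLUME='{"level":"INF","levels":"http=DBG","development":true}'
        if err := flume.ConfigFromEnv(); err != nil {
            fmt.Println("invalid FLUME config:", err) // illustrative handling
        }

        httpLog := flume.New("http")
        httpLog.Debug("debug logging enabled for the http logger")
    }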
Examples of log output:

"term"

    11:42:08.126 INF | Hello World! @:root@flume.git/example_test.go:15
    11:42:08.127 INF | This entry has properties color:red @:root@flume.git/example_test.go:16
    11:42:08.127 DBG | This is a debug message @:root@flume.git/example_test.go:17
    11:42:08.127 ERR | This is an error message @:root@flume.git/example_test.go:18
    11:42:08.127 INF | This message has a multiline value essay:
    Four score and seven years ago
    our fathers brought forth on this continent, a new nation,
    conceived in Liberty, and dedicated to the proposition that all men are created equal.
     @:root@flume.git/example_test.go:19

"term-color"

(colorized terminal output screenshot)

"json"

    {"level":"INF","time":"15:06:28.422","name":"root","caller":"flume.git/example_test.go:15","msg":"Hello World!"}
    {"level":"INF","time":"15:06:28.423","name":"root","caller":"flume.git/example_test.go:16","msg":"This entry has properties","color":"red"}
    {"level":"DBG","time":"15:06:28.423","name":"root","caller":"flume.git/example_test.go:17","msg":"This is a debug message"}
    {"level":"ERR","time":"15:06:28.423","name":"root","caller":"flume.git/example_test.go:18","msg":"This is an error message"}
    {"level":"INF","time":"15:06:28.423","name":"root","caller":"flume.git/example_test.go:19","msg":"This message has a multiline value","essay":"Four score and seven years ago\nour fathers brought forth on this continent, a new nation, \nconceived in Liberty, and dedicated to the proposition that all men are created equal."}

"ltsv"

    level:INF time:15:06:55.325 msg:Hello World! name:root caller:flume.git/example_test.go:15
    level:INF time:15:06:55.325 msg:This entry has properties name:root caller:flume.git/example_test.go:16 color:red
    level:DBG time:15:06:55.325 msg:This is a debug message name:root caller:flume.git/example_test.go:17
    level:ERR time:15:06:55.325 msg:This is an error message name:root caller:flume.git/example_test.go:18
    level:INF time:15:06:55.325 msg:This message has a multiline value name:root caller:flume.git/example_test.go:19 essay:Four score and seven years ago\nour fathers brought forth on this continent, a new nation, \nconceived in Liberty, and dedicated to the proposition that all men are created equal.

tl;dr
-----

The implementation is a wrapper around zap. zap does levels, structured logs, and is very fast.
zap doesn't do centralized, global configuration, so this package
adds that by maintaining an internal registry of all loggers, and using the sync.atomic stuff to swap out
levels and writers in a thread safe way.

Contributing
------------

To build, be sure to have a recent go SDK, and make. Run `make tools` to install other dependencies. Then run `make`.

There is also a dockerized build, which only requires make and docker-compose: `make docker`. You can also
do `make fish` or `make bash` to shell into the docker build container.

Merge requests are welcome! Before submitting, please run `make` and make sure all tests pass and there are
no linter findings.
vendor/github.com/gemalto/flume/buffer.go (new file, generated, vendored, 5 lines)
@@ -0,0 +1,5 @@
package flume

import "go.uber.org/zap/buffer"

var bufPool = buffer.NewPool()
vendor/github.com/gemalto/flume/config.go (new file, generated, vendored, 221 lines)
@ -0,0 +1,221 @@
|
||||
package flume
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultConfigEnvVars is a list of the environment variables
|
||||
// that ConfigFromEnv will search by default.
|
||||
var DefaultConfigEnvVars = []string{"FLUME"}
|
||||
|
||||
// ConfigFromEnv configures flume from environment variables.
|
||||
// It should be called from main():
|
||||
//
|
||||
// func main() {
|
||||
// flume.ConfigFromEnv()
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// It searches envvars for the first environment
|
||||
// variable that is set, and attempts to parse the value.
|
||||
//
|
||||
// If no environment variable is set, it silently does nothing.
|
||||
//
|
||||
// If an environment variable with a value is found, but parsing
|
||||
// fails, an error is printed to stdout, and the error is returned.
|
||||
//
|
||||
// If envvars is empty, it defaults to DefaultConfigEnvVars.
|
||||
//
|
||||
func ConfigFromEnv(envvars ...string) error {
|
||||
if len(envvars) == 0 {
|
||||
envvars = DefaultConfigEnvVars
|
||||
}
|
||||
|
||||
var configString string
|
||||
|
||||
for _, v := range envvars {
|
||||
configString = os.Getenv(v)
|
||||
if configString != "" {
|
||||
err := ConfigString(configString)
|
||||
if err != nil {
|
||||
fmt.Println("error parsing log config from env var " + v + ": " + err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Config offers a declarative way to configure a Factory.
|
||||
//
|
||||
// The same things can be done by calling Factory methods, but
|
||||
// Configs can be unmarshaled from JSON, making it a convenient
|
||||
// way to configure most logging options from env vars or files, i.e.:
|
||||
//
|
||||
// err := flume.ConfigString(os.Getenv("flume"))
|
||||
//
|
||||
// Configs can be created and applied programmatically:
|
||||
//
|
||||
// err := flume.Configure(flume.Config{})
|
||||
//
|
||||
// Defaults are appropriate for a JSON encoded production logger:
|
||||
//
|
||||
// - LTSV encoder
|
||||
// - full timestamps
|
||||
// - default log level set to INFO
|
||||
// - call sites are not logged
|
||||
//
|
||||
// An alternate set of defaults, more appropriate for development environments,
|
||||
// can be configured with `Config{Development:true}`:
|
||||
//
|
||||
// err := flume.Configure(flume.Config{Development:true})
|
||||
//
|
||||
// - colorized terminal encoder
|
||||
// - short timestamps
|
||||
// - call sites are logged
|
||||
//
|
||||
// err := flume.Configure(flume.Config{Development:true})
|
||||
//
|
||||
// Any of the other configuration options can be specified to override
|
||||
// the defaults.
|
||||
//
|
||||
// Note: when configuring the EncoderConfig setting, any of the *Key properties
// that are omitted will cause that field to be omitted from the output.
|
||||
type Config struct {
|
||||
// DefaultLevel is the default log level for all loggers not
|
||||
// otherwise configured by Levels. Defaults to Info.
|
||||
DefaultLevel Level `json:"level" yaml:"level"`
|
||||
// Levels configures log levels for particular named loggers. See
|
||||
// LevelsString for format.
|
||||
Levels string `json:"levels" yaml:"levels"`
|
||||
// AddCaller annotates logs with the calling function's file
|
||||
// name and line number. Defaults to true when the Development
|
||||
// flag is set, false otherwise.
|
||||
AddCaller *bool `json:"addCaller" yaml:"addCaller"`
|
||||
// Encoding sets the logger's encoding. Valid values are "json",
|
||||
// "console", "ltsv", "term", and "term-color".
|
||||
// Defaults to "term-color" if development is true, else
|
||||
// "ltsv"
|
||||
Encoding string `json:"encoding" yaml:"encoding"`
|
||||
// Development toggles the defaults used for the other
|
||||
// settings. Defaults to false.
|
||||
Development bool `json:"development" yaml:"development"`
|
||||
// EncoderConfig sets options for the chosen encoder. See
|
||||
// EncoderConfig for details. Defaults to NewEncoderConfig() if
|
||||
// Development is false, otherwise defaults to NewDevelopmentEncoderConfig().
|
||||
EncoderConfig *EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"`
|
||||
}
|
||||
|
||||
// SetAddCaller sets the Config's AddCaller flag.
|
||||
func (c *Config) SetAddCaller(b bool) {
|
||||
c.AddCaller = &b
|
||||
}
|
||||
|
||||
// UnsetAddCaller unsets the Config's AddCaller flag (reverting to defaults).
|
||||
func (c *Config) UnsetAddCaller() {
|
||||
c.AddCaller = nil
|
||||
}
|
||||
|
||||
// EncoderConfig captures the options for encoders.
|
||||
// Type alias to avoid exporting zap.
|
||||
type EncoderConfig zapcore.EncoderConfig
|
||||
|
||||
type privEncCfg struct {
|
||||
EncodeLevel string `json:"levelEncoder" yaml:"levelEncoder"`
|
||||
EncodeTime string `json:"timeEncoder" yaml:"timeEncoder"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler
|
||||
func (enc *EncoderConfig) UnmarshalJSON(b []byte) error {
|
||||
var zapCfg zapcore.EncoderConfig
|
||||
err := json.Unmarshal(b, &zapCfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var pc privEncCfg
|
||||
err = json.Unmarshal(b, &pc)
|
||||
if err == nil {
|
||||
switch pc.EncodeLevel {
|
||||
case "", "abbr":
|
||||
zapCfg.EncodeLevel = AbbrLevelEncoder
|
||||
}
|
||||
switch pc.EncodeTime {
|
||||
case "":
|
||||
zapCfg.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
case "justtime":
|
||||
zapCfg.EncodeTime = JustTimeEncoder
|
||||
}
|
||||
}
|
||||
*enc = EncoderConfig(zapCfg)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewEncoderConfig returns an EncoderConfig with default settings.
|
||||
func NewEncoderConfig() *EncoderConfig {
|
||||
return &EncoderConfig{
|
||||
MessageKey: "msg",
|
||||
TimeKey: "time",
|
||||
LevelKey: "level",
|
||||
NameKey: "name",
|
||||
CallerKey: "caller",
|
||||
StacktraceKey: "stacktrace",
|
||||
EncodeTime: zapcore.ISO8601TimeEncoder,
|
||||
EncodeDuration: zapcore.SecondsDurationEncoder,
|
||||
EncodeLevel: AbbrLevelEncoder,
|
||||
EncodeCaller: zapcore.ShortCallerEncoder,
|
||||
}
|
||||
}
|
||||
|
||||
// NewDevelopmentEncoderConfig returns an EncoderConfig which is intended
|
||||
// for local development.
|
||||
func NewDevelopmentEncoderConfig() *EncoderConfig {
|
||||
cfg := NewEncoderConfig()
|
||||
cfg.EncodeTime = JustTimeEncoder
|
||||
cfg.EncodeDuration = zapcore.StringDurationEncoder
|
||||
return cfg
|
||||
}
|
||||
|
||||
// JustTimeEncoder is a timestamp encoder function which encodes time
|
||||
// as a simple time of day, without a date. Intended for development and testing.
|
||||
// Not good in a production system, where you probably need to know the date.
|
||||
//
|
||||
// encConfig := flume.EncoderConfig{}
|
||||
// encConfig.EncodeTime = flume.JustTimeEncoder
|
||||
//
|
||||
func JustTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
|
||||
enc.AppendString(t.Format("15:04:05.000"))
|
||||
}
|
||||
|
||||
// AbbrLevelEncoder encodes logging levels to the strings in the log entries.
|
||||
// Encodes levels as 3-char abbreviations in upper case.
|
||||
//
|
||||
// encConfig := flume.EncoderConfig{}
|
||||
// encConfig.EncodeTime = flume.AbbrLevelEncoder
|
||||
//
|
||||
func AbbrLevelEncoder(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
|
||||
switch l {
|
||||
case zapcore.DebugLevel:
|
||||
enc.AppendString("DBG")
|
||||
case zapcore.InfoLevel:
|
||||
enc.AppendString("INF")
|
||||
case zapcore.WarnLevel:
|
||||
enc.AppendString("WRN")
|
||||
case zapcore.ErrorLevel:
|
||||
enc.AppendString("ERR")
|
||||
case zapcore.PanicLevel, zapcore.FatalLevel, zapcore.DPanicLevel:
|
||||
enc.AppendString("FTL")
|
||||
default:
|
||||
s := l.String()
|
||||
if len(s) > 3 {
|
||||
s = s[:3]
|
||||
}
|
||||
enc.AppendString(strings.ToUpper(s))
|
||||
|
||||
}
|
||||
}
|
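The Config options documented above can also be applied programmatically. A minimal sketch, assuming the Configure/Config API described in the comments of this file; the field values are illustrative:

    package main

    import "github.com/gemalto/flume"

    func main() {
        // Sketch based on the Config struct above; values are illustrative.
        cfg := flume.Config{
            DefaultLevel: flume.DebugLevel,
            Development:  true,
            Encoding:     "term-color",
        }
        cfg.SetAddCaller(true)
        if err := flume.Configure(cfg); err != nil {
            panic(err) // illustrative handling
        }
        flume.New("example").Debug("configured programmatically", "encoding", cfg.Encoding)
    }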
vendor/github.com/gemalto/flume/console_encoder.go (new file, generated, vendored, 225 lines)
@ -0,0 +1,225 @@
|
||||
package flume
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"github.com/mgutz/ansi"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/buffer"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
//nolint:gochecknoinits
|
||||
func init() {
|
||||
_ = zap.RegisterEncoder("term", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
|
||||
return NewConsoleEncoder((*EncoderConfig)(&cfg)), nil
|
||||
})
|
||||
_ = zap.RegisterEncoder("term-color", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
|
||||
return NewColorizedConsoleEncoder((*EncoderConfig)(&cfg), nil), nil
|
||||
})
|
||||
}
|
||||
|
||||
// Colorizer returns ansi escape sequences for the colors for each log level.
|
||||
// See Colors for a default implementation.
|
||||
type Colorizer interface {
|
||||
Level(l Level) string
|
||||
}
|
||||
|
||||
// Colors is an implementation of the Colorizer interface, which assigns colors
|
||||
// to the default log levels.
|
||||
type Colors struct {
|
||||
Debug, Info, Warn, Error string
|
||||
}
|
||||
|
||||
// Level implements Colorizer
|
||||
func (c *Colors) Level(l Level) string {
|
||||
if l < DebugLevel {
|
||||
return Dim
|
||||
}
|
||||
switch l {
|
||||
case DebugLevel:
|
||||
return c.Debug
|
||||
case InfoLevel:
|
||||
return c.Info
|
||||
case Level(zapcore.WarnLevel):
|
||||
return c.Warn
|
||||
default:
|
||||
return c.Error
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultColors is the default instance of Colors, used as the default colors if
|
||||
// a nil Colorizer is passed to NewColorizedConsoleEncoder.
|
||||
var DefaultColors = Colors{
|
||||
Debug: ansi.ColorCode("cyan"),
|
||||
Info: ansi.ColorCode("green+h"),
|
||||
Warn: ansi.ColorCode("yellow+bh"),
|
||||
Error: ansi.ColorCode("red+bh"),
|
||||
}
|
||||
|
||||
type consoleEncoder struct {
|
||||
*ltsvEncoder
|
||||
colorizer Colorizer
|
||||
}
|
||||
|
||||
// NewConsoleEncoder creates an encoder whose output is designed for human -
|
||||
// rather than machine - consumption. It serializes the core log entry data
|
||||
// (message, level, timestamp, etc.) in a plain-text format. The context is
|
||||
// encoded in LTSV.
|
||||
//
|
||||
// Note that although the console encoder doesn't use the keys specified in the
|
||||
// encoder configuration, it will omit any element whose key is set to the empty
|
||||
// string.
|
||||
func NewConsoleEncoder(cfg *EncoderConfig) Encoder {
|
||||
ltsvEncoder := NewLTSVEncoder(cfg).(*ltsvEncoder)
|
||||
ltsvEncoder.allowNewLines = true
|
||||
ltsvEncoder.allowTabs = true
|
||||
ltsvEncoder.blankKey = "value"
|
||||
ltsvEncoder.binaryEncoder = hex.Dump
|
||||
|
||||
return &consoleEncoder{ltsvEncoder: ltsvEncoder}
|
||||
}
|
||||
|
||||
// NewColorizedConsoleEncoder creates a console encoder, like NewConsoleEncoder, but
|
||||
// colors the text with ansi escape codes. `colorizer` configures which colors to
|
||||
// use for each level.
|
||||
//
|
||||
// If `colorizer` is nil, it will default to DefaultColors.
|
||||
//
|
||||
// `github.com/mgutz/ansi` is a convenient package for getting color codes, e.g.:
|
||||
//
|
||||
// ansi.ColorCode("red")
|
||||
//
|
||||
func NewColorizedConsoleEncoder(cfg *EncoderConfig, colorizer Colorizer) Encoder {
|
||||
e := NewConsoleEncoder(cfg).(*consoleEncoder)
|
||||
e.colorizer = colorizer
|
||||
if e.colorizer == nil {
|
||||
e.colorizer = &DefaultColors
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// Clone implements the Encoder interface
|
||||
func (c *consoleEncoder) Clone() zapcore.Encoder {
|
||||
return &consoleEncoder{
|
||||
ltsvEncoder: c.ltsvEncoder.Clone().(*ltsvEncoder),
|
||||
colorizer: c.colorizer,
|
||||
}
|
||||
}
|
||||
|
||||
// Dim is the color used for context keys, time, and caller information
|
||||
var Dim = ansi.ColorCode("240")
|
||||
|
||||
// Bright is the color used for the message
|
||||
var Bright = ansi.ColorCode("default+b")
|
||||
|
||||
// EncodeEntry implements the Encoder interface
|
||||
func (c *consoleEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
|
||||
final := *c.ltsvEncoder
|
||||
context := final.buf
|
||||
final.buf = bufPool.Get()
|
||||
|
||||
origLen := final.buf.Len()
|
||||
|
||||
if c.TimeKey != "" {
|
||||
c.colorDim(final.buf)
|
||||
final.skipNextElementSeparator = true
|
||||
c.EncodeTime(ent.Time, &final)
|
||||
}
|
||||
|
||||
if c.LevelKey != "" {
|
||||
c.colorLevel(final.buf, ent.Level)
|
||||
if final.buf.Len() > origLen {
|
||||
final.buf.AppendByte(' ')
|
||||
}
|
||||
final.skipNextElementSeparator = true
|
||||
|
||||
c.EncodeLevel(ent.Level, &final)
|
||||
|
||||
}
|
||||
|
||||
if final.buf.Len() > origLen {
|
||||
c.colorDim(final.buf)
|
||||
final.buf.AppendString(" | ")
|
||||
} else {
|
||||
final.buf.Reset()
|
||||
}
|
||||
|
||||
// Add the message itself.
|
||||
if c.MessageKey != "" {
|
||||
c.colorReset(final.buf)
|
||||
// c.colorBright(&final)
|
||||
final.safeAddString(ent.Message, false)
|
||||
// ensure a minimum of 2 spaces between the message and the fields,
|
||||
// to improve readability
|
||||
final.buf.AppendString(" ")
|
||||
}
|
||||
|
||||
c.colorDim(final.buf)
|
||||
|
||||
// Add fields.
|
||||
for _, f := range fields {
|
||||
f.AddTo(&final)
|
||||
}
|
||||
|
||||
// Add context
|
||||
if context.Len() > 0 {
|
||||
final.addFieldSeparator()
|
||||
_, _ = final.buf.Write(context.Bytes())
|
||||
}
|
||||
|
||||
// Add callsite
|
||||
c.writeCallSite(&final, ent.LoggerName, ent.Caller)
|
||||
|
||||
// If there's no stacktrace key, honor that; this allows users to force
|
||||
// single-line output.
|
||||
if ent.Stack != "" && c.StacktraceKey != "" {
|
||||
final.buf.AppendByte('\n')
|
||||
final.buf.AppendString(ent.Stack)
|
||||
}
|
||||
c.colorReset(final.buf)
|
||||
final.buf.AppendByte('\n')
|
||||
|
||||
return final.buf, nil
|
||||
}
|
||||
|
||||
func (c *consoleEncoder) writeCallSite(final *ltsvEncoder, name string, caller zapcore.EntryCaller) {
|
||||
shouldWriteName := name != "" && c.NameKey != ""
|
||||
shouldWriteCaller := caller.Defined && c.CallerKey != ""
|
||||
if !shouldWriteName && !shouldWriteCaller {
|
||||
return
|
||||
}
|
||||
final.addKey("@")
|
||||
if shouldWriteName {
|
||||
final.buf.AppendString(name)
|
||||
if shouldWriteCaller {
|
||||
final.buf.AppendByte('@')
|
||||
}
|
||||
}
|
||||
if shouldWriteCaller {
|
||||
final.skipNextElementSeparator = true
|
||||
final.EncodeCaller(caller, final)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *consoleEncoder) colorDim(buf *buffer.Buffer) {
|
||||
c.applyColor(buf, Dim)
|
||||
}
|
||||
|
||||
func (c *consoleEncoder) colorLevel(buf *buffer.Buffer, level zapcore.Level) {
|
||||
if c.colorizer != nil {
|
||||
c.applyColor(buf, c.colorizer.Level(Level(level)))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *consoleEncoder) applyColor(buf *buffer.Buffer, s string) {
|
||||
if c.colorizer != nil {
|
||||
buf.AppendString(ansi.Reset)
|
||||
if s != "" {
|
||||
buf.AppendString(s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *consoleEncoder) colorReset(buf *buffer.Buffer) {
|
||||
c.applyColor(buf, "")
|
||||
}
|
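A sketch of building a colorized console encoder with custom level colors; the color choices are illustrative, and wiring the returned encoder into a logger is not shown in this file:

    import (
        "github.com/gemalto/flume"
        "github.com/mgutz/ansi"
    )

    // Sketch: custom level colors for the colorized console encoder.
    var myColors = flume.Colors{
        Debug: ansi.ColorCode("cyan"),
        Info:  ansi.ColorCode("green"),
        Warn:  ansi.ColorCode("yellow+b"),
        Error: ansi.ColorCode("red+b"),
    }

    // Passing nil instead of &myColors falls back to flume.DefaultColors.
    var enc = flume.NewColorizedConsoleEncoder(flume.NewDevelopmentEncoderConfig(), &myColors)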
vendor/github.com/gemalto/flume/context.go (new file, generated, vendored, 27 lines)
@@ -0,0 +1,27 @@
package flume

import (
	"context"
)

// DefaultLogger is returned by FromContext if no other logger has been
// injected into the context.
var DefaultLogger = New("")

type ctxKey struct{}

var loggerKey = &ctxKey{}

// WithLogger returns a new context with the specified logger injected into it.
func WithLogger(ctx context.Context, l Logger) context.Context {
	return context.WithValue(ctx, loggerKey, l)
}

// FromContext returns a logger from the context. If the context
// doesn't contain a logger, the DefaultLogger will be returned.
func FromContext(ctx context.Context) Logger {
	if l, ok := ctx.Value(loggerKey).(Logger); ok {
		return l
	}
	return DefaultLogger
}
vendor/github.com/gemalto/flume/core.go (new file, generated, vendored, 266 lines)
@ -0,0 +1,266 @@
|
||||
package flume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go.uber.org/multierr"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
var _ Logger = (*Core)(nil)
|
||||
|
||||
type atomicInnerCore struct {
|
||||
innerLoggerPtr atomic.Value
|
||||
}
|
||||
|
||||
func (af *atomicInnerCore) get() *innerCore {
|
||||
return af.innerLoggerPtr.Load().(*innerCore)
|
||||
}
|
||||
|
||||
func (af *atomicInnerCore) set(ic *innerCore) {
|
||||
af.innerLoggerPtr.Store(ic)
|
||||
}
|
||||
|
||||
// innerCore holds state which can be reconfigured at the factory level.
|
||||
// if these settings are changed in the factory, the factory builds new
|
||||
// innerCore instances, and atomically injects them into all existing loggers.
|
||||
type innerCore struct {
|
||||
name string
|
||||
zapcore.Core
|
||||
addCaller bool
|
||||
errorOutput zapcore.WriteSyncer
|
||||
hooks []HookFunc
|
||||
}
|
||||
|
||||
// Core is the concrete implementation of Logger. It has some additional
|
||||
// lower-level methods which can be used by other logging packages which wrap
|
||||
// flume, to build alternate logging interfaces.
|
||||
type Core struct {
|
||||
*atomicInnerCore
|
||||
context []zap.Field
|
||||
callerSkip int
|
||||
// these are logger-scoped hooks, which only hook into this particular logger
|
||||
hooks []HookFunc
|
||||
}
|
||||
|
||||
// Log is the core logging method, used by the convenience methods Debug(), Info(), and Error().
|
||||
//
|
||||
// Returns true if the log was actually logged.
|
||||
//
|
||||
// AddCaller option will report the caller of this method. If wrapping this, be sure to
|
||||
// use the AddCallerSkip option.
|
||||
func (l *Core) Log(lvl Level, template string, fmtArgs, context []interface{}) bool {
|
||||
// call another method, just to add a caller to the call stack, so the
|
||||
// add caller option resolves the right caller in the stack
|
||||
return l.log(lvl, template, fmtArgs, context)
|
||||
}
|
||||
|
||||
// log must be called directly from one of the public methods to make the addcaller
|
||||
// resolution resolve the caller of the public method.
|
||||
func (l *Core) log(lvl Level, template string, fmtArgs, context []interface{}) bool {
|
||||
c := l.get()
|
||||
|
||||
if !c.Enabled(zapcore.Level(lvl)) {
|
||||
return false
|
||||
}
|
||||
|
||||
msg := template
|
||||
if msg == "" && len(fmtArgs) > 0 {
|
||||
msg = fmt.Sprint(fmtArgs...)
|
||||
} else if msg != "" && len(fmtArgs) > 0 {
|
||||
msg = fmt.Sprintf(template, fmtArgs...)
|
||||
}
|
||||
|
||||
// check must always be called directly by a method in the Logger interface
|
||||
// (e.g., Log, Info, Debug).
|
||||
const callerSkipOffset = 2
|
||||
|
||||
// Create basic checked entry thru the core; this will be non-nil if the
|
||||
// log message will actually be written somewhere.
|
||||
ent := zapcore.Entry{
|
||||
LoggerName: c.name,
|
||||
Time: time.Now(),
|
||||
Level: zapcore.Level(lvl),
|
||||
Message: msg,
|
||||
}
|
||||
ce := c.Check(ent, nil)
|
||||
if ce == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Thread the error output through to the CheckedEntry.
|
||||
ce.ErrorOutput = c.errorOutput
|
||||
if c.addCaller {
|
||||
ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(l.callerSkip + callerSkipOffset))
|
||||
if !ce.Entry.Caller.Defined {
|
||||
_, _ = fmt.Fprintf(c.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
|
||||
_ = ce.ErrorOutput.Sync()
|
||||
}
|
||||
}
|
||||
|
||||
fields := append(l.context, l.sweetenFields(context)...) //nolint:gocritic
|
||||
|
||||
// execute global hooks, which might modify the fields
|
||||
for i := range c.hooks {
|
||||
if f := c.hooks[i](ce, fields); f != nil {
|
||||
fields = f
|
||||
}
|
||||
}
|
||||
|
||||
// execute logger hooks
|
||||
for i := range l.hooks {
|
||||
if f := l.hooks[i](ce, fields); f != nil {
|
||||
fields = f
|
||||
}
|
||||
}
|
||||
|
||||
ce.Write(fields...)
|
||||
return true
|
||||
}
|
||||
|
||||
// IsEnabled returns true if the specified level is enabled.
|
||||
func (l *Core) IsEnabled(lvl Level) bool {
|
||||
return l.get().Enabled(zapcore.Level(lvl))
|
||||
}
|
||||
|
||||
const (
|
||||
_oddNumberErrMsg = "Ignored key without a value."
|
||||
_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
|
||||
)
|
||||
|
||||
func (l *Core) sweetenFields(args []interface{}) []zap.Field {
|
||||
if len(args) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Allocate enough space for the worst case; if users pass only structured
|
||||
// fields, we shouldn't penalize them with extra allocations.
|
||||
fields := make([]zap.Field, 0, len(args))
|
||||
var invalid invalidPairs
|
||||
|
||||
for i := 0; i < len(args); {
|
||||
// This is a strongly-typed field. Consume it and move on.
|
||||
if f, ok := args[i].(zap.Field); ok {
|
||||
fields = append(fields, f)
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
if len(args) == 1 {
|
||||
// passed a bare arg with no key. We'll handle this
|
||||
// as a special case
|
||||
if err, ok := args[0].(error); ok {
|
||||
return append(fields, zap.Error(err))
|
||||
}
|
||||
return append(fields, zap.Any("", args[0]))
|
||||
}
|
||||
|
||||
// Make sure this element isn't a dangling key.
|
||||
if i == len(args)-1 {
|
||||
l.Error(_oddNumberErrMsg, zap.Any("ignored", args[i]))
|
||||
break
|
||||
}
|
||||
|
||||
// Consume this value and the next, treating them as a key-value pair. If the
|
||||
// key isn't a string, add this pair to the slice of invalid pairs.
|
||||
key, val := args[i], args[i+1]
|
||||
if keyStr, ok := key.(string); !ok {
|
||||
// Subsequent errors are likely, so allocate once up front.
|
||||
if cap(invalid) == 0 {
|
||||
invalid = make(invalidPairs, 0, len(args)/2)
|
||||
}
|
||||
invalid = append(invalid, invalidPair{i, key, val})
|
||||
} else {
|
||||
fields = append(fields, zap.Any(keyStr, val))
|
||||
}
|
||||
i += 2
|
||||
}
|
||||
|
||||
// If we encountered any invalid key-value pairs, log an error.
|
||||
if len(invalid) > 0 {
|
||||
l.Error(_nonStringKeyErrMsg, zap.Array("invalid", invalid))
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
type invalidPair struct {
|
||||
position int
|
||||
key, value interface{}
|
||||
}
|
||||
|
||||
func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
|
||||
enc.AddInt64("position", int64(p.position))
|
||||
zap.Any("key", p.key).AddTo(enc)
|
||||
zap.Any("value", p.value).AddTo(enc)
|
||||
return nil
|
||||
}
|
||||
|
||||
type invalidPairs []invalidPair
|
||||
|
||||
func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error {
|
||||
var err error
|
||||
for i := range ps {
|
||||
err = multierr.Append(err, enc.AppendObject(ps[i]))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Debug logs at DBG level. args should be alternating keys and values. keys should be strings.
|
||||
func (l *Core) Debug(msg string, args ...interface{}) {
|
||||
l.log(DebugLevel, msg, nil, args)
|
||||
}
|
||||
|
||||
// Info logs at INF level. args should be alternating keys and values. keys should be strings.
|
||||
func (l *Core) Info(msg string, args ...interface{}) {
|
||||
l.log(InfoLevel, msg, nil, args)
|
||||
}
|
||||
|
||||
// Error logs at ERR level. args should be alternating keys and values. keys should be strings.
|
||||
func (l *Core) Error(msg string, args ...interface{}) {
|
||||
l.log(ErrorLevel, msg, nil, args)
|
||||
}
|
||||
|
||||
// IsDebug returns true if DBG level is enabled.
|
||||
func (l *Core) IsDebug() bool {
|
||||
return l.IsEnabled(DebugLevel)
|
||||
}
|
||||
|
||||
// IsInfo returns true if INF level is enabled.
|
||||
func (l *Core) IsInfo() bool {
|
||||
return l.IsEnabled(InfoLevel)
|
||||
}
|
||||
|
||||
// With returns a new Logger with some context baked in. All entries
|
||||
// logged with the new logger will include this context.
|
||||
//
|
||||
// args should be alternating keys and values. keys should be strings.
|
||||
//
|
||||
// reqLogger := l.With("requestID", reqID)
|
||||
//
|
||||
func (l *Core) With(args ...interface{}) Logger {
|
||||
return l.WithArgs(args...)
|
||||
}
|
||||
|
||||
// WithArgs is the same as With() but returns the concrete type. Useful
|
||||
// for other logging packages which wrap this one.
|
||||
func (l *Core) WithArgs(args ...interface{}) *Core {
|
||||
l2 := l.clone()
|
||||
switch len(args) {
|
||||
case 0:
|
||||
default:
|
||||
l2.context = append(l2.context, l.sweetenFields(args)...)
|
||||
}
|
||||
return l2
|
||||
}
|
||||
|
||||
func (l *Core) clone() *Core {
|
||||
l2 := *l
|
||||
l2.context = nil
|
||||
if len(l.context) > 0 {
|
||||
l2.context = append(l2.context, l.context...)
|
||||
}
|
||||
return &l2
|
||||
}
|
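To illustrate how the argument sweetening above treats its inputs, a small sketch; the logger name, error text, and keys are illustrative:

    package main

    import (
        "errors"

        "github.com/gemalto/flume"
    )

    func main() {
        _ = flume.ConfigFromEnv() // without configuration, all output is discarded

        log := flume.New("users")

        // Arguments are consumed as alternating key/value pairs.
        log.Info("created user", "username", "frank", "role", "admin")

        // A single bare error argument is special-cased into a zap error field.
        log.Error("lookup failed", errors.New("user not found"))

        // A dangling key (odd number of args) is dropped and reported as
        // "Ignored key without a value."
        log.Info("oops", "orphanKey")
    }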
vendor/github.com/gemalto/flume/doc.go (new file, generated, vendored, 157 lines)
@ -0,0 +1,157 @@
|
||||
// Package flume is a logging package, built on top of zap. It provides structured, leveled logs, like zap/logrus/etc.
|
||||
// It adds global, runtime re-configuration of all loggers, via an internal logger registry.
|
||||
//
|
||||
// There are two interaction points with flume: code that generates logs, and code that configures logging output.
|
||||
// Code which generates logs needs to create named logger instances, and call log functions on it, like Info()
|
||||
// and Debug(). But by default, all these logs will be silently discarded. Flume does not output
|
||||
// log entries unless explicitly told to do so. This ensures libraries can freely use flume internally, without
|
||||
// polluting the stdout of the programs importing the library.
|
||||
//
|
||||
// The Logger type is a small interface. Libraries should allow replacement of their Logger instances so
|
||||
// importers can entirely replace flume if they wish. Alternately, importers can use flume to configure
|
||||
// the library's log output, and/or redirect it into the overall program's log stream.
|
||||
//
|
||||
// Logging
|
||||
//
|
||||
// This package does not offer package level log functions, so you need to create a logger instance first.
|
||||
// A common pattern is to create a single, package-wide logger, named after the package:
|
||||
//
|
||||
// var log = flume.New("mypkg")
|
||||
//
|
||||
// Then, write some logs:
|
||||
//
|
||||
// log.Debug("created user", "username", "frank", "role", "admin")
|
||||
//
|
||||
// Logs have a message, then matched pairs of key/value properties. Child loggers can be created
|
||||
// and pre-seeded with a set of properties:
|
||||
//
|
||||
// reqLogger := log.With("remoteAddr", req.RemoteAddr)
|
||||
//
|
||||
// Expensive log events can be avoided by explicitly checking the level first:
|
||||
//
|
||||
// if log.IsDebug() {
|
||||
// log.Debug("created resource", "resource", resource.ExpensiveToString())
|
||||
// }
|
||||
//
|
||||
// Loggers can be bound to context.Context, which is convenient for carrying
|
||||
// per-transaction loggers (pre-seeded with transaction specific context) through layers of request
|
||||
// processing code:
|
||||
//
|
||||
// ctx = flume.WithLogger(ctx, log.With("transactionID", tid))
|
||||
// // ...later...
|
||||
// flume.FromContext(ctx).Info("Request handled.")
|
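As a hedged sketch of the context-binding pattern above, assuming an HTTP server; the logger name "http" and the field names are illustrative:

package main

import (
    "net/http"

    "github.com/gemalto/flume"
)

var log = flume.New("http")

// withRequestLogger binds a request-scoped logger into the request context.
func withRequestLogger(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        ctx := flume.WithLogger(r.Context(), log.With("remoteAddr", r.RemoteAddr))
        next.ServeHTTP(w, r.WithContext(ctx))
    })
}

func main() {
    handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // Deeper layers retrieve the request-scoped logger from the context.
        flume.FromContext(r.Context()).Info("request handled", "path", r.URL.Path)
    })
    _ = http.ListenAndServe(":8080", withRequestLogger(handler))
}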
||||
//
|
||||
// The standard Logger interface only supports 3 levels of log, DBG, INF, and ERR. This is inspired by
|
||||
// this article: https://dave.cheney.net/2015/11/05/lets-talk-about-logging. However, you can create
|
||||
// instances of DeprecatedLogger instead, which support more levels.
|
||||
//
|
||||
// Configuration
|
||||
//
|
||||
// There are several package level functions which reconfigure logging output. They control which
|
||||
// levels are discarded, which fields are included in each log entry, and how those fields are rendered,
|
||||
// and how the overall log entry is rendered (JSON, LTSV, colorized, etc).
|
||||
//
|
||||
// To configure logging settings from environment variables, call the configuration function from main():
|
||||
//
|
||||
// flume.ConfigFromEnv()
|
||||
//
|
||||
// This reads the log configuration from the environment variable "FLUME" (the default, which can be
|
||||
// overridden). The value is JSON, e.g.:
|
||||
//
|
||||
// {"level":"INF","levels":"http=DBG","development"="true"}
|
||||
//
|
||||
// The properties of the config string:
|
||||
//
|
||||
// - "level": ERR, INF, or DBG. The default level for all loggers.
|
||||
// - "levels": A string configuring log levels for specific loggers, overriding the default level.
|
||||
// See note below for syntax.
|
||||
// - "development": true or false. In development mode, the defaults for the other
|
||||
// settings change to be more suitable for developers at a terminal (colorized, multiline, human
|
||||
// readable, etc). See note below for exact defaults.
|
||||
// - "addCaller": true or false. Adds call site information to log entries (file and line).
|
||||
// - "encoding": json, ltsv, term, or term-color. Configures how log entries are encoded in the output.
|
||||
// "term" and "term-color" are multi-line, human-friendly
|
||||
// formats, intended for terminal output.
|
||||
// - "encoderConfig": a JSON object which configures advanced encoding settings, like how timestamps
|
||||
// are formatted. See docs for go.uber.org/zap/zapcore/EncoderConfig
|
||||
//
|
||||
// - "messageKey": the label of the message property of the log entry. If empty, message is omitted.
|
||||
// - "levelKey": the label of the level property of the log entry. If empty, level is omitted.
|
||||
// - "timeKey": the label of the timestamp of the log entry. If empty, timestamp is omitted.
|
||||
// - "nameKey": the label of the logger name in the log entry. If empty, logger name is omitted.
|
||||
// - "callerKey": the label of the logger name in the log entry. If empty, logger name is omitted.
|
||||
// - "lineEnding": the end of each log output line.
|
||||
// - "levelEncoder": capital, capitalColor, color, lower, or abbr. Controls how the log entry level
|
||||
// is rendered. "abbr" renders 3-letter abbreviations, like ERR and INF.
|
||||
// - "timeEncoder": iso8601, millis, nanos, unix, or justtime. Controls how timestamps are rendered.
|
||||
// "millis", "nanos", and "unix" are since UNIX epoch. "unix" is in floating point seconds.
|
||||
// "justtime" omits the date, and just prints the time in the format "15:04:05.000".
|
||||
// - "durationEncoder": string, nanos, or seconds. Controls how time.Duration values are rendered.
|
||||
// - "callerEncoder": full or short. Controls how the call site is rendered.
|
||||
// "full" includes the entire package path, "short" only includes the last folder of the package.
|
||||
//
|
||||
// Defaults:
|
||||
//
|
||||
// {
|
||||
// "level":"INF",
|
||||
// "levels":"",
|
||||
// "development":false,
|
||||
// "addCaller":false,
|
||||
// "encoding":"term-color",
|
||||
// "encoderConfig":{
|
||||
// "messageKey":"msg",
|
||||
// "levelKey":"level",
|
||||
// "timeKey":"time",
|
||||
// "nameKey":"name",
|
||||
// "callerKey":"caller",
|
||||
// "lineEnding":"\n",
|
||||
// "levelEncoder":"abbr",
|
||||
// "timeEncoder":"iso8601",
|
||||
// "durationEncoder":"seconds",
|
||||
// "callerEncoder":"short",
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// These defaults are only applied if one of the configuration functions is called, like ConfigFromEnv(), ConfigString(),
|
||||
// Configure(), or LevelsString(). Initially, all loggers are configured to discard everything, following
|
||||
// flume's opinion that log packages should be silent unless spoken to. Ancillary to this: library packages
|
||||
// should *not* call these functions, or configure logging levels or output in any way. Only program entry points,
|
||||
// like main() or test code, should configure logging. Libraries should just create loggers and log to them.
|
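A minimal sketch of the recommended split, with configuration only in the entry point; error handling is omitted, matching the ConfigFromEnv() example above:

package main

import "github.com/gemalto/flume"

// Library and package code only creates loggers and logs to them.
var log = flume.New("main")

func main() {
    // Only the program entry point configures output, e.g. from the FLUME env var.
    flume.ConfigFromEnv()

    log.Info("starting up")
}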
||||
//
|
||||
// Development mode: if "development"=true, the defaults for the rest of the settings change, equivalent to:
|
||||
//
|
||||
// {
|
||||
// "addCaller":true,
|
||||
// "encoding":"term-color",
|
||||
// "encodingConfig": {
|
||||
// "timeEncoder":"justtime",
|
||||
// "durationEncoder":"string",
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// The "levels" value is a list of key=value pairs, configuring the level of individual named loggers.
|
||||
// If the key is "*", it sets the default level. If "level" and "levels" both configure the default
|
||||
// level, "levels" wins.
|
||||
// Examples:
|
||||
//
|
||||
// * // set the default level to ALL, equivalent to {"level":"ALL"}
|
||||
// *=INF // same, but set default level to INF
|
||||
// *,sql=WRN // set default to ALL, set "sql" logger to WRN
|
||||
// *=INF,http=ALL // set default to INF, set "http" to ALL
|
||||
// *=INF,http // same as above. If name has no level, level is set to ALL
|
||||
// *=INF,-http // set default to INF, set "http" to OFF
|
||||
// http=INF // leave default setting unchanged.
|
||||
//
|
||||
// Factories
|
||||
//
|
||||
// Most usages of flume will use its package functions. The package functions delegate to an internal
|
||||
// instance of Factory, which is the logger registry. You can create and manage your own instance of
|
||||
// Factory, which will be an isolated set of Loggers.
|
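A minimal sketch of a private Factory, assuming the caller wants loggers isolated from the package-level registry:

package main

import "github.com/gemalto/flume"

func main() {
    // A private registry: loggers spawned here are unaffected by the
    // package-level configuration functions.
    factory := flume.NewFactory()
    factory.SetDefaultLevel(flume.DebugLevel)

    log := factory.NewLogger("worker")
    log.Debug("logging through an isolated factory")
}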
||||
//
|
||||
// tl;dr
|
||||
//
|
||||
// The implementation is a wrapper around zap. zap does levels, structured logs, and is very fast.
|
||||
// zap doesn't do centralized, global configuration, so this package
|
||||
// adds that by maintaining an internal registry of all loggers, and using the sync.atomic stuff to swap out
|
||||
// levels and writers in a thread safe way.
|
||||
package flume
|
7
vendor/github.com/gemalto/flume/docker-compose.yml
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
version: '3'
|
||||
services:
|
||||
builder:
|
||||
build:
|
||||
context: .
|
||||
volumes:
|
||||
- ./build:/flume/build
|
399
vendor/github.com/gemalto/flume/factory.go
generated
vendored
Normal file
@ -0,0 +1,399 @@
|
||||
package flume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/ansel1/merry"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type loggerInfo struct {
|
||||
levelEnabler zapcore.LevelEnabler
|
||||
atomicInnerCore atomicInnerCore
|
||||
}
|
||||
|
||||
// Factory is a log management core. It spawns loggers. The Factory has
|
||||
// methods for dynamically reconfiguring all the loggers spawned from Factory.
|
||||
//
|
||||
// The flume package has mirrors of most of the functions which delegate to a
|
||||
// default, package-level factory.
|
||||
type Factory struct {
|
||||
defaultLevel zap.AtomicLevel
|
||||
|
||||
encoder zapcore.Encoder
|
||||
out io.Writer
|
||||
|
||||
loggers map[string]*loggerInfo
|
||||
sync.Mutex
|
||||
|
||||
addCaller bool
|
||||
|
||||
hooks []HookFunc
|
||||
}
|
||||
|
||||
// Encoder serializes log entries. Re-exported from zap for now to avoid exporting zap.
|
||||
type Encoder zapcore.Encoder
|
||||
|
||||
// NewFactory returns a factory. The default level is set to OFF (all logs disabled)
|
||||
func NewFactory() *Factory {
|
||||
f := Factory{
|
||||
defaultLevel: zap.NewAtomicLevel(),
|
||||
loggers: map[string]*loggerInfo{},
|
||||
}
|
||||
f.SetDefaultLevel(OffLevel)
|
||||
|
||||
return &f
|
||||
}
|
||||
|
||||
func (r *Factory) getEncoder() zapcore.Encoder {
|
||||
if r.encoder == nil {
|
||||
return NewLTSVEncoder(NewEncoderConfig())
|
||||
}
|
||||
return r.encoder
|
||||
}
|
||||
|
||||
// SetEncoder sets the encoder for all loggers created by (in the past or future) this factory.
|
||||
func (r *Factory) SetEncoder(e Encoder) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.encoder = e
|
||||
r.refreshLoggers()
|
||||
}
|
||||
|
||||
// SetOut sets the output writer for all logs produced by this factory.
|
||||
// Returns a function which sets the output writer back to the prior setting.
|
||||
func (r *Factory) SetOut(w io.Writer) func() {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
prior := r.out
|
||||
r.out = w
|
||||
r.refreshLoggers()
|
||||
return func() {
|
||||
r.SetOut(prior)
|
||||
}
|
||||
}
|
||||
|
||||
// SetAddCaller enables adding the logging callsite (file and line number) to the log entries.
|
||||
func (r *Factory) SetAddCaller(b bool) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.addCaller = b
|
||||
r.refreshLoggers()
|
||||
}
|
||||
|
||||
func (r *Factory) getOut() io.Writer {
|
||||
if r.out == nil {
|
||||
return os.Stdout
|
||||
}
|
||||
return r.out
|
||||
}
|
||||
|
||||
func (r *Factory) refreshLoggers() {
|
||||
for name, info := range r.loggers {
|
||||
info.atomicInnerCore.set(r.newInnerCore(name, info))
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Factory) getLoggerInfo(name string) *loggerInfo {
|
||||
info, found := r.loggers[name]
|
||||
if !found {
|
||||
info = &loggerInfo{}
|
||||
r.loggers[name] = info
|
||||
info.atomicInnerCore.set(r.newInnerCore(name, info))
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
func (r *Factory) newInnerCore(name string, info *loggerInfo) *innerCore {
|
||||
var l zapcore.LevelEnabler
|
||||
switch {
|
||||
case info.levelEnabler != nil:
|
||||
l = info.levelEnabler
|
||||
default:
|
||||
l = r.defaultLevel
|
||||
}
|
||||
zc := zapcore.NewCore(
|
||||
r.getEncoder(),
|
||||
zapcore.AddSync(r.getOut()),
|
||||
l,
|
||||
)
|
||||
|
||||
return &innerCore{
|
||||
name: name,
|
||||
Core: zc,
|
||||
addCaller: r.addCaller,
|
||||
errorOutput: zapcore.AddSync(os.Stderr),
|
||||
hooks: r.hooks,
|
||||
}
|
||||
}
|
||||
|
||||
// NewLogger returns a new Logger
|
||||
func (r *Factory) NewLogger(name string) Logger {
|
||||
return r.NewCore(name)
|
||||
}
|
||||
|
||||
// NewCore returns a new Core.
|
||||
func (r *Factory) NewCore(name string, options ...CoreOption) *Core {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
info := r.getLoggerInfo(name)
|
||||
core := &Core{
|
||||
atomicInnerCore: &info.atomicInnerCore,
|
||||
}
|
||||
for _, opt := range options {
|
||||
opt.apply(core)
|
||||
}
|
||||
return core
|
||||
}
|
||||
|
||||
func (r *Factory) setLevel(name string, l Level) {
|
||||
info := r.getLoggerInfo(name)
|
||||
info.levelEnabler = zapcore.Level(l)
|
||||
}
|
||||
|
||||
// SetLevel sets the log level for a particular named logger. All loggers with this same
|
||||
// name are affected, in the past or future.
|
||||
func (r *Factory) SetLevel(name string, l Level) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.setLevel(name, l)
|
||||
r.refreshLoggers()
|
||||
}
|
||||
|
||||
// SetDefaultLevel sets the default log level for all loggers which don't have a specific level
|
||||
// assigned to them
|
||||
func (r *Factory) SetDefaultLevel(l Level) {
|
||||
r.defaultLevel.SetLevel(zapcore.Level(l))
|
||||
}
|
||||
|
||||
type Entry = zapcore.Entry
|
||||
type CheckedEntry = zapcore.CheckedEntry
|
||||
type Field = zapcore.Field
|
||||
|
||||
// HookFunc adapts a single function to the Hook interface.
|
||||
type HookFunc func(*CheckedEntry, []Field) []Field
|
||||
|
||||
// Hooks adds functions which are called before a log entry is encoded. The hook function
|
||||
// is given the entry and the total set of fields to be logged. The set of fields which are
|
||||
// returned are then logged. Hook functions can return a modified set of fields, or just return
|
||||
// the unaltered fields.
|
||||
//
|
||||
// The Entry is not modified. It is purely informational.
|
||||
//
|
||||
// If a hook returns an error, that error is logged, but the in-flight log entry
|
||||
// will proceed with the original set of fields.
|
||||
//
|
||||
// These global hooks will be injected into all loggers owned by this factory. They will
|
||||
// execute before any hooks installed in individual loggers.
|
||||
func (r *Factory) Hooks(hooks ...HookFunc) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.hooks = append(r.hooks, hooks...)
|
||||
r.refreshLoggers()
|
||||
}
|
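For illustration, a sketch of a global hook which appends a field to every entry. The "host" field is invented for the example, and zap.String is used only because Field is an alias of zapcore.Field:

package main

import (
    "os"

    "github.com/gemalto/flume"
    "go.uber.org/zap"
)

func main() {
    host, _ := os.Hostname()

    // Append a host field to every entry logged through loggers owned by
    // the package-level factory.
    flume.Hooks(func(e *flume.CheckedEntry, fields []flume.Field) []flume.Field {
        return append(fields, zap.String("host", host))
    })
}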
||||
|
||||
// ClearHooks removes all hooks.
|
||||
func (r *Factory) ClearHooks() {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.hooks = nil
|
||||
r.refreshLoggers()
|
||||
}
|
||||
|
||||
func parseConfigString(s string) map[string]interface{} {
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
items := strings.Split(s, ",")
|
||||
m := map[string]interface{}{}
|
||||
for _, setting := range items {
|
||||
parts := strings.Split(setting, "=")
|
||||
|
||||
switch len(parts) {
|
||||
case 1:
|
||||
name := parts[0]
|
||||
if strings.HasPrefix(name, "-") {
|
||||
m[name[1:]] = false
|
||||
} else {
|
||||
m[name] = true
|
||||
}
|
||||
case 2:
|
||||
m[parts[0]] = parts[1]
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// LevelsString reconfigures the log level for all loggers. Calling it with
|
||||
// an empty string will reset the default level to info, and reset all loggers
|
||||
// to use the default level.
|
||||
//
|
||||
// The string can contain a list of directives, separated by commas. Directives
|
||||
// can set the default log level, and can explicitly set the log level for individual
|
||||
// loggers.
|
||||
//
|
||||
// Directives
|
||||
//
|
||||
// - Default level: Use the `*` directive to set the default log level. Examples:
|
||||
//
|
||||
// * // set the default log level to debug
|
||||
// -* // set the default log level to off
|
||||
//
|
||||
// If the `*` directive is omitted, the default log level will be set to info.
|
||||
// - Logger level: Use the name of the logger to set the log level for a specific
|
||||
// logger. Examples:
|
||||
//
|
||||
// http // set the http logger to debug
|
||||
// -http // set the http logger to off
|
||||
// http=INF // set the http logger to info
|
||||
//
|
||||
// Multiple directives can be included, separated by commas. Examples:
|
||||
//
|
||||
// http // set http logger to debug
|
||||
// http,sql // set http and sql logger to debug
|
||||
// *,-http,sql=INF // set the default level to debug, disable the http logger,
|
||||
// // and set the sql logger to info
|
||||
//
|
||||
func (r *Factory) LevelsString(s string) error {
|
||||
m := parseConfigString(s)
|
||||
levelMap := map[string]Level{}
|
||||
var errMsgs []string
|
||||
for key, val := range m {
|
||||
switch t := val.(type) {
|
||||
case bool:
|
||||
if t {
|
||||
levelMap[key] = DebugLevel
|
||||
} else {
|
||||
levelMap[key] = OffLevel
|
||||
}
|
||||
case string:
|
||||
l, err := levelForAbbr(t)
|
||||
levelMap[key] = l
|
||||
if err != nil {
|
||||
errMsgs = append(errMsgs, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
// first, check default setting
|
||||
if defaultLevel, found := levelMap["*"]; found {
|
||||
r.SetDefaultLevel(defaultLevel)
|
||||
delete(levelMap, "*")
|
||||
} else {
|
||||
r.SetDefaultLevel(InfoLevel)
|
||||
}
|
||||
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
// iterate through the current level map first.
|
||||
// Any existing loggers which aren't in the levels map
|
||||
// get reset to the default level.
|
||||
for name, info := range r.loggers {
|
||||
if _, found := levelMap[name]; !found {
|
||||
info.levelEnabler = r.defaultLevel
|
||||
}
|
||||
}
|
||||
|
||||
// iterate through the levels map and set the specific levels
|
||||
for name, level := range levelMap {
|
||||
r.setLevel(name, level)
|
||||
}
|
||||
|
||||
if len(errMsgs) > 0 {
|
||||
return merry.New("errors parsing config string: " + strings.Join(errMsgs, ", "))
|
||||
}
|
||||
|
||||
r.refreshLoggers()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Configure uses a serializable struct to configure most of the options.
|
||||
// This is useful when fully configuring the logging from an env var or file.
|
||||
//
|
||||
// The zero value for Config will set defaults for a standard, production logger:
|
||||
//
|
||||
// See the Config docs for details on settings.
|
||||
func (r *Factory) Configure(cfg Config) error {
|
||||
|
||||
r.SetDefaultLevel(cfg.DefaultLevel)
|
||||
|
||||
var encCfg *EncoderConfig
|
||||
if cfg.EncoderConfig != nil {
|
||||
encCfg = cfg.EncoderConfig
|
||||
} else {
|
||||
if cfg.Development {
|
||||
encCfg = NewDevelopmentEncoderConfig()
|
||||
} else {
|
||||
encCfg = NewEncoderConfig()
|
||||
}
|
||||
}
|
||||
|
||||
// These *Caller properties *must* be set or errors
|
||||
// will occur
|
||||
if encCfg.EncodeCaller == nil {
|
||||
encCfg.EncodeCaller = zapcore.ShortCallerEncoder
|
||||
}
|
||||
if encCfg.EncodeLevel == nil {
|
||||
encCfg.EncodeLevel = AbbrLevelEncoder
|
||||
}
|
||||
|
||||
var encoder zapcore.Encoder
|
||||
switch cfg.Encoding {
|
||||
case "json":
|
||||
encoder = NewJSONEncoder(encCfg)
|
||||
case "ltsv":
|
||||
encoder = NewLTSVEncoder(encCfg)
|
||||
case "term":
|
||||
encoder = NewConsoleEncoder(encCfg)
|
||||
case "term-color":
|
||||
encoder = NewColorizedConsoleEncoder(encCfg, nil)
|
||||
case "console":
|
||||
encoder = zapcore.NewConsoleEncoder((zapcore.EncoderConfig)(*encCfg))
|
||||
case "":
|
||||
if cfg.Development {
|
||||
encoder = NewColorizedConsoleEncoder(encCfg, nil)
|
||||
} else {
|
||||
encoder = NewJSONEncoder(encCfg)
|
||||
}
|
||||
default:
|
||||
return merry.Errorf("%s is not a valid encoding, must be one of: json, ltsv, term, or term-color", cfg.Encoding)
|
||||
}
|
||||
|
||||
var addCaller bool
|
||||
if cfg.AddCaller != nil {
|
||||
addCaller = *cfg.AddCaller
|
||||
} else {
|
||||
addCaller = cfg.Development
|
||||
}
|
||||
|
||||
if cfg.Levels != "" {
|
||||
if err := r.LevelsString(cfg.Levels); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.encoder = encoder
|
||||
r.addCaller = addCaller
|
||||
r.refreshLoggers()
|
||||
return nil
|
||||
}
|
||||
|
||||
func levelForAbbr(abbr string) (Level, error) {
|
||||
switch strings.ToLower(abbr) {
|
||||
case "off":
|
||||
return OffLevel, nil
|
||||
case "dbg", "debug", "", "all":
|
||||
return DebugLevel, nil
|
||||
case "inf", "info":
|
||||
return InfoLevel, nil
|
||||
case "err", "error":
|
||||
return ErrorLevel, nil
|
||||
default:
|
||||
return InfoLevel, fmt.Errorf("%s is not a recognized level, defaulting to info", abbr)
|
||||
}
|
||||
}
|
11
vendor/github.com/gemalto/flume/json_encoder.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
package flume
|
||||
|
||||
import "go.uber.org/zap/zapcore"
|
||||
|
||||
// NewJSONEncoder just hides the zap json encoder, to avoid exporting zap
|
||||
func NewJSONEncoder(cfg *EncoderConfig) Encoder {
|
||||
if cfg == nil {
|
||||
cfg = &EncoderConfig{}
|
||||
}
|
||||
return zapcore.NewJSONEncoder(zapcore.EncoderConfig(*cfg))
|
||||
}
|
198
vendor/github.com/gemalto/flume/log.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
|
||||
package flume
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/ansel1/merry"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type (
|
||||
// Logger is the basic logging interface. Construct instances of Logger with a Factory,
|
||||
// or with the package functions (which use a package level Factory).
|
||||
Logger interface {
|
||||
Debug(msg string, args ...interface{})
|
||||
Info(msg string, args ...interface{})
|
||||
Error(msg string, args ...interface{})
|
||||
|
||||
IsDebug() bool
|
||||
IsInfo() bool
|
||||
|
||||
// With creates a new Logger with some context already attached. All
|
||||
// entries logged with the child logger will include this context.
|
||||
With(args ...interface{}) Logger
|
||||
}
|
||||
|
||||
// Level is a log level
|
||||
Level zapcore.Level
|
||||
)
|
||||
|
||||
const (
|
||||
// OffLevel disables all logs
|
||||
OffLevel = Level(127)
|
||||
// DebugLevel should be used for low-level, non-production logs. Typically intended only for developers.
|
||||
DebugLevel = Level(zapcore.DebugLevel)
|
||||
// InfoLevel should be used for production level logs. Typically intended for end-users and developers.
|
||||
InfoLevel = Level(zapcore.InfoLevel)
|
||||
// ErrorLevel should be used for errors. Generally, this should be reserved for events which truly
|
||||
// need to be looked at by an admin, and might be reported to an error-tracking system.
|
||||
ErrorLevel = Level(zapcore.ErrorLevel)
|
||||
)
|
||||
|
||||
var pkgFactory = NewFactory()
|
||||
|
||||
// New creates a new Logger
|
||||
func New(name string) Logger {
|
||||
return pkgFactory.NewLogger(name)
|
||||
}
|
||||
|
||||
// NewCore returns a new Core
|
||||
func NewCore(name string, options ...CoreOption) *Core {
|
||||
return pkgFactory.NewCore(name, options...)
|
||||
}
|
||||
|
||||
// ConfigString configures the package level Factory. The
|
||||
// string can either be a JSON-serialized Config object, or
|
||||
// just a LevelsString (see Factory.LevelsString for format).
|
||||
//
|
||||
// Note: this will reconfigure the logging levels for all
|
||||
// loggers.
|
||||
func ConfigString(s string) error {
|
||||
if strings.HasPrefix(strings.TrimSpace(s), "{") {
|
||||
// it's json, treat it like a full config string
|
||||
cfg := Config{}
|
||||
err := json.Unmarshal([]byte(s), &cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return Configure(cfg)
|
||||
}
|
||||
return pkgFactory.LevelsString(s)
|
||||
}
|
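A minimal sketch of both accepted forms; errors are ignored for brevity and the settings shown are taken from the package documentation:

package main

import "github.com/gemalto/flume"

func main() {
    // Full JSON configuration...
    _ = flume.ConfigString(`{"level":"DBG","encoding":"term-color","addCaller":true}`)

    // ...or just a levels string, which only adjusts log levels.
    _ = flume.ConfigString("*=INF,-http,sql=DBG")
}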
||||
|
||||
// Configure configures the package level Factory from
|
||||
// the settings in the Config object. See Config for
|
||||
// details.
|
||||
//
|
||||
// Note: this will reconfigure the logging levels for all
|
||||
// loggers.
|
||||
func Configure(cfg Config) error {
|
||||
return pkgFactory.Configure(cfg)
|
||||
}
|
||||
|
||||
// SetOut sets the output writer for all logs produced by the default factory.
|
||||
// Returns a function which sets the output writer back to the prior setting.
|
||||
func SetOut(w io.Writer) func() {
|
||||
return pkgFactory.SetOut(w)
|
||||
}
|
||||
|
||||
// SetDefaultLevel sets the default log level on the package-level Factory.
|
||||
func SetDefaultLevel(l Level) {
|
||||
pkgFactory.SetDefaultLevel(l)
|
||||
}
|
||||
|
||||
// SetLevel sets a log level for a named logger on the package-level Factory.
|
||||
func SetLevel(name string, l Level) {
|
||||
pkgFactory.SetLevel(name, l)
|
||||
}
|
||||
|
||||
// SetAddCaller enables/disables call site logging on the package-level Factory
|
||||
func SetAddCaller(b bool) {
|
||||
pkgFactory.SetAddCaller(b)
|
||||
}
|
||||
|
||||
// SetEncoder sets the encoder for the package-level Factory
|
||||
func SetEncoder(e Encoder) {
|
||||
pkgFactory.SetEncoder(e)
|
||||
}
|
||||
|
||||
// Hooks adds hooks to the package-level Factory.
|
||||
func Hooks(hooks ...HookFunc) {
|
||||
pkgFactory.Hooks(hooks...)
|
||||
}
|
||||
|
||||
// ClearHooks clears all hooks from the package-level Factory.
|
||||
func ClearHooks() {
|
||||
pkgFactory.ClearHooks()
|
||||
}
|
||||
|
||||
// SetDevelopmentDefaults sets useful default settings on the package-level Factory
|
||||
// which are appropriate for a development setting. Default log level is
|
||||
// set to INF, all loggers are reset to the default level, call site information
|
||||
// is logged, and the encoder is a colorized, multi-line friendly console
|
||||
// encoder with a simplified time stamp format.
|
||||
func SetDevelopmentDefaults() error {
|
||||
return Configure(Config{
|
||||
Development: true,
|
||||
})
|
||||
}
|
||||
|
||||
// String implements stringer and a few other interfaces.
|
||||
func (l Level) String() string {
|
||||
switch l {
|
||||
case DebugLevel:
|
||||
return "DBG"
|
||||
case InfoLevel:
|
||||
return "INF"
|
||||
case ErrorLevel:
|
||||
return "ERR"
|
||||
case OffLevel:
|
||||
return "OFF"
|
||||
default:
|
||||
return fmt.Sprintf("Level(%d)", l)
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler
|
||||
func (l Level) MarshalText() ([]byte, error) {
|
||||
return []byte(l.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler
|
||||
func (l *Level) UnmarshalText(text []byte) error {
|
||||
if l == nil {
|
||||
return merry.New("can't unmarshal a nil *Level")
|
||||
}
|
||||
if !l.unmarshalText(text) {
|
||||
return fmt.Errorf("unrecognized level: %q", text)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *Level) unmarshalText(text []byte) bool {
|
||||
text = bytes.ToLower(text)
|
||||
switch string(text) {
|
||||
case "debug", "dbg", "all":
|
||||
*l = DebugLevel
|
||||
case "info", "inf", "": // make the zero value useful
|
||||
*l = InfoLevel
|
||||
case "error", "err":
|
||||
*l = ErrorLevel
|
||||
case "off":
|
||||
*l = OffLevel
|
||||
default:
|
||||
i, err := strconv.Atoi(string(text))
if err != nil || i < -127 || i > 127 {
return false
}
*l = Level(i)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Set implements flags.Value
|
||||
func (l *Level) Set(s string) error {
|
||||
return l.UnmarshalText([]byte(s))
|
||||
}
|
||||
|
||||
// Get implements flag.Getter
|
||||
func (l *Level) Get() interface{} {
|
||||
return *l
|
||||
}
|
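Since *Level implements flag.Value via Set and String, it can be wired straight into the standard flag package; a minimal sketch with an invented flag name:

package main

import (
    "flag"

    "github.com/gemalto/flume"
)

func main() {
    level := flume.InfoLevel
    // e.g. ./app -log-level=DBG
    flag.Var(&level, "log-level", "default log level (DBG, INF, ERR, or OFF)")
    flag.Parse()

    flume.SetDefaultLevel(level)
}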
53
vendor/github.com/gemalto/flume/logger_writer.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
package flume
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// LogFuncWriter is a writer which writes to a logging function signature
|
||||
// like that of testing.T.Log() and fmt/log.Println().
|
||||
// It can be used to redirect flume's *output* to some other logger.
|
||||
//
|
||||
// SetOut(LogFuncWriter(fmt.Println, true))
|
||||
// SetOut(LogFuncWriter(t.Log, true))
|
||||
//
|
||||
func LogFuncWriter(l func(args ...interface{}), trimSpace bool) io.Writer {
|
||||
return &logWriter{lf: l, trimSpace: trimSpace}
|
||||
}
|
||||
|
||||
// LoggerFuncWriter is a writer which writes lines to a logging function with
|
||||
// a signature like that of flume.Logger's functions, like Info(), Debug(), and Error().
|
||||
//
|
||||
// http.Server{
|
||||
// ErrorLog: log.New(LoggerFuncWriter(flume.New("http").Error), "", 0),
|
||||
// }
|
||||
//
|
||||
func LoggerFuncWriter(l func(msg string, kvpairs ...interface{})) io.Writer {
|
||||
return &loggerWriter{lf: l}
|
||||
}
|
||||
|
||||
type logWriter struct {
|
||||
lf func(args ...interface{})
|
||||
trimSpace bool
|
||||
}
|
||||
|
||||
// Write implements io.Writer
|
||||
func (t *logWriter) Write(p []byte) (n int, err error) {
|
||||
s := string(p)
|
||||
if t.trimSpace {
|
||||
s = strings.TrimSpace(s)
|
||||
}
|
||||
t.lf(s)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
type loggerWriter struct {
|
||||
lf func(msg string, kvpairs ...interface{})
|
||||
}
|
||||
|
||||
// Write implements io.Writer
|
||||
func (t *loggerWriter) Write(p []byte) (n int, err error) {
|
||||
t.lf(string(p))
|
||||
return len(p), nil
|
||||
}
|
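A hedged sketch combining both writers in a test: flume's output is redirected to the test logger, and an http.Server's error log is routed into a flume logger, mirroring the usage shown in the comments above (the package name is illustrative):

package mypkg

import (
    "log"
    "net/http"
    "testing"

    "github.com/gemalto/flume"
)

func TestServerLogging(t *testing.T) {
    // Send flume's output to the test log; restore the prior writer afterwards.
    defer flume.SetOut(flume.LogFuncWriter(t.Log, true))()

    srv := &http.Server{
        // Route the server's internal error log into a flume logger.
        ErrorLog: log.New(flume.LoggerFuncWriter(flume.New("http").Error), "", 0),
    }
    _ = srv
}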
506
vendor/github.com/gemalto/flume/ltsv_encoder.go
generated
vendored
Normal file
@ -0,0 +1,506 @@
|
||||
package flume
|
||||
|
||||
// Copyright (c) 2016 Uber Technologies, Inc.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/buffer"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//nolint:gochecknoinits
|
||||
func init() {
|
||||
_ = zap.RegisterEncoder("ltsv", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
|
||||
return NewLTSVEncoder((*EncoderConfig)(&cfg)), nil
|
||||
})
|
||||
}
|
||||
|
||||
type ltsvEncoder struct {
|
||||
*EncoderConfig
|
||||
buf *buffer.Buffer
|
||||
allowTabs bool
|
||||
allowNewLines bool
|
||||
skipNextElementSeparator bool
|
||||
lastElementWasMultiline bool
|
||||
fieldNamePrefix string
|
||||
nestingLevel int
|
||||
blankKey string
|
||||
binaryEncoder func([]byte) string
|
||||
}
|
||||
|
||||
// NewLTSVEncoder creates a fast, low-allocation LTSV encoder.
|
||||
func NewLTSVEncoder(cfg *EncoderConfig) Encoder {
|
||||
return &ltsvEncoder{
|
||||
EncoderConfig: cfg,
|
||||
buf: bufPool.Get(),
|
||||
blankKey: "_",
|
||||
binaryEncoder: base64.StdEncoding.EncodeToString,
|
||||
}
|
||||
}
|
||||
|
||||
// AddBinary implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddBinary(key string, value []byte) {
|
||||
enc.AddString(key, enc.binaryEncoder(value))
|
||||
}
|
||||
|
||||
// AddArray implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddArray(key string, arr zapcore.ArrayMarshaler) error {
|
||||
enc.addKey(key)
|
||||
return enc.AppendArray(arr)
|
||||
}
|
||||
|
||||
// AddObject implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddObject(key string, obj zapcore.ObjectMarshaler) error {
|
||||
enc.addKey(key)
|
||||
return enc.AppendObject(obj)
|
||||
}
|
||||
|
||||
// AddBool implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddBool(key string, val bool) {
|
||||
enc.addKey(key)
|
||||
enc.AppendBool(val)
|
||||
}
|
||||
|
||||
// AddComplex128 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddComplex128(key string, val complex128) {
|
||||
enc.addKey(key)
|
||||
enc.AppendComplex128(val)
|
||||
}
|
||||
|
||||
// AddDuration implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddDuration(key string, val time.Duration) {
|
||||
enc.addKey(key)
|
||||
enc.AppendDuration(val)
|
||||
}
|
||||
|
||||
// AddFloat64 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddFloat64(key string, val float64) {
|
||||
enc.addKey(key)
|
||||
enc.AppendFloat64(val)
|
||||
}
|
||||
|
||||
// AddInt64 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddInt64(key string, val int64) {
|
||||
enc.addKey(key)
|
||||
enc.AppendInt64(val)
|
||||
}
|
||||
|
||||
// AddReflected implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddReflected(key string, obj interface{}) error {
|
||||
enc.addKey(key)
|
||||
return enc.AppendReflected(obj)
|
||||
}
|
||||
|
||||
// OpenNamespace implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) OpenNamespace(key string) {
|
||||
switch len(enc.fieldNamePrefix) {
|
||||
case 0:
|
||||
enc.fieldNamePrefix = key
|
||||
default:
|
||||
enc.fieldNamePrefix = enc.fieldNamePrefix + "." + key
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// AddString implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddString(key, val string) {
|
||||
enc.addKey(key)
|
||||
enc.AppendString(val)
|
||||
}
|
||||
|
||||
// AddByteString implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddByteString(key string, value []byte) {
|
||||
enc.addKey(key)
|
||||
enc.AppendByteString(value)
|
||||
}
|
||||
|
||||
// AddTime implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddTime(key string, val time.Time) {
|
||||
enc.addKey(key)
|
||||
enc.AppendTime(val)
|
||||
}
|
||||
|
||||
// AddUint64 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddUint64(key string, val uint64) {
|
||||
enc.addKey(key)
|
||||
enc.AppendUint64(val)
|
||||
}
|
||||
|
||||
// AppendArray implements zapcore.ArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendArray(arr zapcore.ArrayMarshaler) error {
|
||||
enc.addElementSeparator()
|
||||
enc.buf.AppendByte('[')
|
||||
enc.skipNextElementSeparator = true
|
||||
err := arr.MarshalLogArray(enc)
|
||||
enc.buf.AppendByte(']')
|
||||
enc.skipNextElementSeparator = false
|
||||
return err
|
||||
}
|
||||
|
||||
// AppendObject implements zapcore.ArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendObject(obj zapcore.ObjectMarshaler) error {
|
||||
enc.addElementSeparator()
|
||||
enc.nestingLevel++
|
||||
enc.skipNextElementSeparator = true
|
||||
enc.buf.AppendByte('{')
|
||||
err := obj.MarshalLogObject(enc)
|
||||
enc.buf.AppendByte('}')
|
||||
enc.skipNextElementSeparator = false
|
||||
enc.nestingLevel--
|
||||
return err
|
||||
}
|
||||
|
||||
// AppendBool implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendBool(val bool) {
|
||||
enc.addElementSeparator()
|
||||
enc.buf.AppendBool(val)
|
||||
}
|
||||
|
||||
// AppendComplex128 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendComplex128(val complex128) {
|
||||
enc.addElementSeparator()
|
||||
// Cast to a platform-independent, fixed-size type.
|
||||
r, i := real(val), imag(val)
|
||||
// Because we're always in a quoted string, we can use strconv without
|
||||
// special-casing NaN and +/-Inf.
|
||||
enc.buf.AppendFloat(r, 64)
|
||||
enc.buf.AppendByte('+')
|
||||
enc.buf.AppendFloat(i, 64)
|
||||
enc.buf.AppendByte('i')
|
||||
}
|
||||
|
||||
// AppendDuration implements zapcore.ArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendDuration(val time.Duration) {
|
||||
enc.EncodeDuration(val, enc)
|
||||
}
|
||||
|
||||
// AppendInt64 implements zapcore.ArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendInt64(val int64) {
|
||||
enc.addElementSeparator()
|
||||
enc.buf.AppendInt(val)
|
||||
}
|
||||
|
||||
// AppendReflected implements zapcore.ArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendReflected(val interface{}) error {
|
||||
enc.AppendString(fmt.Sprintf("%+v", val))
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendString implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendString(val string) {
|
||||
enc.addElementSeparator()
|
||||
if enc.allowNewLines && strings.Contains(val, "\n") {
|
||||
enc.safeAddString("\n", false)
|
||||
}
|
||||
enc.safeAddString(val, false)
|
||||
}
|
||||
|
||||
// AppendByteString implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendByteString(val []byte) {
|
||||
enc.addElementSeparator()
|
||||
|
||||
if enc.allowNewLines && bytes.Contains(val, []byte("\n")) {
|
||||
enc.safeAddString("\n", false)
|
||||
}
|
||||
enc.safeAddByteString(val, false)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
// AppendTime implements zapcore.ArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendTime(val time.Time) {
|
||||
enc.EncodeTime(val, enc)
|
||||
}
|
||||
|
||||
// AppendUint64 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendUint64(val uint64) {
|
||||
enc.addElementSeparator()
|
||||
enc.buf.AppendUint(val)
|
||||
}
|
||||
|
||||
//
|
||||
// AddComplex64 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
|
||||
|
||||
// AddFloat32 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
|
||||
|
||||
// AddInt implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
|
||||
|
||||
// AddInt32 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
|
||||
|
||||
// AddInt16 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
|
||||
|
||||
// AddInt8 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
|
||||
|
||||
// AddUint implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
|
||||
|
||||
// AddUint32 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
|
||||
|
||||
// AddUint16 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
|
||||
|
||||
// AddUint8 implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
|
||||
|
||||
// AddUintptr implements zapcore.ObjectEncoder
|
||||
func (enc *ltsvEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
|
||||
|
||||
// AppendComplex64 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
|
||||
|
||||
// AppendFloat64 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
|
||||
|
||||
// AppendFloat32 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
|
||||
|
||||
// AppendInt implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
|
||||
|
||||
// AppendInt32 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
|
||||
|
||||
// AppendInt16 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
|
||||
|
||||
// AppendInt8 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
|
||||
|
||||
// AppendUint implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
|
||||
|
||||
// AppendUint32 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
|
||||
|
||||
// AppendUint16 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
|
||||
|
||||
// AppendUint8 implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
|
||||
|
||||
// AppendUintptr implements zapcore.PrimitiveArrayEncoder
|
||||
func (enc *ltsvEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
|
||||
|
||||
// Clone implements zapcore.Encoder
|
||||
func (enc *ltsvEncoder) Clone() zapcore.Encoder {
|
||||
clone := *enc
|
||||
clone.buf = bufPool.Get()
|
||||
_, _ = clone.buf.Write(enc.buf.Bytes())
|
||||
return &clone
|
||||
}
|
||||
|
||||
// EncodeEntry implements zapcore.Encoder
|
||||
func (enc *ltsvEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
|
||||
final := *enc
|
||||
final.buf = bufPool.Get()
|
||||
|
||||
if final.LevelKey != "" {
|
||||
final.addKey(final.LevelKey)
|
||||
final.EncodeLevel(ent.Level, &final)
|
||||
}
|
||||
if final.TimeKey != "" {
|
||||
final.AddTime(final.TimeKey, ent.Time)
|
||||
}
|
||||
if final.MessageKey != "" {
|
||||
final.addKey(enc.MessageKey)
|
||||
final.AppendString(ent.Message)
|
||||
}
|
||||
if ent.LoggerName != "" && final.NameKey != "" {
|
||||
final.addKey(final.NameKey)
|
||||
final.AppendString(ent.LoggerName)
|
||||
}
|
||||
if ent.Caller.Defined && final.CallerKey != "" {
|
||||
final.addKey(final.CallerKey)
|
||||
final.EncodeCaller(ent.Caller, &final)
|
||||
}
|
||||
if final.buf.Len() > 0 {
|
||||
final.addFieldSeparator()
|
||||
_, _ = final.buf.Write(enc.buf.Bytes())
|
||||
}
|
||||
for i := range fields {
|
||||
fields[i].AddTo(&final)
|
||||
}
|
||||
if ent.Stack != "" && final.StacktraceKey != "" {
|
||||
final.AddString(final.StacktraceKey, ent.Stack)
|
||||
}
|
||||
final.buf.AppendByte('\n')
|
||||
return final.buf, nil
|
||||
}
|
||||
|
||||
func (enc *ltsvEncoder) addKey(key string) {
|
||||
enc.addFieldSeparator()
|
||||
switch {
|
||||
case key == "" && enc.blankKey == "":
|
||||
return
|
||||
case key == "" && enc.blankKey != "":
|
||||
key = enc.blankKey
|
||||
}
|
||||
if len(enc.fieldNamePrefix) > 0 {
|
||||
enc.safeAddString(enc.fieldNamePrefix, true)
|
||||
enc.buf.AppendByte('.')
|
||||
}
|
||||
enc.safeAddString(key, true)
|
||||
enc.buf.AppendByte(':')
|
||||
}
|
||||
|
||||
func (enc *ltsvEncoder) addFieldSeparator() {
|
||||
last := enc.buf.Len() - 1
|
||||
if last < 0 {
|
||||
enc.skipNextElementSeparator = true
|
||||
return
|
||||
}
|
||||
if enc.nestingLevel > 0 {
|
||||
enc.addElementSeparator()
|
||||
enc.skipNextElementSeparator = true
|
||||
return
|
||||
}
|
||||
|
||||
lastByte := enc.buf.Bytes()[last]
|
||||
if enc.lastElementWasMultiline {
|
||||
if lastByte != '\n' && lastByte != '\r' {
|
||||
// make sure the last line terminated with a newline
|
||||
enc.buf.AppendByte('\n')
|
||||
}
|
||||
enc.lastElementWasMultiline = false
|
||||
} else if lastByte != '\t' {
|
||||
enc.buf.AppendByte('\t')
|
||||
}
|
||||
enc.skipNextElementSeparator = true
|
||||
}
|
||||
|
||||
func (enc *ltsvEncoder) addElementSeparator() {
|
||||
if !enc.skipNextElementSeparator && enc.buf.Len() != 0 {
|
||||
enc.buf.AppendByte(',')
|
||||
}
|
||||
enc.skipNextElementSeparator = false
|
||||
}
|
||||
|
||||
func (enc *ltsvEncoder) appendFloat(val float64, bitSize int) {
|
||||
enc.addElementSeparator()
|
||||
switch {
|
||||
case math.IsNaN(val):
|
||||
enc.buf.AppendString(`"NaN"`)
|
||||
case math.IsInf(val, 1):
|
||||
enc.buf.AppendString(`"+Inf"`)
|
||||
case math.IsInf(val, -1):
|
||||
enc.buf.AppendString(`"-Inf"`)
|
||||
default:
|
||||
enc.buf.AppendFloat(val, bitSize)
|
||||
}
|
||||
}
|
||||
|
||||
// safeAddString appends a string to the internal buffer.
|
||||
// If `key`, colons are replaced with underscores, and newlines and tabs are escaped
|
||||
// If not `key`, only newlines and tabs are escaped, unless configured otherwise
|
||||
//nolint:dupl
|
||||
func (enc *ltsvEncoder) safeAddString(s string, key bool) {
|
||||
for i := 0; i < len(s); {
|
||||
if b := s[i]; b < utf8.RuneSelf {
|
||||
i++
|
||||
switch {
|
||||
case key && b == ':':
|
||||
enc.buf.AppendByte('_')
|
||||
case b == '\n':
|
||||
if !enc.allowNewLines || key {
|
||||
enc.buf.AppendString("\\n")
|
||||
} else {
|
||||
enc.buf.AppendByte(b)
|
||||
enc.lastElementWasMultiline = true
|
||||
}
|
||||
case b == '\r':
|
||||
if !enc.allowNewLines || key {
|
||||
enc.buf.AppendString("\\r")
|
||||
} else {
|
||||
enc.buf.AppendByte(b)
|
||||
enc.lastElementWasMultiline = true
|
||||
}
|
||||
case (!enc.allowTabs || key) && b == '\t':
|
||||
enc.buf.AppendString("\\t")
|
||||
default:
|
||||
enc.buf.AppendByte(b)
|
||||
}
|
||||
continue
|
||||
}
|
||||
c, size := utf8.DecodeRuneInString(s[i:])
|
||||
if c == utf8.RuneError && size == 1 {
|
||||
enc.buf.AppendString(`\ufffd`)
|
||||
i++
|
||||
continue
|
||||
}
|
||||
enc.buf.AppendString(s[i : i+size])
|
||||
i += size
|
||||
}
|
||||
}
|
||||
|
||||
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
|
||||
//nolint:dupl
|
||||
func (enc *ltsvEncoder) safeAddByteString(s []byte, key bool) {
|
||||
for i := 0; i < len(s); {
|
||||
if b := s[i]; b < utf8.RuneSelf {
|
||||
i++
|
||||
switch {
|
||||
case key && b == ':':
|
||||
enc.buf.AppendByte('_')
|
||||
case b == '\n':
|
||||
if !enc.allowNewLines || key {
|
||||
enc.buf.AppendString("\\n")
|
||||
} else {
|
||||
enc.buf.AppendByte(b)
|
||||
enc.lastElementWasMultiline = true
|
||||
}
|
||||
case b == '\r':
|
||||
if !enc.allowNewLines || key {
|
||||
enc.buf.AppendString("\\r")
|
||||
} else {
|
||||
enc.buf.AppendByte(b)
|
||||
enc.lastElementWasMultiline = true
|
||||
}
|
||||
case (!enc.allowTabs || key) && b == '\t':
|
||||
enc.buf.AppendString("\\t")
|
||||
default:
|
||||
enc.buf.AppendByte(b)
|
||||
}
|
||||
continue
|
||||
}
|
||||
c, size := utf8.DecodeRune(s[i:])
|
||||
if c == utf8.RuneError && size == 1 {
|
||||
enc.buf.AppendString(`\ufffd`)
|
||||
i++
|
||||
continue
|
||||
}
|
||||
_, _ = enc.buf.Write(s[i : i+size])
|
||||
i += size
|
||||
}
|
||||
}
|
31
vendor/github.com/gemalto/flume/options.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
package flume
|
||||
|
||||
// A CoreOption configures a Core.
|
||||
type CoreOption interface {
|
||||
apply(*Core)
|
||||
}
|
||||
|
||||
// coreOptionFunc wraps a func so it satisfies the CoreOption interface.
|
||||
type coreOptionFunc func(*Core)
|
||||
|
||||
func (f coreOptionFunc) apply(c *Core) {
|
||||
f(c)
|
||||
}
|
||||
|
||||
// AddCallerSkip increases the number of callers skipped by caller annotation
|
||||
// (as enabled by the AddCaller option). When building wrappers around a
|
||||
// Core, supplying this CoreOption prevents Core from always
|
||||
// reporting the wrapper code as the caller.
|
||||
func AddCallerSkip(skip int) CoreOption {
|
||||
return coreOptionFunc(func(c *Core) {
|
||||
c.callerSkip += skip
|
||||
})
|
||||
}
|
||||
|
||||
// AddHooks adds hooks to this logger core. These will only execute on this
|
||||
// logger, after the global hooks.
|
||||
func AddHooks(hooks ...HookFunc) CoreOption {
|
||||
return coreOptionFunc(func(core *Core) {
|
||||
core.hooks = append(core.hooks, hooks...)
|
||||
})
|
||||
}
|
BIN
vendor/github.com/gemalto/flume/sample.png
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 53 KiB |
6
vendor/github.com/gemalto/kmip-go/.dockerignore
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
.idea
|
||||
.git
|
||||
Dockerfile
|
||||
docker-compose.yml
|
||||
.dockerignore
|
||||
build
|
6
vendor/github.com/gemalto/kmip-go/.gitignore
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
.idea
|
||||
scratch
|
||||
build
|
||||
vendor
|
||||
/pykmip-server/server.log
|
||||
/pykmip-server/server.db
|
235
vendor/github.com/gemalto/kmip-go/.golangci.yml
generated
vendored
Normal file
@ -0,0 +1,235 @@
|
||||
# This file contains all available configuration options
|
||||
# with their default values.
|
||||
|
||||
# options for analysis running
|
||||
run:
|
||||
tests: true
|
||||
skip-files:
|
||||
- requests.go
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||
format: colored-line-number
|
||||
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
|
||||
|
||||
# all available settings of specific linters
|
||||
linters-settings:
|
||||
govet:
|
||||
# report about shadowed variables
|
||||
check-shadowing: false
|
||||
dupl:
|
||||
# tokens count to trigger issue, 150 by default
|
||||
threshold: 100
|
||||
exhaustive:
|
||||
default-signifies-exhaustive: true
|
||||
goconst:
|
||||
# minimal length of string constant, 3 by default
|
||||
min-len: 3
|
||||
# minimal occurrences count to trigger, 3 by default
|
||||
min-occurrences: 3
|
||||
depguard:
|
||||
list-type: blacklist
|
||||
include-go-root: false
|
||||
packages:
|
||||
- github.com/magiconair/properties/assert
|
||||
- gopkg.in/go-playground/assert.v1
|
||||
- github.com/pborman/uuid #replace with github.com/google/uuid
|
||||
inTests:
|
||||
- github.com/davecgh/go-spew/spew
|
||||
- github.com/stretchr/testify
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: US
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused exported identifiers; default is false.
|
||||
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
gocritic:
|
||||
# Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
|
||||
disabled-checks:
|
||||
- commentFormatting
|
||||
revive:
|
||||
ignore-generated-header: true
|
||||
wsl:
|
||||
allow-cuddle-declarations: true
|
||||
allow-separated-leading-comment: true
|
||||
allow-assign-and-anything: true
|
||||
|
||||
linters:
|
||||
# to try out individual linters: golangci-lint run -E gocyclo,gosimple
|
||||
enable:
|
||||
# default linters
|
||||
- staticcheck
|
||||
- deadcode
|
||||
- errcheck
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- structcheck
|
||||
- unused
|
||||
- varcheck
|
||||
|
||||
# additional linters
|
||||
- asciicheck
|
||||
- bidichk
|
||||
## - bodyclose # its all false positives with requester and sling, which both close the body already
|
||||
- containedctx
|
||||
- contextcheck
|
||||
# - cyclop # need to analyze findings
|
||||
- decorder
|
||||
- depguard
|
||||
## - dogsled # checks for too many blank identifiers. don't care
|
||||
- dupl
|
||||
- durationcheck
|
||||
- errchkjson
|
||||
- errname
|
||||
- errorlint
|
||||
- exhaustive
|
||||
- exportloopref
|
||||
- forbidigo
|
||||
- forcetypeassert
|
||||
## - funlen # checks function length. don't care
|
||||
# - gci # not sure why this is complaining
|
||||
## - gochecknoglobals # too common
|
||||
- gochecknoinits
|
||||
# - gocognit # too many findings, will take time to evaluate
|
||||
- goconst
|
||||
- gocritic
|
||||
## - gocyclo # checks cyclomatic complexity. don't care
|
||||
# - godot # too many false positives
|
||||
# - godox # doesn't allow TODO comments. We allow those to be committed.
|
||||
# - goerr113 # good practice, but it doesn't recognize that we're already wrapping errors with merry
|
||||
## - gofmt # checks code is formatted, handled by make prep
|
||||
- gofumpt
|
||||
- goheader
|
||||
## - goimports # checks import order. We're not using goimports
|
||||
# - gomnd # too aggressive
|
||||
- gomoddirectives
|
||||
# - gomodguard
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- grouper
|
||||
- ifshort
|
||||
- importas
|
||||
# - ireturn # there are valid use cases for this pattern. too strict.
|
||||
## - lll # checks line length. not enforced
|
||||
# - maintidx # look at this later
|
||||
- makezero
|
||||
## - maligned # optimizies struct field order, but structs are usually ordered for legibility
|
||||
- misspell
|
||||
- nakedret
|
||||
# - nestif # need to evaluate the findings
|
||||
- nilerr
|
||||
- nilnil
|
||||
# - nlreturn # a little too aggressive. wsl covers the same ground.
|
||||
- noctx
|
||||
- nolintlint
|
||||
# - paralleltest # look at this later
|
||||
# - prealloc # slice optimizations, but promotes too much premature optimization
|
||||
- predeclared
|
||||
- promlinter
|
||||
- revive
|
||||
- rowserrcheck
|
||||
- sqlclosecheck
|
||||
- stylecheck
|
||||
- tagliatelle
|
||||
- thelper
|
||||
- tparallel
|
||||
- unconvert
|
||||
- unparam
|
||||
# - varnamelen # take a look later
|
||||
- wastedassign
|
||||
- whitespace
|
||||
# - wrapcheck # way too aggressive
|
||||
- wsl
|
||||
## - unparam # too many false positives
|
||||
## - whitespace # not enforced
|
||||
disable-all: true
|
||||
# presets:
|
||||
# - bugs
|
||||
# - unused
|
||||
# fast: false
|
||||
|
||||
|
||||
issues:
|
||||
# List of regexps of issue texts to exclude, empty list by default.
|
||||
# But independently from this option we use default exclude patterns,
|
||||
# it can be disabled by `exclude-use-default: false`. To list all
|
||||
# excluded by default patterns execute `golangci-lint run --help`
|
||||
# exclude:
|
||||
# - abcdef
|
||||
|
||||
# Excluding configuration per-path, per-linter, per-text and per-source
|
||||
exclude-rules:
|
||||
# Explicitly exclude the typecheck plugin. There is some bug in golangci which is
|
||||
# enabling this checker, even though it isn't listed above.
|
||||
# Exclude some linters from running on tests files.
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- gocyclo
|
||||
- errcheck
|
||||
- dupl
|
||||
- gosec
|
||||
- exportloopref
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- wsl
|
||||
- nlreturn
|
||||
- errchkjson
|
||||
- forcetypeassert
|
||||
- path: cmd
|
||||
linters:
|
||||
# init(), globals, and prints are pretty common in main packages
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- forbidigo
|
||||
|
||||
# Exclude known linters from partially hard-vendored code,
|
||||
# which is impossible to exclude via "nolint" comments.
|
||||
# - path: internal/hmac/
|
||||
# text: "weak cryptographic primitive"
|
||||
# linters:
|
||||
# - gosec
|
||||
|
||||
# Exclude some staticcheck messages
|
||||
# - linters:
|
||||
# - staticcheck
|
||||
# text: "SA9003:"
|
||||
|
||||
|
||||
# Independently from option `exclude` we use default exclude patterns,
|
||||
# it can be disabled by this option. To list all
|
||||
# excluded by default patterns execute `golangci-lint run --help`.
|
||||
# Default value for this option is true.
|
||||
# exclude-use-default: false
|
||||
|
||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||
# max-issues-per-linter: 0
|
||||
|
||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||
# max-same-issues: 0
|
||||
|
||||
# Show only new issues: if there are unstaged changes or untracked files,
|
||||
# only those changes are analyzed, else only changes in HEAD~ are analyzed.
|
||||
# It's a super-useful option for integration of golangci-lint into existing
|
||||
# large codebase. It's not practical to fix all existing issues at the moment
|
||||
# of integration: much better don't allow issues in new code.
|
||||
# Default is false.
|
||||
new: false
|
||||
|
||||
# Show only new issues created after git revision `REV`
|
||||
# new-from-rev: REV
|
||||
|
||||
# Show only new issues created in git patch with set file path.
|
||||
# new-from-patch: path/to/patch/file
|
10
vendor/github.com/gemalto/kmip-go/Dockerfile
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
FROM golang:alpine
|
||||
|
||||
RUN apk --no-cache add make git curl bash fish
|
||||
|
||||
WORKDIR /project
|
||||
|
||||
COPY ./ /project
|
||||
RUN make tools
|
||||
|
||||
CMD make
|
21
vendor/github.com/gemalto/kmip-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2018 Gemalto OSS
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
104
vendor/github.com/gemalto/kmip-go/Makefile
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
||||
SHELL = bash
|
||||
BUILD_FLAGS =
|
||||
TEST_FLAGS =
|
||||
COMPOSE ?= docker-compose
|
||||
|
||||
all: fmt build up test lint
|
||||
|
||||
build:
|
||||
go build $(BUILD_FLAGS) ./...
|
||||
|
||||
builddir:
|
||||
mkdir -p -m 0777 build
|
||||
|
||||
install:
|
||||
go install ./cmd/ppkmip
|
||||
go install ./cmd/kmipgen
|
||||
|
||||
ppkmip: builddir
|
||||
GOOS=darwin GOARCH=amd64 go build -o build/ppkmip-macos ./cmd/ppkmip
|
||||
GOOS=windows GOARCH=amd64 go build -o build/ppkmip-windows.exe ./cmd/ppkmip
|
||||
GOOS=linux GOARCH=amd64 go build -o build/ppkmip-linux ./cmd/ppkmip
|
||||
|
||||
kmipgen:
|
||||
go install ./cmd/kmipgen
|
||||
|
||||
lint:
|
||||
golangci-lint run
|
||||
|
||||
clean:
|
||||
rm -rf build/*
|
||||
|
||||
fmt:
|
||||
gofumpt -w -l .
|
||||
|
||||
# generates go code structures representing all the enums, masks, and tags defined
|
||||
# in the KMIP spec. The source specifications are stored in kmip14/kmip_1_4.json
|
||||
# and ttls/kmip20/kmip_2_0_additions.json. The generated .go files are named *_generated.go
|
||||
#
|
||||
# the kmipgen tool (defined in cmd/kmipgen) is used to generate the source. This tool can
|
||||
# be used independently to generate source for future specs or vendor extensions.
|
||||
#
|
||||
# this target only needs to be run if the json files are changed. The generated
|
||||
# go files should be committed to source control.
|
||||
generate:
|
||||
go generate ./...
|
||||
|
||||
test:
|
||||
go test $(BUILD_FLAGS) $(TEST_FLAGS) ./...
|
||||
|
||||
# creates a test coverage report, and produces json test output. useful for ci.
|
||||
cover: builddir
|
||||
go test $(TEST_FLAGS) -v -covermode=count -coverprofile=build/coverage.out -json ./...
|
||||
go tool cover -html=build/coverage.out -o build/coverage.html
|
||||
|
||||
# brings up the project's dependencies in a compose stack
|
||||
up:
|
||||
$(COMPOSE) build --pull pykmip-server
|
||||
$(COMPOSE) run --rm dependencies
|
||||
|
||||
# brings down the project's dependencies
|
||||
down:
|
||||
$(COMPOSE) down -v --remove-orphans
|
||||
|
||||
# runs the build inside a docker container. useful for ci to completely encapsulate the
|
||||
# build environment.
|
||||
docker:
|
||||
$(COMPOSE) build --pull builder
|
||||
$(COMPOSE) run --rm builder make all cover
|
||||
|
||||
# opens a shell into the build environment container. Useful for troubleshooting the
|
||||
# containerized build.
|
||||
bash:
|
||||
$(COMPOSE) build --pull builder
|
||||
$(COMPOSE) run --rm builder bash
|
||||
|
||||
# opens a shell into the build environment container. Useful for troubleshooting the
|
||||
# containerized build.
|
||||
fish:
|
||||
$(COMPOSE) build --pull builder
|
||||
$(COMPOSE) run --rm builder fish
|
||||
|
||||
tidy:
|
||||
go mod tidy
|
||||
|
||||
# use go mod to update all dependencies
|
||||
update:
|
||||
go get -u ./...
|
||||
go mod tidy
|
||||
|
||||
# install tools used by the build. typically only needs to be run once
|
||||
# to initialize a workspace.
|
||||
tools: kmipgen
|
||||
go install mvdan.cc/gofumpt@latest
|
||||
go install golang.org/x/tools/cmd/cover@latest
|
||||
sh -c "$$(wget -O - -q https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh || echo exit 2)" -- -b $(shell go env GOPATH)/bin $(GOLANGCI_LINT_VERSION)
|
||||
|
||||
pykmip-server: up
|
||||
$(COMPOSE) exec pykmip-server tail -f server.log
|
||||
|
||||
gen-certs:
|
||||
openssl req -x509 -newkey rsa:4096 -keyout pykmip-server/server.key -out pykmip-server/server.cert -days 3650 -nodes -subj '/CN=localhost'
|
||||
|
||||
.PHONY: all build builddir run artifacts vet lint clean fmt test testall testreport up down pull builder runc ci bash fish image prep vendor.update vendor.ensure tools buildtools migratetool db.migrate
|
||||
|
50
vendor/github.com/gemalto/kmip-go/README.md
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
kmip-go [](https://godoc.org/github.com/gemalto/kmip-go) [](https://goreportcard.com/report/gemalto/kmip-go) [](https://github.com/gemalto/kmip-go/actions?query=branch%3Amaster+workflow%3ABuild+)
|
||||
=======
|
||||
|
||||
kmip-go is a Go implementation of KMIP protocol primitives. It supports marshaling data in TTLV, XML, or JSON
|
||||
encodings to and from go values and structs. It can be used to implement KMIP clients or servers.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
go get github.com/gemalto/kmip-go
|
||||
|
||||
Or, to just install the `ppkmip` pretty printing tool:
|
||||
|
||||
go install github.com/gemalto/kmip-go/cmd/ppkmip
|
||||
|
||||
Packages
|
||||
--------
|
||||
|
||||
The `ttlv` package implements the core encoder and decoder logic.
|
||||
|
||||
The `kmip14` package contains constants for all the tags, types, enumerations and bitmasks defined in the KMIP 1.4
|
||||
specification. It also contains mappings from these values to the normalized names used in the JSON and XML
|
||||
encodings, and the canonical names used in Attribute structures.
|
||||
The `kmip14` definitions are all automatically registered with `ttlv.DefaultRegistry`.
|
||||
|
||||
The `kmip20` package adds additional enumeration values from the 2.0 specification. It is meant to be registered
|
||||
on top of the 1.4 definitions.
|
||||
|
||||
The root package defines golang structures for some of the significant Structure definitions in the 1.4
|
||||
specification, like Attributes, Request, Response, etc. It is incomplete, but can be used as an example
|
||||
for defining other structures. It also contains an example of a client and server.
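
As a rough sketch of how the encoder is used (the helper names such as `ttlv.Marshal` and
`ttlv.Unmarshal`, and the `kmip14` constant names below, are assumptions based on the package's
documented conventions; treat this as an illustration rather than a verbatim API reference):

    import (
        "fmt"

        "github.com/gemalto/kmip-go"
        "github.com/gemalto/kmip-go/kmip14"
        "github.com/gemalto/kmip-go/ttlv"
    )

    func roundTrip() error {
        // Marshal a struct into the binary TTLV encoding.
        b, err := ttlv.Marshal(kmip.Name{
            NameValue: "my-key",
            NameType:  kmip14.NameTypeUninterpretedTextString,
        })
        if err != nil {
            return err
        }

        // Decode the bytes back into the struct.
        var n kmip.Name
        if err := ttlv.Unmarshal(b, &n); err != nil {
            return err
        }

        fmt.Println(n.NameValue)
        return nil
    }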
|
||||
|
||||
`cmd/kmipgen` is a code generation tool which generates the tag and enum constants from a JSON specification
|
||||
input. It can also be used independently in your own code to generate additional tags and constants. `make install`
|
||||
to build and install the tool. See `kmip14/kmip_1_4.go` for an example of using the tool.
|
||||
|
||||
`cmd/ppkmip` is a tool for pretty printing KMIP values. It can accept KMIP input from stdin or files, encoded
|
||||
in TTLV, XML, or JSON, and output in a variety of formats. `make install` to install the tool, and
|
||||
`ppkmip --help` to see usage.
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
To build, be sure to have a recent Go SDK and make installed. Run `make tools` to install other dependencies.
|
||||
|
||||
There is also a dockerized build, which only requires make and docker-compose: `make docker`. You can also
|
||||
do `make fish` or `make bash` to shell into the docker build container.
|
||||
|
||||
Merge requests are welcome! Before submitting, please run `make` and make sure all tests pass and there are
|
||||
no linter findings.
|
70
vendor/github.com/gemalto/kmip-go/attributes.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"github.com/gemalto/kmip-go/kmip14"
|
||||
)
|
||||
|
||||
// 3
|
||||
|
||||
// Name 3.2 Table 57
|
||||
//
|
||||
// The Name attribute is a structure (see Table 57) used to identify and locate an object.
|
||||
// This attribute is assigned by the client, and the Name Value is intended to be in a form that
|
||||
// humans are able to interpret. The key management system MAY specify rules by which the client
|
||||
// creates valid names. Clients are informed of such rules by a mechanism that is not specified by
|
||||
// this standard. Names SHALL be unique within a given key management domain,
|
||||
// but are NOT REQUIRED to be globally unique.
|
||||
type Name struct {
|
||||
NameValue string
|
||||
NameType kmip14.NameType
|
||||
}
|
||||
|
||||
// Cryptographic Parameters 3.6 Table 65
|
||||
//
|
||||
// The Cryptographic Parameters attribute is a structure (see Table 65) that contains a set of OPTIONAL
|
||||
// fields that describe certain cryptographic parameters to be used when performing cryptographic operations
|
||||
// using the object. Specific fields MAY pertain only to certain types of Managed Cryptographic Objects. The
|
||||
// Cryptographic Parameters attribute of a Certificate object identifies the cryptographic parameters of the
|
||||
// public key contained within the Certificate.
|
||||
//
|
||||
// The Cryptographic Algorithm is also used to specify the parameters for cryptographic operations. For operations
|
||||
// involving digital signatures, either the Digital Signature Algorithm can be specified or the Cryptographic
|
||||
// Algorithm and Hashing Algorithm combination can be specified.
|
||||
//
|
||||
// Random IV can be used to request that the KMIP server generate an appropriate IV for a
|
||||
// cryptographic operation that uses an IV. The generated Random IV is returned in the response
|
||||
// to the cryptographic operation.
|
||||
//
|
||||
// IV Length is the length of the Initialization Vector in bits. This parameter SHALL be provided when the
|
||||
// specified Block Cipher Mode supports variable IV lengths such as CTR or GCM.
|
||||
//
|
||||
// Tag Length is the length of the authentication tag in bytes. This parameter SHALL be provided when the
|
||||
// Block Cipher Mode is GCM or CCM.
|
||||
//
|
||||
// The IV used with counter modes of operation (e.g., CTR and GCM) cannot repeat for a given cryptographic key.
|
||||
// To prevent an IV/key reuse, the IV is often constructed of three parts: a fixed field, an invocation field,
|
||||
// and a counter as described in [SP800-38A] and [SP800-38D]. The Fixed Field Length is the length of the fixed
|
||||
// field portion of the IV in bits. The Invocation Field Length is the length of the invocation field portion of
|
||||
// the IV in bits. The Counter Length is the length of the counter portion of the IV in bits.
|
||||
//
|
||||
// Initial Counter Value is the starting counter value for CTR mode (for [RFC3686] it is 1).
|
||||
type CryptographicParameters struct {
|
||||
BlockCipherMode kmip14.BlockCipherMode `ttlv:",omitempty"`
|
||||
PaddingMethod kmip14.PaddingMethod `ttlv:",omitempty"`
|
||||
HashingAlgorithm kmip14.HashingAlgorithm `ttlv:",omitempty"`
|
||||
KeyRoleType kmip14.KeyRoleType `ttlv:",omitempty"`
|
||||
DigitalSignatureAlgorithm kmip14.DigitalSignatureAlgorithm `ttlv:",omitempty"`
|
||||
CryptographicAlgorithm kmip14.CryptographicAlgorithm `ttlv:",omitempty"`
|
||||
RandomIV bool `ttlv:",omitempty"`
|
||||
IVLength int `ttlv:",omitempty"`
|
||||
TagLength int `ttlv:",omitempty"`
|
||||
FixedFieldLength int `ttlv:",omitempty"`
|
||||
InvocationFieldLength int `ttlv:",omitempty"`
|
||||
CounterLength int `ttlv:",omitempty"`
|
||||
InitialCounterValue int `ttlv:",omitempty"`
|
||||
SaltLength int `ttlv:",omitempty"`
|
||||
MaskGenerator kmip14.MaskGenerator `ttlv:",omitempty" default:"1"` // defaults to MGF1
|
||||
MaskGeneratorHashingAlgorithm kmip14.HashingAlgorithm `ttlv:",omitempty" default:"4"` // defaults to SHA-1
|
||||
PSource []byte `ttlv:",omitempty"`
|
||||
TrailerField int `ttlv:",omitempty"`
|
||||
}
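
// Example (illustrative only): parameters for an AES-GCM operation with a server-generated
// IV and a 16-byte authentication tag. The exact constant names are assumptions based on the
// kmip14 naming convention.
//
//	params := CryptographicParameters{
//		CryptographicAlgorithm: kmip14.CryptographicAlgorithmAES,
//		BlockCipherMode:        kmip14.BlockCipherModeGCM,
//		RandomIV:               true,
//		TagLength:              16,
//	}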
|
473
vendor/github.com/gemalto/kmip-go/base_objects.go
generated
vendored
Normal file
@ -0,0 +1,473 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/gemalto/kmip-go/kmip14"
|
||||
"github.com/gemalto/kmip-go/ttlv"
|
||||
)
|
||||
|
||||
// 2.1 Base Objects
|
||||
//
|
||||
// These objects are used within the messages of the protocol, but are not objects managed by the key
|
||||
// management system. They are components of Managed Objects.
|
||||
|
||||
// Attribute 2.1.1 Table 2
|
||||
//
|
||||
// An Attribute object is a structure (see Table 2) used for sending and receiving Managed Object attributes.
|
||||
// The Attribute Name is a text-string that is used to identify the attribute. The Attribute Index is an index
|
||||
// number assigned by the key management server. The Attribute Index is used to identify the particular instance.
|
||||
// Attribute Indices SHALL start with 0. The Attribute Index of an attribute SHALL NOT change when other instances
|
||||
// are added or deleted. Single-instance Attributes (attributes which an object MAY only have at most one instance
|
||||
// thereof) SHALL have an Attribute Index of 0. The Attribute Value is either a primitive data type or structured
|
||||
// object, depending on the attribute.
|
||||
//
|
||||
// When an Attribute structure is used to specify or return a particular instance of an Attribute and the Attribute
|
||||
// Index is not specified it SHALL be assumed to be 0.
|
||||
type Attribute struct {
|
||||
// AttributeName should contain the canonical name of a tag, e.g. "Cryptographic Algorithm"
|
||||
AttributeName string
|
||||
// AttributeIndex is typically 0 when clients use this struct to create objects or add attributes. Clients
|
||||
// only need to set this if modifying or deleting an existing attribute.
|
||||
AttributeIndex int `ttlv:",omitempty"`
|
||||
AttributeValue interface{}
|
||||
}
|
||||
|
||||
func NewAttributeFromTag(tag ttlv.Tag, idx int, val interface{}) Attribute {
|
||||
return Attribute{
|
||||
AttributeName: tag.CanonicalName(),
|
||||
AttributeIndex: idx,
|
||||
AttributeValue: val,
|
||||
}
|
||||
}
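
// Example (illustrative): constructing an Attribute by tag. The kmip14 constant names are
// assumptions based on the package's naming convention.
//
//	attr := NewAttributeFromTag(kmip14.TagCryptographicAlgorithm, 0, kmip14.CryptographicAlgorithmAES)
//	// attr.AttributeName should be the canonical name "Cryptographic Algorithm".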
|
||||
|
||||
// Credential 2.1.2 Table 3
|
||||
//
|
||||
// A Credential is a structure (see Table 3) used for client identification purposes and is not managed by the
|
||||
// key management system (e.g., user id/password pairs, Kerberos tokens, etc.). It MAY be used for authentication
|
||||
// purposes as indicated in [KMIP-Prof].
|
||||
//
|
||||
// TODO: add an unmarshal impl to Credential to handle decoding the right kind
|
||||
// of credential based on the credential type value
|
||||
type Credential struct {
|
||||
CredentialType kmip14.CredentialType
|
||||
CredentialValue interface{}
|
||||
}
|
||||
|
||||
// UsernameAndPasswordCredentialValue 2.1.2 Table 4
|
||||
//
|
||||
// If the Credential Type in the Credential is Username and Password, then Credential Value is a
|
||||
// structure as shown in Table 4. The Username field identifies the client, and the Password field
|
||||
// is a secret that authenticates the client.
|
||||
type UsernameAndPasswordCredentialValue struct {
|
||||
Username string
|
||||
Password string `ttlv:",omitempty"`
|
||||
}
|
||||
|
||||
// DeviceCredentialValue 2.1.2 Table 5
|
||||
//
|
||||
// If the Credential Type in the Credential is Device, then Credential Value is a structure as shown in
|
||||
// Table 5. One or a combination of the Device Serial Number, Network Identifier, Machine Identifier,
|
||||
// and Media Identifier SHALL be unique. Server implementations MAY enforce policies on uniqueness for
|
||||
// individual fields. A shared secret or password MAY also be used to authenticate the client.
|
||||
// The client SHALL provide at least one field.
|
||||
type DeviceCredentialValue struct {
|
||||
DeviceSerialNumber string `ttlv:",omitempty"`
|
||||
Password string `ttlv:",omitempty"`
|
||||
DeviceIdentifier string `ttlv:",omitempty"`
|
||||
NetworkIdentifier string `ttlv:",omitempty"`
|
||||
MachineIdentifier string `ttlv:",omitempty"`
|
||||
MediaIdentifier string `ttlv:",omitempty"`
|
||||
}
|
||||
|
||||
// AttestationCredentialValue 2.1.2 Table 6
|
||||
//
|
||||
// If the Credential Type in the Credential is Attestation, then Credential Value is a structure
|
||||
// as shown in Table 6. The Nonce Value is obtained from the key management server in a Nonce Object.
|
||||
// The Attestation Credential Object can contain a measurement from the client or an assertion from a
|
||||
// third party if the server is not capable or willing to verify the attestation data from the client.
|
||||
// Neither type of attestation data (Attestation Measurement or Attestation Assertion) is necessary to
|
||||
// allow the server to accept either. However, the client SHALL provide attestation data in either the
|
||||
// Attestation Measurement or Attestation Assertion fields.
|
||||
type AttestationCredentialValue struct {
|
||||
Nonce Nonce
|
||||
AttestationType kmip14.AttestationType
|
||||
AttestationMeasurement []byte `ttlv:",omitempty"`
|
||||
AttestationAssertion []byte `ttlv:",omitempty"`
|
||||
}
|
||||
|
||||
// KeyBlock 2.1.3 Table 7
|
||||
//
|
||||
// A Key Block object is a structure (see Table 7) used to encapsulate all of the information that is
|
||||
// closely associated with a cryptographic key. It contains a Key Value of one of the following Key Format Types:
|
||||
//
|
||||
// · Raw – This is a key that contains only cryptographic key material, encoded as a string of bytes.
|
||||
// · Opaque – This is an encoded key for which the encoding is unknown to the key management system.
|
||||
// It is encoded as a string of bytes.
|
||||
// · PKCS1 – This is an encoded private key, expressed as a DER-encoded ASN.1 PKCS#1 object.
|
||||
// · PKCS8 – This is an encoded private key, expressed as a DER-encoded ASN.1 PKCS#8 object, supporting both
|
||||
// the RSAPrivateKey syntax and EncryptedPrivateKey.
|
||||
// · X.509 – This is an encoded object, expressed as a DER-encoded ASN.1 X.509 object.
|
||||
// · ECPrivateKey – This is an ASN.1 encoded elliptic curve private key.
|
||||
// · Several Transparent Key types – These are algorithm-specific structures containing defined values
|
||||
// for the various key types, as defined in Section 2.1.7.
|
||||
// · Extensions – These are vendor-specific extensions to allow for proprietary or legacy key formats.
|
||||
//
|
||||
// The Key Block MAY contain the Key Compression Type, which indicates the format of the elliptic curve public
|
||||
// key. By default, the public key is uncompressed.
|
||||
//
|
||||
// The Key Block also has the Cryptographic Algorithm and the Cryptographic Length of the key contained
|
||||
// in the Key Value field. Some example values are:
|
||||
//
|
||||
// · RSA keys are typically 1024, 2048 or 3072 bits in length.
|
||||
// · 3DES keys are typically from 112 to 192 bits (depending upon key length and the presence of parity bits).
|
||||
// · AES keys are 128, 192 or 256 bits in length.
|
||||
//
|
||||
// The Key Block SHALL contain a Key Wrapping Data structure if the key in the Key Value field is
|
||||
// wrapped (i.e., encrypted, or MACed/signed, or both).
|
||||
|
||||
type KeyBlock struct {
|
||||
KeyFormatType kmip14.KeyFormatType
|
||||
KeyCompressionType kmip14.KeyCompressionType `ttlv:",omitempty"`
|
||||
KeyValue *KeyValue `ttlv:",omitempty"`
|
||||
CryptographicAlgorithm kmip14.CryptographicAlgorithm `ttlv:",omitempty"`
|
||||
CryptographicLength int `ttlv:",omitempty"`
|
||||
KeyWrappingData *KeyWrappingData
|
||||
}
|
||||
|
||||
// KeyValue 2.1.4 Table 8
|
||||
//
|
||||
// The Key Value is used only inside a Key Block and is either a Byte String or a structure (see Table 8):
|
||||
//
|
||||
// · The Key Value structure contains the key material, either as a byte string or as a Transparent Key
|
||||
// structure (see Section 2.1.7), and OPTIONAL attribute information that is associated and encapsulated
|
||||
// with the key material. This attribute information differs from the attributes associated with Managed
|
||||
// Objects, and is obtained via the Get Attributes operation, only by the fact that it is encapsulated with
|
||||
// (and possibly wrapped with) the key material itself.
|
||||
// · The Key Value Byte String is either the wrapped TTLV-encoded (see Section 9.1) Key Value structure, or
|
||||
// the wrapped un-encoded value of the Byte String Key Material field.
|
||||
//
|
||||
// TODO: Unmarshaler impl which unmarshals correct KeyMaterial type.
|
||||
type KeyValue struct {
|
||||
// KeyMaterial should be []byte, one of the Transparent*Key structs, or a custom struct if KeyFormatType is
|
||||
// an extension.
|
||||
KeyMaterial interface{}
|
||||
Attribute []Attribute
|
||||
}
|
||||
|
||||
// KeyWrappingData 2.1.5 Table 9
|
||||
//
|
||||
// The Key Block MAY also supply OPTIONAL information about a cryptographic key wrapping mechanism used
|
||||
// to wrap the Key Value. This consists of a Key Wrapping Data structure (see Table 9). It is only used
|
||||
// inside a Key Block.
|
||||
//
|
||||
// This structure contains fields for:
|
||||
//
|
||||
// · A Wrapping Method, which indicates the method used to wrap the Key Value.
|
||||
// · Encryption Key Information, which contains the Unique Identifier (see 3.1) value of the encryption key
|
||||
// and associated cryptographic parameters.
|
||||
// · MAC/Signature Key Information, which contains the Unique Identifier value of the MAC/signature key
|
||||
// and associated cryptographic parameters.
|
||||
// · A MAC/Signature, which contains a MAC or signature of the Key Value.
|
||||
// · An IV/Counter/Nonce, if REQUIRED by the wrapping method.
|
||||
// · An Encoding Option, specifying the encoding of the Key Material within the Key Value structure of the
|
||||
// Key Block that has been wrapped. If No Encoding is specified, then the Key Value structure SHALL NOT contain
|
||||
// any attributes.
|
||||
//
|
||||
// If wrapping is used, then the whole Key Value structure is wrapped unless otherwise specified by the
|
||||
// Wrapping Method. The algorithms used for wrapping are given by the Cryptographic Algorithm attributes of
|
||||
// the encryption key and/or MAC/signature key; the block-cipher mode, padding method, and hashing algorithm used
|
||||
// for wrapping are given by the Cryptographic Parameters in the Encryption Key Information and/or MAC/Signature
|
||||
// Key Information, or, if not present, from the Cryptographic Parameters attribute of the respective key(s).
|
||||
// Either the Encryption Key Information or the MAC/Signature Key Information (or both) in the Key Wrapping Data
|
||||
// structure SHALL be specified.
|
||||
//
|
||||
// The following wrapping methods are currently defined:
|
||||
//
|
||||
// · Encrypt only (i.e., encryption using a symmetric key or public key, or authenticated encryption algorithms that use a single key).
|
||||
// · MAC/sign only (i.e., either MACing the Key Value with a symmetric key, or signing the Key Value with a private key).
|
||||
// · Encrypt then MAC/sign.
|
||||
// · MAC/sign then encrypt.
|
||||
// · TR-31.
|
||||
// · Extensions.
|
||||
//
|
||||
// The following encoding options are currently defined:
|
||||
//
|
||||
// · No Encoding (i.e., the wrapped un-encoded value of the Byte String Key Material field in the Key Value structure).
|
||||
// · TTLV Encoding (i.e., the wrapped TTLV-encoded Key Value structure).
|
||||
type KeyWrappingData struct {
|
||||
WrappingMethod kmip14.WrappingMethod
|
||||
EncryptionKeyInformation *EncryptionKeyInformation
|
||||
MACSignatureKeyInformation *MACSignatureKeyInformation
|
||||
MACSignature []byte
|
||||
IVCounterNonce []byte
|
||||
EncodingOption kmip14.EncodingOption `ttlv:",omitempty" default:"TTLVEncoding"`
|
||||
}
|
||||
|
||||
// EncryptionKeyInformation 2.1.5 Table 10
|
||||
type EncryptionKeyInformation struct {
|
||||
UniqueIdentifier string
|
||||
CryptographicParameters *CryptographicParameters
|
||||
}
|
||||
|
||||
// MACSignatureKeyInformation 2.1.5 Table 11
|
||||
type MACSignatureKeyInformation struct {
|
||||
UniqueIdentifier string
|
||||
CryptographicParameters *CryptographicParameters
|
||||
}
|
||||
|
||||
// TransparentSymmetricKey 2.1.7.1 Table 14
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent Symmetric Key, then Key Material is a
|
||||
// structure as shown in Table 14.
|
||||
type TransparentSymmetricKey struct {
|
||||
Key []byte `validate:"required"`
|
||||
}
|
||||
|
||||
// TransparentDSAPrivateKey 2.1.7.2 Table 15
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent DSA Private Key, then Key Material is a structure as
|
||||
// shown in Table 15.
|
||||
type TransparentDSAPrivateKey struct {
|
||||
// TODO: should these be pointers? big package deals entirely with pointers, but these are not optional values.
|
||||
P *big.Int `validate:"required"`
|
||||
Q *big.Int `validate:"required"`
|
||||
G *big.Int `validate:"required"`
|
||||
X *big.Int `validate:"required"`
|
||||
}
|
||||
|
||||
// TransparentDSAPublicKey 2.1.7.3 Table 16
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent DSA Public Key, then Key Material is a structure as
|
||||
// shown in Table 16.
|
||||
type TransparentDSAPublicKey struct {
|
||||
P *big.Int `validate:"required"`
|
||||
Q *big.Int `validate:"required"`
|
||||
G *big.Int `validate:"required"`
|
||||
Y *big.Int `validate:"required"`
|
||||
}
|
||||
|
||||
// TransparentRSAPrivateKey 2.1.7.4 Table 17
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent RSA Private Key, then Key Material is a structure
|
||||
// as shown in Table 17.
|
||||
//
|
||||
// One of the following SHALL be present (refer to [PKCS#1]):
|
||||
//
|
||||
// · Private Exponent,
|
||||
// · P and Q (the first two prime factors of Modulus), or
|
||||
// · Prime Exponent P and Prime Exponent Q.
|
||||
type TransparentRSAPrivateKey struct {
|
||||
Modulus *big.Int `validate:"required"`
|
||||
PrivateExponent, PublicExponent *big.Int
|
||||
P, Q *big.Int
|
||||
PrimeExponentP, PrimeExponentQ *big.Int
|
||||
CRTCoefficient *big.Int
|
||||
}
|
||||
|
||||
// TransparentRSAPublicKey 2.1.7.5 Table 18
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent RSA Public Key, then Key Material is a structure
|
||||
// as shown in Table 18.
|
||||
type TransparentRSAPublicKey struct {
|
||||
Modulus *big.Int `validate:"required"`
|
||||
PublicExponent *big.Int `validate:"required"`
|
||||
}
|
||||
|
||||
// TransparentDHPrivateKey 2.1.7.6 Table 19
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent DH Private Key, then Key Material is a structure as shown
|
||||
// in Table 19.
|
||||
type TransparentDHPrivateKey struct {
|
||||
P *big.Int `validate:"required"`
|
||||
Q *big.Int
|
||||
G *big.Int `validate:"required"`
|
||||
J *big.Int
|
||||
X *big.Int `validate:"required"`
|
||||
}
|
||||
|
||||
// TransparentDHPublicKey 2.1.7.7 Table 20
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent DH Public Key, then Key Material is a structure as
|
||||
// shown in Table 20.
|
||||
//
|
||||
// P, G, and Y are required.
|
||||
type TransparentDHPublicKey struct {
|
||||
P *big.Int `validate:"required"`
|
||||
Q *big.Int
|
||||
G *big.Int `validate:"required"`
|
||||
J *big.Int
|
||||
Y *big.Int `validate:"required"`
|
||||
}
|
||||
|
||||
// TransparentECDSAPrivateKey 2.1.7.8 Table 21
|
||||
//
|
||||
// The Transparent ECDSA Private Key structure is deprecated as of version 1.3 of this
|
||||
// specification and MAY be removed from subsequent versions of the specification. The
|
||||
// Transparent EC Private Key structure SHOULD be used as a replacement.
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent ECDSA Private Key, then Key Material is a
|
||||
// structure as shown in Table 21.
|
||||
type TransparentECDSAPrivateKey struct {
|
||||
RecommendedCurve kmip14.RecommendedCurve
|
||||
D *big.Int `validate:"required"`
|
||||
}
|
||||
|
||||
// TransparentECDSAPublicKey 2.1.7.9 Table 22
|
||||
//
|
||||
// The Transparent ECDSA Public Key structure is deprecated as of version 1.3 of this specification and
|
||||
// MAY be removed from subsequent versions of the specification. The Transparent EC Public Key structure
|
||||
// SHOULD be used as a replacement.
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent ECDSA Public Key, then Key Material is a
|
||||
// structure as shown in Table 22.
|
||||
type TransparentECDSAPublicKey struct {
|
||||
RecommendedCurve kmip14.RecommendedCurve
|
||||
QString []byte `validate:"required"`
|
||||
}
|
||||
|
||||
// TransparentECDHPrivateKey 2.1.7.10 Table 23
|
||||
//
|
||||
// The Transparent ECDH Private Key structure is deprecated as of version 1.3 of this specification and
|
||||
// MAY be removed from subsequent versions of the specification. The Transparent EC Private Key structure
|
||||
// SHOULD be used as a replacement.
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent ECDH Private Key, then Key Material is a structure
|
||||
// as shown in Table 23.
|
||||
type TransparentECDHPrivateKey TransparentECPrivateKey
|
||||
|
||||
// TransparentECDHPublicKey 2.1.7.11 Table 24
|
||||
//
|
||||
// The Transparent ECDH Public Key structure is deprecated as of version 1.3 of this specification and MAY
|
||||
// be removed from subsequent versions of the specification. The Transparent EC Public Key structure SHOULD
|
||||
// be used as a replacement.
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent ECDH Public Key, then Key Material is a structure as
|
||||
// shown in Table 24.
|
||||
type TransparentECDHPublicKey TransparentECPublicKey
|
||||
|
||||
// TransparentECMQVPrivateKey 2.1.7.12 Table 25
|
||||
//
|
||||
// The Transparent ECMQV Private Key structure is deprecated as of version 1.3 of this specification and MAY
|
||||
// be removed from subsequent versions of the specification. The Transparent EC Private Key structure SHOULD
|
||||
// be used as a replacement.
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent ECMQV Private Key, then Key Material is a structure
|
||||
// as shown in Table 25.
|
||||
type TransparentECMQVPrivateKey TransparentECPrivateKey
|
||||
|
||||
// TransparentECMQVPublicKey 2.1.7.13 Table 26
|
||||
//
|
||||
// The Transparent ECMQV Public Key structure is deprecated as of version 1.3 of this specification and MAY be
|
||||
// removed from subsequent versions of the specification. The Transparent EC Public Key structure SHOULD be used as
|
||||
// a replacement.
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent ECMQV Public Key, then Key Material is a structure as shown
|
||||
// in Table 26.
|
||||
type TransparentECMQVPublicKey TransparentECPublicKey
|
||||
|
||||
// TransparentECPrivateKey 2.1.7.14 Table 27
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent EC Private Key, then Key Material is a structure as shown
|
||||
// in Table 27.
|
||||
type TransparentECPrivateKey struct {
|
||||
RecommendedCurve kmip14.RecommendedCurve
|
||||
D *big.Int `validate:"required"`
|
||||
}
|
||||
|
||||
// TransparentECPublicKey 2.1.7.15 Table 28
|
||||
//
|
||||
// If the Key Format Type in the Key Block is Transparent EC Public Key, then Key Material is a structure as
|
||||
// shown in Table 28.
|
||||
type TransparentECPublicKey struct {
|
||||
RecommendedCurve kmip14.RecommendedCurve
|
||||
QString []byte `validate:"required"`
|
||||
}
|
||||
|
||||
// TemplateAttribute 2.1.8 Table 29
|
||||
//
|
||||
// The Template Managed Object is deprecated as of version 1.3 of this specification and MAY be removed from
|
||||
// subsequent versions of the specification. Individual Attributes SHOULD be used in operations which currently
|
||||
// support use of a Name within a Template-Attribute to reference a Template.
|
||||
//
|
||||
// These structures are used in various operations to provide the desired attribute values and/or template
|
||||
// names in the request and to return the actual attribute values in the response.
|
||||
//
|
||||
// The Template-Attribute, Common Template-Attribute, Private Key Template-Attribute, and Public Key
|
||||
// Template-Attribute structures are defined identically as follows:
|
||||
// type TemplateAttribute struct {
|
||||
// Attribute []Attribute
|
||||
// }
|
||||
|
||||
type TemplateAttribute struct {
|
||||
Name []Name
|
||||
Attribute []Attribute
|
||||
}
|
||||
|
||||
// Get returns a reference to the first Attribute in the list matching the name.
|
||||
// Returns nil if not found.
|
||||
func (t *TemplateAttribute) Get(s string) *Attribute {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := range t.Attribute {
|
||||
if t.Attribute[i].AttributeName == s {
|
||||
return &t.Attribute[i]
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetIdx returns a reference to the Attribute in the list matching the name and index.
|
||||
// Returns nil if not found.
|
||||
func (t *TemplateAttribute) GetIdx(s string, idx int) *Attribute {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := range t.Attribute {
|
||||
if t.Attribute[i].AttributeName == s && t.Attribute[i].AttributeIndex == idx {
|
||||
return &t.Attribute[i]
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTag returns a reference to the first Attribute in the list matching the tag.
|
||||
// Returns nil if not found.
|
||||
func (t *TemplateAttribute) GetTag(tag ttlv.Tag) *Attribute {
|
||||
return t.Get(tag.String())
|
||||
}
|
||||
|
||||
// GetTagIdx returns a reference to the first Attribute in the list matching the tag and index.
|
||||
// Returns nil if not found.
|
||||
func (t *TemplateAttribute) GetTagIdx(tag ttlv.Tag, idx int) *Attribute {
|
||||
return t.GetIdx(tag.String(), idx)
|
||||
}
|
||||
|
||||
func (t *TemplateAttribute) GetAll(s string) []Attribute {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var ret []Attribute
|
||||
|
||||
for i := range t.Attribute {
|
||||
if t.Attribute[i].AttributeName == s {
|
||||
ret = append(ret, t.Attribute[i])
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (t *TemplateAttribute) Append(tag ttlv.Tag, value interface{}) {
|
||||
t.Attribute = append(t.Attribute, NewAttributeFromTag(tag, 0, value))
|
||||
}
|
||||
|
||||
func (t *TemplateAttribute) GetAllTag(tag ttlv.Tag) []Attribute {
|
||||
return t.GetAll(tag.String())
|
||||
}
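
// Example (illustrative): adding and looking up attributes on a TemplateAttribute. The
// concrete type of AttributeValue depends on how the message was decoded, so the type
// assertion below is only a sketch.
//
//	var ta TemplateAttribute
//	ta.Append(kmip14.TagCryptographicLength, 256)
//
//	if attr := ta.GetTag(kmip14.TagCryptographicLength); attr != nil {
//		if length, ok := attr.AttributeValue.(int); ok {
//			_ = length
//		}
//	}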
|
22
vendor/github.com/gemalto/kmip-go/docker-compose.yml
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
version: '3'
|
||||
services:
|
||||
builder:
|
||||
build:
|
||||
context: .
|
||||
environment:
|
||||
CGO_ENABLED: 0
|
||||
volumes:
|
||||
- ./build:/project/build
|
||||
|
||||
dependencies:
|
||||
image: waisbrot/wait
|
||||
environment:
|
||||
TARGETS: pykmip-server:5696
|
||||
depends_on:
|
||||
- pykmip-server
|
||||
|
||||
pykmip-server:
|
||||
build:
|
||||
context: pykmip-server
|
||||
ports:
|
||||
- 5696:5696
|
13
vendor/github.com/gemalto/kmip-go/docs.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// Package kmip is a general purpose KMIP library for implementing KMIP services and clients.
|
||||
//
|
||||
// The ttlv sub package contains the core logic for parsing the KMIP TTLV encoding formats,
|
||||
// and marshaling them to and from golang structs.
|
||||
//
|
||||
// This package defines structs for many of the structures defined in the KMIP Spec, such as
|
||||
// the different types of managed objects, request and response bodies, etc. Not all Structures
|
||||
// are represented here yet, but the ones that are can be used as examples.
|
||||
//
|
||||
// There is also a partial implementation of a server, and an example of a client. There is
|
||||
// currently no Client type for KMIP, but it is simple to open a socket over which you send
|
||||
// and receive raw KMIP requests and responses.
|
||||
package kmip
|
42
vendor/github.com/gemalto/kmip-go/errors.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ansel1/merry"
|
||||
"github.com/gemalto/kmip-go/kmip14"
|
||||
)
|
||||
|
||||
func Details(err error) string {
|
||||
return merry.Details(err)
|
||||
}
|
||||
|
||||
var ErrInvalidTag = errors.New("invalid tag")
|
||||
|
||||
type errKey int
|
||||
|
||||
const (
|
||||
errorKeyResultReason errKey = iota
|
||||
)
|
||||
|
||||
//nolint:gochecknoinits
|
||||
func init() {
|
||||
merry.RegisterDetail("Result Reason", errorKeyResultReason)
|
||||
}
|
||||
|
||||
func WithResultReason(err error, rr kmip14.ResultReason) error {
|
||||
return merry.WithValue(err, errorKeyResultReason, rr)
|
||||
}
|
||||
|
||||
func GetResultReason(err error) kmip14.ResultReason {
|
||||
v := merry.Value(err, errorKeyResultReason)
|
||||
switch t := v.(type) {
|
||||
case nil:
|
||||
return kmip14.ResultReason(0)
|
||||
case kmip14.ResultReason:
|
||||
return t
|
||||
default:
|
||||
panic(fmt.Sprintf("err result reason attribute's value was wrong type, expected ResultReason, got %T", v))
|
||||
}
|
||||
}
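
// Example (illustrative): a handler can attach a result reason to an error it returns, and
// the server side can recover it later when building the response batch item. The constant
// name below is an assumption based on the kmip14 naming convention.
//
//	err := WithResultReason(merry.New("object not found"), kmip14.ResultReasonItemNotFound)
//	reason := GetResultReason(err) // kmip14.ResultReasonItemNotFound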
|
66
vendor/github.com/gemalto/kmip-go/internal/kmiputil/hex_values.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
package kmiputil
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
|
||||
"github.com/ansel1/merry"
|
||||
)
|
||||
|
||||
var ErrInvalidHexString = merry.New("invalid hex string")
|
||||
|
||||
func DecodeUint32(b []byte) uint32 {
|
||||
// pad to 4 bytes with leading zeros
|
||||
return binary.BigEndian.Uint32(pad(b, 4))
|
||||
}
|
||||
|
||||
func DecodeUint64(b []byte) uint64 {
|
||||
// pad to 8 bytes with leading zeros
|
||||
return binary.BigEndian.Uint64(pad(b, 8))
|
||||
}
|
||||
|
||||
func pad(b []byte, l int) []byte {
|
||||
if len(b) < l {
|
||||
b2 := make([]byte, l)
|
||||
copy(b2[l-len(b):], b)
|
||||
b = b2
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// ParseHexValue attempts to parse a string formatted as a hex value
|
||||
// as described in the KMIP Profiles spec, in the "Hex representations" section.
|
||||
//
|
||||
// If the string doesn't start with the required prefix "0x", it is assumed the string
|
||||
// is not a hex representation, and nil, nil is returned.
|
||||
//
|
||||
// An ErrInvalidHexString is returned if the hex parsing fails.
|
||||
// If the max argument is >0, ErrInvalidHexString is returned if the number of bytes parsed
|
||||
// is greater than max, ignoring leading zeros. All bytes parsed are returned (including
|
||||
// leading zeros).
|
||||
func ParseHexValue(s string, max int) ([]byte, error) {
|
||||
if !strings.HasPrefix(s, "0x") {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b, err := hex.DecodeString(s[2:])
|
||||
if err != nil {
|
||||
return nil, merry.WithCause(ErrInvalidHexString, err).Append(err.Error())
|
||||
}
|
||||
|
||||
if max > 0 {
|
||||
l := len(b)
|
||||
// minus leading zeros
|
||||
for i := 0; i < len(b) && b[i] == 0; i++ {
|
||||
l--
|
||||
}
|
||||
|
||||
if l > max {
|
||||
return nil, merry.Appendf(ErrInvalidHexString, "must be %v bytes", max)
|
||||
}
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
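
// Illustrative behavior of ParseHexValue (results shown as assumptions for clarity):
//
//	b, err := ParseHexValue("0x0000000C", 4)  // b == []byte{0x00, 0x00, 0x00, 0x0C}, err == nil
//	_, err = ParseHexValue("0xDEADBEEF00", 4) // err wraps ErrInvalidHexString: 5 significant bytes > 4
//	b, err = ParseHexValue("not hex", 4)      // b == nil, err == nil: not a hex representation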
|
62
vendor/github.com/gemalto/kmip-go/internal/kmiputil/names.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
package kmiputil
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
var (
|
||||
nonWordAtWordBoundary = regexp.MustCompile(`(\W)([a-zA-Z][a-z])`)
|
||||
startingDigits = regexp.MustCompile(`^([\d]+)(.*)`)
|
||||
)
|
||||
|
||||
// NormalizeName converts a string into the CamelCase format required for the XML and JSON encoding
|
||||
// of KMIP values. It should be used for tag names, type names, and enumeration value names.
|
||||
// Implementation of 5.4.1.1 and 5.5.1.1 from the KMIP Profiles specification.
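//
// Illustrative examples (assumed results based on the normalization rules implemented below):
//
//	NormalizeName("Cryptographic Usage Mask") // "CryptographicUsageMask"
//	NormalizeName("3DES")                     // "DES3" (leading digits move to the end of the first word)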
|
||||
func NormalizeName(s string) string {
|
||||
// 1. Replace round brackets ‘(‘, ‘)’ with spaces
|
||||
s = strings.Map(func(r rune) rune {
|
||||
switch r {
|
||||
case '(', ')':
|
||||
return ' '
|
||||
}
|
||||
|
||||
return r
|
||||
}, s)
|
||||
|
||||
// 2. If a non-word char (not alpha, digit, or underscore) is followed by a letter (upper or lower case) and then a lower case letter, replace the non-word char with a space
|
||||
s = nonWordAtWordBoundary.ReplaceAllString(s, " $2")
|
||||
|
||||
// 3. Replace remaining non-word chars (except whitespace) with underscore.
|
||||
s = strings.Map(func(r rune) rune {
|
||||
switch {
|
||||
case r >= 'a' && r <= 'z':
|
||||
case r >= 'A' && r <= 'Z':
|
||||
case r >= '0' && r <= '9':
|
||||
case r == '_':
|
||||
case r == ' ':
|
||||
default:
|
||||
return '_'
|
||||
}
|
||||
|
||||
return r
|
||||
}, s)
|
||||
|
||||
words := strings.Split(s, " ")
|
||||
|
||||
for i, w := range words {
|
||||
if i == 0 {
|
||||
// 4. If the first word begins with a digit, move all digits at start of first word to end of first word
|
||||
w = startingDigits.ReplaceAllString(w, `$2$1`)
|
||||
}
|
||||
|
||||
// 5. Capitalize the first letter of each word
|
||||
words[i] = cases.Title(language.AmericanEnglish, cases.NoLower).String(w)
|
||||
}
|
||||
|
||||
// 6. Concatenate all words with spaces removed
|
||||
return strings.Join(words, "")
|
||||
}
|
23
vendor/github.com/gemalto/kmip-go/kmip14/kmip_1_4.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
//go:generate go run ../cmd/kmipgen/main.go -o kmip_1_4_generated.go -i kmip_1_4.json -p kmip14
|
||||
|
||||
// Package kmip14 contains tag and enumeration value definitions from the 1.4 specification.
|
||||
// These definitions will be registered automatically into the DefaultRegistry.
|
||||
//
|
||||
// Each tag is stored in a package constant, named Tag<normalized KMIP name>.
|
||||
// Bitmask and Enumeration values are each represented by a type, named
|
||||
// after the normalized name of the values set from the spec, e.g.
|
||||
package kmip14
|
||||
|
||||
import (
|
||||
"github.com/gemalto/kmip-go/ttlv"
|
||||
)
|
||||
|
||||
// nolint:gochecknoinits
|
||||
func init() {
|
||||
Register(&ttlv.DefaultRegistry)
|
||||
}
|
||||
|
||||
// Register registers the 1.4 enumeration values with the registry.
|
||||
func Register(registry *ttlv.Registry) {
|
||||
RegisterGeneratedDefinitions(registry)
|
||||
}
|
1389
vendor/github.com/gemalto/kmip-go/kmip14/kmip_1_4.json
generated
vendored
Normal file
File diff suppressed because it is too large
3660
vendor/github.com/gemalto/kmip-go/kmip14/kmip_1_4_generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
72
vendor/github.com/gemalto/kmip-go/managed_objects.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/gemalto/kmip-go/kmip14"
|
||||
)
|
||||
|
||||
// 2.2
|
||||
|
||||
// 2.2.1
|
||||
|
||||
type Certificate struct {
|
||||
CertificateType kmip14.CertificateType
|
||||
CertificateValue []byte
|
||||
}
|
||||
|
||||
// 2.2.2
|
||||
|
||||
type SymmetricKey struct {
|
||||
KeyBlock KeyBlock
|
||||
}
|
||||
|
||||
// 2.2.3
|
||||
|
||||
type PublicKey struct {
|
||||
KeyBlock KeyBlock
|
||||
}
|
||||
|
||||
// 2.2.4
|
||||
|
||||
type PrivateKey struct {
|
||||
KeyBlock KeyBlock
|
||||
}
|
||||
|
||||
// 2.2.5
|
||||
|
||||
type SplitKey struct {
|
||||
SplitKeyParts int
|
||||
KeyPartIdentifier int
|
||||
SplitKeyThreshold int
|
||||
SplitKeyMethod kmip14.SplitKeyMethod
|
||||
PrimeFieldSize *big.Int `ttlv:",omitempty"`
|
||||
KeyBlock KeyBlock
|
||||
}
|
||||
|
||||
// 2.2.6
|
||||
|
||||
type Template struct {
|
||||
Attribute []Attribute
|
||||
}
|
||||
|
||||
// 2.2.7
|
||||
|
||||
type SecretData struct {
|
||||
SecretDataType kmip14.SecretDataType
|
||||
KeyBlock KeyBlock
|
||||
}
|
||||
|
||||
// 2.2.8
|
||||
|
||||
type OpaqueObject struct {
|
||||
OpaqueDataType kmip14.OpaqueDataType
|
||||
OpaqueDataValue []byte
|
||||
}
|
||||
|
||||
// 2.2.9
|
||||
|
||||
type PGPKey struct {
|
||||
PGPKeyVersion int
|
||||
KeyBlock KeyBlock
|
||||
}
|
74
vendor/github.com/gemalto/kmip-go/op_create.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ansel1/merry"
|
||||
|
||||
"github.com/gemalto/kmip-go/kmip14"
|
||||
)
|
||||
|
||||
// TODO: should request and response payloads implement validation?
|
||||
// Sort of makes sense to run validation over the request at this level, at least for spec
|
||||
// compliance, though perhaps handlers may want to be more relaxed with validation.
|
||||
//
|
||||
// Should the response object run through validation? What is a valid response may change as
|
||||
// the spec changes. Maybe this should just be handled by spec compliance tests.
|
||||
|
||||
// 4.1
|
||||
//
|
||||
// This operation requests the server to generate a new symmetric key as a Managed Cryptographic Object.
|
||||
// This operation is not used to create a Template object (see Register operation, Section 4.3).
|
||||
//
|
||||
// The request contains information about the type of object being created, and some of the attributes to be
|
||||
// assigned to the object (e.g., Cryptographic Algorithm, Cryptographic Length, etc.). This information MAY be
|
||||
// specified by the names of Template objects that already exist.
|
||||
//
|
||||
// The response contains the Unique Identifier of the created object. The server SHALL copy the Unique Identifier
|
||||
// returned by this operation into the ID Placeholder variable.
|
||||
|
||||
// CreateRequestPayload 4.1 Table 163
|
||||
//
|
||||
// TemplateAttribute MUST include CryptographicAlgorithm (3.4) and CryptographicUsageMask (3.19).
|
||||
type CreateRequestPayload struct {
|
||||
ObjectType kmip14.ObjectType
|
||||
TemplateAttribute TemplateAttribute
|
||||
}
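
// Example (illustrative): a request payload asking the server to create a 256-bit AES key.
// The kmip14 tag and constant names are assumptions based on the package's naming convention.
//
//	var ta TemplateAttribute
//	ta.Append(kmip14.TagCryptographicAlgorithm, kmip14.CryptographicAlgorithmAES)
//	ta.Append(kmip14.TagCryptographicLength, 256)
//	ta.Append(kmip14.TagCryptographicUsageMask, kmip14.CryptographicUsageMaskEncrypt|kmip14.CryptographicUsageMaskDecrypt)
//
//	payload := CreateRequestPayload{
//		ObjectType:        kmip14.ObjectTypeSymmetricKey,
//		TemplateAttribute: ta,
//	}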
|
||||
|
||||
// CreateResponsePayload 4.1 Table 164
|
||||
type CreateResponsePayload struct {
|
||||
ObjectType kmip14.ObjectType
|
||||
UniqueIdentifier string
|
||||
TemplateAttribute *TemplateAttribute
|
||||
}
|
||||
|
||||
type CreateHandler struct {
|
||||
Create func(ctx context.Context, payload *CreateRequestPayload) (*CreateResponsePayload, error)
|
||||
}
|
||||
|
||||
func (h *CreateHandler) HandleItem(ctx context.Context, req *Request) (*ResponseBatchItem, error) {
|
||||
var payload CreateRequestPayload
|
||||
|
||||
err := req.DecodePayload(&payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
respPayload, err := h.Create(ctx, &payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
|
||||
idAttr := respPayload.TemplateAttribute.GetTag(kmip14.TagUniqueIdentifier)
|
||||
|
||||
req.IDPlaceholder, ok = idAttr.AttributeValue.(string)
|
||||
if !ok {
|
||||
return nil, merry.Errorf("invalid response returned by CreateHandler: unique identifier tag in attributes should have been a string, was %T", idAttr.AttributeValue)
|
||||
}
|
||||
|
||||
return &ResponseBatchItem{
|
||||
ResponsePayload: respPayload,
|
||||
}, nil
|
||||
}
|
38
vendor/github.com/gemalto/kmip-go/op_create_key_pair.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
package kmip
|
||||
|
||||
// CreateKeyPairRequestPayload
|
||||
// 4.2 Create Key Pair
|
||||
// This operation requests the server to generate a new public/private key pair
|
||||
// and register the two corresponding new Managed Cryptographic Objects.
|
||||
//
|
||||
// The request contains attributes to be assigned to the objects (e.g.,
|
||||
// Cryptographic Algorithm, Cryptographic Length, etc.). Attributes and Template
|
||||
// Names MAY be specified for both keys at the same time by specifying a Common
|
||||
// Template-Attribute object in the request. Attributes not common to both keys
|
||||
// (e.g., Name, Cryptographic Usage Mask) MAY be specified using the Private Key
|
||||
// Template-Attribute and Public Key Template-Attribute objects in the request,
|
||||
// which take precedence over the Common Template-Attribute object.
|
||||
//
|
||||
// The Template Managed Object is deprecated as of version 1.3 of this
|
||||
// specification and MAY be removed from subsequent versions of the
|
||||
// specification. Individual Attributes SHOULD be used in operations which
|
||||
// currently support use of a Name within a Template-Attribute to reference a
|
||||
// Template.
|
||||
//
|
||||
// For the Private Key, the server SHALL create a Link attribute of Link Type
|
||||
// Public Key pointing to the Public Key. For the Public Key, the server SHALL
|
||||
// create a Link attribute of Link Type Private Key pointing to the Private Key.
|
||||
// The response contains the Unique Identifiers of both created objects. The ID
|
||||
// Placeholder value SHALL be set to the Unique Identifier of the Private Key.
|
||||
type CreateKeyPairRequestPayload struct {
|
||||
CommonTemplateAttribute *TemplateAttribute
|
||||
PrivateKeyTemplateAttribute *TemplateAttribute
|
||||
PublicKeyTemplateAttribute *TemplateAttribute
|
||||
}
|
||||
|
||||
type CreateKeyPairResponsePayload struct {
|
||||
PrivateKeyUniqueIdentifier string
|
||||
PublicKeyUniqueIdentifier string
|
||||
PrivateKeyTemplateAttribute *TemplateAttribute
|
||||
PublicKeyTemplateAttribute *TemplateAttribute
|
||||
}
|
40
vendor/github.com/gemalto/kmip-go/op_destroy.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// DestroyRequestPayload ////////////////////////////////////////
|
||||
//
|
||||
type DestroyRequestPayload struct {
|
||||
UniqueIdentifier string
|
||||
}
|
||||
|
||||
// DestroyResponsePayload
|
||||
type DestroyResponsePayload struct {
|
||||
UniqueIdentifier string
|
||||
}
|
||||
|
||||
type DestroyHandler struct {
|
||||
Destroy func(ctx context.Context, payload *DestroyRequestPayload) (*DestroyResponsePayload, error)
|
||||
}
|
||||
|
||||
func (h *DestroyHandler) HandleItem(ctx context.Context, req *Request) (*ResponseBatchItem, error) {
|
||||
var payload DestroyRequestPayload
|
||||
|
||||
err := req.DecodePayload(&payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
respPayload, err := h.Destroy(ctx, &payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// req.Key = respPayload.Key
|
||||
|
||||
return &ResponseBatchItem{
|
||||
ResponsePayload: respPayload,
|
||||
}, nil
|
||||
}
|
47
vendor/github.com/gemalto/kmip-go/op_discover_versions.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// 4.26
|
||||
|
||||
type DiscoverVersionsRequestPayload struct {
|
||||
ProtocolVersion []ProtocolVersion
|
||||
}
|
||||
|
||||
type DiscoverVersionsResponsePayload struct {
|
||||
ProtocolVersion []ProtocolVersion
|
||||
}
|
||||
|
||||
type DiscoverVersionsHandler struct {
|
||||
SupportedVersions []ProtocolVersion
|
||||
}
|
||||
|
||||
func (h *DiscoverVersionsHandler) HandleItem(ctx context.Context, req *Request) (item *ResponseBatchItem, err error) {
|
||||
var payload DiscoverVersionsRequestPayload
|
||||
|
||||
err = req.DecodePayload(&payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var respPayload DiscoverVersionsResponsePayload
|
||||
|
||||
if len(payload.ProtocolVersion) == 0 {
|
||||
respPayload.ProtocolVersion = h.SupportedVersions
|
||||
} else {
|
||||
for _, v := range h.SupportedVersions {
|
||||
for _, cv := range payload.ProtocolVersion {
|
||||
if cv == v {
|
||||
respPayload.ProtocolVersion = append(respPayload.ProtocolVersion, v)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &ResponseBatchItem{
|
||||
ResponsePayload: respPayload,
|
||||
}, nil
|
||||
}
|
51
vendor/github.com/gemalto/kmip-go/op_get.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/gemalto/kmip-go/kmip14"
|
||||
)
|
||||
|
||||
// GetRequestPayload ////////////////////////////////////////
|
||||
//
|
||||
type GetRequestPayload struct {
|
||||
UniqueIdentifier string
|
||||
}
|
||||
|
||||
// GetResponsePayload
|
||||
type GetResponsePayload struct {
|
||||
ObjectType kmip14.ObjectType
|
||||
UniqueIdentifier string
|
||||
Certificate *Certificate
|
||||
SymmetricKey *SymmetricKey
|
||||
PrivateKey *PrivateKey
|
||||
PublicKey *PublicKey
|
||||
SplitKey *SplitKey
|
||||
Template *Template
|
||||
SecretData *SecretData
|
||||
OpaqueObject *OpaqueObject
|
||||
}
|
||||
|
||||
type GetHandler struct {
|
||||
Get func(ctx context.Context, payload *GetRequestPayload) (*GetResponsePayload, error)
|
||||
}
|
||||
|
||||
func (h *GetHandler) HandleItem(ctx context.Context, req *Request) (*ResponseBatchItem, error) {
|
||||
var payload GetRequestPayload
|
||||
|
||||
err := req.DecodePayload(&payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
respPayload, err := h.Get(ctx, &payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// req.Key = respPayload.Key
|
||||
|
||||
return &ResponseBatchItem{
|
||||
ResponsePayload: respPayload,
|
||||
}, nil
|
||||
}
|
86
vendor/github.com/gemalto/kmip-go/op_register.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ansel1/merry"
|
||||
"github.com/gemalto/kmip-go/kmip14"
|
||||
)
|
||||
|
||||
// 4.3
|
||||
|
||||
// Table 169
|
||||
|
||||
type RegisterRequestPayload struct {
|
||||
ObjectType kmip14.ObjectType
|
||||
TemplateAttribute TemplateAttribute
|
||||
Certificate *Certificate
|
||||
SymmetricKey *SymmetricKey
|
||||
PrivateKey *PrivateKey
|
||||
PublicKey *PublicKey
|
||||
SplitKey *SplitKey
|
||||
Template *Template
|
||||
SecretData *SecretData
|
||||
OpaqueObject *OpaqueObject
|
||||
}
|
||||
|
||||
// Table 170
|
||||
|
||||
type RegisterResponsePayload struct {
|
||||
UniqueIdentifier string
|
||||
TemplateAttribute TemplateAttribute
|
||||
}
|
||||
|
||||
type RegisterHandler struct {
|
||||
SkipValidation bool
|
||||
RegisterFunc func(context.Context, *RegisterRequestPayload) (*RegisterResponsePayload, error)
|
||||
}
|
||||
|
||||
func (h *RegisterHandler) HandleItem(ctx context.Context, req *Request) (item *ResponseBatchItem, err error) {
|
||||
var payload RegisterRequestPayload
|
||||
|
||||
err = req.DecodePayload(&payload)
|
||||
if err != nil {
|
||||
return nil, merry.Prepend(err, "decoding request")
|
||||
}
|
||||
|
||||
if !h.SkipValidation {
|
||||
var payloadPresent bool
|
||||
|
||||
switch payload.ObjectType {
|
||||
default:
|
||||
return nil, WithResultReason(merry.UserError("Object Type is not recognized"), kmip14.ResultReasonInvalidField)
|
||||
case kmip14.ObjectTypeCertificate:
|
||||
payloadPresent = payload.Certificate != nil
|
||||
case kmip14.ObjectTypeSymmetricKey:
|
||||
payloadPresent = payload.SymmetricKey != nil
|
||||
case kmip14.ObjectTypePrivateKey:
|
||||
payloadPresent = payload.PrivateKey != nil
|
||||
case kmip14.ObjectTypePublicKey:
|
||||
payloadPresent = payload.PublicKey != nil
|
||||
case kmip14.ObjectTypeSplitKey:
|
||||
payloadPresent = payload.SplitKey != nil
|
||||
case kmip14.ObjectTypeTemplate:
|
||||
payloadPresent = payload.Template != nil
|
||||
case kmip14.ObjectTypeSecretData:
|
||||
payloadPresent = payload.SecretData != nil
|
||||
case kmip14.ObjectTypeOpaqueObject:
|
||||
payloadPresent = payload.OpaqueObject != nil
|
||||
}
|
||||
|
||||
if !payloadPresent {
|
||||
return nil, WithResultReason(merry.UserErrorf("Object Type %s does not match type of cryptographic object provided", payload.ObjectType.String()), kmip14.ResultReasonInvalidField)
|
||||
}
|
||||
}
|
||||
|
||||
respPayload, err := h.RegisterFunc(ctx, &payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.IDPlaceholder = respPayload.UniqueIdentifier
|
||||
|
||||
return &ResponseBatchItem{
|
||||
ResponsePayload: respPayload,
|
||||
}, nil
|
||||
}
|
902
vendor/github.com/gemalto/kmip-go/requests.go
generated
vendored
Normal file
@ -0,0 +1,902 @@
|
||||
package kmip
|
||||
|
||||
// This is a WIP implementation of a KMIP server. The code is mostly based on the http server in
|
||||
// the golang standard library. It is functional, but not all of the features of the http server
|
||||
// have been ported over yet, and some of the code here still refers to http concepts.
|
||||
//
|
||||
// The responsibility of handling a request is broken up into 3 layers of handlers: ProtocolHandler, MessageHandler,
|
||||
// and ItemHandler. Each of these handlers delegates details to the next layer. Using the http
|
||||
// package as an analogy, ProtocolHandler is similar to the wire-level HTTP protocol handling in
|
||||
// http.Server and http.Transport. MessageHandler parses KMIP TTLV bytes into golang request and response structs.
|
||||
// ItemHandler is a bit like http.ServeMux, routing particular KMIP operations to registered handlers.
|
||||
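// As a rough sketch of how these layers might be wired together (the
// kmip14.OperationDiscoverVersions constant name is assumed here for
// illustration; OperationMux, DiscoverVersionsHandler, and
// StandardProtocolHandler are defined in this package):
//
//	mux := &OperationMux{}
//	mux.Handle(kmip14.OperationDiscoverVersions, &DiscoverVersionsHandler{
//		SupportedVersions: []ProtocolVersion{{ProtocolVersionMajor: 1, ProtocolVersionMinor: 4}},
//	})
//	handler := &StandardProtocolHandler{
//		ProtocolVersion: ProtocolVersion{ProtocolVersionMajor: 1, ProtocolVersionMinor: 4},
//		MessageHandler:  mux,
//	}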
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ansel1/merry"
|
||||
"github.com/gemalto/flume"
|
||||
"github.com/gemalto/kmip-go/kmip14"
|
||||
"github.com/gemalto/kmip-go/ttlv"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
var serverLog = flume.New("kmip_server")
|
||||
|
||||
// Server serves KMIP protocol connections from a net.Listener. Because KMIP is a connection-oriented
|
||||
// protocol, unlike HTTP, each connection ends up being serviced by a dedicated goroutine (rather than
|
||||
// each request). For each KMIP connection, requests are processed serially. The handling
|
||||
// of the request is delegated to the ProtocolHandler.
|
||||
//
|
||||
// Limitations:
|
||||
//
|
||||
// This implementation is functional (it can respond to KMIP requests), but incomplete. Some of the
|
||||
// connection management features of the http package haven't been ported over, and also, there is
|
||||
// currently no connection context in which to store things like authentication state or session management.
|
||||
// Since HTTP is an intrinsically stateless model, it makes sense for the http package to delegate session
|
||||
// management to third party packages, but for KMIP, it would make sense for there to be some first
|
||||
// class support for a connection context.
|
||||
//
|
||||
// This package also only handles the binary TTLV encoding for now. It may make sense for this
|
||||
// server to detect or support the XML and JSON encodings as well. It may also make sense to support
|
||||
// KMIP requests over HTTP, perhaps by adapting ProtocolHandler to an http.Handler or something.
|
||||
type Server struct {
|
||||
Handler ProtocolHandler
|
||||
|
||||
mu sync.Mutex
|
||||
listeners map[*net.Listener]struct{}
|
||||
inShutdown int32 // accessed atomically (non-zero means we're in Shutdown)
|
||||
}
|
||||
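// A minimal usage sketch (error handling elided; the listen address is
// illustrative):
//
//	ln, _ := net.Listen("tcp", ":5696")
//	srv := &Server{Handler: DefaultProtocolHandler}
//	_ = srv.Serve(ln)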
|
||||
// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
|
||||
// and ListenAndServeTLS methods after a call to Shutdown or Close.
|
||||
var ErrServerClosed = errors.New("http: Server closed")
|
||||
|
||||
// Serve accepts incoming connections on the Listener l, creating a
|
||||
// new service goroutine for each. The service goroutines read requests and
|
||||
// then call srv.MessageHandler to reply to them.
|
||||
//
|
||||
// Serve always returns a non-nil error and closes l.
|
||||
// After Shutdown or Close, the returned error is ErrServerClosed.
|
||||
func (srv *Server) Serve(l net.Listener) error {
|
||||
//if fn := testHookServerServe; fn != nil {
|
||||
// fn(srv, l) // call hook with unwrapped listener
|
||||
//}
|
||||
|
||||
l = &onceCloseListener{Listener: l}
|
||||
defer l.Close()
|
||||
|
||||
if !srv.trackListener(&l, true) {
|
||||
return ErrServerClosed
|
||||
}
|
||||
defer srv.trackListener(&l, false)
|
||||
|
||||
var tempDelay time.Duration // how long to sleep on accept failure
|
||||
baseCtx := context.Background() // base is always background, per Issue 16220
|
||||
ctx := baseCtx
|
||||
// ctx := context.WithValue(baseCtx, ServerContextKey, srv)
|
||||
for {
|
||||
rw, e := l.Accept()
|
||||
if e != nil {
|
||||
if srv.shuttingDown() {
|
||||
return ErrServerClosed
|
||||
}
|
||||
if ne, ok := e.(net.Error); ok && ne.Temporary() {
|
||||
if tempDelay == 0 {
|
||||
tempDelay = 5 * time.Millisecond
|
||||
} else {
|
||||
tempDelay *= 2
|
||||
}
|
||||
if max := 1 * time.Second; tempDelay > max {
|
||||
tempDelay = max
|
||||
}
|
||||
// srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay)
|
||||
time.Sleep(tempDelay)
|
||||
continue
|
||||
}
|
||||
return e
|
||||
}
|
||||
tempDelay = 0
|
||||
c := &conn{server: srv, rwc: rw}
|
||||
// c.setState(c.rwc, StateNew) // before Serve can return
|
||||
go c.serve(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// Close immediately closes all active net.Listeners and any
|
||||
// connections in state StateNew, StateActive, or StateIdle. For a
|
||||
// graceful shutdown, use Shutdown.
|
||||
//
|
||||
// Close does not attempt to close (and does not even know about)
|
||||
// any hijacked connections, such as WebSockets.
|
||||
//
|
||||
// Close returns any error returned from closing the Server's
|
||||
// underlying Listener(s).
|
||||
func (srv *Server) Close() error {
|
||||
atomic.StoreInt32(&srv.inShutdown, 1)
|
||||
srv.mu.Lock()
|
||||
defer srv.mu.Unlock()
|
||||
// srv.closeDoneChanLocked()
|
||||
err := srv.closeListenersLocked()
|
||||
//for c := range srv.activeConn {
|
||||
// c.rwc.Close()
|
||||
// delete(srv.activeConn, c)
|
||||
//}
|
||||
return err
|
||||
}
|
||||
|
||||
// shutdownPollInterval is how often we poll for quiescence
|
||||
// during Server.Shutdown. This is lower during tests, to
|
||||
// speed up tests.
|
||||
// Ideally we could find a solution that doesn't involve polling,
|
||||
// but which also doesn't have a high runtime cost (and doesn't
|
||||
// involve any contentious mutexes), but that is left as an
|
||||
// exercise for the reader.
|
||||
var shutdownPollInterval = 500 * time.Millisecond
|
||||
|
||||
// Shutdown gracefully shuts down the server without interrupting any
|
||||
// active connections. Shutdown works by first closing all open
|
||||
// listeners, then closing all idle connections, and then waiting
|
||||
// indefinitely for connections to return to idle and then shut down.
|
||||
// If the provided context expires before the shutdown is complete,
|
||||
// Shutdown returns the context's error, otherwise it returns any
|
||||
// error returned from closing the Server's underlying Listener(s).
|
||||
//
|
||||
// When Shutdown is called, Serve, ListenAndServe, and
|
||||
// ListenAndServeTLS immediately return ErrServerClosed. Make sure the
|
||||
// program doesn't exit and waits instead for Shutdown to return.
|
||||
//
|
||||
// Shutdown does not attempt to close nor wait for hijacked
|
||||
// connections such as WebSockets. The caller of Shutdown should
|
||||
// separately notify such long-lived connections of shutdown and wait
|
||||
// for them to close, if desired. See RegisterOnShutdown for a way to
|
||||
// register shutdown notification functions.
|
||||
//
|
||||
// Once Shutdown has been called on a server, it may not be reused;
|
||||
// future calls to methods such as Serve will return ErrServerClosed.
|
||||
func (srv *Server) Shutdown(ctx context.Context) error {
|
||||
atomic.StoreInt32(&srv.inShutdown, 1)
|
||||
|
||||
srv.mu.Lock()
|
||||
lnerr := srv.closeListenersLocked()
|
||||
//srv.closeDoneChanLocked()
|
||||
//for _, f := range srv.onShutdown {
|
||||
// go f()
|
||||
//}
|
||||
srv.mu.Unlock()
|
||||
|
||||
ticker := time.NewTicker(shutdownPollInterval)
|
||||
defer ticker.Stop()
|
||||
return lnerr
|
||||
//for {
|
||||
// if srv.closeIdleConns() {
|
||||
// return lnerr
|
||||
// }
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// return ctx.Err()
|
||||
// case <-ticker.C:
|
||||
// }
|
||||
//}
|
||||
}
|
||||
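// A sketch of triggering a graceful shutdown from another goroutine (the
// stop channel wiring is illustrative and not part of this package):
//
//	go func() {
//		<-stopCh
//		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//		defer cancel()
//		_ = srv.Shutdown(ctx)
//	}()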
|
||||
func (srv *Server) closeListenersLocked() error {
|
||||
var err error
|
||||
for ln := range srv.listeners {
|
||||
if cerr := (*ln).Close(); cerr != nil && err == nil {
|
||||
err = cerr
|
||||
}
|
||||
delete(srv.listeners, ln)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// trackListener adds or removes a net.Listener to the set of tracked
|
||||
// listeners.
|
||||
//
|
||||
// We store a pointer to interface in the map set, in case the
|
||||
// net.Listener is not comparable. This is safe because we only call
|
||||
// trackListener via Serve and can track+defer untrack the same
|
||||
// pointer to local variable there. We never need to compare a
|
||||
// Listener from another caller.
|
||||
//
|
||||
// It reports whether the server is still up (not Shutdown or Closed).
|
||||
func (srv *Server) trackListener(ln *net.Listener, add bool) bool {
|
||||
srv.mu.Lock()
|
||||
defer srv.mu.Unlock()
|
||||
if srv.listeners == nil {
|
||||
srv.listeners = make(map[*net.Listener]struct{})
|
||||
}
|
||||
if add {
|
||||
if srv.shuttingDown() {
|
||||
return false
|
||||
}
|
||||
srv.listeners[ln] = struct{}{}
|
||||
} else {
|
||||
delete(srv.listeners, ln)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (srv *Server) shuttingDown() bool {
|
||||
return atomic.LoadInt32(&srv.inShutdown) != 0
|
||||
}
|
||||
|
||||
type conn struct {
|
||||
rwc net.Conn
|
||||
remoteAddr string
|
||||
localAddr string
|
||||
tlsState *tls.ConnectionState
|
||||
// cancelCtx cancels the connection-level context.
|
||||
cancelCtx context.CancelFunc
|
||||
|
||||
// bufr reads from rwc.
|
||||
bufr *bufio.Reader
|
||||
dec *ttlv.Decoder
|
||||
|
||||
server *Server
|
||||
}
|
||||
|
||||
func (c *conn) close() {
|
||||
// TODO: http package has a buffered writer on the conn too, which is flushed here
|
||||
_ = c.rwc.Close()
|
||||
}
|
||||
|
||||
// Serve a new connection.
|
||||
func (c *conn) serve(ctx context.Context) {
|
||||
ctx = flume.WithLogger(ctx, serverLog)
|
||||
ctx, cancelCtx := context.WithCancel(ctx)
|
||||
c.cancelCtx = cancelCtx
|
||||
c.remoteAddr = c.rwc.RemoteAddr().String()
|
||||
c.localAddr = c.rwc.LocalAddr().String()
|
||||
// ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
// TODO: logging support
|
||||
// if err := recover(); err != nil && err != ErrAbortHandler {
|
||||
const size = 64 << 10
|
||||
buf := make([]byte, size)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
if e, ok := err.(error); ok {
|
||||
fmt.Printf("kmip: panic serving %v: %v\n%s", c.remoteAddr, Details(e), buf)
|
||||
} else {
|
||||
fmt.Printf("kmip: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
|
||||
}
|
||||
|
||||
// c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
|
||||
}
|
||||
cancelCtx()
|
||||
// if !c.hijacked() {
|
||||
c.close()
|
||||
// c.setState(c.rwc, StateClosed)
|
||||
//}
|
||||
}()
|
||||
|
||||
if tlsConn, ok := c.rwc.(*tls.Conn); ok {
|
||||
//if d := c.server.ReadTimeout; d != 0 {
|
||||
// c.rwc.SetReadDeadline(time.Now().Add(d))
|
||||
//}
|
||||
//if d := c.server.WriteTimeout; d != 0 {
|
||||
// c.rwc.SetWriteDeadline(time.Now().Add(d))
|
||||
//}
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
// TODO: logging support
|
||||
fmt.Printf("kmip: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
|
||||
// c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
|
||||
return
|
||||
}
|
||||
c.tlsState = new(tls.ConnectionState)
|
||||
*c.tlsState = tlsConn.ConnectionState()
|
||||
//if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) {
|
||||
// if fn := c.server.TLSNextProto[proto]; fn != nil {
|
||||
// h := initNPNRequest{tlsConn, serverHandler{c.server}}
|
||||
// fn(c.server, tlsConn, h)
|
||||
// }
|
||||
// return
|
||||
//}
|
||||
}
|
||||
|
||||
// TODO: do we really need instance pooling here? We expect KMIP connections to be long lasting
|
||||
c.dec = ttlv.NewDecoder(c.rwc)
|
||||
c.bufr = bufio.NewReader(c.rwc)
|
||||
// c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
|
||||
|
||||
for {
|
||||
w, err := c.readRequest(ctx)
|
||||
//if c.r.remain != c.server.initialReadLimitSize() {
|
||||
// If we read any bytes off the wire, we're active.
|
||||
//c.setState(c.rwc, StateActive)
|
||||
//}
|
||||
if err != nil {
|
||||
if merry.Is(err, io.EOF) {
|
||||
fmt.Println("client closed connection")
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: do something with this error
|
||||
panic(err)
|
||||
//const errorHeaders= "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"
|
||||
//
|
||||
//if err == errTooLarge {
|
||||
// // Their HTTP client may or may not be
|
||||
// // able to read this if we're
|
||||
// // responding to them and hanging up
|
||||
// // while they're still writing their
|
||||
// // request. Undefined behavior.
|
||||
// const publicErr= "431 Request Header Fields Too Large"
|
||||
// fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
|
||||
// c.closeWriteAndWait()
|
||||
// return
|
||||
//}
|
||||
//if isCommonNetReadError(err) {
|
||||
// return // don't reply
|
||||
//}
|
||||
//
|
||||
//publicErr := "400 Bad Request"
|
||||
//if v, ok := err.(badRequestError); ok {
|
||||
// publicErr = publicErr + ": " + string(v)
|
||||
//}
|
||||
//
|
||||
//fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
|
||||
//return
|
||||
}
|
||||
|
||||
// Expect 100 Continue support
|
||||
//req := w.req
|
||||
//if req.expectsContinue() {
|
||||
// if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
|
||||
// // Wrap the Body reader with one that replies on the connection
|
||||
// req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
|
||||
// }
|
||||
//} else if req.Header.get("Expect") != "" {
|
||||
// w.sendExpectationFailed()
|
||||
// return
|
||||
//}
|
||||
|
||||
// c.curReq.Store(w)
|
||||
|
||||
//if requestBodyRemains(req.Body) {
|
||||
// registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
|
||||
//} else {
|
||||
// w.conn.r.startBackgroundRead()
|
||||
//}
|
||||
|
||||
// HTTP cannot have multiple simultaneous active requests.[*]
|
||||
// Until the server replies to this request, it can't read another,
|
||||
// so we might as well run the handler in this goroutine.
|
||||
// [*] Not strictly true: HTTP pipelining. We could let them all process
|
||||
// in parallel even if their responses need to be serialized.
|
||||
// But we're not going to implement HTTP pipelining because it
|
||||
// was never deployed in the wild and the answer is HTTP/2.
|
||||
|
||||
h := c.server.Handler
|
||||
if h == nil {
|
||||
h = DefaultProtocolHandler
|
||||
}
|
||||
|
||||
// var resp ResponseMessage
|
||||
// err = c.server.MessageHandler.Handle(ctx, w, &resp)
|
||||
// TODO: this cancelCtx() was created at the connection level, not the request level. Need to
|
||||
// figure out how to handle connection vs request timeouts and cancels.
|
||||
// cancelCtx()
|
||||
|
||||
// TODO: use recycled buffered writer
|
||||
writer := bufio.NewWriter(c.rwc)
|
||||
h.ServeKMIP(ctx, w, writer)
|
||||
err = writer.Flush()
|
||||
if err != nil {
|
||||
// TODO: handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
//serverHandler{c.server}.ServeHTTP(w, w.req)
|
||||
//w.cancelCtx()
|
||||
//if c.hijacked() {
|
||||
// return
|
||||
//}
|
||||
//w.finishRequest()
|
||||
//if !w.shouldReuseConnection() {
|
||||
// if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
|
||||
// c.closeWriteAndWait()
|
||||
// }
|
||||
// return
|
||||
//}
|
||||
//c.setState(c.rwc, StateIdle)
|
||||
//c.curReq.Store((*response)(nil))
|
||||
|
||||
//if !w.conn.server.doKeepAlives() {
|
||||
// // We're in shutdown mode. We might've replied
|
||||
// // to the user without "Connection: close" and
|
||||
// // they might think they can send another
|
||||
// // request, but such is life with HTTP/1.1.
|
||||
// return
|
||||
//}
|
||||
//
|
||||
//if d := c.server.idleTimeout(); d != 0 {
|
||||
// c.rwc.SetReadDeadline(time.Now().Add(d))
|
||||
// if _, err := c.bufr.Peek(4); err != nil {
|
||||
// return
|
||||
// }
|
||||
//}
|
||||
//c.rwc.SetReadDeadline(time.Time{})
|
||||
}
|
||||
}
|
||||
|
||||
// Read next request from connection.
|
||||
func (c *conn) readRequest(ctx context.Context) (w *Request, err error) {
|
||||
//if c.hijacked() {
|
||||
// return nil, ErrHijacked
|
||||
//}
|
||||
|
||||
//var (
|
||||
// wholeReqDeadline time.Time // or zero if none
|
||||
// hdrDeadline time.Time // or zero if none
|
||||
//)
|
||||
//t0 := time.Now()
|
||||
//if d := c.server.readHeaderTimeout(); d != 0 {
|
||||
// hdrDeadline = t0.Add(d)
|
||||
//}
|
||||
//if d := c.server.ReadTimeout; d != 0 {
|
||||
// wholeReqDeadline = t0.Add(d)
|
||||
//}
|
||||
//c.rwc.SetReadDeadline(hdrDeadline)
|
||||
//if d := c.server.WriteTimeout; d != 0 {
|
||||
// defer func() {
|
||||
// c.rwc.SetWriteDeadline(time.Now().Add(d))
|
||||
// }()
|
||||
//}
|
||||
|
||||
//c.r.setReadLimit(c.server.initialReadLimitSize())
|
||||
//if c.lastMethod == "POST" {
|
||||
// RFC 7230 section 3 tolerance for old buggy clients.
|
||||
//peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
|
||||
//c.bufr.Discard(numLeadingCRorLF(peek))
|
||||
//}
|
||||
ttlvVal, err := c.dec.NextTTLV()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//if err != nil {
|
||||
//if c.r.hitReadLimit() {
|
||||
// return nil, errTooLarge
|
||||
//}
|
||||
//}
|
||||
|
||||
// TODO: use pooling to recycle requests?
|
||||
req := &Request{
|
||||
TTLV: ttlvVal,
|
||||
RemoteAddr: c.remoteAddr,
|
||||
LocalAddr: c.localAddr,
|
||||
TLS: c.tlsState,
|
||||
}
|
||||
|
||||
// c.r.setInfiniteReadLimit()
|
||||
|
||||
// Adjust the read deadline if necessary.
|
||||
//if !hdrDeadline.Equal(wholeReqDeadline) {
|
||||
// c.rwc.SetReadDeadline(wholeReqDeadline)
|
||||
//}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// Request represents a KMIP request.
|
||||
type Request struct {
|
||||
// TTLV will hold the entire body of the request.
|
||||
TTLV ttlv.TTLV
|
||||
Message *RequestMessage
|
||||
CurrentItem *RequestBatchItem
|
||||
DisallowExtraValues bool
|
||||
|
||||
// TLS holds the TLS state of the connection this request was received on.
|
||||
TLS *tls.ConnectionState
|
||||
RemoteAddr string
|
||||
LocalAddr string
|
||||
|
||||
IDPlaceholder string
|
||||
|
||||
decoder *ttlv.Decoder
|
||||
}
|
||||
|
||||
// coerceToTTLV attempts to coerce an interface value to TTLV.
|
||||
// In most production scenarios, this is intended to be used in
|
||||
// places where the value is already a TTLV, and just needs to be
|
||||
// type cast. If v is not TTLV, it will be marshaled. This latter
|
||||
// behavior is slow, so it should be used only in tests.
|
||||
func coerceToTTLV(v interface{}) (ttlv.TTLV, error) {
|
||||
switch t := v.(type) {
|
||||
case nil:
|
||||
return nil, nil
|
||||
case ttlv.TTLV:
|
||||
return t, nil
|
||||
default:
|
||||
return ttlv.Marshal(v)
|
||||
}
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals ttlv into structures. Handlers should prefer this
|
||||
// method over their own Decoders or Unmarshal(). This method
|
||||
// enforces rules about whether extra fields are allowed, and reuses
|
||||
// buffers for efficiency.
|
||||
func (r *Request) Unmarshal(ttlv ttlv.TTLV, into interface{}) error {
|
||||
if len(ttlv) == 0 {
|
||||
return nil
|
||||
}
|
||||
r.decoder.Reset(bytes.NewReader(ttlv))
|
||||
return r.decoder.Decode(into)
|
||||
}
|
||||
|
||||
func (r *Request) DecodePayload(v interface{}) error {
|
||||
if r.CurrentItem == nil {
|
||||
return nil
|
||||
}
|
||||
ttlvVal, err := coerceToTTLV(r.CurrentItem.RequestPayload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return r.Unmarshal(ttlvVal, v)
|
||||
}
|
||||
|
||||
// onceCloseListener wraps a net.Listener, protecting it from
|
||||
// multiple Close calls.
|
||||
type onceCloseListener struct {
|
||||
net.Listener
|
||||
once sync.Once
|
||||
closeErr error
|
||||
}
|
||||
|
||||
func (oc *onceCloseListener) Close() error {
|
||||
oc.once.Do(oc.close)
|
||||
return oc.closeErr
|
||||
}
|
||||
|
||||
func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() }
|
||||
|
||||
type ResponseWriter interface {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
// ProtocolHandler is responsible for handling raw requests read off the wire. The
|
||||
// *Request object will only have the TTLV field populated. The response should
|
||||
// be written directly to the ResponseWriter.
|
||||
//
|
||||
// The default implementation of ProtocolHandler is StandardProtocolHandler.
|
||||
type ProtocolHandler interface {
|
||||
ServeKMIP(ctx context.Context, req *Request, resp ResponseWriter)
|
||||
}
|
||||
|
||||
// MessageHandler handles KMIP requests which have already been decoded. The *Request
|
||||
// object's Message field will be populated from the decoded TTLV. The *Response
|
||||
// object will always be non-nil, and its ResponseHeader will be populated. The
|
||||
// MessageHandler usually shouldn't modify the ResponseHeader: the ProtocolHandler
|
||||
// is responsible for the header. The MessageHandler just needs to populate
|
||||
// the response batch items.
|
||||
//
|
||||
// The default implementation of MessageHandler is OperationMux.
|
||||
type MessageHandler interface {
|
||||
HandleMessage(ctx context.Context, req *Request, resp *Response)
|
||||
}
|
||||
|
||||
// ItemHandler handles a single batch item in a KMIP request. The *Request
|
||||
// object's CurrentItem field will be populated with the item to be handled.
|
||||
type ItemHandler interface {
|
||||
HandleItem(ctx context.Context, req *Request) (item *ResponseBatchItem, err error)
|
||||
}
|
||||
|
||||
type ProtocolHandlerFunc func(context.Context, *Request, ResponseWriter)
|
||||
|
||||
func (f ProtocolHandlerFunc) ServeKMIP(ctx context.Context, r *Request, w ResponseWriter) {
|
||||
f(ctx, r, w)
|
||||
}
|
||||
|
||||
type MessageHandlerFunc func(context.Context, *Request, *Response)
|
||||
|
||||
func (f MessageHandlerFunc) HandleMessage(ctx context.Context, req *Request, resp *Response) {
|
||||
f(ctx, req, resp)
|
||||
}
|
||||
|
||||
type ItemHandlerFunc func(context.Context, *Request) (*ResponseBatchItem, error)
|
||||
|
||||
func (f ItemHandlerFunc) HandleItem(ctx context.Context, req *Request) (item *ResponseBatchItem, err error) {
|
||||
return f(ctx, req)
|
||||
}
|
||||
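// A sketch of a custom item handler built from ItemHandlerFunc, reusing the
// Get payload types defined in this package (the handler name and the echo
// behavior are illustrative):
//
//	var echoHandler = ItemHandlerFunc(func(ctx context.Context, req *Request) (*ResponseBatchItem, error) {
//		var payload GetRequestPayload
//		if err := req.DecodePayload(&payload); err != nil {
//			return nil, err
//		}
//		return &ResponseBatchItem{
//			ResponsePayload: GetResponsePayload{UniqueIdentifier: payload.UniqueIdentifier},
//		}, nil
//	})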
|
||||
var DefaultProtocolHandler = &StandardProtocolHandler{
|
||||
MessageHandler: DefaultOperationMux,
|
||||
ProtocolVersion: ProtocolVersion{
|
||||
ProtocolVersionMajor: 1,
|
||||
ProtocolVersionMinor: 4,
|
||||
},
|
||||
}
|
||||
|
||||
var DefaultOperationMux = &OperationMux{}
|
||||
|
||||
// StandardProtocolHandler is the default ProtocolHandler implementation. It
|
||||
// handles decoding the request and encoding the response, as well as protocol
|
||||
// level tasks like version negotiation and correlation values.
|
||||
//
|
||||
// It delegates handling of the request to a MessageHandler.
|
||||
type StandardProtocolHandler struct {
|
||||
ProtocolVersion ProtocolVersion
|
||||
MessageHandler MessageHandler
|
||||
|
||||
LogTraffic bool
|
||||
}
|
||||
|
||||
func (h *StandardProtocolHandler) parseMessage(ctx context.Context, req *Request) error {
|
||||
ttlvV := req.TTLV
|
||||
if err := ttlvV.Valid(); err != nil {
|
||||
return merry.Prepend(err, "invalid ttlv")
|
||||
}
|
||||
|
||||
if ttlvV.Tag() != kmip14.TagRequestMessage {
|
||||
return merry.Errorf("invalid tag: expected RequestMessage, was %s", ttlvV.Tag().String())
|
||||
}
|
||||
|
||||
var message RequestMessage
|
||||
err := ttlv.Unmarshal(ttlvV, &message)
|
||||
if err != nil {
|
||||
return merry.Prepend(err, "failed to parse message")
|
||||
}
|
||||
|
||||
req.Message = &message
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var responsePool = sync.Pool{}
|
||||
|
||||
type Response struct {
|
||||
ResponseMessage
|
||||
buf bytes.Buffer
|
||||
enc *ttlv.Encoder
|
||||
}
|
||||
|
||||
func newResponse() *Response {
|
||||
v := responsePool.Get()
|
||||
if v != nil {
|
||||
r := v.(*Response)
|
||||
r.reset()
|
||||
return r
|
||||
}
|
||||
r := Response{}
|
||||
r.enc = ttlv.NewEncoder(&r.buf)
|
||||
return &r
|
||||
}
|
||||
|
||||
func releaseResponse(r *Response) {
|
||||
responsePool.Put(r)
|
||||
}
|
||||
|
||||
func (r *Response) reset() {
|
||||
r.BatchItem = nil
|
||||
r.ResponseMessage = ResponseMessage{}
|
||||
r.buf.Reset()
|
||||
}
|
||||
|
||||
func (r *Response) Bytes() []byte {
|
||||
r.buf.Reset()
|
||||
err := r.enc.Encode(&r.ResponseMessage)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return r.buf.Bytes()
|
||||
}
|
||||
|
||||
func (r *Response) errorResponse(reason kmip14.ResultReason, msg string) {
|
||||
r.BatchItem = []ResponseBatchItem{
|
||||
{
|
||||
ResultStatus: kmip14.ResultStatusOperationFailed,
|
||||
ResultReason: reason,
|
||||
ResultMessage: msg,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (h *StandardProtocolHandler) handleRequest(ctx context.Context, req *Request, resp *Response) (logger flume.Logger) {
|
||||
// create a server correlation value, which is like a unique transaction ID
|
||||
scv := uuid.New().String()
|
||||
|
||||
// create a logger for the transaction, seeded with the scv
|
||||
logger = flume.FromContext(ctx).With("scv", scv)
|
||||
// attach the logger to the context, so it is available to the handling chain
|
||||
ctx = flume.WithLogger(ctx, logger)
|
||||
|
||||
// TODO: it's unclear how the full protocol negotiation is supposed to work
|
||||
// should the server be pinned to a particular version? Or should we try to negotiate a common version?
|
||||
resp.ResponseHeader.ProtocolVersion = h.ProtocolVersion
|
||||
resp.ResponseHeader.TimeStamp = time.Now()
|
||||
resp.ResponseHeader.BatchCount = len(resp.BatchItem)
|
||||
resp.ResponseHeader.ServerCorrelationValue = scv
|
||||
|
||||
if err := h.parseMessage(ctx, req); err != nil {
|
||||
resp.errorResponse(kmip14.ResultReasonInvalidMessage, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
ccv := req.Message.RequestHeader.ClientCorrelationValue
|
||||
// add the client correlation value to the logging context. This value uniquely
|
||||
// identifies the client, and is supposed to be included in server logs
|
||||
logger = logger.With("ccv", ccv)
|
||||
ctx = flume.WithLogger(ctx, logger)
|
||||
resp.ResponseHeader.ClientCorrelationValue = req.Message.RequestHeader.ClientCorrelationValue
|
||||
|
||||
clientMajorVersion := req.Message.RequestHeader.ProtocolVersion.ProtocolVersionMajor
|
||||
if clientMajorVersion != h.ProtocolVersion.ProtocolVersionMajor {
|
||||
resp.errorResponse(kmip14.ResultReasonInvalidMessage,
|
||||
fmt.Sprintf("mismatched protocol versions, client: %d, server: %d", clientMajorVersion, h.ProtocolVersion.ProtocolVersionMajor))
|
||||
return
|
||||
}
|
||||
|
||||
// set a flag hinting to handlers that extra fields should not be tolerated when
|
||||
// unmarshaling payloads. According to the spec, if the server's and client's protocol
|
||||
// minor versions match, then extra fields should cause an error. Not sure how to enforce
|
||||
// this in this higher level handler, since we (the protocol/message handlers) don't unmarshal the payload.
|
||||
// That's done by a particular item handler.
|
||||
req.DisallowExtraValues = req.Message.RequestHeader.ProtocolVersion.ProtocolVersionMinor == h.ProtocolVersion.ProtocolVersionMinor
|
||||
req.decoder = ttlv.NewDecoder(nil)
|
||||
req.decoder.DisallowExtraValues = req.DisallowExtraValues
|
||||
|
||||
h.MessageHandler.HandleMessage(ctx, req, resp)
|
||||
resp.ResponseHeader.BatchCount = len(resp.BatchItem)
|
||||
|
||||
respTTLV := resp.Bytes()
|
||||
|
||||
if req.Message.RequestHeader.MaximumResponseSize > 0 && len(respTTLV) > req.Message.RequestHeader.MaximumResponseSize {
|
||||
// new error resp
|
||||
resp.errorResponse(kmip14.ResultReasonResponseTooLarge, "")
|
||||
respTTLV = resp.Bytes()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (h *StandardProtocolHandler) ServeKMIP(ctx context.Context, req *Request, writer ResponseWriter) {
|
||||
// we precreate the response object and pass it down to handlers, because, due to
|
||||
// the guidance in the spec on the Maximum Response Size, it will be necessary
|
||||
// for handlers to recalculate the response size after each batch item, which
|
||||
// requires re-encoding the entire response. Seems inefficient.
|
||||
resp := newResponse()
|
||||
logger := h.handleRequest(ctx, req, resp)
|
||||
|
||||
var err error
|
||||
if h.LogTraffic {
|
||||
ttlvV := resp.Bytes()
|
||||
|
||||
logger.Debug("traffic log", "request", req.TTLV.String(), "response", ttlv.TTLV(ttlvV).String())
|
||||
_, err = writer.Write(ttlvV)
|
||||
} else {
|
||||
_, err = resp.buf.WriteTo(writer)
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
releaseResponse(resp)
|
||||
}
|
||||
|
||||
func (r *ResponseMessage) addFailure(reason kmip14.ResultReason, msg string) {
|
||||
if msg == "" {
|
||||
msg = reason.String()
|
||||
}
|
||||
r.BatchItem = append(r.BatchItem, ResponseBatchItem{
|
||||
ResultStatus: kmip14.ResultStatusOperationFailed,
|
||||
ResultReason: reason,
|
||||
ResultMessage: msg,
|
||||
})
|
||||
}
|
||||
|
||||
// OperationMux is an implementation of MessageHandler which handles each batch item in the request
|
||||
// by routing the operation to an ItemHandler. The ItemHandler performs the operation, and returns
|
||||
// either a *ResponseBatchItem, or an error. If it returns an error, the error is passed to
|
||||
// ErrorHandler, which converts it into an error *ResponseBatchItem. OperationMux handles correlating
|
||||
// items in the request to items in the response.
|
||||
type OperationMux struct {
|
||||
mu sync.RWMutex
|
||||
handlers map[kmip14.Operation]ItemHandler
|
||||
// ErrorHandler defaults to the DefaultErrorHandler.
|
||||
ErrorHandler ErrorHandler
|
||||
}
|
||||
|
||||
// ErrorHandler converts a golang error into a *ResponseBatchItem (which should hold information
|
||||
// about the error to convey back to the client).
|
||||
type ErrorHandler interface {
|
||||
HandleError(err error) *ResponseBatchItem
|
||||
}
|
||||
|
||||
type ErrorHandlerFunc func(err error) *ResponseBatchItem
|
||||
|
||||
func (f ErrorHandlerFunc) HandleError(err error) *ResponseBatchItem {
|
||||
return f(err)
|
||||
}
|
||||
|
||||
// DefaultErrorHandler tries to map errors to ResultReasons.
|
||||
var DefaultErrorHandler = ErrorHandlerFunc(func(err error) *ResponseBatchItem {
|
||||
reason := GetResultReason(err)
|
||||
if reason == kmip14.ResultReason(0) {
|
||||
// error not handled
|
||||
return nil
|
||||
}
|
||||
|
||||
// prefer user message, but fall back on message
|
||||
msg := merry.UserMessage(err)
|
||||
if msg == "" {
|
||||
msg = merry.Message(err)
|
||||
}
|
||||
return newFailedResponseBatchItem(reason, msg)
|
||||
})
|
||||
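// A sketch of wrapping DefaultErrorHandler so unrecognized errors still
// produce a failure item (the kmip14.ResultReasonGeneralFailure constant
// name is an assumption for illustration):
//
//	var catchAllErrorHandler = ErrorHandlerFunc(func(err error) *ResponseBatchItem {
//		if item := DefaultErrorHandler.HandleError(err); item != nil {
//			return item
//		}
//		return newFailedResponseBatchItem(kmip14.ResultReasonGeneralFailure, err.Error())
//	})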
|
||||
func newFailedResponseBatchItem(reason kmip14.ResultReason, msg string) *ResponseBatchItem {
|
||||
return &ResponseBatchItem{
|
||||
ResultStatus: kmip14.ResultStatusOperationFailed,
|
||||
ResultReason: reason,
|
||||
ResultMessage: msg,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *OperationMux) bi(ctx context.Context, req *Request, reqItem *RequestBatchItem) *ResponseBatchItem {
|
||||
req.CurrentItem = reqItem
|
||||
h := m.handlerForOp(reqItem.Operation)
|
||||
if h == nil {
|
||||
return newFailedResponseBatchItem(kmip14.ResultReasonOperationNotSupported, "")
|
||||
}
|
||||
|
||||
resp, err := h.HandleItem(ctx, req)
|
||||
if err != nil {
|
||||
eh := m.ErrorHandler
|
||||
if eh == nil {
|
||||
eh = DefaultErrorHandler
|
||||
}
|
||||
resp = eh.HandleError(err)
|
||||
if resp == nil {
|
||||
// errors which don't convert just panic
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
func (m *OperationMux) HandleMessage(ctx context.Context, req *Request, resp *Response) {
|
||||
for i := range req.Message.BatchItem {
|
||||
reqItem := &req.Message.BatchItem[i]
|
||||
respItem := m.bi(ctx, req, reqItem)
|
||||
respItem.Operation = reqItem.Operation
|
||||
respItem.UniqueBatchItemID = reqItem.UniqueBatchItemID
|
||||
resp.BatchItem = append(resp.BatchItem, *respItem)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *OperationMux) Handle(op kmip14.Operation, handler ItemHandler) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if m.handlers == nil {
|
||||
m.handlers = map[kmip14.Operation]ItemHandler{}
|
||||
}
|
||||
|
||||
m.handlers[op] = handler
|
||||
}
|
||||
|
||||
func (m *OperationMux) handlerForOp(op kmip14.Operation) ItemHandler {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
return m.handlers[op]
|
||||
}
|
||||
|
||||
func (m *OperationMux) missingHandler(ctx context.Context, req *Request, resp *ResponseMessage) error {
|
||||
resp.addFailure(kmip14.ResultReasonOperationNotSupported, "")
|
||||
return nil
|
||||
}
|
499
vendor/github.com/gemalto/kmip-go/ttlv/decoder.go
generated
vendored
Normal file
@ -0,0 +1,499 @@
|
||||
package ttlv
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"github.com/ansel1/merry"
|
||||
)
|
||||
|
||||
var ErrUnexpectedValue = errors.New("no field was found to unmarshal value into")
|
||||
|
||||
// Unmarshal parses TTLV encoded data and stores the result
|
||||
// in the value pointed to by v.
|
||||
//
|
||||
// An error will be returned if v is nil or not a pointer, or
|
||||
// if b is not valid TTLV.
|
||||
//
|
||||
// Unmarshal will allocate values to store the result in, similar to the
|
||||
// json package's Unmarshal. Generally, the destination value can be a pointer or
|
||||
// a direct value. Currently, Unmarshal does not support anonymous fields.
|
||||
// They will be ignored. Private fields are ignored.
|
||||
//
|
||||
// Unmarshal maps TTLV values to golang values according to the following
|
||||
// rules:
|
||||
//
|
||||
// 1. If the destination value is interface{}, it will be set to the result
|
||||
// of TTLV.Value()
|
||||
// 2. If the destination implements Unmarshaler, that will be called.
|
||||
// 3. If the destination is a slice (except for []byte), append the
|
||||
// unmarshalled value to the slice
|
||||
// 4. Structure unmarshals into a struct. See rules
|
||||
// below for matching struct fields to the values in the Structure.
|
||||
// 5. Interval unmarshals into an int64
|
||||
// 6. DateTime and DateTimeExtended unmarshal into time.Time
|
||||
// 7. ByteString unmarshals to a []byte
|
||||
// 8. TextString unmarshals into a string
|
||||
// 9. Boolean unmarshals into a bool
|
||||
// 10. Enumeration can unmarshal into an int, int8, int16, int32, or their
|
||||
// uint counterparts. If the KMIP value overflows the destination, a
|
||||
// *UnmarshalerError with cause ErrIntOverflow is returned.
|
||||
// 11. Integer can unmarshal to the same types as Enumeration, with the
|
||||
// same overflow check.
|
||||
// 12. LongInteger unmarshals to int64 or uint64
|
||||
// 13. BitInteger unmarshals to big.Int.
|
||||
//
|
||||
// If the destination value is not a supported type, an *UnmarshalerError with
|
||||
// cause ErrUnsupportedTypeError is returned. If the source value's type is not recognized,
|
||||
// *UnmarshalerError with cause ErrInvalidType is returned.
|
||||
//
|
||||
// Unmarshaling Structure
|
||||
//
|
||||
// Unmarshal will try to match the values in the Structure with the fields in the
|
||||
// destination struct. Structure is an array of values, while a struct is more like
|
||||
// a map, so not all Structure values can be accurately represented by a golang struct.
|
||||
// In particular, a Structure can hold the same tag multiple times, e.g. 3 TagComment values
|
||||
// in a row.
|
||||
//
|
||||
// For each field in the struct, Unmarshal infers a KMIP Tag by examining both the name
|
||||
// and type of the field. It uses the following rules, in order:
|
||||
//
|
||||
// 1. If the type of a field is a struct, and the struct contains a field named "TTLVTag", and the field
|
||||
// has a "ttlv" struct tag, the value of the struct tag will be parsed using ParseTag(). If
|
||||
// parsing fails, an error is returned. The type and value of the TTLVTag field is ignored.
|
||||
// In this example, the F field will map to TagDeactivationDate:
|
||||
//
|
||||
// type Bar struct {
|
||||
// F Foo
|
||||
// }
|
||||
// type Foo struct {
|
||||
// TTLVTag struct{} `ttlv:"DeactivationDate"`
|
||||
// }
|
||||
//
|
||||
// If Bar uses a struct tag on F indicating a different tag, it is an error:
|
||||
//
|
||||
// type Bar struct {
|
||||
// F Foo `ttlv:"DerivationData"` // this will cause an ErrTagConflict
|
||||
// // because Bar's field tag
|
||||
// // conflicts with Foo's intrinsic tag
|
||||
// F2 Foo `ttlv:"0x420034"` // the value can also be hex
|
||||
// }
|
||||
// 2. If the type of the field is a struct, and the struct contains a field named "TTLVTag",
|
||||
// and that field is of type ttlv.Tag and is not empty, the value of the field will be the
|
||||
// inferred Tag. For example:
|
||||
//
|
||||
// type Foo struct {
|
||||
// TTLVTag ttlv.Tag
|
||||
// }
|
||||
// f := Foo{TTLVTag: ttlv.TagState}
|
||||
//
|
||||
// This allows you to dynamically set the KMIP tag that a value will marshal to.
|
||||
// 3. The "ttlv" struct tag can be used to indicate the tag for a field. The value will
|
||||
// be parsed with ParseTag()
|
||||
//
|
||||
// type Bar struct {
|
||||
// F Foo `ttlv:"DerivationData"`
|
||||
// }
|
||||
//
|
||||
// 4. The name of the field is parsed with ParseTag():
|
||||
//
|
||||
// type Bar struct {
|
||||
// DerivationData int
|
||||
// }
|
||||
//
|
||||
// 5. The name of the field's type is parsed with ParseTag():
|
||||
//
|
||||
// type DerivationData int
|
||||
//
|
||||
// type Bar struct {
|
||||
// dd DerivationData
|
||||
// }
|
||||
//
|
||||
// If no tag value can be inferred, the field is ignored. Multiple fields
|
||||
// *cannot* map to the same KMIP tag. If they do, an ErrTagConflict will
|
||||
// be returned.
|
||||
//
|
||||
// Each value in the Structure will be matched against the first field
|
||||
// in the struct with the same inferred tag.
|
||||
//
|
||||
// If the value cannot be matched with a field, Unmarshal will look for
|
||||
// the first field with the "any" struct flag set and unmarshal into that:
|
||||
//
|
||||
// type Foo struct {
|
||||
// Comment string // the Comment will unmarshal into this
|
||||
// EverythingElse []interface{} `,any` // all other values will unmarshal into this
|
||||
// AnotherAny []interface{} `,any` // allowed, but ignored. first any field will always match
|
||||
// NotLegal []interface{} `TagComment,any` // you cannot specify a tag and the any flag.
|
||||
// // will return error
|
||||
// }
|
||||
//
|
||||
// If after applying these rules no destination field is found, the KMIP value is ignored.
|
||||
func Unmarshal(ttlv TTLV, v interface{}) error {
|
||||
return NewDecoder(bytes.NewReader(ttlv)).Decode(v)
|
||||
}
|
||||
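// A short example of these rules (a sketch; the Foo type and the ttlvData
// bytes are illustrative):
//
//	type Foo struct {
//		Comment string
//	}
//
//	var f Foo
//	err := Unmarshal(ttlvData, &f) // fills f.Comment from a TagComment TextString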
|
||||
// Unmarshaler knows how to unmarshal a ttlv value into itself.
|
||||
// The decoder argument may be used to decode the ttlv value into
|
||||
// intermediary values if needed.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTTLV(d *Decoder, ttlv TTLV) error
|
||||
}
|
||||
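// A sketch of a custom Unmarshaler, assuming the incoming value is a
// TextString (the Name type is illustrative):
//
//	type Name string
//
//	func (n *Name) UnmarshalTTLV(_ *Decoder, t TTLV) error {
//		*n = Name(t.ValueTextString())
//		return nil
//	}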
|
||||
// Decoder reads KMIP values from a stream, and decodes them into golang values.
|
||||
// It currently only decodes TTLV encoded KMIP values.
|
||||
// TODO: support decoding XML and JSON, so their decoding can be configured
|
||||
//
|
||||
// If DisallowExtraValues is true, the decoder will return an error when decoding
|
||||
// Structures into structs and a matching field can't be found for every value.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
bufr *bufio.Reader
|
||||
DisallowExtraValues bool
|
||||
|
||||
currStruct reflect.Type
|
||||
currField string
|
||||
}
|
||||
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{
|
||||
r: r,
|
||||
bufr: bufio.NewReader(r),
|
||||
}
|
||||
}
|
||||
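// A sketch of decoding a stream of values (conn is any io.Reader, e.g. a
// net.Conn; error handling elided):
//
//	dec := NewDecoder(conn)
//	for {
//		var v interface{}
//		if err := dec.Decode(&v); err != nil {
//			break // io.EOF once the stream is exhausted
//		}
//	}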
|
||||
// Reset resets the internal state of the decoder for reuse.
|
||||
func (dec *Decoder) Reset(r io.Reader) {
|
||||
*dec = Decoder{
|
||||
r: r,
|
||||
bufr: dec.bufr,
|
||||
}
|
||||
dec.bufr.Reset(r)
|
||||
}
|
||||
|
||||
// Decode the first KMIP value from the reader into v.
|
||||
// See Unmarshal for decoding rules.
|
||||
func (dec *Decoder) Decode(v interface{}) error {
|
||||
ttlv, err := dec.NextTTLV()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dec.DecodeValue(v, ttlv)
|
||||
}
|
||||
|
||||
// DecodeValue decodes a ttlv value into v. This doesn't read anything
|
||||
// from the Decoder's reader.
|
||||
// See Unmarshal for decoding rules.
|
||||
func (dec *Decoder) DecodeValue(v interface{}, ttlv TTLV) error {
|
||||
val := reflect.ValueOf(v)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return merry.New("non-pointer passed to Decode")
|
||||
}
|
||||
|
||||
return dec.unmarshal(val, ttlv)
|
||||
}
|
||||
|
||||
func (dec *Decoder) unmarshal(val reflect.Value, ttlv TTLV) error {
|
||||
if len(ttlv) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Load value from interface, but only if the result will be
|
||||
// usefully addressable.
|
||||
if val.Kind() == reflect.Interface && !val.IsNil() {
|
||||
e := val.Elem()
|
||||
if e.Kind() == reflect.Ptr && !e.IsNil() {
|
||||
val = e
|
||||
}
|
||||
}
|
||||
|
||||
if val.Kind() == reflect.Ptr {
|
||||
if val.IsNil() {
|
||||
val.Set(reflect.New(val.Type().Elem()))
|
||||
}
|
||||
|
||||
val = val.Elem()
|
||||
}
|
||||
|
||||
if val.Type().Implements(unmarshalerType) {
|
||||
return val.Interface().(Unmarshaler).UnmarshalTTLV(dec, ttlv) //nolint:forcetypeassert
|
||||
}
|
||||
|
||||
if val.CanAddr() {
|
||||
valAddr := val.Addr()
|
||||
if valAddr.CanInterface() && valAddr.Type().Implements(unmarshalerType) {
|
||||
return valAddr.Interface().(Unmarshaler).UnmarshalTTLV(dec, ttlv) //nolint:forcetypeassert
|
||||
}
|
||||
}
|
||||
|
||||
switch val.Kind() {
|
||||
case reflect.Interface:
|
||||
if ttlv.Type() == TypeStructure {
|
||||
// if the value is a structure, set the whole TTLV
|
||||
// as the value.
|
||||
val.Set(reflect.ValueOf(ttlv))
|
||||
} else {
|
||||
// set blank interface equal to the TTLV.Value()
|
||||
val.Set(reflect.ValueOf(ttlv.Value()))
|
||||
}
|
||||
|
||||
return nil
|
||||
case reflect.Slice:
|
||||
typ := val.Type()
|
||||
if typ.Elem() == byteType {
|
||||
// []byte
|
||||
break
|
||||
}
|
||||
|
||||
// Slice of element values.
|
||||
// Grow slice.
|
||||
n := val.Len()
|
||||
val.Set(reflect.Append(val, reflect.Zero(val.Type().Elem())))
|
||||
|
||||
// Recur to read element into slice.
|
||||
if err := dec.unmarshal(val.Index(n), ttlv); err != nil {
|
||||
val.SetLen(n)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
typeMismatchErr := func() error {
|
||||
e := &UnmarshalerError{
|
||||
Struct: dec.currStruct,
|
||||
Field: dec.currField,
|
||||
Tag: ttlv.Tag(),
|
||||
Type: ttlv.Type(),
|
||||
Val: val.Type(),
|
||||
}
|
||||
err := merry.WrapSkipping(e, 1).WithCause(ErrUnsupportedTypeError)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
switch ttlv.Type() {
|
||||
case TypeStructure:
|
||||
if val.Kind() != reflect.Struct {
|
||||
return typeMismatchErr()
|
||||
}
|
||||
// stash currStruct
|
||||
currStruct := dec.currStruct
|
||||
err := dec.unmarshalStructure(ttlv, val)
|
||||
// restore currStruct
|
||||
dec.currStruct = currStruct
|
||||
|
||||
return err
|
||||
case TypeInterval:
|
||||
if val.Kind() != reflect.Int64 {
|
||||
return typeMismatchErr()
|
||||
}
|
||||
|
||||
val.SetInt(int64(ttlv.ValueInterval()))
|
||||
case TypeDateTime, TypeDateTimeExtended:
|
||||
if val.Type() != timeType {
|
||||
return typeMismatchErr()
|
||||
}
|
||||
|
||||
val.Set(reflect.ValueOf(ttlv.ValueDateTime()))
|
||||
case TypeByteString:
|
||||
if val.Kind() != reflect.Slice && val.Type().Elem() != byteType {
|
||||
return typeMismatchErr()
|
||||
}
|
||||
|
||||
val.SetBytes(ttlv.ValueByteString())
|
||||
case TypeTextString:
|
||||
if val.Kind() != reflect.String {
|
||||
return typeMismatchErr()
|
||||
}
|
||||
|
||||
val.SetString(ttlv.ValueTextString())
|
||||
case TypeBoolean:
|
||||
if val.Kind() != reflect.Bool {
|
||||
return typeMismatchErr()
|
||||
}
|
||||
|
||||
val.SetBool(ttlv.ValueBoolean())
|
||||
// nolint:dupl
|
||||
case TypeEnumeration:
|
||||
switch val.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
|
||||
i := int64(ttlv.ValueEnumeration())
|
||||
if val.OverflowInt(i) {
|
||||
return dec.newUnmarshalerError(ttlv, val.Type(), ErrIntOverflow)
|
||||
}
|
||||
|
||||
val.SetInt(i)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
i := uint64(ttlv.ValueEnumeration())
|
||||
if val.OverflowUint(i) {
|
||||
return dec.newUnmarshalerError(ttlv, val.Type(), ErrIntOverflow)
|
||||
}
|
||||
|
||||
val.SetUint(i)
|
||||
default:
|
||||
return typeMismatchErr()
|
||||
}
|
||||
// nolint:dupl
|
||||
case TypeInteger:
|
||||
switch val.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
|
||||
i := int64(ttlv.ValueInteger())
|
||||
if val.OverflowInt(i) {
|
||||
return dec.newUnmarshalerError(ttlv, val.Type(), ErrIntOverflow)
|
||||
}
|
||||
|
||||
val.SetInt(i)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
i := uint64(ttlv.ValueInteger())
|
||||
if val.OverflowUint(i) {
|
||||
return dec.newUnmarshalerError(ttlv, val.Type(), ErrIntOverflow)
|
||||
}
|
||||
|
||||
val.SetUint(i)
|
||||
default:
|
||||
return typeMismatchErr()
|
||||
}
|
||||
case TypeLongInteger:
|
||||
switch val.Kind() {
|
||||
case reflect.Int64:
|
||||
val.SetInt(ttlv.ValueLongInteger())
|
||||
case reflect.Uint64:
|
||||
val.SetUint(uint64(ttlv.ValueLongInteger()))
|
||||
default:
|
||||
return typeMismatchErr()
|
||||
}
|
||||
case TypeBigInteger:
|
||||
if val.Type() != bigIntType {
|
||||
return typeMismatchErr()
|
||||
}
|
||||
|
||||
val.Set(reflect.ValueOf(*ttlv.ValueBigInteger()))
|
||||
default:
|
||||
return dec.newUnmarshalerError(ttlv, val.Type(), ErrInvalidType)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dec *Decoder) unmarshalStructure(ttlv TTLV, val reflect.Value) error {
|
||||
ti, err := getTypeInfo(val.Type())
|
||||
if err != nil {
|
||||
return dec.newUnmarshalerError(ttlv, val.Type(), err)
|
||||
}
|
||||
|
||||
if ti.tagField != nil && ti.tagField.ti.typ == tagType {
|
||||
val.FieldByIndex(ti.tagField.index).Set(reflect.ValueOf(ttlv.Tag()))
|
||||
}
|
||||
|
||||
fields := ti.valueFields
|
||||
|
||||
// push currStruct (caller will pop)
|
||||
dec.currStruct = val.Type()
|
||||
|
||||
for n := ttlv.ValueStructure(); n != nil; n = n.Next() {
|
||||
fldIdx := -1
|
||||
|
||||
for i := range fields {
|
||||
if fields[i].flags.any() {
|
||||
// if this is the first any field found, keep track
|
||||
// of it as the current candidate match, but
|
||||
// keep looking for a tag match
|
||||
if fldIdx == -1 {
|
||||
fldIdx = i
|
||||
}
|
||||
} else if fields[i].tag == n.Tag() {
|
||||
// tag match found
|
||||
// we can stop looking
|
||||
fldIdx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if fldIdx > -1 {
|
||||
// push currField
|
||||
currField := dec.currField
|
||||
dec.currField = fields[fldIdx].name
|
||||
err := dec.unmarshal(val.FieldByIndex(fields[fldIdx].index), n)
|
||||
// restore currField
|
||||
dec.currField = currField
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if dec.DisallowExtraValues {
|
||||
return dec.newUnmarshalerError(ttlv, val.Type(), ErrUnexpectedValue)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NextTTLV reads the next, full KMIP value off the reader.
|
||||
func (dec *Decoder) NextTTLV() (TTLV, error) {
|
||||
// first, read the header
|
||||
header, err := dec.bufr.Peek(8)
|
||||
if err != nil {
|
||||
return nil, merry.Wrap(err)
|
||||
}
|
||||
|
||||
if err := TTLV(header).ValidHeader(); err != nil {
|
||||
// bad header, abort
|
||||
return TTLV(header), merry.Prependf(err, "invalid header: %v", TTLV(header))
|
||||
}
|
||||
|
||||
// allocate a buffer large enough for the entire message
|
||||
fullLen := TTLV(header).FullLen()
|
||||
buf := make([]byte, fullLen)
|
||||
|
||||
var totRead int
|
||||
|
||||
for {
|
||||
n, err := dec.bufr.Read(buf[totRead:])
|
||||
if err != nil {
|
||||
return TTLV(buf), merry.Wrap(err)
|
||||
}
|
||||
|
||||
totRead += n
|
||||
if totRead >= fullLen {
|
||||
// we've read off a single full message
|
||||
return buf, nil
|
||||
} // else keep reading
|
||||
}
|
||||
}
|
||||
|
||||
func (dec *Decoder) newUnmarshalerError(ttlv TTLV, valType reflect.Type, cause error) merry.Error {
|
||||
e := &UnmarshalerError{
|
||||
Struct: dec.currStruct,
|
||||
Field: dec.currField,
|
||||
Tag: ttlv.Tag(),
|
||||
Type: ttlv.Type(),
|
||||
Val: valType,
|
||||
}
|
||||
|
||||
return merry.WrapSkipping(e, 1).WithCause(cause)
|
||||
}
|
||||
|
||||
type UnmarshalerError struct {
|
||||
// Val is the type of the destination value
|
||||
Val reflect.Type
|
||||
// Struct is the type of the containing struct if the value is a field
|
||||
Struct reflect.Type
|
||||
// Field is the name of the value field
|
||||
Field string
|
||||
Tag Tag
|
||||
Type Type
|
||||
}
|
||||
|
||||
func (e *UnmarshalerError) Error() string {
|
||||
msg := "kmip: error unmarshaling " + e.Tag.String() + " with type " + e.Type.String() + " into value of type " + e.Val.Name()
|
||||
if e.Struct != nil {
|
||||
msg += " in struct field " + e.Struct.Name() + "." + e.Field
|
||||
}
|
||||
|
||||
return msg
|
||||
}
|
22
vendor/github.com/gemalto/kmip-go/ttlv/docs.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
// Package ttlv encodes and decodes the 3 wire formats defined in the KMIP specification:
|
||||
//
|
||||
// 1. TTLV (the default, binary wire format)
|
||||
// 2. JSON
|
||||
// 3. XML
|
||||
//
|
||||
// The core representation of KMIP values is the ttlv.TTLV type, which is
|
||||
// a []byte encoded in the TTLV binary format. The ttlv.TTLV type knows how to marshal/
|
||||
// unmarshal to and from the JSON and XML encoding formats.
|
||||
//
|
||||
// This package also knows how to marshal and unmarshal ttlv.TTLV values to golang structs,
|
||||
// in a way similar to the json or xml packages.
|
||||
//
|
||||
// See Marshal() and Unmarshal() for the rules about how golang values map to KMIP TTLVs.
|
||||
// Encoder and Decoder can be used to process streams of KMIP values.
|
||||
//
|
||||
// This package holds a registry of type, tag, and enum value names, which are used to transcode
|
||||
// strings into these values. KMIP 1.4 names will be automatically loaded into the
|
||||
// DefaultRegistry. See the kmip20 package to add definitions for 2.0 names.
|
||||
//
|
||||
// Print() and PrettyPrintHex() can be used to debug TTLV values.
|
||||
package ttlv
|
962
vendor/github.com/gemalto/kmip-go/ttlv/encoder.go
generated
vendored
Normal file
@ -0,0 +1,962 @@
|
||||
package ttlv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ansel1/merry"
|
||||
)
|
||||
|
||||
const structFieldTag = "ttlv"
|
||||
|
||||
var (
|
||||
ErrIntOverflow = fmt.Errorf("value exceeds max int value %d", math.MaxInt32)
|
||||
ErrUnsupportedEnumTypeError = errors.New("unsupported type for enums, must be string, or int types")
|
||||
ErrUnsupportedTypeError = errors.New("marshaling/unmarshaling is not supported for this type")
|
||||
ErrNoTag = errors.New("unable to determine tag for field")
|
||||
ErrTagConflict = errors.New("tag conflict")
|
||||
)
|
||||
|
||||
// Marshal encodes a golang value into a KMIP value.
|
||||
//
|
||||
// An error will be returned if v is an invalid pointer.
|
||||
//
|
||||
// Currently, Marshal does not support anonymous fields.
|
||||
// Private fields are ignored.
|
||||
//
|
||||
// Marshal maps the golang value to a KMIP tag, type, and value
|
||||
// encoding. To determine the KMIP tag, Marshal uses the same rules
|
||||
// as Unmarshal.
|
||||
//
|
||||
// The appropriate type and encoding are inferred from the golang type
|
||||
// and from the inferred KMIP tag, according to these rules:
|
||||
//
|
||||
// 1. If the value is a TTLV, it is copied byte for byte
|
||||
// 2. If the value implements Marshaler, call that
|
||||
// 3. If the struct field has an "omitempty" flag, and the value is
|
||||
// zero, skip the field:
|
||||
//
|
||||
// type Foo struct {
|
||||
// Comment string `ttlv:",omitempty"`
|
||||
// }
|
||||
//
|
||||
// 4. If the value is a slice (except []byte) or array, marshal all
|
||||
// values concatenated
|
||||
// 5. If a tag has not been inferred at this point, return *MarshalerError with
|
||||
// cause ErrNoTag
|
||||
// 6. If the Tag is registered as an enum, or has the "enum" struct tag flag, attempt
|
||||
// to marshal as an Enumeration. int, int8, int16, int32, and their uint counterparts
|
||||
// can be marshaled as an Enumeration. A string can be marshaled to an Enumeration
|
||||
// if the string contains a number, a 4 byte (8 char) hex string with the prefix "0x",
|
||||
// or the normalized name of an enum value registered to this tag. Examples:
|
||||
//
|
||||
// type Foo struct {
|
||||
// CancellationResult string // will encode as an Enumeration because
|
||||
// // the tag CancellationResult is registered
|
||||
// // as an enum.
|
||||
// C int `ttlv:"Comment,enum"` // The tag Comment is not registered as an enum
|
||||
// // but the enum flag will force this to encode
|
||||
// // as an enumeration.
|
||||
// }
|
||||
//
|
||||
// If the string can't be interpreted as an enum value, it will be encoded as a TextString. If
|
||||
// the "enum" struct flag is set, the value *must* successfully encode to an Enumeration using
|
||||
// above rules, or an error is returned.
|
||||
// 7. If the Tag is registered as a bitmask, or has the "bitmask" struct tag flag, attempt
|
||||
// to marshal to an Integer, following the same rules as for Enumerations. The ParseInt()
|
||||
// function is used to parse string values.
|
||||
// 9. time.Time marshals to DateTime. If the field has the "datetimeextended" struct flag,
|
||||
// marshal as DateTimeExtended. Example:
|
||||
//
|
||||
// type Foo struct {
|
||||
// ActivationDate time.Time `ttlv:",datetimeextended"`
|
||||
// }
|
||||
//
|
||||
// 10. big.Int marshals to BigInteger
|
||||
// 11. time.Duration marshals to Interval
|
||||
// 12. string marshals to TextString
|
||||
// 13. []byte marshals to ByteString
|
||||
// 14. all int and uint variants except int64 and uint64 marshal to Integer. If the golang
|
||||
// value overflows the KMIP value, *MarshalerError with cause ErrIntOverflow is returned
|
||||
// 15. int64 and uint64 marshal to LongInteger
|
||||
// 16. bool marshals to Boolean
|
||||
// 17. structs marshal to Structure. Each field of the struct will be marshaled into the
|
||||
// values of the Structure according to the above rules.
|
||||
//
|
||||
// Any other golang type will return *MarshalerError with cause ErrUnsupportedTypeError.
|
||||
func Marshal(v interface{}) (TTLV, error) {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
||||
err := NewEncoder(buf).Encode(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
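A hedged sketch of how the struct tag flags described above drive the encoding (not part of the vendored file; it assumes the kmip14 import registers the tag and enum names used here):

package main

import (
	"bytes"
	"time"

	_ "github.com/gemalto/kmip-go/kmip14" // assumed: registers the KMIP 1.4 names used below
	"github.com/gemalto/kmip-go/ttlv"
)

// payload exercises several of the field rules documented above; it is illustrative only.
type payload struct {
	Comment            string    `ttlv:",omitempty"`        // skipped entirely when empty
	CancellationResult string                               // tag registered as an enum, so "UnableToCancel" encodes as an Enumeration
	ActivationDate     time.Time `ttlv:",datetimeextended"` // encodes as DateTimeExtended instead of DateTime
	Internal           string    `ttlv:"-"`                 // never encoded
}

func main() {
	// Encode under an explicit tag; "RequestPayload" is assumed to be a registered name.
	tag, err := ttlv.DefaultRegistry.ParseTag("RequestPayload")
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	err = ttlv.NewEncoder(&buf).EncodeValue(tag, payload{
		CancellationResult: "UnableToCancel",
		ActivationDate:     time.Now(),
	})
	if err != nil {
		panic(err)
	}
}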
|
||||
|
||||
// Marshaler knows how to encode itself to TTLV.
|
||||
// The implementation should use the primitive methods of the encoder,
|
||||
// such as EncodeInteger(), etc.
|
||||
//
|
||||
// The tag inferred by the Encoder from the field or type information is
|
||||
// passed as an argument, but the implementation can choose to ignore it.
|
||||
type Marshaler interface {
|
||||
MarshalTTLV(e *Encoder, tag Tag) error
|
||||
}
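For illustration (not part of the vendored source), a minimal Marshaler implementation that uses the Encoder's primitive methods. The custom tag number is an arbitrary value in the vendor extension range, chosen so no registry lookup is needed:

package main

import (
	"bytes"
	"fmt"

	"github.com/gemalto/kmip-go/ttlv"
)

// celsius controls its own encoding: any value of this type is encoded by
// MarshalTTLV rather than by the default, type-based rules.
type celsius float64

func (c celsius) MarshalTTLV(e *ttlv.Encoder, tag ttlv.Tag) error {
	// Use the tag the encoder inferred from the enclosing field or type,
	// and encode the value as a TextString.
	e.EncodeTextString(tag, fmt.Sprintf("%.1f C", float64(c)))
	return nil
}

func main() {
	var buf bytes.Buffer
	// Encode a single celsius value under an explicitly chosen custom-range tag.
	if err := ttlv.NewEncoder(&buf).EncodeValue(ttlv.Tag(0x540002), celsius(21.5)); err != nil {
		panic(err)
	}
}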
|
||||
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w: w}
|
||||
}
|
||||
|
||||
// Encode a single value and flush to the writer. The tag will be inferred from
|
||||
// the value. If no tag can be inferred, an error is returned.
|
||||
// See Marshal for encoding rules.
|
||||
func (e *Encoder) Encode(v interface{}) error {
|
||||
return e.EncodeValue(TagNone, v)
|
||||
}
|
||||
|
||||
// EncodeValue encodes a single value with the given tag and flushes it
|
||||
// to the writer.
|
||||
// See Marshal for encoding rules.
|
||||
func (e *Encoder) EncodeValue(tag Tag, v interface{}) error {
|
||||
err := e.encode(tag, reflect.ValueOf(v), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return e.Flush()
|
||||
}
|
||||
|
||||
// EncodeStructure encodes a Structure with the given tag to the writer.
|
||||
// The function argument should encode the enclosed values inside the Structure.
|
||||
// Call Flush() to write the data to the writer.
|
||||
func (e *Encoder) EncodeStructure(tag Tag, f func(e *Encoder) error) error {
|
||||
e.encodeDepth++
|
||||
i := e.encBuf.begin(tag, TypeStructure)
|
||||
err := f(e)
|
||||
e.encBuf.end(i)
|
||||
e.encodeDepth--
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// EncodeEnumeration, along with the other Encode<Type> methods, encodes a
|
||||
// single KMIP value with the given tag to an internal buffer. These methods
|
||||
// do not flush the data to the writer: call Flush() to flush the buffer.
|
||||
func (e *Encoder) EncodeEnumeration(tag Tag, v uint32) {
|
||||
e.encBuf.encodeEnum(tag, v)
|
||||
}
|
||||
|
||||
func (e *Encoder) EncodeInteger(tag Tag, v int32) {
|
||||
e.encBuf.encodeInt(tag, v)
|
||||
}
|
||||
|
||||
func (e *Encoder) EncodeLongInteger(tag Tag, v int64) {
|
||||
e.encBuf.encodeLongInt(tag, v)
|
||||
}
|
||||
|
||||
func (e *Encoder) EncodeInterval(tag Tag, v time.Duration) {
|
||||
e.encBuf.encodeInterval(tag, v)
|
||||
}
|
||||
|
||||
func (e *Encoder) EncodeDateTime(tag Tag, v time.Time) {
|
||||
e.encBuf.encodeDateTime(tag, v)
|
||||
}
|
||||
|
||||
func (e *Encoder) EncodeDateTimeExtended(tag Tag, v time.Time) {
|
||||
e.encBuf.encodeDateTimeExtended(tag, v)
|
||||
}
|
||||
|
||||
func (e *Encoder) EncodeBigInteger(tag Tag, v *big.Int) {
|
||||
e.encBuf.encodeBigInt(tag, v)
|
||||
}
|
||||
|
||||
func (e *Encoder) EncodeBoolean(tag Tag, v bool) {
|
||||
e.encBuf.encodeBool(tag, v)
|
||||
}
|
||||
|
||||
func (e *Encoder) EncodeTextString(tag Tag, v string) {
|
||||
e.encBuf.encodeTextString(tag, v)
|
||||
}
|
||||
|
||||
func (e *Encoder) EncodeByteString(tag Tag, v []byte) {
|
||||
e.encBuf.encodeByteString(tag, v)
|
||||
}
|
||||
|
||||
// Flush flushes the internal encoding buffer to the writer.
|
||||
func (e *Encoder) Flush() error {
|
||||
if e.encodeDepth > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := e.encBuf.WriteTo(e.w)
|
||||
e.encBuf.Reset()
|
||||
|
||||
return err
|
||||
}
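A sketch of the streaming flow the methods above describe (illustrative, not from the vendored file; it assumes the kmip14 import registers the ProtocolVersion tag names):

package main

import (
	"bytes"

	_ "github.com/gemalto/kmip-go/kmip14" // assumed: loads the KMIP 1.4 tag names
	"github.com/gemalto/kmip-go/ttlv"
)

func main() {
	mustTag := func(name string) ttlv.Tag {
		t, err := ttlv.DefaultRegistry.ParseTag(name)
		if err != nil {
			panic(err)
		}
		return t
	}

	var buf bytes.Buffer
	enc := ttlv.NewEncoder(&buf)

	// The primitive Encode* methods only fill the internal buffer;
	// EncodeStructure nests them, and Flush writes the bytes to buf.
	err := enc.EncodeStructure(mustTag("ProtocolVersion"), func(e *ttlv.Encoder) error {
		e.EncodeInteger(mustTag("ProtocolVersionMajor"), 1)
		e.EncodeInteger(mustTag("ProtocolVersionMinor"), 4)
		return nil
	})
	if err != nil {
		panic(err)
	}

	if err := enc.Flush(); err != nil {
		panic(err)
	}
	_ = buf.Bytes() // the encoded TTLV structure
}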
|
||||
|
||||
type MarshalerError struct {
|
||||
// Type is the golang type of the value being marshaled
|
||||
Type reflect.Type
|
||||
// Struct is the name of the enclosing struct if the marshaled value is a field.
|
||||
Struct string
|
||||
// Field is the name of the field being marshaled
|
||||
Field string
|
||||
Tag Tag
|
||||
}
|
||||
|
||||
func (e *MarshalerError) Error() string {
|
||||
msg := "kmip: error marshaling value"
|
||||
if e.Type != nil {
|
||||
msg += " of type " + e.Type.String()
|
||||
}
|
||||
|
||||
if e.Struct != "" {
|
||||
msg += " in struct field " + e.Struct + "." + e.Field
|
||||
}
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
func (e *Encoder) marshalingError(tag Tag, t reflect.Type, cause error) merry.Error {
|
||||
err := &MarshalerError{
|
||||
Type: t,
|
||||
Struct: e.currStruct,
|
||||
Field: e.currField,
|
||||
Tag: tag,
|
||||
}
|
||||
|
||||
return merry.WrapSkipping(err, 1).WithCause(cause)
|
||||
}
|
||||
|
||||
var (
|
||||
byteType = reflect.TypeOf(byte(0))
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
|
||||
bigIntPtrType = reflect.TypeOf((*big.Int)(nil))
|
||||
bigIntType = bigIntPtrType.Elem()
|
||||
durationType = reflect.TypeOf(time.Nanosecond)
|
||||
ttlvType = reflect.TypeOf((*TTLV)(nil)).Elem()
|
||||
tagType = reflect.TypeOf(Tag(0))
|
||||
)
|
||||
|
||||
var invalidValue = reflect.Value{}
|
||||
|
||||
// indirect dives into interfaces values, and one level deep into pointers
|
||||
// returns an invalid value if the resolved value is nil or invalid.
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if !v.IsValid() {
|
||||
return v
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.Interface {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
if !v.IsValid() {
|
||||
return v
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Func, reflect.Slice, reflect.Map, reflect.Chan, reflect.Ptr, reflect.Interface:
|
||||
if v.IsNil() {
|
||||
return invalidValue
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
var zeroBigInt = big.Int{}
|
||||
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
default:
|
||||
}
|
||||
|
||||
switch v.Type() {
|
||||
case timeType:
|
||||
return v.Interface().(time.Time).IsZero() //nolint:forcetypeassert
|
||||
case bigIntType:
|
||||
i := v.Interface().(big.Int) //nolint:forcetypeassert
|
||||
return zeroBigInt.Cmp(&i) == 0
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (e *Encoder) encode(tag Tag, v reflect.Value, fi *fieldInfo) error {
|
||||
// if pointer or interface
|
||||
v = indirect(v)
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
typ := v.Type()
|
||||
|
||||
if typ == ttlvType {
|
||||
// fast path: if the value is TTLV, we write it directly to the output buffer
|
||||
_, err := e.encBuf.Write(v.Bytes())
|
||||
return err
|
||||
}
|
||||
|
||||
typeInfo, err := getTypeInfo(typ)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tag == TagNone {
|
||||
tag = tagForMarshal(v, typeInfo, fi)
|
||||
}
|
||||
|
||||
var flags fieldFlags
|
||||
if fi != nil {
|
||||
flags = fi.flags
|
||||
}
|
||||
|
||||
// check for Marshaler
|
||||
switch {
|
||||
case typ.Implements(marshalerType):
|
||||
if flags.omitEmpty() && isEmptyValue(v) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return v.Interface().(Marshaler).MarshalTTLV(e, tag) //nolint:forcetypeassert
|
||||
case v.CanAddr():
|
||||
pv := v.Addr()
|
||||
|
||||
pvtyp := pv.Type()
|
||||
if pvtyp.Implements(marshalerType) {
|
||||
if flags.omitEmpty() && isEmptyValue(v) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return pv.Interface().(Marshaler).MarshalTTLV(e, tag) //nolint:forcetypeassert
|
||||
}
|
||||
}
|
||||
|
||||
// If the type doesn't implement Marshaler, then validate the value is a supported kind
|
||||
switch v.Kind() {
|
||||
case reflect.Chan, reflect.Map, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Uintptr, reflect.Float32,
|
||||
reflect.Float64,
|
||||
reflect.Complex64,
|
||||
reflect.Complex128,
|
||||
reflect.Interface:
|
||||
return e.marshalingError(tag, v.Type(), ErrUnsupportedTypeError)
|
||||
default:
|
||||
}
|
||||
|
||||
// skip if value is empty and tags include omitempty
|
||||
if flags.omitEmpty() && isEmptyValue(v) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// recurse to handle slices of values
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
if typ.Elem() == byteType {
|
||||
// special case, encode as a ByteString, handled below
|
||||
break
|
||||
}
|
||||
|
||||
fallthrough
|
||||
case reflect.Array:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
// turn off the omit empty flag. It applies at the field level,
|
||||
// not to each member of the slice
|
||||
// TODO: is this true?
|
||||
var fi2 *fieldInfo
|
||||
if fi != nil {
|
||||
fi2 = &fieldInfo{}
|
||||
// make a copy.
|
||||
*fi2 = *fi
|
||||
fi2.flags &^= fOmitEmpty
|
||||
}
|
||||
|
||||
err := e.encode(tag, v.Index(i), fi2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
if tag == TagNone {
|
||||
return e.marshalingError(tag, v.Type(), ErrNoTag)
|
||||
}
|
||||
|
||||
// handle enums and bitmasks
|
||||
//
|
||||
// If the field has the "enum" or "bitmask" flag, or the tag is registered as an enum or bitmask,
|
||||
// attempt to interpret the go value as such.
|
||||
//
|
||||
// If the field is explicitly flagged, return an error if the value can't be interpreted. Otherwise
|
||||
// ignore errors and let processing fallthrough to the type-based encoding.
|
||||
enumMap := DefaultRegistry.EnumForTag(tag)
|
||||
if flags.enum() || flags.bitmask() || enumMap != nil {
|
||||
switch typ.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
|
||||
i := v.Int()
|
||||
|
||||
if flags.bitmask() || (enumMap != nil && enumMap.Bitmask()) {
|
||||
e.encBuf.encodeInt(tag, int32(i))
|
||||
} else {
|
||||
e.encBuf.encodeEnum(tag, uint32(i))
|
||||
}
|
||||
|
||||
return nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
i := v.Uint()
|
||||
|
||||
if flags.bitmask() || (enumMap != nil && enumMap.Bitmask()) {
|
||||
e.encBuf.encodeInt(tag, int32(i))
|
||||
} else {
|
||||
e.encBuf.encodeEnum(tag, uint32(i))
|
||||
}
|
||||
|
||||
return nil
|
||||
case reflect.String:
|
||||
s := v.String()
|
||||
|
||||
if flags.bitmask() || (enumMap != nil && enumMap.Bitmask()) {
|
||||
i, err := ParseInt(s, enumMap)
|
||||
if err == nil {
|
||||
e.encBuf.encodeInt(tag, i)
|
||||
return nil
|
||||
}
|
||||
// only throw an error if the field is explicitly marked as a bitmask
|
||||
// otherwise just ignore it, and let it encode as a string later on.
|
||||
if flags.bitmask() {
|
||||
// if we couldn't parse the string as an enum value
|
||||
return e.marshalingError(tag, typ, err)
|
||||
}
|
||||
} else {
|
||||
i, err := ParseEnum(s, enumMap)
|
||||
if err == nil {
|
||||
e.encBuf.encodeEnum(tag, i)
|
||||
return nil
|
||||
}
|
||||
// only throw an error if the field is explicitly marked as an enum
|
||||
// otherwise just ignore it, and let it encode as a string later on.
|
||||
if flags.enum() {
|
||||
// if we couldn't parse the string as an enum value
|
||||
return e.marshalingError(tag, typ, err)
|
||||
}
|
||||
}
|
||||
default:
|
||||
if flags.enum() || flags.bitmask() {
|
||||
return e.marshalingError(tag, typ, ErrUnsupportedEnumTypeError).Append(typ.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handle special types
|
||||
switch typ {
|
||||
case timeType:
|
||||
if flags.dateTimeExt() {
|
||||
e.encBuf.encodeDateTimeExtended(tag, v.Interface().(time.Time)) //nolint:forcetypeassert
|
||||
} else {
|
||||
e.encBuf.encodeDateTime(tag, v.Interface().(time.Time)) //nolint:forcetypeassert
|
||||
}
|
||||
|
||||
return nil
|
||||
case bigIntType:
|
||||
bi := v.Interface().(big.Int) //nolint:forcetypeassert
|
||||
e.encBuf.encodeBigInt(tag, &bi)
|
||||
|
||||
return nil
|
||||
case bigIntPtrType:
|
||||
e.encBuf.encodeBigInt(tag, v.Interface().(*big.Int)) //nolint:forcetypeassert
|
||||
return nil
|
||||
case durationType:
|
||||
e.encBuf.encodeInterval(tag, time.Duration(v.Int()))
|
||||
return nil
|
||||
}
|
||||
|
||||
// handle the rest of the kinds
|
||||
switch typ.Kind() {
|
||||
case reflect.Struct:
|
||||
// push current struct onto stack
|
||||
currStruct := e.currStruct
|
||||
e.currStruct = typ.Name()
|
||||
|
||||
err = e.EncodeStructure(tag, func(e *Encoder) error {
|
||||
for _, field := range typeInfo.valueFields {
|
||||
fv := v.FieldByIndex(field.index)
|
||||
|
||||
// note: we're staying in reflection world here instead of
|
||||
// converting back to an interface{} value and going through
|
||||
// the non-reflection path again. Calling Interface()
|
||||
// on the reflect value would make a potentially addressable value
|
||||
// into an unaddressable value, reducing the chances we can coerce
|
||||
// the value into a Marshalable.
|
||||
//
|
||||
// tl;dr
|
||||
// Consider a type which implements Marshaler with
|
||||
// a pointer receiver, and a struct with a non-pointer field of that type:
|
||||
//
|
||||
// type Wheel struct{}
|
||||
// func (*Wheel) MarshalTTLV(...)
|
||||
//
|
||||
// type Car struct{
|
||||
// Wheel Wheel
|
||||
// }
|
||||
//
|
||||
// When traversing the Car struct, should the encoder invoke Wheel's
|
||||
// Marshaler method, or not? Technically, the type `Wheel`
|
||||
// doesn't implement the Marshaler interface. Only the type `*Wheel`
|
||||
// implements it. However, the other encoders in the SDK, like JSON
|
||||
// and XML, will try, if possible, to get a pointer to field values like this, in
|
||||
// order to invoke the Marshaler interface anyway.
|
||||
//
|
||||
// Encoders can only get a pointer to field values if the field
|
||||
// value is `addressable`. Addressability is explained in the docs for reflect.Value#CanAddr().
|
||||
// Using reflection to turn a reflect.Value() back into an interface{}
|
||||
// can make a potentially addressable value (like the field of an addressable struct)
|
||||
// into an unaddressable value (reflect.Value#Interface{} always returns an unaddressable
|
||||
// copy).
|
||||
|
||||
// push the currField
|
||||
currField := e.currField
|
||||
e.currField = field.name
|
||||
err := e.encode(TagNone, fv, &field) //nolint:gosec,scopelint
|
||||
// pop the currField
|
||||
e.currField = currField
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
// pop current struct
|
||||
e.currStruct = currStruct
|
||||
|
||||
return err
|
||||
case reflect.String:
|
||||
e.encBuf.encodeTextString(tag, v.String())
|
||||
case reflect.Slice:
|
||||
// special case, encode as a ByteString
|
||||
// all slices which aren't []byte should have been handled above
|
||||
// the call to v.Bytes() will panic if this assumption is wrong
|
||||
e.encBuf.encodeByteString(tag, v.Bytes())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
|
||||
i := v.Int()
|
||||
if i > math.MaxInt32 {
|
||||
return e.marshalingError(tag, typ, ErrIntOverflow)
|
||||
}
|
||||
|
||||
e.encBuf.encodeInt(tag, int32(i))
|
||||
|
||||
return nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
u := v.Uint()
|
||||
if u > math.MaxInt32 {
|
||||
return e.marshalingError(tag, typ, ErrIntOverflow)
|
||||
}
|
||||
|
||||
e.encBuf.encodeInt(tag, int32(u))
|
||||
|
||||
return nil
|
||||
case reflect.Uint64:
|
||||
u := v.Uint()
|
||||
e.encBuf.encodeLongInt(tag, int64(u))
|
||||
|
||||
return nil
|
||||
case reflect.Int64:
|
||||
e.encBuf.encodeLongInt(tag, v.Int())
|
||||
return nil
|
||||
case reflect.Bool:
|
||||
e.encBuf.encodeBool(tag, v.Bool())
|
||||
default:
|
||||
// all kinds should have been handled by now
|
||||
panic(errors.New("should never get here"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func tagForMarshal(v reflect.Value, ti typeInfo, fi *fieldInfo) Tag {
|
||||
// the tag on the TTLVTag field
|
||||
if ti.tagField != nil && ti.tagField.explicitTag != TagNone {
|
||||
return ti.tagField.explicitTag
|
||||
}
|
||||
|
||||
// the value of the TTLVTag field of type Tag
|
||||
if v.IsValid() && ti.tagField != nil && ti.tagField.ti.typ == tagType {
|
||||
tag := v.FieldByIndex(ti.tagField.index).Interface().(Tag) //nolint:forcetypeassert
|
||||
if tag != TagNone {
|
||||
return tag
|
||||
}
|
||||
}
|
||||
|
||||
// if value is in a struct field, infer the tag from the field
|
||||
// else infer from the value's type name
|
||||
if fi != nil {
|
||||
return fi.tag
|
||||
}
|
||||
|
||||
return ti.inferredTag
|
||||
}
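An illustrative sketch of the TTLVTag mechanism this function consults (not part of the vendored source). A field named TTLVTag of type Tag supplies the tag dynamically; the AttributeValue name is assumed to be registered by the kmip14 import:

package main

import (
	_ "github.com/gemalto/kmip-go/kmip14" // assumed: loads KMIP 1.4 tag names
	"github.com/gemalto/kmip-go/ttlv"
)

// taggedComment picks its TTLV tag at runtime: tagForMarshal sees the
// TTLVTag field (of type Tag) and uses its value when it is non-zero.
type taggedComment struct {
	TTLVTag ttlv.Tag
	Comment string
}

func main() {
	tag, err := ttlv.DefaultRegistry.ParseTag("AttributeValue")
	if err != nil {
		panic(err)
	}

	// Encodes a Structure under the AttributeValue tag containing a Comment TextString.
	if _, err := ttlv.Marshal(taggedComment{TTLVTag: tag, Comment: "hello"}); err != nil {
		panic(err)
	}
}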
|
||||
|
||||
// encBuf encodes basic KMIP types into TTLV.
|
||||
type encBuf struct {
|
||||
bytes.Buffer
|
||||
}
|
||||
|
||||
func (h *encBuf) begin(tag Tag, typ Type) int {
|
||||
_ = h.WriteByte(byte(tag >> 16))
|
||||
_ = h.WriteByte(byte(tag >> 8))
|
||||
_ = h.WriteByte(byte(tag))
|
||||
_ = h.WriteByte(byte(typ))
|
||||
_, _ = h.Write(zeros[:4])
|
||||
|
||||
return h.Len()
|
||||
}
|
||||
|
||||
func (h *encBuf) end(i int) {
|
||||
n := h.Len() - i
|
||||
if m := n % 8; m > 0 {
|
||||
_, _ = h.Write(zeros[:8-m])
|
||||
}
|
||||
|
||||
binary.BigEndian.PutUint32(h.Bytes()[i-4:], uint32(n))
|
||||
}
|
||||
|
||||
func (h *encBuf) writeLongIntVal(tag Tag, typ Type, i int64) {
|
||||
s := h.begin(tag, typ)
|
||||
ll := h.Len()
|
||||
_, _ = h.Write(zeros[:8])
|
||||
binary.BigEndian.PutUint64(h.Bytes()[ll:], uint64(i))
|
||||
h.end(s)
|
||||
}
|
||||
|
||||
func (h *encBuf) writeIntVal(tag Tag, typ Type, val uint32) {
|
||||
s := h.begin(tag, typ)
|
||||
ll := h.Len()
|
||||
_, _ = h.Write(zeros[:4])
|
||||
binary.BigEndian.PutUint32(h.Bytes()[ll:], val)
|
||||
h.end(s)
|
||||
}
|
||||
|
||||
var (
|
||||
ones = [8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
|
||||
zeros = [8]byte{}
|
||||
)
|
||||
|
||||
func (h *encBuf) encodeBigInt(tag Tag, i *big.Int) {
|
||||
if i == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ii := h.begin(tag, TypeBigInteger)
|
||||
|
||||
switch i.Sign() {
|
||||
case 0:
|
||||
_, _ = h.Write(zeros[:8])
|
||||
case 1:
|
||||
b := i.Bytes()
|
||||
l := len(b)
|
||||
// if n is positive, but the first bit is a 1, it will look like
|
||||
// a negative in 2's complement, so prepend a zero byte in front
|
||||
if b[0]&0x80 > 0 {
|
||||
_ = h.WriteByte(byte(0))
|
||||
l++
|
||||
}
|
||||
// pad front with zeros to multiple of 8
|
||||
if m := l % 8; m > 0 {
|
||||
_, _ = h.Write(zeros[:8-m])
|
||||
}
|
||||
|
||||
_, _ = h.Write(b)
|
||||
case -1:
|
||||
length := uint(i.BitLen()/8+1) * 8
|
||||
j := new(big.Int).Lsh(one, length)
|
||||
b := j.Add(i, j).Bytes()
|
||||
// When the most significant bit is on a byte
|
||||
// boundary, we can get some extra significant
|
||||
// bits, so strip them off when that happens.
|
||||
if len(b) >= 2 && b[0] == 0xff && b[1]&0x80 != 0 {
|
||||
b = b[1:]
|
||||
}
|
||||
|
||||
l := len(b)
|
||||
// pad front with ones to multiple of 8
|
||||
if m := l % 8; m > 0 {
|
||||
_, _ = h.Write(ones[:8-m])
|
||||
}
|
||||
|
||||
_, _ = h.Write(b)
|
||||
}
|
||||
|
||||
h.end(ii)
|
||||
}
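A worked example of the padding and sign handling above (illustrative, not part of the vendored source), showing the value bytes written between begin() and end():

//	big.NewInt(0)    -> 00 00 00 00 00 00 00 00
//	big.NewInt(1)    -> 00 00 00 00 00 00 00 01
//	big.NewInt(128)  -> 00 00 00 00 00 00 00 80   (a leading 0x00 keeps it positive)
//	big.NewInt(-1)   -> ff ff ff ff ff ff ff ff   (padded in front with 0xff)
//	big.NewInt(-256) -> ff ff ff ff ff ff ff 00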
|
||||
|
||||
func (h *encBuf) encodeInt(tag Tag, i int32) {
|
||||
h.writeIntVal(tag, TypeInteger, uint32(i))
|
||||
}
|
||||
|
||||
func (h *encBuf) encodeBool(tag Tag, b bool) {
|
||||
if b {
|
||||
h.writeLongIntVal(tag, TypeBoolean, 1)
|
||||
} else {
|
||||
h.writeLongIntVal(tag, TypeBoolean, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *encBuf) encodeLongInt(tag Tag, i int64) {
|
||||
h.writeLongIntVal(tag, TypeLongInteger, i)
|
||||
}
|
||||
|
||||
func (h *encBuf) encodeDateTime(tag Tag, t time.Time) {
|
||||
h.writeLongIntVal(tag, TypeDateTime, t.Unix())
|
||||
}
|
||||
|
||||
func (h *encBuf) encodeDateTimeExtended(tag Tag, t time.Time) {
|
||||
// take unix seconds, times a million, to get microseconds, then
|
||||
// add nanoseconds remainder/1000
|
||||
//
|
||||
// this gives us a larger range of possible values than just t.UnixNano() / 1000.
|
||||
// see UnixNano() docs for its limits.
|
||||
//
|
||||
// this is limited to max(int64) *microseconds* from epoch, rather than
|
||||
// max(int64) nanoseconds like UnixNano().
|
||||
m := (t.Unix() * 1000000) + int64(t.Nanosecond()/1000)
|
||||
h.writeLongIntVal(tag, TypeDateTimeExtended, m)
|
||||
}
|
||||
|
||||
func (h *encBuf) encodeInterval(tag Tag, d time.Duration) {
|
||||
h.writeIntVal(tag, TypeInterval, uint32(d/time.Second))
|
||||
}
|
||||
|
||||
func (h *encBuf) encodeEnum(tag Tag, i uint32) {
|
||||
h.writeIntVal(tag, TypeEnumeration, i)
|
||||
}
|
||||
|
||||
func (h *encBuf) encodeTextString(tag Tag, s string) {
|
||||
i := h.begin(tag, TypeTextString)
|
||||
_, _ = h.WriteString(s)
|
||||
h.end(i)
|
||||
}
|
||||
|
||||
func (h *encBuf) encodeByteString(tag Tag, b []byte) {
|
||||
if b == nil {
|
||||
return
|
||||
}
|
||||
|
||||
i := h.begin(tag, TypeByteString)
|
||||
_, _ = h.Write(b)
|
||||
h.end(i)
|
||||
}
|
||||
|
||||
func getTypeInfo(typ reflect.Type) (ti typeInfo, err error) {
|
||||
ti.inferredTag, _ = DefaultRegistry.ParseTag(typ.Name())
|
||||
ti.typ = typ
|
||||
err = ti.getFieldsInfo()
|
||||
|
||||
return ti, err
|
||||
}
|
||||
|
||||
var errSkip = errors.New("skip")
|
||||
|
||||
func getFieldInfo(typ reflect.Type, sf reflect.StructField) (fieldInfo, error) {
|
||||
var fi fieldInfo
|
||||
|
||||
// skip anonymous and unexported fields
|
||||
if sf.Anonymous || /*unexported:*/ sf.PkgPath != "" {
|
||||
return fi, errSkip
|
||||
}
|
||||
|
||||
fi.name = sf.Name
|
||||
fi.structType = typ
|
||||
fi.index = sf.Index
|
||||
|
||||
var anyField bool
|
||||
|
||||
// handle field tags
|
||||
parts := strings.Split(sf.Tag.Get(structFieldTag), ",")
|
||||
for i, value := range parts {
|
||||
if i == 0 {
|
||||
switch value {
|
||||
case "-":
|
||||
// skip
|
||||
return fi, errSkip
|
||||
case "":
|
||||
default:
|
||||
var err error
|
||||
|
||||
fi.explicitTag, err = DefaultRegistry.ParseTag(value)
|
||||
if err != nil {
|
||||
return fi, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
switch strings.ToLower(value) {
|
||||
case "enum":
|
||||
fi.flags |= fEnum
|
||||
case "omitempty":
|
||||
fi.flags |= fOmitEmpty
|
||||
case "datetimeextended":
|
||||
fi.flags |= fDateTimeExtended
|
||||
case "bitmask":
|
||||
fi.flags |= fBitBask
|
||||
case "any":
|
||||
anyField = true
|
||||
fi.flags |= fAny
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if anyField && fi.explicitTag != TagNone {
|
||||
return fi, merry.Here(ErrTagConflict).Appendf(`field %s.%s may not specify a TTLV tag and the "any" flag`, fi.structType.Name(), fi.name)
|
||||
}
|
||||
|
||||
// extract type info for the field. The KMIP tag
|
||||
// for this field is derived from either the field name,
|
||||
// the field tags, or the field type.
|
||||
var err error
|
||||
|
||||
fi.ti, err = getTypeInfo(sf.Type)
|
||||
if err != nil {
|
||||
return fi, err
|
||||
}
|
||||
|
||||
if fi.ti.tagField != nil && fi.ti.tagField.explicitTag != TagNone {
|
||||
fi.tag = fi.ti.tagField.explicitTag
|
||||
if fi.explicitTag != TagNone && fi.explicitTag != fi.tag {
|
||||
// if there was a tag on the struct field containing this value, it must
|
||||
// agree with the value's intrinsic tag
|
||||
return fi, merry.Here(ErrTagConflict).Appendf(`TTLV tag "%s" in tag of %s.%s conflicts with TTLV tag "%s" in %s.%s`, fi.explicitTag, fi.structType.Name(), fi.name, fi.ti.tagField.explicitTag, fi.ti.typ.Name(), fi.ti.tagField.name)
|
||||
}
|
||||
}
|
||||
|
||||
// pre-calculate the tag for this field. This intentionally duplicates
|
||||
// some of tagForMarshal(). The value is primarily used in unmarshaling
|
||||
// where the dynamic value of the field is not needed.
|
||||
if fi.tag == TagNone {
|
||||
fi.tag = fi.explicitTag
|
||||
}
|
||||
|
||||
if fi.tag == TagNone {
|
||||
fi.tag, _ = DefaultRegistry.ParseTag(fi.name)
|
||||
}
|
||||
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
func (ti *typeInfo) getFieldsInfo() error {
|
||||
if ti.typ.Kind() != reflect.Struct {
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < ti.typ.NumField(); i++ {
|
||||
fi, err := getFieldInfo(ti.typ, ti.typ.Field(i))
|
||||
|
||||
switch {
|
||||
case err == errSkip: //nolint:errorlint
|
||||
// skip
|
||||
case err != nil:
|
||||
return err
|
||||
case fi.name == "TTLVTag":
|
||||
ti.tagField = &fi
|
||||
default:
|
||||
ti.valueFields = append(ti.valueFields, fi)
|
||||
}
|
||||
}
|
||||
|
||||
// verify that multiple fields don't have the same tag
|
||||
names := map[Tag]string{}
|
||||
|
||||
for _, f := range ti.valueFields {
|
||||
if f.flags.any() {
|
||||
// ignore any fields
|
||||
continue
|
||||
}
|
||||
|
||||
tag := f.tag
|
||||
if tag != TagNone {
|
||||
if fname, ok := names[tag]; ok {
|
||||
return merry.Here(ErrTagConflict).Appendf("field resolves to the same tag (%s) as other field (%s)", tag, fname)
|
||||
}
|
||||
|
||||
names[tag] = f.name
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type typeInfo struct {
|
||||
typ reflect.Type
|
||||
inferredTag Tag
|
||||
tagField *fieldInfo
|
||||
valueFields []fieldInfo
|
||||
}
|
||||
|
||||
const (
|
||||
fOmitEmpty fieldFlags = 1 << iota
|
||||
fEnum
|
||||
fDateTimeExtended
|
||||
fAny
|
||||
fBitBask
|
||||
)
|
||||
|
||||
type fieldFlags int
|
||||
|
||||
func (f fieldFlags) omitEmpty() bool {
|
||||
return f&fOmitEmpty != 0
|
||||
}
|
||||
|
||||
func (f fieldFlags) any() bool {
|
||||
return f&fAny != 0
|
||||
}
|
||||
|
||||
func (f fieldFlags) dateTimeExt() bool {
|
||||
return f&fDateTimeExtended != 0
|
||||
}
|
||||
|
||||
func (f fieldFlags) enum() bool {
|
||||
return f&fEnum != 0
|
||||
}
|
||||
|
||||
func (f fieldFlags) bitmask() bool {
|
||||
return f&fBitBask != 0
|
||||
}
|
||||
|
||||
type fieldInfo struct {
|
||||
structType reflect.Type
|
||||
explicitTag, tag Tag
|
||||
name string
|
||||
index []int
|
||||
flags fieldFlags
|
||||
ti typeInfo
|
||||
}
|
8
vendor/github.com/gemalto/kmip-go/ttlv/errors.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
package ttlv
|
||||
|
||||
import "github.com/ansel1/merry"
|
||||
|
||||
// Details prints details from the error, including a stacktrace when available.
|
||||
func Details(err error) string {
|
||||
return merry.Details(err)
|
||||
}
|
314
vendor/github.com/gemalto/kmip-go/ttlv/formatting.go
generated
vendored
Normal file
@ -0,0 +1,314 @@
|
||||
package ttlv
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ansel1/merry"
|
||||
"github.com/gemalto/kmip-go/internal/kmiputil"
|
||||
)
|
||||
|
||||
// FormatType formats a byte as a KMIP Type string,
|
||||
// as described in the KMIP Profiles spec. If the value is registered,
|
||||
// the normalized name of the value will be returned.
|
||||
//
|
||||
// Otherwise, a 1 byte hex string is returned, but this is not
|
||||
// technically a valid encoding for types in the JSON and XML encoding
|
||||
// specs. Hex values should only be used for debugging. Examples:
|
||||
//
|
||||
// - Integer
|
||||
// - 0x42
|
||||
func FormatType(b byte, enumMap EnumMap) string {
|
||||
if enumMap != nil {
|
||||
if s, ok := enumMap.Name(uint32(b)); ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%#02x", b)
|
||||
}
|
||||
|
||||
// FormatTag formats a uint32 as a KMIP Tag string,
|
||||
// as described in the KMIP Profiles spec. If the value is registered,
|
||||
// the normalized name of the value will be returned. Otherwise, a
|
||||
// 3 byte hex string is returned. Examples:
|
||||
//
|
||||
// - ActivationDate
|
||||
// - 0x420001
|
||||
func FormatTag(v uint32, enumMap EnumMap) string {
|
||||
if enumMap != nil {
|
||||
if s, ok := enumMap.Name(v); ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%#06x", v)
|
||||
}
|
||||
|
||||
// FormatTagCanonical formats a uint32 as a canonical Tag name
|
||||
// from the KMIP spec. If the value is registered,
|
||||
// the canonical name of the value will be returned. Otherwise, a
|
||||
// 3 byte hex string is returned. Examples:
|
||||
//
|
||||
// - Activation Date
|
||||
// - 0x420001
|
||||
//
|
||||
// Canonical tag names are used in the AttributeName of Attribute structs.
|
||||
func FormatTagCanonical(v uint32, enumMap EnumMap) string {
|
||||
if enumMap != nil {
|
||||
if s, ok := enumMap.CanonicalName(v); ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%#06x", v)
|
||||
}
|
||||
|
||||
// FormatEnum formats a uint32 as a KMIP Enumeration string,
|
||||
// as described in the KMIP Profiles spec. If the value is registered,
|
||||
// the normalized name of the value will be returned. Otherwise, a
|
||||
// four byte hex string is returned. Examples:
|
||||
//
|
||||
// - SymmetricKey
|
||||
// - 0x00000002
|
||||
func FormatEnum(v uint32, enumMap EnumMap) string {
|
||||
if enumMap != nil {
|
||||
if s, ok := enumMap.Name(v); ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%#08x", v)
|
||||
}
|
||||
|
||||
// FormatInt formats an integer as a KMIP bitmask string, as
|
||||
// described in the KMIP Profiles spec for JSON under
|
||||
// the "Special case for Masks" section. Examples:
|
||||
//
|
||||
// - 0x0000100c
|
||||
// - Encrypt|Decrypt|CertificateSign
|
||||
// - CertificateSign|0x00000004|0x0000008
|
||||
// - CertificateSign|0x0000000c
|
||||
func FormatInt(i int32, enumMap EnumMap) string {
|
||||
if enumMap == nil {
|
||||
return fmt.Sprintf("%#08x", i)
|
||||
}
|
||||
|
||||
values := enumMap.Values()
|
||||
if len(values) == 0 {
|
||||
return fmt.Sprintf("%#08x", i)
|
||||
}
|
||||
|
||||
v := uint32(i)
|
||||
|
||||
// bitmask
|
||||
// decompose mask into the names of set flags, concatenated by pipe
|
||||
// if remaining value (minus registered flags) is not zero, append
|
||||
// the remaining value as hex.
|
||||
|
||||
var sb strings.Builder
|
||||
|
||||
for _, v1 := range values {
|
||||
if v1&v == v1 {
|
||||
if name, ok := enumMap.Name(v1); ok {
|
||||
if sb.Len() > 0 {
|
||||
sb.WriteString("|")
|
||||
}
|
||||
|
||||
sb.WriteString(name)
|
||||
|
||||
v ^= v1
|
||||
}
|
||||
}
|
||||
|
||||
if v == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if v != 0 {
|
||||
if sb.Len() > 0 {
|
||||
sb.WriteString("|")
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(&sb, "%#08x", v)
|
||||
}
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// ParseEnum parses a string into a uint32 according to the rules
|
||||
// in the KMIP Profiles regarding encoding enumeration values.
|
||||
// See FormatEnum for examples of the formats which can be parsed.
|
||||
// It will also parse numeric strings. Examples:
|
||||
//
|
||||
// ParseEnum("UnableToCancel", registry.EnumForTag(TagCancellationResult))
|
||||
// ParseEnum("0x00000002")
|
||||
// ParseEnum("2")
|
||||
//
|
||||
// Returns ErrInvalidHexString if the string is invalid hex, or
|
||||
// if the hex value is less than 1 byte or more than 4 bytes (ignoring
|
||||
// leading zeroes).
|
||||
//
|
||||
// Returns ErrUnregisteredEnumName if string value is not a
|
||||
// registered enum value name.
|
||||
func ParseEnum(s string, enumMap EnumMap) (uint32, error) {
|
||||
u, err := strconv.ParseUint(s, 10, 32)
|
||||
if err == nil {
|
||||
// it was a raw number
|
||||
return uint32(u), nil
|
||||
}
|
||||
|
||||
v, err := parseHexOrName(s, 4, enumMap)
|
||||
if err != nil {
|
||||
return 0, merry.Here(err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// ParseInt parses a string into an int32 according to the rules
|
||||
// in the KMIP Profiles regarding encoding integers, including
|
||||
// the special rules for bitmasks. See FormatInt for examples
|
||||
// of the formats which can be parsed.
|
||||
//
|
||||
// Returns ErrInvalidHexString if the string is invalid hex, or
|
||||
// if the hex value is less than 1 byte or more than 4 bytes (ignoring
|
||||
// leading zeroes).
|
||||
//
|
||||
// Returns ErrUnregisteredEnumName if string value is not a
|
||||
// registered enum value name.
|
||||
func ParseInt(s string, enumMap EnumMap) (int32, error) {
|
||||
i, err := strconv.ParseInt(s, 10, 32)
|
||||
if err == nil {
|
||||
// it was a raw number
|
||||
return int32(i), nil
|
||||
}
|
||||
|
||||
if !strings.ContainsAny(s, "| ") {
|
||||
v, err := parseHexOrName(s, 4, enumMap)
|
||||
if err != nil {
|
||||
return 0, merry.Here(err)
|
||||
}
|
||||
|
||||
return int32(v), nil
|
||||
}
|
||||
|
||||
// split values, look up each, and recombine
|
||||
s = strings.ReplaceAll(s, "|", " ")
|
||||
parts := strings.Split(s, " ")
|
||||
var v uint32
|
||||
|
||||
for _, part := range parts {
|
||||
if len(part) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
i, err := parseHexOrName(part, 4, enumMap)
|
||||
if err != nil {
|
||||
return 0, merry.Here(err)
|
||||
}
|
||||
|
||||
v |= i
|
||||
}
|
||||
|
||||
return int32(v), nil
|
||||
}
|
||||
|
||||
func parseHexOrName(s string, max int, enumMap EnumMap) (uint32, error) {
|
||||
b, err := kmiputil.ParseHexValue(s, max)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if b != nil {
|
||||
return kmiputil.DecodeUint32(b), nil
|
||||
}
|
||||
|
||||
if enumMap != nil {
|
||||
if v, ok := enumMap.Value(s); ok {
|
||||
return v, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, merry.Append(ErrUnregisteredEnumName, s)
|
||||
}
|
||||
|
||||
// ParseTag parses a string into a Tag according to the rules
|
||||
// in the KMIP Profiles regarding encoding tag values.
|
||||
// See FormatTag for examples of the formats which can be parsed.
|
||||
//
|
||||
// Returns ErrInvalidHexString if the string is invalid hex, or
|
||||
// if the hex value is less than 1 byte or more than 3 bytes (ignoring
|
||||
// leading zeroes).
|
||||
//
|
||||
// Returns ErrUnregisteredEnumName if string value is not a
|
||||
// registered enum value name.
|
||||
func ParseTag(s string, enumMap EnumMap) (Tag, error) {
|
||||
v, err := parseHexOrName(s, 3, enumMap)
|
||||
if err != nil {
|
||||
return 0, merry.Here(err)
|
||||
}
|
||||
|
||||
return Tag(v), nil
|
||||
}
|
||||
|
||||
// ParseType parses a string into a Type according to the rules
|
||||
// in the KMIP Profiles regarding encoding type values.
|
||||
// See FormatType for examples of the formats which can be parsed.
|
||||
// This also supports parsing a hex string type (e.g. "0x01"), though
|
||||
// this is not technically a valid encoding of a type in the spec.
|
||||
//
|
||||
// Returns ErrInvalidHexString if the string is invalid hex, or
|
||||
// if the hex value is not exactly 1 byte (ignoring
|
||||
// leading zeroes).
|
||||
//
|
||||
// Returns ErrUnregisteredEnumName if string value is not a
|
||||
// registered enum value name.
|
||||
func ParseType(s string, enumMap EnumMap) (Type, error) {
|
||||
b, err := kmiputil.ParseHexValue(s, 1)
|
||||
if err != nil {
|
||||
return 0, merry.Here(err)
|
||||
}
|
||||
|
||||
if b != nil {
|
||||
return Type(b[0]), nil
|
||||
}
|
||||
|
||||
if enumMap != nil {
|
||||
if v, ok := enumMap.Value(s); ok {
|
||||
return Type(v), nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, merry.Here(ErrUnregisteredEnumName).Append(s)
|
||||
}
|
||||
|
||||
// EnumMap defines a set of named enumeration values. Canonical names should
|
||||
// be the names from the spec. Names should be in the normalized format
|
||||
// described in the KMIP spec (see NormalizeName()).
|
||||
//
|
||||
//
|
||||
// Value enumerations are used for encoding and decoding KMIP Enumeration values,
|
||||
// KMIP Integer bitmask values, Types, and Tags.
|
||||
type EnumMap interface {
|
||||
// Name returns the normalized name for a value, e.g. AttributeName.
|
||||
// If the name is not registered, it returns "", false.
|
||||
Name(v uint32) (string, bool)
|
||||
// CanonicalName returns the canonical name for the value from the spec,
|
||||
// e.g. Attribute Name.
|
||||
// If the name is not registered, it returns "", false
|
||||
CanonicalName(v uint32) (string, bool)
|
||||
// Value returns the value registered for the name argument. If there is
|
||||
// no name registered for this value, it returns 0, false.
|
||||
// The name argument may be the canonical name (e.g. "Cryptographic Algorithm") or
|
||||
// the normalized name (e.g. "CryptographicAlgorithm").
|
||||
Value(name string) (uint32, bool)
|
||||
// Values returns the complete set of registered values. The order
|
||||
// they are returned in will be the order they are encoded in when
|
||||
// encoding bitmasks as strings.
|
||||
Values() []uint32
|
||||
// Bitmask returns true if this is an enumeration of bitmask flags.
|
||||
Bitmask() bool
|
||||
}
|
248
vendor/github.com/gemalto/kmip-go/ttlv/registry.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
|
||||
package ttlv
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/ansel1/merry"
|
||||
"github.com/gemalto/kmip-go/internal/kmiputil"
|
||||
)
|
||||
|
||||
// DefaultRegistry holds the default mappings of types, tags, enums, and bitmasks
|
||||
// to canonical names and normalized names from the KMIP spec. It is pre-populated with the 1.4 spec's
|
||||
// values. It can be replaced, or additional values can be registered with it.
|
||||
//
|
||||
// It is not currently concurrent-safe, so replace or configure it early in your
|
||||
// program.
|
||||
var DefaultRegistry Registry
|
||||
|
||||
// nolint:gochecknoinits
|
||||
func init() {
|
||||
RegisterTypes(&DefaultRegistry)
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidHexString = kmiputil.ErrInvalidHexString
|
||||
ErrUnregisteredEnumName = merry.New("unregistered enum name")
|
||||
)
|
||||
|
||||
// NormalizeName transforms KMIP names from the spec into the
|
||||
// normalized form of the name. Typically, this means removing spaces,
|
||||
// and replacing some special characters. The normalized form of the name
|
||||
// is used in the JSON and XML encodings from the KMIP Profiles.
|
||||
// The spec describes the normalization process in 5.4.1.1 and 5.5.1.1
|
||||
func NormalizeName(s string) string {
|
||||
return kmiputil.NormalizeName(s)
|
||||
}
|
||||
|
||||
// Enum represents an enumeration of KMIP values (as uint32), and maps them
|
||||
// to the canonical string names and the normalized string names of the
|
||||
// value as declared in the KMIP specs.
|
||||
// Enum is used to transpose values from strings to byte values, as required
|
||||
// by the JSON and XML encodings defined in the KMIP Profiles spec.
|
||||
// These mappings are also used to pretty print KMIP values, and to marshal
|
||||
// and unmarshal enum and bitmask values to golang string values.
|
||||
//
|
||||
// Enum currently uses plain maps, so it is not thread safe to register new values
|
||||
// concurrently. You should register all values at the start of your program before
|
||||
// using this package concurrently.
|
||||
//
|
||||
// Enums are used in the KMIP spec for two purposes: for defining the possible values
|
||||
// for values encoded as the KMIP Enumeration type, and for bitmask values. Bitmask
|
||||
// values are encoded as Integers, but are really enum values bitwise-OR'd together.
|
||||
//
|
||||
// Enums are registered with a Registry. The code to register enums is typically
|
||||
// generated by the kmipgen tool.
|
||||
type Enum struct {
|
||||
valuesToName map[uint32]string
|
||||
valuesToCanonicalName map[uint32]string
|
||||
nameToValue map[string]uint32
|
||||
canonicalNamesToValue map[string]uint32
|
||||
bitMask bool
|
||||
}
|
||||
|
||||
func NewEnum() Enum {
|
||||
return Enum{}
|
||||
}
|
||||
|
||||
func NewBitmask() Enum {
|
||||
return Enum{
|
||||
bitMask: true,
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterValue adds a mapping of a uint32 value to a name. The name will be
|
||||
// processed by NormalizeName to produce the normalized enum value name as described
|
||||
// in the KMIP spec.
|
||||
func (e *Enum) RegisterValue(v uint32, name string) {
|
||||
nn := NormalizeName(name)
|
||||
|
||||
if e.valuesToName == nil {
|
||||
e.valuesToName = map[uint32]string{}
|
||||
e.nameToValue = map[string]uint32{}
|
||||
e.valuesToCanonicalName = map[uint32]string{}
|
||||
e.canonicalNamesToValue = map[string]uint32{}
|
||||
}
|
||||
|
||||
e.valuesToName[v] = nn
|
||||
e.nameToValue[nn] = v
|
||||
e.valuesToCanonicalName[v] = name
|
||||
e.canonicalNamesToValue[name] = v
|
||||
}
|
||||
|
||||
func (e *Enum) Name(v uint32) (string, bool) {
|
||||
if e == nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
name, ok := e.valuesToName[v]
|
||||
|
||||
return name, ok
|
||||
}
|
||||
|
||||
func (e *Enum) CanonicalName(v uint32) (string, bool) {
|
||||
if e == nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
name, ok := e.valuesToCanonicalName[v]
|
||||
|
||||
return name, ok
|
||||
}
|
||||
|
||||
func (e *Enum) Value(name string) (uint32, bool) {
|
||||
if e == nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
v, ok := e.nameToValue[name]
|
||||
if !ok {
|
||||
v, ok = e.canonicalNamesToValue[name]
|
||||
}
|
||||
|
||||
return v, ok
|
||||
}
|
||||
|
||||
func (e *Enum) Values() []uint32 {
|
||||
values := make([]uint32, 0, len(e.valuesToName))
|
||||
for v := range e.valuesToName {
|
||||
values = append(values, v)
|
||||
}
|
||||
// Always list them in order of value so output is stable.
|
||||
sort.Sort(uint32Slice(values))
|
||||
|
||||
return values
|
||||
}
|
||||
|
||||
func (e *Enum) Bitmask() bool {
|
||||
if e == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return e.bitMask
|
||||
}
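An illustrative round trip through a hand-built bitmask Enum (not part of the vendored source; the flag names and values are chosen for the sketch):

package main

import (
	"fmt"

	"github.com/gemalto/kmip-go/ttlv"
)

func main() {
	// Build a bitmask enumeration and register a couple of flag values.
	e := ttlv.NewBitmask()
	e.RegisterValue(0x00000004, "Encrypt")
	e.RegisterValue(0x00000008, "Decrypt")

	// FormatInt decomposes a mask into the registered flag names.
	fmt.Println(ttlv.FormatInt(0x0000000c, &e)) // Encrypt|Decrypt

	// ParseInt reverses it, accepting names, hex values, or a mix joined by '|'.
	v, err := ttlv.ParseInt("Encrypt|Decrypt", &e)
	fmt.Println(v, err) // 12 <nil>
}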
|
||||
|
||||
// Registry holds all the known tags, types, enums and bitmasks declared in
|
||||
// a KMIP spec. It's used throughout the package to map values to their canonical
|
||||
// and normalized names.
|
||||
type Registry struct {
|
||||
enums map[Tag]EnumMap
|
||||
tags Enum
|
||||
types Enum
|
||||
}
|
||||
|
||||
func (r *Registry) RegisterType(t Type, name string) {
|
||||
r.types.RegisterValue(uint32(t), name)
|
||||
}
|
||||
|
||||
func (r *Registry) RegisterTag(t Tag, name string) {
|
||||
r.tags.RegisterValue(uint32(t), name)
|
||||
}
|
||||
|
||||
func (r *Registry) RegisterEnum(t Tag, def EnumMap) {
|
||||
if r.enums == nil {
|
||||
r.enums = map[Tag]EnumMap{}
|
||||
}
|
||||
|
||||
r.enums[t] = def
|
||||
}
|
||||
|
||||
// EnumForTag returns the enum map registered for a tag. Returns
|
||||
// nil if no map is registered for this tag.
|
||||
func (r *Registry) EnumForTag(t Tag) EnumMap {
|
||||
if r.enums == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.enums[t]
|
||||
}
|
||||
|
||||
func (r *Registry) IsBitmask(t Tag) bool {
|
||||
if e := r.EnumForTag(t); e != nil {
|
||||
return e.Bitmask()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *Registry) IsEnum(t Tag) bool {
|
||||
if e := r.EnumForTag(t); e != nil {
|
||||
return !e.Bitmask()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *Registry) Tags() EnumMap {
|
||||
return &r.tags
|
||||
}
|
||||
|
||||
func (r *Registry) Types() EnumMap {
|
||||
return &r.types
|
||||
}
|
||||
|
||||
func (r *Registry) FormatEnum(t Tag, v uint32) string {
|
||||
return FormatEnum(v, r.EnumForTag(t))
|
||||
}
|
||||
|
||||
func (r *Registry) FormatInt(t Tag, v int32) string {
|
||||
return FormatInt(v, r.EnumForTag(t))
|
||||
}
|
||||
|
||||
func (r *Registry) FormatTag(t Tag) string {
|
||||
return FormatTag(uint32(t), &r.tags)
|
||||
}
|
||||
|
||||
func (r *Registry) FormatTagCanonical(t Tag) string {
|
||||
return FormatTagCanonical(uint32(t), &r.tags)
|
||||
}
|
||||
|
||||
func (r *Registry) FormatType(t Type) string {
|
||||
return FormatType(byte(t), &r.types)
|
||||
}
|
||||
|
||||
func (r *Registry) ParseEnum(t Tag, s string) (uint32, error) {
|
||||
return ParseEnum(s, r.EnumForTag(t))
|
||||
}
|
||||
|
||||
func (r *Registry) ParseInt(t Tag, s string) (int32, error) {
|
||||
return ParseInt(s, r.EnumForTag(t))
|
||||
}
|
||||
|
||||
// ParseTag parses a string into a Tag according to the rules
|
||||
// in the KMIP Profiles regarding encoding tag values.
|
||||
// Returns TagNone if not found.
|
||||
// Returns error if s is a malformed hex string, or a hex string of incorrect length
|
||||
func (r *Registry) ParseTag(s string) (Tag, error) {
|
||||
return ParseTag(s, &r.tags)
|
||||
}
|
||||
|
||||
func (r *Registry) ParseType(s string) (Type, error) {
|
||||
return ParseType(s, &r.types)
|
||||
}
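A sketch of wiring a custom (vendor-range) tag and its enum into a Registry (illustrative only; the tag number and names are invented for the example):

package main

import (
	"fmt"

	"github.com/gemalto/kmip-go/ttlv"
)

func main() {
	var r ttlv.Registry

	// 0x540001 falls in the custom tag range accepted by Tag.Valid().
	const tagWidgetState = ttlv.Tag(0x540001)
	r.RegisterTag(tagWidgetState, "Widget State")

	states := ttlv.NewEnum()
	states.RegisterValue(1, "Idle")
	states.RegisterValue(2, "Running")
	r.RegisterEnum(tagWidgetState, &states)

	fmt.Println(r.FormatTag(tagWidgetState))     // WidgetState, assuming NormalizeName drops the space
	fmt.Println(r.FormatEnum(tagWidgetState, 2)) // Running
	fmt.Println(r.IsEnum(tagWidgetState))        // true

	tag, err := r.ParseTag("WidgetState")
	fmt.Println(tag == tagWidgetState, err) // true <nil>
}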
|
||||
|
||||
// uint32Slice attaches the methods of sort.Interface to []uint32, sorting in increasing order.
|
||||
type uint32Slice []uint32
|
||||
|
||||
func (p uint32Slice) Len() int { return len(p) }
|
||||
func (p uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
50
vendor/github.com/gemalto/kmip-go/ttlv/tag.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
package ttlv
|
||||
|
||||
const (
|
||||
TagNone = Tag(0)
|
||||
tagAttributeName Tag = 0x42000a
|
||||
tagAttributeValue Tag = 0x42000b
|
||||
)
|
||||
|
||||
// Tag
|
||||
// 9.1.3.1
|
||||
type Tag uint32
|
||||
|
||||
// String returns the normalized name of the tag.
|
||||
func (t Tag) String() string {
|
||||
return DefaultRegistry.FormatTag(t)
|
||||
}
|
||||
|
||||
// CanonicalName returns the canonical name of the tag.
|
||||
func (t Tag) CanonicalName() string {
|
||||
return DefaultRegistry.FormatTagCanonical(t)
|
||||
}
|
||||
|
||||
func (t Tag) MarshalText() (text []byte, err error) {
|
||||
return []byte(t.String()), nil
|
||||
}
|
||||
|
||||
func (t *Tag) UnmarshalText(text []byte) (err error) {
|
||||
*t, err = DefaultRegistry.ParseTag(string(text))
|
||||
return
|
||||
}
|
||||
|
||||
const (
|
||||
minStandardTag uint32 = 0x00420000
|
||||
maxStandardTag uint32 = 0x00430000
|
||||
minCustomTag uint32 = 0x00540000
|
||||
maxCustomTag uint32 = 0x00550000
|
||||
)
|
||||
|
||||
// Valid checks whether the tag's numeric value is valid according to
|
||||
// the ranges in the spec.
|
||||
func (t Tag) Valid() bool {
|
||||
switch {
|
||||
case uint32(t) >= minStandardTag && uint32(t) < maxStandardTag:
|
||||
return true
|
||||
case uint32(t) >= minCustomTag && uint32(t) < maxCustomTag:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
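For example (illustrative, not part of the vendored source):

//	Tag(0x420001).Valid() // true: standard KMIP tag range
//	Tag(0x540123).Valid() // true: custom/extension tag range
//	Tag(0x010000).Valid() // false: outside both ranges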
|
1252
vendor/github.com/gemalto/kmip-go/ttlv/ttlv.go
generated
vendored
Normal file
File diff suppressed because it is too large
196
vendor/github.com/gemalto/kmip-go/ttlv/types.go
generated
vendored
Normal file
@ -0,0 +1,196 @@
|
||||
package ttlv
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
func RegisterTypes(r *Registry) {
|
||||
m := map[string]Type{
|
||||
"BigInteger": TypeBigInteger,
|
||||
"Boolean": TypeBoolean,
|
||||
"ByteString": TypeByteString,
|
||||
"DateTime": TypeDateTime,
|
||||
"Enumeration": TypeEnumeration,
|
||||
"Integer": TypeInteger,
|
||||
"Interval": TypeInterval,
|
||||
"LongInteger": TypeLongInteger,
|
||||
"Structure": TypeStructure,
|
||||
"TextString": TypeTextString,
|
||||
"DateTimeExtended": TypeDateTimeExtended,
|
||||
}
|
||||
|
||||
for name, v := range m {
|
||||
r.RegisterType(v, name)
|
||||
}
|
||||
}
|
||||
|
||||
// Type describes the type of a KMIP TTLV.
|
||||
// 2 and 9.1.1.2
|
||||
type Type byte
|
||||
|
||||
const (
|
||||
TypeStructure Type = 0x01
|
||||
TypeInteger Type = 0x02
|
||||
TypeLongInteger Type = 0x03
|
||||
TypeBigInteger Type = 0x04
|
||||
TypeEnumeration Type = 0x05
|
||||
TypeBoolean Type = 0x06
|
||||
TypeTextString Type = 0x07
|
||||
TypeByteString Type = 0x08
|
||||
TypeDateTime Type = 0x09
|
||||
TypeInterval Type = 0x0A
|
||||
TypeDateTimeExtended Type = 0x0B
|
||||
)
|
||||
|
||||
// String returns the normalized name of the type. If the type
|
||||
// name isn't registered, it returns the hex value of the type,
|
||||
// e.g. "0x01" (TypeStructure). The value of String() is suitable
|
||||
// for use in the JSON or XML encoding of TTLV.
|
||||
func (t Type) String() string {
|
||||
return DefaultRegistry.FormatType(t)
|
||||
}
|
||||
|
||||
func (t Type) MarshalText() (text []byte, err error) {
|
||||
return []byte(t.String()), nil
|
||||
}
|
||||
|
||||
func (t *Type) UnmarshalText(text []byte) (err error) {
|
||||
*t, err = DefaultRegistry.ParseType(string(text))
|
||||
return
|
||||
}
|
||||
|
||||
// DateTimeExtended is a time wrapper which always marshals to a DateTimeExtended.
|
||||
type DateTimeExtended struct {
|
||||
time.Time
|
||||
}
|
||||
|
||||
func (t *DateTimeExtended) UnmarshalTTLV(d *Decoder, ttlv TTLV) error {
|
||||
if len(ttlv) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if t == nil {
|
||||
*t = DateTimeExtended{}
|
||||
}
|
||||
|
||||
err := d.DecodeValue(&t.Time, ttlv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t DateTimeExtended) MarshalTTLV(e *Encoder, tag Tag) error {
|
||||
e.EncodeDateTimeExtended(tag, t.Time)
|
||||
return nil
|
||||
}
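For illustration (assumes the usual time and ttlv imports, and that ActivationDate and DeactivationDate are registered tag names): two equivalent ways to get the extended encoding for a time field.

type keyDates struct {
	// Wrap the time in DateTimeExtended so it always uses the extended encoding...
	ActivationDate ttlv.DateTimeExtended

	// ...or keep time.Time and request it with the datetimeextended flag
	// described in the Marshal rules.
	DeactivationDate time.Time `ttlv:",datetimeextended"`
}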
|
||||
|
||||
// Value is a go-typed mapping for a TTLV value. It holds a tag, and the value in
|
||||
// the form of a native go type.
|
||||
//
|
||||
// Value supports marshaling and unmarshaling, allowing a mapping between encoded TTLV
|
||||
// bytes and native go types. It's useful in tests, or where you want to construct
|
||||
// an arbitrary TTLV structure in code without declaring a bespoke type, e.g.:
|
||||
//
|
||||
// v := ttlv.Value{Tag: TagBatchCount, Value: Values{
|
||||
// Value{Tag: TagComment, Value: "red"},
|
||||
// Value{Tag: TagComment, Value: "blue"},
|
||||
// Value{Tag: TagComment, Value: "green"},
|
||||
// }
|
||||
// t, err := ttlv.Marshal(v)
|
||||
//
|
||||
// KMIP Structure types are mapped to the Values go type. When marshaling, if the Value
|
||||
// field is set to a Values{}, the resulting TTLV will be TypeStructure. When unmarshaling
|
||||
// a TTLV with TypeStructure, the Value field will be set to a Values{}.
|
||||
type Value struct {
|
||||
Tag Tag
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// UnmarshalTTLV implements Unmarshaler
|
||||
func (t *Value) UnmarshalTTLV(d *Decoder, ttlv TTLV) error {
|
||||
t.Tag = ttlv.Tag()
|
||||
|
||||
switch ttlv.Type() {
|
||||
case TypeStructure:
|
||||
var v Values
|
||||
|
||||
ttlv = ttlv.ValueStructure()
|
||||
for ttlv.Valid() == nil {
|
||||
err := d.DecodeValue(&v, ttlv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ttlv = ttlv.Next()
|
||||
}
|
||||
|
||||
t.Value = v
|
||||
default:
|
||||
t.Value = ttlv.Value()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalTTLV implements Marshaler
|
||||
func (t Value) MarshalTTLV(e *Encoder, tag Tag) error {
|
||||
// if tag is set, override the suggested tag
|
||||
if t.Tag != TagNone {
|
||||
tag = t.Tag
|
||||
}
|
||||
|
||||
if tvs, ok := t.Value.(Values); ok {
|
||||
return e.EncodeStructure(tag, func(e *Encoder) error {
|
||||
for _, v := range tvs {
|
||||
if err := e.Encode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
return e.EncodeValue(tag, t.Value)
|
||||
}
|
||||
|
||||
// Values is a slice of Value objects. It represents the body of a TTLV with a type of Structure.
|
||||
type Values []Value
|
||||
|
||||
// NewValue creates a new tagged value.
|
||||
func NewValue(tag Tag, val interface{}) Value {
|
||||
return Value{
|
||||
Tag: tag,
|
||||
Value: val,
|
||||
}
|
||||
}
|
||||
|
||||
// NewStruct creates a new tagged value which is of type struct.
|
||||
func NewStruct(tag Tag, vals ...Value) Value {
|
||||
return Value{
|
||||
Tag: tag,
|
||||
Value: Values(vals),
|
||||
}
|
||||
}
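An illustrative equivalent of the literal Values example in the Value doc above, built with the helpers (it assumes the kmip14 import registers the BatchCount and Comment tag names):

package main

import (
	_ "github.com/gemalto/kmip-go/kmip14" // assumed: loads KMIP 1.4 tag names
	"github.com/gemalto/kmip-go/ttlv"
)

func main() {
	mustTag := func(name string) ttlv.Tag {
		t, err := ttlv.DefaultRegistry.ParseTag(name)
		if err != nil {
			panic(err)
		}
		return t
	}

	// A Structure under BatchCount holding three Comment TextStrings,
	// built with NewStruct/NewValue instead of literal Value{...} structs.
	v := ttlv.NewStruct(mustTag("BatchCount"),
		ttlv.NewValue(mustTag("Comment"), "red"),
		ttlv.NewValue(mustTag("Comment"), "blue"),
		ttlv.NewValue(mustTag("Comment"), "green"),
	)

	if _, err := ttlv.Marshal(v); err != nil {
		panic(err)
	}
}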
|
||||
|
||||
type Encoder struct {
|
||||
encodeDepth int
|
||||
w io.Writer
|
||||
encBuf encBuf
|
||||
|
||||
// these fields store where the encoder is when marshaling a nested struct. It's
|
||||
// used to construct error messages.
|
||||
currStruct string
|
||||
currField string
|
||||
}
|
||||
|
||||
// EnumValue is a uint32 wrapper which always encodes as an enumeration.
|
||||
type EnumValue uint32
|
||||
|
||||
func (v EnumValue) MarshalTTLV(e *Encoder, tag Tag) error {
|
||||
e.EncodeEnumeration(tag, uint32(v))
|
||||
return nil
|
||||
}
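Illustrative fragment (w, an io.Writer, and someTag, a ttlv.Tag, are placeholders): EnumValue forces the Enumeration encoding even when no enum is registered for the tag, which a plain uint32 field would not do.

// Encodes someTag as an Enumeration with value 0x00000004, regardless of
// whether an enum is registered for it.
err := ttlv.NewEncoder(w).EncodeValue(someTag, ttlv.EnumValue(4))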
|
25
vendor/github.com/gemalto/kmip-go/types.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
package kmip
|
||||
|
||||
type Authentication struct {
|
||||
Credential []Credential
|
||||
}
|
||||
|
||||
type Nonce struct {
|
||||
NonceID []byte
|
||||
NonceValue []byte
|
||||
}
|
||||
|
||||
type ProtocolVersion struct {
|
||||
ProtocolVersionMajor int
|
||||
ProtocolVersionMinor int
|
||||
}
|
||||
|
||||
type MessageExtension struct {
|
||||
VendorIdentification string
|
||||
CriticalityIndicator bool
|
||||
VendorExtension interface{}
|
||||
}
|
||||
|
||||
type Attributes struct {
|
||||
Attributes []Attribute
|
||||
}
|
64
vendor/github.com/gemalto/kmip-go/types_messages.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
package kmip
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gemalto/kmip-go/kmip14"
|
||||
)
|
||||
|
||||
// 7.1
|
||||
|
||||
type RequestMessage struct {
|
||||
RequestHeader RequestHeader
|
||||
BatchItem []RequestBatchItem
|
||||
}
|
||||
|
||||
type ResponseMessage struct {
|
||||
ResponseHeader ResponseHeader
|
||||
BatchItem []ResponseBatchItem
|
||||
}
|
||||
|
||||
// 7.2
|
||||
|
||||
type RequestHeader struct {
|
||||
ProtocolVersion ProtocolVersion
|
||||
MaximumResponseSize int `ttlv:",omitempty"`
|
||||
ClientCorrelationValue string `ttlv:",omitempty"`
|
||||
ServerCorrelationValue string `ttlv:",omitempty"`
|
||||
AsynchronousIndicator bool `ttlv:",omitempty"`
|
||||
AttestationCapableIndicator bool `ttlv:",omitempty"`
|
||||
AttestationType []kmip14.AttestationType
|
||||
Authentication *Authentication
|
||||
BatchErrorContinuationOption kmip14.BatchErrorContinuationOption `ttlv:",omitempty"`
|
||||
BatchOrderOption bool `ttlv:",omitempty"`
|
||||
TimeStamp *time.Time
|
||||
BatchCount int
|
||||
}
|
||||
|
||||
type RequestBatchItem struct {
|
||||
Operation kmip14.Operation
|
||||
UniqueBatchItemID []byte `ttlv:",omitempty"`
|
||||
RequestPayload interface{}
|
||||
MessageExtension *MessageExtension `ttlv:",omitempty"`
|
||||
}
|
||||
|
||||
type ResponseHeader struct {
|
||||
ProtocolVersion ProtocolVersion
|
||||
TimeStamp time.Time
|
||||
Nonce *Nonce
|
||||
AttestationType []kmip14.AttestationType
|
||||
ClientCorrelationValue string `ttlv:",omitempty"`
|
||||
ServerCorrelationValue string `ttlv:",omitempty"`
|
||||
BatchCount int
|
||||
}
|
||||
|
||||
type ResponseBatchItem struct {
|
||||
Operation kmip14.Operation `ttlv:",omitempty"`
|
||||
UniqueBatchItemID []byte `ttlv:",omitempty"`
|
||||
ResultStatus kmip14.ResultStatus
|
||||
ResultReason kmip14.ResultReason `ttlv:",omitempty"`
|
||||
ResultMessage string `ttlv:",omitempty"`
|
||||
AsynchronousCorrelationValue []byte `ttlv:",omitempty"`
|
||||
ResponsePayload interface{} `ttlv:",omitempty"`
|
||||
MessageExtension *MessageExtension
|
||||
}
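A hedged sketch of assembling a request from these message types (not from the vendored source; the kmip14 operation constant is an assumption, and the payload struct is defined locally for the example rather than taken from this package):

package main

import (
	"time"

	kmip "github.com/gemalto/kmip-go"
	"github.com/gemalto/kmip-go/kmip14" // assumed: Operation constants plus KMIP 1.4 name registration
	"github.com/gemalto/kmip-go/ttlv"
)

// discoverVersionsPayload is a stand-in payload for the sketch; in practice
// you would use a payload struct from this package.
type discoverVersionsPayload struct {
	ProtocolVersion []kmip.ProtocolVersion
}

func main() {
	now := time.Now()

	msg := kmip.RequestMessage{
		RequestHeader: kmip.RequestHeader{
			ProtocolVersion: kmip.ProtocolVersion{ProtocolVersionMajor: 1, ProtocolVersionMinor: 4},
			TimeStamp:       &now,
			BatchCount:      1,
		},
		BatchItem: []kmip.RequestBatchItem{{
			Operation:      kmip14.OperationDiscoverVersions, // assumed constant name
			RequestPayload: discoverVersionsPayload{},
		}},
	}

	// The whole message marshals to TTLV bytes via the ttlv package.
	if _, err := ttlv.Marshal(msg); err != nil {
		panic(err)
	}
}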
|