update

parent 9dc80cd34b
commit 6e432c2a06
go.mod (7 changed lines)

@@ -1,7 +1,10 @@
 module novit.nc/direktil/pkg
 
 require (
-	github.com/ulikunitz/xz v0.5.5
+	github.com/kr/pretty v0.1.0 // indirect
+	github.com/ulikunitz/xz v0.5.6
 	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
-	gopkg.in/yaml.v2 v2.2.2
+	gopkg.in/yaml.v2 v2.2.4
 )
+
+go 1.13
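This bumps github.com/ulikunitz/xz to v0.5.6 and gopkg.in/yaml.v2 to v2.2.4, adds github.com/kr/pretty as an indirect dependency, and pins go 1.13; the vendored copy of the xz library is dropped further down. As a quick sanity check that the bumped xz module behaves as before, here is a minimal compress/decompress round-trip sketch adapted from the library's own README (the copy removed from vendor/ below); the sample text and in-memory buffer are illustrative only.

package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	const text = "The quick brown fox jumps over the lazy dog.\n"
	var buf bytes.Buffer

	// compress text into buf
	w, err := xz.NewWriter(&buf)
	if err != nil {
		log.Fatalf("xz.NewWriter error %s", err)
	}
	if _, err := io.WriteString(w, text); err != nil {
		log.Fatalf("WriteString error %s", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("w.Close error %s", err)
	}

	// decompress buf and write the result to stdout
	r, err := xz.NewReader(&buf)
	if err != nil {
		log.Fatalf("xz.NewReader error %s", err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatalf("io.Copy error %s", err)
	}
}

Since the library is no longer vendored, builds resolve v0.5.6 through the module cache or proxy against the checksums recorded in go.sum below.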
go.sum (16 changed lines)

@@ -1,8 +1,12 @@
-github.com/ulikunitz/xz v0.5.4/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
-github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
-github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=
+github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -15,8 +15,9 @@ type Config struct {
 }
 
 type Cluster struct {
 	Name string
 	Addons string
+	BootstrapPods string
 }
 
 func FromBytes(data []byte) (*Config, error) {
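The hunk above adds a BootstrapPods field to the Cluster type (the file that declares Config and Cluster; its name is not shown in this view). A minimal sketch of how the new field would be populated, assuming the config is decoded with gopkg.in/yaml.v2 from the module's requirements and relying on yaml.v2's default lowercasing of exported field names; the example document and keys are assumptions, not taken from the diff.

package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

// Cluster mirrors the three fields shown in the hunk above. Decoding relies on
// yaml.v2's default mapping of exported field names to lowercase keys.
type Cluster struct {
	Name          string
	Addons        string
	BootstrapPods string
}

func main() {
	// Hypothetical cluster document; the real config layout is not shown here.
	data := []byte("name: demo\naddons: base\nbootstrappods: kube-system\n")

	var c Cluster
	if err := yaml.Unmarshal(data, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s bootstraps pods from %q\n", c.Name, c.BootstrapPods)
}

With yaml.v2's defaults the new field maps to a bootstrappods key; if the real config uses a different key, the struct field would need an explicit yaml tag.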
vendor/github.com/ulikunitz/xz/.gitignore (generated, vendored)
@@ -1,25 +0,0 @@ entire file removed (25 lines: upstream ignore patterns for build artifacts, test binaries, profiles, and editor files)
vendor/github.com/ulikunitz/xz/LICENSE (generated, vendored)
@@ -1,26 +0,0 @@ entire file removed (26 lines: BSD-style license, Copyright (c) 2014-2016 Ulrich Kunitz)
vendor/github.com/ulikunitz/xz/README.md (generated, vendored)
@@ -1,73 +0,0 @@ entire file removed (73 lines: package description, API usage example, and gxz command-line notes)
vendor/github.com/ulikunitz/xz/TODO.md (generated, vendored)
@@ -1,319 +0,0 @@ entire file removed (319 lines: upstream TODO list, release plan, and development log)
vendor/github.com/ulikunitz/xz/bits.go (generated, vendored)
@@ -1,74 +0,0 @@ entire file removed (74 lines: little-endian and uvarint encoding/decoding helpers)
vendor/github.com/ulikunitz/xz/crc.go (generated, vendored)
@@ -1,54 +0,0 @@ entire file removed (54 lines: CRC-32 and CRC-64 hash wrappers with little-endian Sum output)
vendor/github.com/ulikunitz/xz/example.go (generated, vendored)
@@ -1,40 +0,0 @@ entire file removed (40 lines: standalone compress/decompress example program)
vendor/github.com/ulikunitz/xz/format.go (generated, vendored)
@@ -1,728 +0,0 @@ entire file removed (728 lines: xz stream header, footer, block header, filter, and index handling)
vendor/github.com/ulikunitz/xz/fox.xz (generated, vendored)
Binary file removed (not shown).
181
vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
generated
vendored
181
vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
generated
vendored
@ -1,181 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package hash
|
|
||||||
|
|
||||||
// CyclicPoly provides a cyclic polynomial rolling hash.
|
|
||||||
type CyclicPoly struct {
|
|
||||||
h uint64
|
|
||||||
p []uint64
|
|
||||||
i int
|
|
||||||
}
|
|
||||||
|
|
||||||
// ror rotates the unsigned 64-bit integer to right. The argument s must be
|
|
||||||
// less than 64.
|
|
||||||
func ror(x uint64, s uint) uint64 {
|
|
||||||
return (x >> s) | (x << (64 - s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCyclicPoly creates a new instance of the CyclicPoly structure. The
|
|
||||||
// argument n gives the number of bytes for which a hash will be executed.
|
|
||||||
// This number must be positive; the method panics if this isn't the case.
|
|
||||||
func NewCyclicPoly(n int) *CyclicPoly {
|
|
||||||
if n < 1 {
|
|
||||||
panic("argument n must be positive")
|
|
||||||
}
|
|
||||||
return &CyclicPoly{p: make([]uint64, 0, n)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the length of the byte sequence for which a hash is generated.
|
|
||||||
func (r *CyclicPoly) Len() int {
|
|
||||||
return cap(r.p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RollByte hashes the next byte and returns a hash value. The complete becomes
|
|
||||||
// available after at least Len() bytes have been hashed.
|
|
||||||
func (r *CyclicPoly) RollByte(x byte) uint64 {
|
|
||||||
y := hash[x]
|
|
||||||
if len(r.p) < cap(r.p) {
|
|
||||||
r.h = ror(r.h, 1) ^ y
|
|
||||||
r.p = append(r.p, y)
|
|
||||||
} else {
|
|
||||||
r.h ^= ror(r.p[r.i], uint(cap(r.p)-1))
|
|
||||||
r.h = ror(r.h, 1) ^ y
|
|
||||||
r.p[r.i] = y
|
|
||||||
r.i = (r.i + 1) % cap(r.p)
|
|
||||||
}
|
|
||||||
return r.h
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stores the hash for the individual bytes.
|
|
||||||
var hash = [256]uint64{
|
|
||||||
0x2e4fc3f904065142, 0xc790984cfbc99527,
|
|
||||||
0x879f95eb8c62f187, 0x3b61be86b5021ef2,
|
|
||||||
0x65a896a04196f0a5, 0xc5b307b80470b59e,
|
|
||||||
0xd3bff376a70df14b, 0xc332f04f0b3f1701,
|
|
||||||
0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53,
|
|
||||||
0x1906a10c2c1c0208, 0xfb0c712a03421c0d,
|
|
||||||
0x38be311a65c9552b, 0xfee7ee4ca6445c7e,
|
|
||||||
0x71aadeded184f21e, 0xd73426fccda23b2d,
|
|
||||||
0x29773fb5fb9600b5, 0xce410261cd32981a,
|
|
||||||
0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c,
|
|
||||||
0xc13e35fc9c73a887, 0xf30ed5c201e76dbc,
|
|
||||||
0xa5f10b3910482cea, 0x2945d59be02dfaad,
|
|
||||||
0x06ee334ff70571b5, 0xbabf9d8070f44380,
|
|
||||||
0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7,
|
|
||||||
0x26183cb9f7b1664c, 0xea71dac7da068f21,
|
|
||||||
0xea92eca5bd1d0bb7, 0x415595862defcd75,
|
|
||||||
0x248a386023c60648, 0x9cf021ab284b3c8a,
|
|
||||||
0xfc9372df02870f6c, 0x2b92d693eeb3b3fc,
|
|
||||||
0x73e799d139dc6975, 0x7b15ae312486363c,
|
|
||||||
0xb70e5454a2239c80, 0x208e3fb31d3b2263,
|
|
||||||
0x01f563cabb930f44, 0x2ac4533d2a3240d8,
|
|
||||||
0x84231ed1064f6f7c, 0xa9f020977c2a6d19,
|
|
||||||
0x213c227271c20122, 0x09fe8a9a0a03d07a,
|
|
||||||
0x4236dc75bcaf910c, 0x460a8b2bead8f17e,
|
|
||||||
0xd9b27be1aa07055f, 0xd202d5dc4b11c33e,
|
|
||||||
0x70adb010543bea12, 0xcdae938f7ea6f579,
|
|
||||||
0x3f3d870208672f4d, 0x8e6ccbce9d349536,
|
|
||||||
0xe4c0871a389095ae, 0xf5f2a49152bca080,
|
|
||||||
0x9a43f9b97269934e, 0xc17b3753cb6f475c,
|
|
||||||
0xd56d941e8e206bd4, 0xac0a4f3e525eda00,
|
|
||||||
0xa06d5a011912a550, 0x5537ed19537ad1df,
|
|
||||||
0xa32fe713d611449d, 0x2a1d05b47c3b579f,
|
|
||||||
0x991d02dbd30a2a52, 0x39e91e7e28f93eb0,
|
|
||||||
0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97,
|
|
||||||
0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44,
|
|
||||||
0x0b63d5d801708420, 0x8f227ca8f37ffaec,
|
|
||||||
0x0256278670887c24, 0x107e14877dbf540b,
|
|
||||||
0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61,
|
|
||||||
0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001,
|
|
||||||
0x31f601d5d31c48c4, 0x72ff3c0928bcaec7,
|
|
||||||
0xd99264421147eb03, 0x535a2d6d38aefcfe,
|
|
||||||
0x6ba8b4454a916237, 0xfa39366eaae4719c,
|
|
||||||
0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4,
|
|
||||||
0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8,
|
|
||||||
0xd61c2503fe639144, 0x30ce625441eb92d3,
|
|
||||||
0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5,
|
|
||||||
0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf,
|
|
||||||
0xc7ea4872c96b83ae, 0x6dd5d376f4392382,
|
|
||||||
0x1be88681aaa9792f, 0xfef465ee1b6c10d9,
|
|
||||||
0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9,
|
|
||||||
0x7808e902b3857d0b, 0x171c9c4ea4607972,
|
|
||||||
0x58d66274850146df, 0x42b311c10d3981d1,
|
|
||||||
0x647fa8c621c41a4c, 0xf472771c66ddfedc,
|
|
||||||
0x338d27e3f847b46b, 0x6402ce3da97545ce,
|
|
||||||
0x5162db616fc38638, 0x9c83be97bc22a50e,
|
|
||||||
0x2d3d7478a78d5e72, 0xe621a9b938fd5397,
|
|
||||||
0x9454614eb0f81c45, 0x395fb6e742ed39b6,
|
|
||||||
0x77dd9179d06037bf, 0xc478d0fee4d2656d,
|
|
||||||
0x35d9d6cb772007af, 0x83a56e92c883f0f6,
|
|
||||||
0x27937453250c00a1, 0x27bd6ebc3a46a97d,
|
|
||||||
0x9f543bf784342d51, 0xd158f38c48b0ed52,
|
|
||||||
0x8dd8537c045f66b4, 0x846a57230226f6d5,
|
|
||||||
0x6b13939e0c4e7cdf, 0xfca25425d8176758,
|
|
||||||
0x92e5fc6cd52788e6, 0x9992e13d7a739170,
|
|
||||||
0x518246f7a199e8ea, 0xf104c2a71b9979c7,
|
|
||||||
0x86b3ffaabea4768f, 0x6388061cf3e351ad,
|
|
||||||
0x09d9b5295de5bbb5, 0x38bf1638c2599e92,
|
|
||||||
0x1d759846499e148d, 0x4c0ff015e5f96ef4,
|
|
||||||
0xa41a94cfa270f565, 0x42d76f9cb2326c0b,
|
|
||||||
0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a,
|
|
||||||
0x337523aabbe6cf8d, 0x646bb14001d42b12,
|
|
||||||
0xc178729d138adc74, 0xf900ef4491f24086,
|
|
||||||
0xee1a90d334bb5ac4, 0x9755c92247301a50,
|
|
||||||
0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9,
|
|
||||||
0x0fa8084cf91ac6ff, 0x10d226cf136e6189,
|
|
||||||
0xd302057a07d4fb21, 0x5f03800e20a0fcc3,
|
|
||||||
0x80118d4ae46bd210, 0x58ab61a522843733,
|
|
||||||
0x51edd575c5432a4b, 0x94ee6ff67f9197f7,
|
|
||||||
0x765669e0e5e8157b, 0xa5347830737132f0,
|
|
||||||
0x3ba485a69f01510c, 0x0b247d7b957a01c3,
|
|
||||||
0x1b3d63449fd807dc, 0x0fdc4721c30ad743,
|
|
||||||
0x8b535ed3829b2b14, 0xee41d0cad65d232c,
|
|
||||||
0xe6a99ed97a6a982f, 0x65ac6194c202003d,
|
|
||||||
0x692accf3a70573eb, 0xcc3c02c3e200d5af,
|
|
||||||
0x0d419e8b325914a3, 0x320f160f42c25e40,
|
|
||||||
0x00710d647a51fe7a, 0x3c947692330aed60,
|
|
||||||
0x9288aa280d355a7a, 0xa1806a9b791d1696,
|
|
||||||
0x5d60e38496763da1, 0x6c69e22e613fd0f4,
|
|
||||||
0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba,
|
|
||||||
0x460c17992cbaece1, 0xf7822c5444d3297f,
|
|
||||||
0x344a9790c69b74aa, 0xb80a42e6cae09dce,
|
|
||||||
0x1b1361eaf2b1e757, 0xd84c1e758e236f01,
|
|
||||||
0x88e0b7be347627cc, 0x45246009b7a99490,
|
|
||||||
0x8011c6dd3fe50472, 0xc341d682bffb99d7,
|
|
||||||
0x2511be93808e2d15, 0xd5bc13d7fd739840,
|
|
||||||
0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157,
|
|
||||||
0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0,
|
|
||||||
0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc,
|
|
||||||
0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e,
|
|
||||||
0xa559cce0d9199aac, 0xde39d47ef3723380,
|
|
||||||
0xe5b69d848ce42e35, 0xefa24296f8e79f52,
|
|
||||||
0x70190b59db9a5afc, 0x26f166cdb211e7bf,
|
|
||||||
0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017,
|
|
||||||
0xb9059b05e9420d90, 0x2f0da855c9388754,
|
|
||||||
0x611d5e9ab77949cc, 0x2912038ac01163f4,
|
|
||||||
0x0231df50402b2fba, 0x45660fc4f3245f58,
|
|
||||||
0xb91cc97c7c8dac50, 0xb72d2aafe4953427,
|
|
||||||
0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2,
|
|
||||||
0x1310e1c1a48d21c3, 0xad48a7810cdd8544,
|
|
||||||
0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de,
|
|
||||||
0xe70cfc8fe1ee9626, 0xef4711b0d8dda442,
|
|
||||||
0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93,
|
|
||||||
0x9b37db9d0335a39c, 0x494b6f870f5cfebc,
|
|
||||||
0x6d1b3c1149dda943, 0x372c943a518c1093,
|
|
||||||
0xad27af45e77c09c4, 0x3b6f92b646044604,
|
|
||||||
0xac2917909f5fcf4f, 0x2069a60e977e5557,
|
|
||||||
0x353a469e71014de5, 0x24be356281f55c15,
|
|
||||||
0x2b6d710ba8e9adea, 0x404ad1751c749c29,
|
|
||||||
0xed7311bf23d7f185, 0xba4f6976b4acc43e,
|
|
||||||
0x32d7198d2bc39000, 0xee667019014d6e01,
|
|
||||||
0x494ef3e128d14c83, 0x1f95a152baecd6be,
|
|
||||||
0x201648dff1f483a5, 0x68c28550c8384af6,
|
|
||||||
0x5fc834a6824a7f48, 0x7cd06cb7365eaf28,
|
|
||||||
0xd82bbd95e9b30909, 0x234f0d1694c53f6d,
|
|
||||||
0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e,
|
|
||||||
0xf8f6b97f5585080a, 0x74236084be57b95b,
|
|
||||||
0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b,
|
|
||||||
0x4378ffe93e1528c5, 0x94ca92a17118e2d2,
|
|
||||||
}
14
vendor/github.com/ulikunitz/xz/internal/hash/doc.go
generated
vendored
14
vendor/github.com/ulikunitz/xz/internal/hash/doc.go
generated
vendored
@ -1,14 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package hash provides rolling hashes.

Rolling hashes have to be used for maintaining the positions of n-byte
sequences in the dictionary buffer.

The package currently provides the Rabin-Karp rolling hash and a Cyclic
Polynomial hash. Both can be used through the Roller interface and the Hashes function.
*/
package hash
66
vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
generated
vendored
66
vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
generated
vendored
@ -1,66 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

// A is the default constant for the Rabin-Karp rolling hash. This is a random
// prime.
const A = 0x97b548add41d5da1

// RabinKarp supports the computation of a rolling hash.
type RabinKarp struct {
	A uint64
	// a^n
	aOldest uint64
	h       uint64
	p       []byte
	i       int
}

// NewRabinKarp creates a new RabinKarp value. The argument n defines the
// length of the byte sequence to be hashed. The default constant will be
// used.
func NewRabinKarp(n int) *RabinKarp {
	return NewRabinKarpConst(n, A)
}

// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
// length of the byte sequence to be hashed. The argument a provides the
// constant used to compute the hash.
func NewRabinKarpConst(n int, a uint64) *RabinKarp {
	if n <= 0 {
		panic("number of bytes n must be positive")
	}
	aOldest := uint64(1)
	// There are faster methods. For the small n required by the LZMA
	// compressor O(n) is sufficient.
	for i := 0; i < n; i++ {
		aOldest *= a
	}
	return &RabinKarp{
		A: a, aOldest: aOldest,
		p: make([]byte, 0, n),
	}
}

// Len returns the length of the byte sequence.
func (r *RabinKarp) Len() int {
	return cap(r.p)
}

// RollByte computes the hash after x has been added.
func (r *RabinKarp) RollByte(x byte) uint64 {
	if len(r.p) < cap(r.p) {
		r.h += uint64(x)
		r.h *= r.A
		r.p = append(r.p, x)
	} else {
		r.h -= uint64(r.p[r.i]) * r.aOldest
		r.h += uint64(x)
		r.h *= r.A
		r.p[r.i] = x
		r.i = (r.i + 1) % cap(r.p)
	}
	return r.h
}
29
vendor/github.com/ulikunitz/xz/internal/hash/roller.go
generated
vendored
29
vendor/github.com/ulikunitz/xz/internal/hash/roller.go
generated
vendored
@ -1,29 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

// Roller provides an interface for rolling hashes. The hash value will become
// valid after hash has been called Len times.
type Roller interface {
	Len() int
	RollByte(x byte) uint64
}

// Hashes computes all hash values for the array p. Note that the state of the
// roller is changed.
func Hashes(r Roller, p []byte) []uint64 {
	n := r.Len()
	if len(p) < n {
		return nil
	}
	h := make([]uint64, len(p)-n+1)
	for i := 0; i < n-1; i++ {
		r.RollByte(p[i])
	}
	for i := range h {
		h[i] = r.RollByte(p[i+n-1])
	}
	return h
}
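The two removed files above make up the whole of internal/hash: RabinKarp implements the rolling hash and Hashes drives any Roller over a byte slice. A minimal sketch of how they fit together, written as a hypothetical example test that would have to sit next to these files (the package is internal to the xz module, so it cannot be imported from outside; the file name and placement are assumptions):

// rabinkarp_example_test.go (hypothetical, in package hash)
package hash

import "fmt"

func ExampleHashes() {
	r := NewRabinKarp(4) // hash 4-byte windows, as the LZMA match finder does
	hs := Hashes(r, []byte("abcdabcd"))
	// One hash per 4-byte window: len("abcdabcd") - 4 + 1 = 5 values.
	// The window "abcd" occurs at offsets 0 and 4, so those hashes are equal.
	fmt.Println(len(hs), hs[0] == hs[4])
	// Output: 5 true
}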
457
vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
generated
vendored
457
vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
generated
vendored
@ -1,457 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package xlog provides a simple logging package that can disable
|
|
||||||
// certain message categories. It defines a type, Logger, with multiple
|
|
||||||
// methods for formatting output. The package also has a predefined
|
|
||||||
// 'standard' Logger accessible through the helper functions Print[f|ln],
|
|
||||||
// Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln]
|
|
||||||
// that are easier to use than creating a Logger manually. That logger
|
|
||||||
// writes to standard error and prints the date and time of each logged
|
|
||||||
// message, which can be configured using the function SetFlags.
|
|
||||||
//
|
|
||||||
// The Fatal functions call os.Exit(1) after the message is output
|
|
||||||
// unless suppressed by the flags. The Panic functions call panic
|
|
||||||
// after writing the log message unless suppressed.
|
|
||||||
package xlog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The flags define what information is prefixed to each log entry
|
|
||||||
// generated by the Logger. The Lno* versions allow the suppression of
|
|
||||||
// specific output. The bits are or'ed together to control what will be
|
|
||||||
// printed. There is no control over the order of the items printed and
|
|
||||||
// the format. The full format is:
|
|
||||||
//
|
|
||||||
// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message
|
|
||||||
//
|
|
||||||
const (
|
|
||||||
Ldate = 1 << iota // the date: 2009-01-23
|
|
||||||
Ltime // the time: 01:23:23
|
|
||||||
Lmicroseconds // microsecond resolution: 01:23:23.123123
|
|
||||||
Llongfile // full file name and line number: /a/b/c/d.go:23
|
|
||||||
Lshortfile // final file name element and line number: d.go:23
|
|
||||||
Lnopanic // suppresses output from Panic[f|ln] but not the panic call
|
|
||||||
Lnofatal // suppresses output from Fatal[f|ln] but not the exit
|
|
||||||
Lnowarn // suppresses output from Warn[f|ln]
|
|
||||||
Lnoprint // suppresses output from Print[f|ln]
|
|
||||||
Lnodebug // suppresses output from Debug[f|ln]
|
|
||||||
// initial values for the standard logger
|
|
||||||
Lstdflags = Ldate | Ltime | Lnodebug
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Logger represents an active logging object that generates lines of
|
|
||||||
// output to an io.Writer. Each logging operation if not suppressed
|
|
||||||
// makes a single call to the Writer's Write method. A Logger can be
|
|
||||||
// used simultaneously from multiple goroutines; it guarantees to
|
|
||||||
// serialize access to the Writer.
|
|
||||||
type Logger struct {
|
|
||||||
mu sync.Mutex // ensures atomic writes; and protects the following
|
|
||||||
// fields
|
|
||||||
prefix string // prefix to write at beginning of each line
|
|
||||||
flag int // properties
|
|
||||||
out io.Writer // destination for output
|
|
||||||
buf []byte // for accumulating text to write
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new Logger. The out argument sets the destination to
|
|
||||||
// which the log output will be written. The prefix appears at the
|
|
||||||
// beginning of each log line. The flag argument defines the logging
|
|
||||||
// properties.
|
|
||||||
func New(out io.Writer, prefix string, flag int) *Logger {
|
|
||||||
return &Logger{out: out, prefix: prefix, flag: flag}
|
|
||||||
}
|
|
||||||
|
|
||||||
// std is the standard logger used by the package scope functions.
|
|
||||||
var std = New(os.Stderr, "", Lstdflags)
|
|
||||||
|
|
||||||
// itoa converts the integer to ASCII. A negative width will avoid
|
|
||||||
// zero-padding. The function supports only non-negative integers.
|
|
||||||
func itoa(buf *[]byte, i int, wid int) {
|
|
||||||
var u = uint(i)
|
|
||||||
if u == 0 && wid <= 1 {
|
|
||||||
*buf = append(*buf, '0')
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var b [32]byte
|
|
||||||
bp := len(b)
|
|
||||||
for ; u > 0 || wid > 0; u /= 10 {
|
|
||||||
bp--
|
|
||||||
wid--
|
|
||||||
b[bp] = byte(u%10) + '0'
|
|
||||||
}
|
|
||||||
*buf = append(*buf, b[bp:]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatHeader puts the header into the buf field of the buffer.
|
|
||||||
func (l *Logger) formatHeader(t time.Time, file string, line int) {
|
|
||||||
l.buf = append(l.buf, l.prefix...)
|
|
||||||
if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
|
|
||||||
if l.flag&Ldate != 0 {
|
|
||||||
year, month, day := t.Date()
|
|
||||||
itoa(&l.buf, year, 4)
|
|
||||||
l.buf = append(l.buf, '-')
|
|
||||||
itoa(&l.buf, int(month), 2)
|
|
||||||
l.buf = append(l.buf, '-')
|
|
||||||
itoa(&l.buf, day, 2)
|
|
||||||
l.buf = append(l.buf, ' ')
|
|
||||||
}
|
|
||||||
if l.flag&(Ltime|Lmicroseconds) != 0 {
|
|
||||||
hour, min, sec := t.Clock()
|
|
||||||
itoa(&l.buf, hour, 2)
|
|
||||||
l.buf = append(l.buf, ':')
|
|
||||||
itoa(&l.buf, min, 2)
|
|
||||||
l.buf = append(l.buf, ':')
|
|
||||||
itoa(&l.buf, sec, 2)
|
|
||||||
if l.flag&Lmicroseconds != 0 {
|
|
||||||
l.buf = append(l.buf, '.')
|
|
||||||
itoa(&l.buf, t.Nanosecond()/1e3, 6)
|
|
||||||
}
|
|
||||||
l.buf = append(l.buf, ' ')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if l.flag&(Lshortfile|Llongfile) != 0 {
|
|
||||||
if l.flag&Lshortfile != 0 {
|
|
||||||
short := file
|
|
||||||
for i := len(file) - 1; i > 0; i-- {
|
|
||||||
if file[i] == '/' {
|
|
||||||
short = file[i+1:]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file = short
|
|
||||||
}
|
|
||||||
l.buf = append(l.buf, file...)
|
|
||||||
l.buf = append(l.buf, ':')
|
|
||||||
itoa(&l.buf, line, -1)
|
|
||||||
l.buf = append(l.buf, ": "...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Logger) output(calldepth int, now time.Time, s string) error {
|
|
||||||
var file string
|
|
||||||
var line int
|
|
||||||
if l.flag&(Lshortfile|Llongfile) != 0 {
|
|
||||||
l.mu.Unlock()
|
|
||||||
var ok bool
|
|
||||||
_, file, line, ok = runtime.Caller(calldepth)
|
|
||||||
if !ok {
|
|
||||||
file = "???"
|
|
||||||
line = 0
|
|
||||||
}
|
|
||||||
l.mu.Lock()
|
|
||||||
}
|
|
||||||
l.buf = l.buf[:0]
|
|
||||||
l.formatHeader(now, file, line)
|
|
||||||
l.buf = append(l.buf, s...)
|
|
||||||
if len(s) == 0 || s[len(s)-1] != '\n' {
|
|
||||||
l.buf = append(l.buf, '\n')
|
|
||||||
}
|
|
||||||
_, err := l.out.Write(l.buf)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output writes the string s with the header controlled by the flags to
|
|
||||||
// the l.out writer. A newline will be appended if s doesn't end in a
|
|
||||||
// newline. Calldepth is used to recover the PC, although all current
|
|
||||||
// calls of Output use the call depth 2. Access to the function is serialized.
|
|
||||||
func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error {
|
|
||||||
now := time.Now()
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
if l.flag&noflag != 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
s := fmt.Sprint(v...)
|
|
||||||
return l.output(calldepth+1, now, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Outputf works like output but formats the output like Printf.
|
|
||||||
func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error {
|
|
||||||
now := time.Now()
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
if l.flag&noflag != 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
s := fmt.Sprintf(format, v...)
|
|
||||||
return l.output(calldepth+1, now, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Outputln works like output but formats the output like Println.
|
|
||||||
func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error {
|
|
||||||
now := time.Now()
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
if l.flag&noflag != 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
s := fmt.Sprintln(v...)
|
|
||||||
return l.output(calldepth+1, now, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panic prints the message like Print and calls panic. The printing
|
|
||||||
// might be suppressed by the flag Lnopanic.
|
|
||||||
func (l *Logger) Panic(v ...interface{}) {
|
|
||||||
l.Output(2, Lnopanic, v...)
|
|
||||||
s := fmt.Sprint(v...)
|
|
||||||
panic(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panic prints the message like Print and calls panic. The printing
|
|
||||||
// might be suppressed by the flag Lnopanic.
|
|
||||||
func Panic(v ...interface{}) {
|
|
||||||
std.Output(2, Lnopanic, v...)
|
|
||||||
s := fmt.Sprint(v...)
|
|
||||||
panic(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panicf prints the message like Printf and calls panic. The printing
|
|
||||||
// might be suppressed by the flag Lnopanic.
|
|
||||||
func (l *Logger) Panicf(format string, v ...interface{}) {
|
|
||||||
l.Outputf(2, Lnopanic, format, v...)
|
|
||||||
s := fmt.Sprintf(format, v...)
|
|
||||||
panic(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panicf prints the message like Printf and calls panic. The printing
|
|
||||||
// might be suppressed by the flag Lnopanic.
|
|
||||||
func Panicf(format string, v ...interface{}) {
|
|
||||||
std.Outputf(2, Lnopanic, format, v...)
|
|
||||||
s := fmt.Sprintf(format, v...)
|
|
||||||
panic(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panicln prints the message like Println and calls panic. The printing
|
|
||||||
// might be suppressed by the flag Lnopanic.
|
|
||||||
func (l *Logger) Panicln(v ...interface{}) {
|
|
||||||
l.Outputln(2, Lnopanic, v...)
|
|
||||||
s := fmt.Sprintln(v...)
|
|
||||||
panic(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panicln prints the message like Println and calls panic. The printing
|
|
||||||
// might be suppressed by the flag Lnopanic.
|
|
||||||
func Panicln(v ...interface{}) {
|
|
||||||
std.Outputln(2, Lnopanic, v...)
|
|
||||||
s := fmt.Sprintln(v...)
|
|
||||||
panic(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatal prints the message like Print and calls os.Exit(1). The
|
|
||||||
// printing might be suppressed by the flag Lnofatal.
|
|
||||||
func (l *Logger) Fatal(v ...interface{}) {
|
|
||||||
l.Output(2, Lnofatal, v...)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatal prints the message like Print and calls os.Exit(1). The
|
|
||||||
// printing might be suppressed by the flag Lnofatal.
|
|
||||||
func Fatal(v ...interface{}) {
|
|
||||||
std.Output(2, Lnofatal, v...)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatalf prints the message like Printf and calls os.Exit(1). The
|
|
||||||
// printing might be suppressed by the flag Lnofatal.
|
|
||||||
func (l *Logger) Fatalf(format string, v ...interface{}) {
|
|
||||||
l.Outputf(2, Lnofatal, format, v...)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatalf prints the message like Printf and calls os.Exit(1). The
|
|
||||||
// printing might be suppressed by the flag Lnofatal.
|
|
||||||
func Fatalf(format string, v ...interface{}) {
|
|
||||||
std.Outputf(2, Lnofatal, format, v...)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatalln prints the message like Println and calls os.Exit(1). The
|
|
||||||
// printing might be suppressed by the flag Lnofatal.
|
|
||||||
func (l *Logger) Fatalln(format string, v ...interface{}) {
|
|
||||||
l.Outputln(2, Lnofatal, v...)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatalln prints the message like Println and calls os.Exit(1). The
|
|
||||||
// printing might be suppressed by the flag Lnofatal.
|
|
||||||
func Fatalln(format string, v ...interface{}) {
|
|
||||||
std.Outputln(2, Lnofatal, v...)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warn prints the message like Print. The printing might be suppressed
|
|
||||||
// by the flag Lnowarn.
|
|
||||||
func (l *Logger) Warn(v ...interface{}) {
|
|
||||||
l.Output(2, Lnowarn, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warn prints the message like Print. The printing might be suppressed
|
|
||||||
// by the flag Lnowarn.
|
|
||||||
func Warn(v ...interface{}) {
|
|
||||||
std.Output(2, Lnowarn, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warnf prints the message like Printf. The printing might be suppressed
|
|
||||||
// by the flag Lnowarn.
|
|
||||||
func (l *Logger) Warnf(format string, v ...interface{}) {
|
|
||||||
l.Outputf(2, Lnowarn, format, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warnf prints the message like Printf. The printing might be suppressed
|
|
||||||
// by the flag Lnowarn.
|
|
||||||
func Warnf(format string, v ...interface{}) {
|
|
||||||
std.Outputf(2, Lnowarn, format, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warnln prints the message like Println. The printing might be suppressed
|
|
||||||
// by the flag Lnowarn.
|
|
||||||
func (l *Logger) Warnln(v ...interface{}) {
|
|
||||||
l.Outputln(2, Lnowarn, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warnln prints the message like Println. The printing might be suppressed
|
|
||||||
// by the flag Lnowarn.
|
|
||||||
func Warnln(v ...interface{}) {
|
|
||||||
std.Outputln(2, Lnowarn, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print prints the message like fmt.Print. The printing might be suppressed
|
|
||||||
// by the flag Lnoprint.
|
|
||||||
func (l *Logger) Print(v ...interface{}) {
|
|
||||||
l.Output(2, Lnoprint, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print prints the message like fmt.Print. The printing might be suppressed
|
|
||||||
// by the flag Lnoprint.
|
|
||||||
func Print(v ...interface{}) {
|
|
||||||
std.Output(2, Lnoprint, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Printf prints the message like fmt.Printf. The printing might be suppressed
|
|
||||||
// by the flag Lnoprint.
|
|
||||||
func (l *Logger) Printf(format string, v ...interface{}) {
|
|
||||||
l.Outputf(2, Lnoprint, format, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Printf prints the message like fmt.Printf. The printing might be suppressed
|
|
||||||
// by the flag Lnoprint.
|
|
||||||
func Printf(format string, v ...interface{}) {
|
|
||||||
std.Outputf(2, Lnoprint, format, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Println prints the message like fmt.Println. The printing might be
|
|
||||||
// suppressed by the flag Lnoprint.
|
|
||||||
func (l *Logger) Println(v ...interface{}) {
|
|
||||||
l.Outputln(2, Lnoprint, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Println prints the message like fmt.Println. The printing might be
|
|
||||||
// suppressed by the flag Lnoprint.
|
|
||||||
func Println(v ...interface{}) {
|
|
||||||
std.Outputln(2, Lnoprint, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debug prints the message like Print. The printing might be suppressed
|
|
||||||
// by the flag Lnodebug.
|
|
||||||
func (l *Logger) Debug(v ...interface{}) {
|
|
||||||
l.Output(2, Lnodebug, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debug prints the message like Print. The printing might be suppressed
|
|
||||||
// by the flag Lnodebug.
|
|
||||||
func Debug(v ...interface{}) {
|
|
||||||
std.Output(2, Lnodebug, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debugf prints the message like Printf. The printing might be suppressed
|
|
||||||
// by the flag Lnodebug.
|
|
||||||
func (l *Logger) Debugf(format string, v ...interface{}) {
|
|
||||||
l.Outputf(2, Lnodebug, format, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debugf prints the message like Printf. The printing might be suppressed
|
|
||||||
// by the flag Lnodebug.
|
|
||||||
func Debugf(format string, v ...interface{}) {
|
|
||||||
std.Outputf(2, Lnodebug, format, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debugln prints the message like Println. The printing might be suppressed
|
|
||||||
// by the flag Lnodebug.
|
|
||||||
func (l *Logger) Debugln(v ...interface{}) {
|
|
||||||
l.Outputln(2, Lnodebug, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debugln prints the message like Println. The printing might be suppressed
|
|
||||||
// by the flag Lnodebug.
|
|
||||||
func Debugln(v ...interface{}) {
|
|
||||||
std.Outputln(2, Lnodebug, v...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flags returns the current flags used by the logger.
|
|
||||||
func (l *Logger) Flags() int {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
return l.flag
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flags returns the current flags used by the standard logger.
|
|
||||||
func Flags() int {
|
|
||||||
return std.Flags()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetFlags sets the flags of the logger.
|
|
||||||
func (l *Logger) SetFlags(flag int) {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
l.flag = flag
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetFlags sets the flags for the standard logger.
|
|
||||||
func SetFlags(flag int) {
|
|
||||||
std.SetFlags(flag)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prefix returns the prefix used by the logger.
|
|
||||||
func (l *Logger) Prefix() string {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
return l.prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prefix returns the prefix used by the standard logger of the package.
|
|
||||||
func Prefix() string {
|
|
||||||
return std.Prefix()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPrefix sets the prefix for the logger.
|
|
||||||
func (l *Logger) SetPrefix(prefix string) {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
l.prefix = prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPrefix sets the prefix of the standard logger of the package.
|
|
||||||
func SetPrefix(prefix string) {
|
|
||||||
std.SetPrefix(prefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetOutput sets the output of the logger.
|
|
||||||
func (l *Logger) SetOutput(w io.Writer) {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
l.out = w
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetOutput sets the output for the standard logger of the package.
|
|
||||||
func SetOutput(w io.Writer) {
|
|
||||||
std.SetOutput(w)
|
|
||||||
}
|
|
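The xlog logger above mirrors the standard library's log package but adds per-category suppression flags. A small sketch, written as a hypothetical example test next to the removed file (xlog is internal to the xz module and not importable from outside it); it uses only the New, Printf, Debugln and flag names shown above:

// xlog_example_test.go (hypothetical, in package xlog)
package xlog

import (
	"bytes"
	"fmt"
)

func ExampleLogger() {
	var buf bytes.Buffer
	// No date/time flags, so the output stays deterministic; Lnodebug
	// suppresses the Debug* methods, just as in Lstdflags.
	l := New(&buf, "xz: ", Lnodebug)
	l.Printf("compressing %d bytes", 1024)
	l.Debugln("this line is suppressed by Lnodebug")
	fmt.Print(buf.String())
	// Output: xz: compressing 1024 bytes
}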
523
vendor/github.com/ulikunitz/xz/lzma/bintree.go
generated
vendored
523
vendor/github.com/ulikunitz/xz/lzma/bintree.go
generated
vendored
@ -1,523 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// node represents a node in the binary tree.
|
|
||||||
type node struct {
|
|
||||||
// x is the search value
|
|
||||||
x uint32
|
|
||||||
// p parent node
|
|
||||||
p uint32
|
|
||||||
// l left child
|
|
||||||
l uint32
|
|
||||||
// r right child
|
|
||||||
r uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// wordLen is the number of bytes represented by the v field of a node.
|
|
||||||
const wordLen = 4
|
|
||||||
|
|
||||||
// binTree supports the identification of the next operation based on a
|
|
||||||
// binary tree.
|
|
||||||
//
|
|
||||||
// Nodes will be identified by their index into the ring buffer.
|
|
||||||
type binTree struct {
|
|
||||||
dict *encoderDict
|
|
||||||
// ring buffer of nodes
|
|
||||||
node []node
|
|
||||||
// absolute offset of the entry for the next node. Position 4
|
|
||||||
// byte larger.
|
|
||||||
hoff int64
|
|
||||||
// front position in the node ring buffer
|
|
||||||
front uint32
|
|
||||||
// index of the root node
|
|
||||||
root uint32
|
|
||||||
// current x value
|
|
||||||
x uint32
|
|
||||||
// preallocated array
|
|
||||||
data []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// null represents the nonexistent index. We can't use zero because it
|
|
||||||
// would always exist or we would need to decrease the index for each
|
|
||||||
// reference.
|
|
||||||
const null uint32 = 1<<32 - 1
|
|
||||||
|
|
||||||
// newBinTree initializes the binTree structure. The capacity defines
|
|
||||||
// the size of the buffer and defines the maximum distance for which
|
|
||||||
// matches will be found.
|
|
||||||
func newBinTree(capacity int) (t *binTree, err error) {
|
|
||||||
if capacity < 1 {
|
|
||||||
return nil, errors.New(
|
|
||||||
"newBinTree: capacity must be larger than zero")
|
|
||||||
}
|
|
||||||
if int64(capacity) >= int64(null) {
|
|
||||||
return nil, errors.New(
|
|
||||||
"newBinTree: capacity must less 2^{32}-1")
|
|
||||||
}
|
|
||||||
t = &binTree{
|
|
||||||
node: make([]node, capacity),
|
|
||||||
hoff: -int64(wordLen),
|
|
||||||
root: null,
|
|
||||||
data: make([]byte, maxMatchLen),
|
|
||||||
}
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *binTree) SetDict(d *encoderDict) { t.dict = d }
|
|
||||||
|
|
||||||
// WriteByte writes a single byte into the binary tree.
|
|
||||||
func (t *binTree) WriteByte(c byte) error {
|
|
||||||
t.x = (t.x << 8) | uint32(c)
|
|
||||||
t.hoff++
|
|
||||||
if t.hoff < 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
v := t.front
|
|
||||||
if int64(v) < t.hoff {
|
|
||||||
// We are overwriting old nodes stored in the tree.
|
|
||||||
t.remove(v)
|
|
||||||
}
|
|
||||||
t.node[v].x = t.x
|
|
||||||
t.add(v)
|
|
||||||
t.front++
|
|
||||||
if int64(t.front) >= int64(len(t.node)) {
|
|
||||||
t.front = 0
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes a sequence of bytes into the binTree structure.
|
|
||||||
func (t *binTree) Write(p []byte) (n int, err error) {
|
|
||||||
for _, c := range p {
|
|
||||||
t.WriteByte(c)
|
|
||||||
}
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// add puts the node v into the tree. The node must not be part of the
|
|
||||||
// tree before.
|
|
||||||
func (t *binTree) add(v uint32) {
|
|
||||||
vn := &t.node[v]
|
|
||||||
// Set left and right to null indices.
|
|
||||||
vn.l, vn.r = null, null
|
|
||||||
// If the binary tree is empty make v the root.
|
|
||||||
if t.root == null {
|
|
||||||
t.root = v
|
|
||||||
vn.p = null
|
|
||||||
return
|
|
||||||
}
|
|
||||||
x := vn.x
|
|
||||||
p := t.root
|
|
||||||
// Search for the right leaf link and add the new node.
|
|
||||||
for {
|
|
||||||
pn := &t.node[p]
|
|
||||||
if x <= pn.x {
|
|
||||||
if pn.l == null {
|
|
||||||
pn.l = v
|
|
||||||
vn.p = p
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p = pn.l
|
|
||||||
} else {
|
|
||||||
if pn.r == null {
|
|
||||||
pn.r = v
|
|
||||||
vn.p = p
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p = pn.r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// parent returns the parent node index of v and the pointer to v value
|
|
||||||
// in the parent.
|
|
||||||
func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
|
|
||||||
if t.root == v {
|
|
||||||
return null, &t.root
|
|
||||||
}
|
|
||||||
p = t.node[v].p
|
|
||||||
if t.node[p].l == v {
|
|
||||||
ptr = &t.node[p].l
|
|
||||||
} else {
|
|
||||||
ptr = &t.node[p].r
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove node v.
|
|
||||||
func (t *binTree) remove(v uint32) {
|
|
||||||
vn := &t.node[v]
|
|
||||||
p, ptr := t.parent(v)
|
|
||||||
l, r := vn.l, vn.r
|
|
||||||
if l == null {
|
|
||||||
// Move the right child up.
|
|
||||||
*ptr = r
|
|
||||||
if r != null {
|
|
||||||
t.node[r].p = p
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if r == null {
|
|
||||||
// Move the left child up.
|
|
||||||
*ptr = l
|
|
||||||
t.node[l].p = p
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Search the in-order predecessor u.
|
|
||||||
un := &t.node[l]
|
|
||||||
ur := un.r
|
|
||||||
if ur == null {
|
|
||||||
// In order predecessor is l. Move it up.
|
|
||||||
un.r = r
|
|
||||||
t.node[r].p = l
|
|
||||||
un.p = p
|
|
||||||
*ptr = l
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var u uint32
|
|
||||||
for {
|
|
||||||
// Look for the max value in the tree where l is root.
|
|
||||||
u = ur
|
|
||||||
ur = t.node[u].r
|
|
||||||
if ur == null {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// replace u with ul
|
|
||||||
un = &t.node[u]
|
|
||||||
ul := un.l
|
|
||||||
up := un.p
|
|
||||||
t.node[up].r = ul
|
|
||||||
if ul != null {
|
|
||||||
t.node[ul].p = up
|
|
||||||
}
|
|
||||||
|
|
||||||
// replace v by u
|
|
||||||
un.l, un.r = l, r
|
|
||||||
t.node[l].p = u
|
|
||||||
t.node[r].p = u
|
|
||||||
*ptr = u
|
|
||||||
un.p = p
|
|
||||||
}
|
|
||||||
|
|
||||||
// search looks for the node that has the value x or for the nodes that
|
|
||||||
// brace it. The node highest in the tree with the value x will be
|
|
||||||
// returned. All other nodes with the same value live in the left subtree of
|
|
||||||
// the returned node.
|
|
||||||
func (t *binTree) search(v uint32, x uint32) (a, b uint32) {
|
|
||||||
a, b = null, null
|
|
||||||
if v == null {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
vn := &t.node[v]
|
|
||||||
if x <= vn.x {
|
|
||||||
if x == vn.x {
|
|
||||||
return v, v
|
|
||||||
}
|
|
||||||
b = v
|
|
||||||
if vn.l == null {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
v = vn.l
|
|
||||||
} else {
|
|
||||||
a = v
|
|
||||||
if vn.r == null {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
v = vn.r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// max returns the node with maximum value in the subtree with v as
|
|
||||||
// root.
|
|
||||||
func (t *binTree) max(v uint32) uint32 {
|
|
||||||
if v == null {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
r := t.node[v].r
|
|
||||||
if r == null {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
v = r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// min returns the node with the minimum value in the subtree with v as
|
|
||||||
// root.
|
|
||||||
func (t *binTree) min(v uint32) uint32 {
|
|
||||||
if v == null {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
l := t.node[v].l
|
|
||||||
if l == null {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
v = l
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// pred returns the in-order predecessor of node v.
|
|
||||||
func (t *binTree) pred(v uint32) uint32 {
|
|
||||||
if v == null {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
u := t.max(t.node[v].l)
|
|
||||||
if u != null {
|
|
||||||
return u
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
p := t.node[v].p
|
|
||||||
if p == null {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
if t.node[p].r == v {
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
v = p
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// succ returns the in-order successor of node v.
|
|
||||||
func (t *binTree) succ(v uint32) uint32 {
|
|
||||||
if v == null {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
u := t.min(t.node[v].r)
|
|
||||||
if u != null {
|
|
||||||
return u
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
p := t.node[v].p
|
|
||||||
if p == null {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
if t.node[p].l == v {
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
v = p
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// xval converts the first four bytes of a into a 32-bit unsigned
|
|
||||||
// integer in big-endian order.
|
|
||||||
func xval(a []byte) uint32 {
|
|
||||||
var x uint32
|
|
||||||
switch len(a) {
|
|
||||||
default:
|
|
||||||
x |= uint32(a[3])
|
|
||||||
fallthrough
|
|
||||||
case 3:
|
|
||||||
x |= uint32(a[2]) << 8
|
|
||||||
fallthrough
|
|
||||||
case 2:
|
|
||||||
x |= uint32(a[1]) << 16
|
|
||||||
fallthrough
|
|
||||||
case 1:
|
|
||||||
x |= uint32(a[0]) << 24
|
|
||||||
case 0:
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
// dumpX converts value x into a four-letter string.
|
|
||||||
func dumpX(x uint32) string {
|
|
||||||
a := make([]byte, 4)
|
|
||||||
for i := 0; i < 4; i++ {
|
|
||||||
c := byte(x >> uint((3-i)*8))
|
|
||||||
if unicode.IsGraphic(rune(c)) {
|
|
||||||
a[i] = c
|
|
||||||
} else {
|
|
||||||
a[i] = '.'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(a)
|
|
||||||
}
|
|
||||||
|
|
||||||
// dumpNode writes a representation of the node v into the io.Writer.
|
|
||||||
func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
|
|
||||||
if v == null {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
vn := &t.node[v]
|
|
||||||
|
|
||||||
t.dumpNode(w, vn.r, indent+2)
|
|
||||||
|
|
||||||
for i := 0; i < indent; i++ {
|
|
||||||
fmt.Fprint(w, " ")
|
|
||||||
}
|
|
||||||
if vn.p == null {
|
|
||||||
fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x))
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.dumpNode(w, vn.l, indent+2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// dump prints a representation of the binary tree into the writer.
|
|
||||||
func (t *binTree) dump(w io.Writer) error {
|
|
||||||
bw := bufio.NewWriter(w)
|
|
||||||
t.dumpNode(bw, t.root, 0)
|
|
||||||
return bw.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *binTree) distance(v uint32) int {
|
|
||||||
dist := int(t.front) - int(v)
|
|
||||||
if dist <= 0 {
|
|
||||||
dist += len(t.node)
|
|
||||||
}
|
|
||||||
return dist
|
|
||||||
}
|
|
||||||
|
|
||||||
type matchParams struct {
|
|
||||||
rep [4]uint32
|
|
||||||
// length when match will be accepted
|
|
||||||
nAccept int
|
|
||||||
// nodes to check
|
|
||||||
check int
|
|
||||||
// finish if length get shorter
|
|
||||||
stopShorter bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *binTree) match(m match, distIter func() (int, bool), p matchParams,
|
|
||||||
) (r match, checked int, accepted bool) {
|
|
||||||
buf := &t.dict.buf
|
|
||||||
for {
|
|
||||||
if checked >= p.check {
|
|
||||||
return m, checked, true
|
|
||||||
}
|
|
||||||
dist, ok := distIter()
|
|
||||||
if !ok {
|
|
||||||
return m, checked, false
|
|
||||||
}
|
|
||||||
checked++
|
|
||||||
if m.n > 0 {
|
|
||||||
i := buf.rear - dist + m.n - 1
|
|
||||||
if i < 0 {
|
|
||||||
i += len(buf.data)
|
|
||||||
} else if i >= len(buf.data) {
|
|
||||||
i -= len(buf.data)
|
|
||||||
}
|
|
||||||
if buf.data[i] != t.data[m.n-1] {
|
|
||||||
if p.stopShorter {
|
|
||||||
return m, checked, false
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n := buf.matchLen(dist, t.data)
|
|
||||||
switch n {
|
|
||||||
case 0:
|
|
||||||
if p.stopShorter {
|
|
||||||
return m, checked, false
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
case 1:
|
|
||||||
if uint32(dist-minDistance) != p.rep[0] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if n < m.n || (n == m.n && int64(dist) >= m.distance) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
m = match{int64(dist), n}
|
|
||||||
if n >= p.nAccept {
|
|
||||||
return m, checked, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *binTree) NextOp(rep [4]uint32) operation {
|
|
||||||
// retrieve maxMatchLen data
|
|
||||||
n, _ := t.dict.buf.Peek(t.data[:maxMatchLen])
|
|
||||||
if n == 0 {
|
|
||||||
panic("no data in buffer")
|
|
||||||
}
|
|
||||||
t.data = t.data[:n]
|
|
||||||
|
|
||||||
var (
|
|
||||||
m match
|
|
||||||
x, u, v uint32
|
|
||||||
iterPred, iterSucc func() (int, bool)
|
|
||||||
)
|
|
||||||
p := matchParams{
|
|
||||||
rep: rep,
|
|
||||||
nAccept: maxMatchLen,
|
|
||||||
check: 32,
|
|
||||||
}
|
|
||||||
i := 4
|
|
||||||
iterSmall := func() (dist int, ok bool) {
|
|
||||||
i--
|
|
||||||
if i <= 0 {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
return i, true
|
|
||||||
}
|
|
||||||
m, checked, accepted := t.match(m, iterSmall, p)
|
|
||||||
if accepted {
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
p.check -= checked
|
|
||||||
x = xval(t.data)
|
|
||||||
u, v = t.search(t.root, x)
|
|
||||||
if u == v && len(t.data) == 4 {
|
|
||||||
iter := func() (dist int, ok bool) {
|
|
||||||
if u == null {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
dist = t.distance(u)
|
|
||||||
u, v = t.search(t.node[u].l, x)
|
|
||||||
if u != v {
|
|
||||||
u = null
|
|
||||||
}
|
|
||||||
return dist, true
|
|
||||||
}
|
|
||||||
m, _, _ = t.match(m, iter, p)
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
p.stopShorter = true
|
|
||||||
iterSucc = func() (dist int, ok bool) {
|
|
||||||
if v == null {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
dist = t.distance(v)
|
|
||||||
v = t.succ(v)
|
|
||||||
return dist, true
|
|
||||||
}
|
|
||||||
m, checked, accepted = t.match(m, iterSucc, p)
|
|
||||||
if accepted {
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
p.check -= checked
|
|
||||||
iterPred = func() (dist int, ok bool) {
|
|
||||||
if u == null {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
dist = t.distance(u)
|
|
||||||
u = t.pred(u)
|
|
||||||
return dist, true
|
|
||||||
}
|
|
||||||
m, _, _ = t.match(m, iterPred, p)
|
|
||||||
end:
|
|
||||||
if m.n == 0 {
|
|
||||||
return lit{t.data[0]}
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
45
vendor/github.com/ulikunitz/xz/lzma/bitops.go
generated
vendored
45
vendor/github.com/ulikunitz/xz/lzma/bitops.go
generated
vendored
@ -1,45 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
/* Naming conventions follow the CodeReviewComments in the Go Wiki. */
|
|
||||||
|
|
||||||
// ntz32Const is used by the functions NTZ and NLZ.
|
|
||||||
const ntz32Const = 0x04d7651f
|
|
||||||
|
|
||||||
// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé.
|
|
||||||
// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26.
|
|
||||||
var ntz32Table = [32]int8{
|
|
||||||
0, 1, 2, 24, 3, 19, 6, 25,
|
|
||||||
22, 4, 20, 10, 16, 7, 12, 26,
|
|
||||||
31, 23, 18, 5, 21, 9, 15, 11,
|
|
||||||
30, 17, 8, 14, 29, 13, 28, 27,
|
|
||||||
}
|
|
||||||
|
|
||||||
// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
|
|
||||||
func ntz32(x uint32) int {
|
|
||||||
if x == 0 {
|
|
||||||
return 32
|
|
||||||
}
|
|
||||||
x = (x & -x) * ntz32Const
|
|
||||||
return int(ntz32Table[x>>27])
|
|
||||||
}
|
|
||||||
|
|
||||||
// nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
|
|
||||||
func nlz32(x uint32) int {
|
|
||||||
// Smear left most bit to the right
|
|
||||||
x |= x >> 1
|
|
||||||
x |= x >> 2
|
|
||||||
x |= x >> 4
|
|
||||||
x |= x >> 8
|
|
||||||
x |= x >> 16
|
|
||||||
// Use ntz mechanism to calculate nlz.
|
|
||||||
x++
|
|
||||||
if x == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
x *= ntz32Const
|
|
||||||
return 32 - int(ntz32Table[x>>27])
|
|
||||||
}
|
|
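ntz32 above uses the classic de Bruijn multiplication trick: isolate the lowest set bit, multiply by a fixed constant, and use the top five bits of the product as a table index. A standalone check of that trick against math/bits (the constant and the table are copied from the removed file; the surrounding program is purely illustrative):

package main

import (
	"fmt"
	"math/bits"
)

const ntz32Const = 0x04d7651f

var ntz32Table = [32]int8{
	0, 1, 2, 24, 3, 19, 6, 25,
	22, 4, 20, 10, 16, 7, 12, 26,
	31, 23, 18, 5, 21, 9, 15, 11,
	30, 17, 8, 14, 29, 13, 28, 27,
}

// ntz32 mirrors the removed function: number of trailing zeros of x.
func ntz32(x uint32) int {
	if x == 0 {
		return 32
	}
	// Isolate the lowest set bit, multiply by the de Bruijn-style
	// constant and index the table with the top 5 bits.
	x = (x & -x) * ntz32Const
	return int(ntz32Table[x>>27])
}

func main() {
	for _, x := range []uint32{0, 1, 0x80, 0xdeadbeef} {
		// Both columns agree for every input.
		fmt.Println(x, ntz32(x), bits.TrailingZeros32(x))
	}
}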
39
vendor/github.com/ulikunitz/xz/lzma/breader.go
generated
vendored
39
vendor/github.com/ulikunitz/xz/lzma/breader.go
generated
vendored
@ -1,39 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// breader provides the ReadByte function for a Reader. It doesn't read
|
|
||||||
// more data from the reader than absolutely necessary.
|
|
||||||
type breader struct {
|
|
||||||
io.Reader
|
|
||||||
// helper slice to save allocations
|
|
||||||
p []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByteReader converts an io.Reader into an io.ByteReader.
|
|
||||||
func ByteReader(r io.Reader) io.ByteReader {
|
|
||||||
br, ok := r.(io.ByteReader)
|
|
||||||
if !ok {
|
|
||||||
return &breader{r, make([]byte, 1)}
|
|
||||||
}
|
|
||||||
return br
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadByte reads a single byte from the underlying reader.
|
|
||||||
func (r *breader) ReadByte() (c byte, err error) {
|
|
||||||
n, err := r.Reader.Read(r.p)
|
|
||||||
if n < 1 {
|
|
||||||
if err == nil {
|
|
||||||
err = errors.New("breader.ReadByte: no data")
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return r.p[0], nil
|
|
||||||
}
|
|
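ByteReader above only wraps readers that do not already implement io.ByteReader. A short usage sketch, assuming the function is still exported from the lzma package after the dependency upgrade (the import path is inferred from the vendored location above):

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	// io.LimitReader returns a plain io.Reader without ReadByte, so
	// ByteReader wraps it in the one-byte helper; a strings.Reader passed
	// directly would be returned unchanged, since it already implements
	// io.ByteReader.
	var r io.Reader = io.LimitReader(strings.NewReader("xz"), 2)
	br := lzma.ByteReader(r)
	b, err := br.ReadByte()
	fmt.Println(string(b), err) // x <nil>
}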
171
vendor/github.com/ulikunitz/xz/lzma/buffer.go
generated
vendored
171
vendor/github.com/ulikunitz/xz/lzma/buffer.go
generated
vendored
@ -1,171 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// buffer provides a circular buffer of bytes. If the front index equals
|
|
||||||
// the rear index the buffer is empty. As a consequence front cannot be
|
|
||||||
// equal rear for a full buffer. So a full buffer has a length that is
|
|
||||||
// one byte less than the length of the data slice.
|
|
||||||
type buffer struct {
|
|
||||||
data []byte
|
|
||||||
front int
|
|
||||||
rear int
|
|
||||||
}
|
|
||||||
|
|
||||||
// newBuffer creates a buffer with the given size.
|
|
||||||
func newBuffer(size int) *buffer {
|
|
||||||
return &buffer{data: make([]byte, size+1)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cap returns the capacity of the buffer.
|
|
||||||
func (b *buffer) Cap() int {
|
|
||||||
return len(b.data) - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resets the buffer. The front and rear index are set to zero.
|
|
||||||
func (b *buffer) Reset() {
|
|
||||||
b.front = 0
|
|
||||||
b.rear = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Buffered returns the number of bytes buffered.
|
|
||||||
func (b *buffer) Buffered() int {
|
|
||||||
delta := b.front - b.rear
|
|
||||||
if delta < 0 {
|
|
||||||
delta += len(b.data)
|
|
||||||
}
|
|
||||||
return delta
|
|
||||||
}
|
|
||||||
|
|
||||||
// Available returns the number of bytes available for writing.
|
|
||||||
func (b *buffer) Available() int {
|
|
||||||
delta := b.rear - 1 - b.front
|
|
||||||
if delta < 0 {
|
|
||||||
delta += len(b.data)
|
|
||||||
}
|
|
||||||
return delta
|
|
||||||
}
|
|
||||||
|
|
||||||
// addIndex adds a non-negative integer to the index i and returns the
|
|
||||||
// resulting index. The function takes care of wrapping the index as
|
|
||||||
// well as potential overflow situations.
|
|
||||||
func (b *buffer) addIndex(i int, n int) int {
|
|
||||||
// subtraction of len(b.data) prevents overflow
|
|
||||||
i += n - len(b.data)
|
|
||||||
if i < 0 {
|
|
||||||
i += len(b.data)
|
|
||||||
}
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads bytes from the buffer into p and returns the number of
|
|
||||||
// bytes read. The function never returns an error but might return less
|
|
||||||
// data than requested.
|
|
||||||
func (b *buffer) Read(p []byte) (n int, err error) {
|
|
||||||
n, err = b.Peek(p)
|
|
||||||
b.rear = b.addIndex(b.rear, n)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Peek reads bytes from the buffer into p without changing the buffer.
|
|
||||||
// Peek will never return an error but might return less data than
|
|
||||||
// requested.
|
|
||||||
func (b *buffer) Peek(p []byte) (n int, err error) {
|
|
||||||
m := b.Buffered()
|
|
||||||
n = len(p)
|
|
||||||
if m < n {
|
|
||||||
n = m
|
|
||||||
p = p[:n]
|
|
||||||
}
|
|
||||||
k := copy(p, b.data[b.rear:])
|
|
||||||
if k < n {
|
|
||||||
copy(p[k:], b.data)
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Discard skips the n next bytes to read from the buffer, returning the
|
|
||||||
// bytes discarded.
|
|
||||||
//
|
|
||||||
// If Discard skips fewer than n bytes, it returns an error.
|
|
||||||
func (b *buffer) Discard(n int) (discarded int, err error) {
|
|
||||||
if n < 0 {
|
|
||||||
return 0, errors.New("buffer.Discard: negative argument")
|
|
||||||
}
|
|
||||||
m := b.Buffered()
|
|
||||||
if m < n {
|
|
||||||
n = m
|
|
||||||
err = errors.New(
|
|
||||||
"buffer.Discard: discarded less bytes then requested")
|
|
||||||
}
|
|
||||||
b.rear = b.addIndex(b.rear, n)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrNoSpace indicates that there is insufficient space for the Write
|
|
||||||
// operation.
|
|
||||||
var ErrNoSpace = errors.New("insufficient space")
|
|
||||||
|
|
||||||
// Write puts data into the buffer. If fewer bytes are written than
|
|
||||||
// requested ErrNoSpace is returned.
|
|
||||||
func (b *buffer) Write(p []byte) (n int, err error) {
|
|
||||||
m := b.Available()
|
|
||||||
n = len(p)
|
|
||||||
if m < n {
|
|
||||||
n = m
|
|
||||||
p = p[:m]
|
|
||||||
err = ErrNoSpace
|
|
||||||
}
|
|
||||||
k := copy(b.data[b.front:], p)
|
|
||||||
if k < n {
|
|
||||||
copy(b.data, p[k:])
|
|
||||||
}
|
|
||||||
b.front = b.addIndex(b.front, n)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteByte writes a single byte into the buffer. The error ErrNoSpace
|
|
||||||
// is returned if no single byte is available in the buffer for writing.
|
|
||||||
func (b *buffer) WriteByte(c byte) error {
|
|
||||||
if b.Available() < 1 {
|
|
||||||
return ErrNoSpace
|
|
||||||
}
|
|
||||||
b.data[b.front] = c
|
|
||||||
b.front = b.addIndex(b.front, 1)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// prefixLen returns the length of the common prefix of a and b.
|
|
||||||
func prefixLen(a, b []byte) int {
|
|
||||||
if len(a) > len(b) {
|
|
||||||
a, b = b, a
|
|
||||||
}
|
|
||||||
for i, c := range a {
|
|
||||||
if b[i] != c {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(a)
|
|
||||||
}
|
|
||||||
|
|
||||||
// matchLen returns the length of the common prefix for the given
|
|
||||||
// distance from the rear and the byte slice p.
|
|
||||||
func (b *buffer) matchLen(distance int, p []byte) int {
|
|
||||||
var n int
|
|
||||||
i := b.rear - distance
|
|
||||||
if i < 0 {
|
|
||||||
if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
p = p[n:]
|
|
||||||
i = 0
|
|
||||||
}
|
|
||||||
n += prefixLen(p, b.data[i:])
|
|
||||||
return n
|
|
||||||
}
|
|
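The buffer type above keeps one spare byte in data so that front == rear can unambiguously mean "empty". A tiny standalone illustration of that convention (the type and field names here are hypothetical; only the arithmetic mirrors Buffered and Available above):

package main

import "fmt"

// ring is a minimal stand-in for the removed buffer type.
type ring struct {
	data        []byte
	front, rear int // front: next write position, rear: next read position
}

// buffered mirrors buffer.Buffered: bytes currently stored.
func (b *ring) buffered() int {
	d := b.front - b.rear
	if d < 0 {
		d += len(b.data)
	}
	return d
}

// available mirrors buffer.Available: bytes that can still be written.
func (b *ring) available() int {
	d := b.rear - 1 - b.front
	if d < 0 {
		d += len(b.data)
	}
	return d
}

func main() {
	b := &ring{data: make([]byte, 4+1)} // capacity 4, like newBuffer(4)
	fmt.Println(b.buffered(), b.available()) // 0 4
	// After writing 4 bytes the front index stops one slot short of rear,
	// so a full buffer is still distinguishable from an empty one.
	b.front = 4
	fmt.Println(b.buffered(), b.available()) // 4 0
}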
37
vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
generated
vendored
37
vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
generated
vendored
@ -1,37 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"io"
)

// ErrLimit indicates that the limit of the LimitedByteWriter has been
// reached.
var ErrLimit = errors.New("limit reached")

// LimitedByteWriter provides a byte writer that can be written until a
// limit is reached. The field N provides the number of remaining
// bytes.
type LimitedByteWriter struct {
	BW io.ByteWriter
	N  int64
}

// WriteByte writes a single byte to the limited byte writer. It returns
// ErrLimit if the limit has been reached. If the byte is successfully
// written the field N of the LimitedByteWriter will be decremented by
// one.
func (l *LimitedByteWriter) WriteByte(c byte) error {
	if l.N <= 0 {
		return ErrLimit
	}
	if err := l.BW.WriteByte(c); err != nil {
		return err
	}
	l.N--
	return nil
}
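LimitedByteWriter is exported, so it can wrap any io.ByteWriter such as bytes.Buffer. A short usage sketch, assuming the type remains available from the lzma package of the upgraded dependency (import path inferred from the vendored location above):

package main

import (
	"bytes"
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer // bytes.Buffer already implements io.ByteWriter
	w := &lzma.LimitedByteWriter{BW: &buf, N: 3}
	var err error
	for _, c := range []byte("abcdef") {
		if err = w.WriteByte(c); err != nil {
			break // lzma.ErrLimit after the third byte
		}
	}
	fmt.Println(buf.String(), err == lzma.ErrLimit) // abc true
}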
277
vendor/github.com/ulikunitz/xz/lzma/decoder.go
generated
vendored
277
vendor/github.com/ulikunitz/xz/lzma/decoder.go
generated
vendored
@ -1,277 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// decoder decodes a raw LZMA stream without any header.
|
|
||||||
type decoder struct {
|
|
||||||
// dictionary; the rear pointer of the buffer will be used for
|
|
||||||
// reading the data.
|
|
||||||
Dict *decoderDict
|
|
||||||
// decoder state
|
|
||||||
State *state
|
|
||||||
// range decoder
|
|
||||||
rd *rangeDecoder
|
|
||||||
// start stores the head value of the dictionary for the LZMA
|
|
||||||
// stream
|
|
||||||
start int64
|
|
||||||
// size of uncompressed data
|
|
||||||
size int64
|
|
||||||
// end-of-stream encountered
|
|
||||||
eos bool
|
|
||||||
// EOS marker found
|
|
||||||
eosMarker bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// newDecoder creates a new decoder instance. The parameter size provides
|
|
||||||
// the expected byte size of the decompressed data. If the size is
|
|
||||||
// unknown, use a negative value. In that case the decoder will look for
|
|
||||||
// a terminating end-of-stream marker.
|
|
||||||
func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) {
|
|
||||||
rd, err := newRangeDecoder(br)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
d = &decoder{
|
|
||||||
State: state,
|
|
||||||
Dict: dict,
|
|
||||||
rd: rd,
|
|
||||||
size: size,
|
|
||||||
start: dict.pos(),
|
|
||||||
}
|
|
||||||
return d, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reopen restarts the decoder with a new byte reader and a new size. Reopen
|
|
||||||
// resets the Decompressed counter to zero.
|
|
||||||
func (d *decoder) Reopen(br io.ByteReader, size int64) error {
|
|
||||||
var err error
|
|
||||||
	if d.rd, err = newRangeDecoder(br); err != nil {
		return err
	}
	d.start = d.Dict.pos()
	d.size = size
	d.eos = false
	return nil
}

// decodeLiteral decodes a single literal from the LZMA stream.
func (d *decoder) decodeLiteral() (op operation, err error) {
	litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head)
	match := d.Dict.byteAt(int(d.State.rep[0]) + 1)
	s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState)
	if err != nil {
		return nil, err
	}
	return lit{s}, nil
}

// errEOS indicates that an EOS marker has been found.
var errEOS = errors.New("EOS marker found")

// readOp decodes the next operation from the compressed stream. It
// returns the operation. If an explicit end of stream marker is
// identified the eos error is returned.
func (d *decoder) readOp() (op operation, err error) {
	// Value of the end of stream (EOS) marker
	const eosDist = 1<<32 - 1

	state, state2, posState := d.State.states(d.Dict.head)

	b, err := d.State.isMatch[state2].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	if b == 0 {
		// literal
		op, err := d.decodeLiteral()
		if err != nil {
			return nil, err
		}
		d.State.updateStateLiteral()
		return op, nil
	}
	b, err = d.State.isRep[state].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	if b == 0 {
		// simple match
		d.State.rep[3], d.State.rep[2], d.State.rep[1] =
			d.State.rep[2], d.State.rep[1], d.State.rep[0]

		d.State.updateStateMatch()
		// The length decoder returns the length offset.
		n, err := d.State.lenCodec.Decode(d.rd, posState)
		if err != nil {
			return nil, err
		}
		// The dist decoder returns the distance offset. The actual
		// distance is 1 higher.
		d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n)
		if err != nil {
			return nil, err
		}
		if d.State.rep[0] == eosDist {
			d.eosMarker = true
			return nil, errEOS
		}
		op = match{n: int(n) + minMatchLen,
			distance: int64(d.State.rep[0]) + minDistance}
		return op, nil
	}
	b, err = d.State.isRepG0[state].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	dist := d.State.rep[0]
	if b == 0 {
		// rep match 0
		b, err = d.State.isRepG0Long[state2].Decode(d.rd)
		if err != nil {
			return nil, err
		}
		if b == 0 {
			d.State.updateStateShortRep()
			op = match{n: 1, distance: int64(dist) + minDistance}
			return op, nil
		}
	} else {
		b, err = d.State.isRepG1[state].Decode(d.rd)
		if err != nil {
			return nil, err
		}
		if b == 0 {
			dist = d.State.rep[1]
		} else {
			b, err = d.State.isRepG2[state].Decode(d.rd)
			if err != nil {
				return nil, err
			}
			if b == 0 {
				dist = d.State.rep[2]
			} else {
				dist = d.State.rep[3]
				d.State.rep[3] = d.State.rep[2]
			}
			d.State.rep[2] = d.State.rep[1]
		}
		d.State.rep[1] = d.State.rep[0]
		d.State.rep[0] = dist
	}
	n, err := d.State.repLenCodec.Decode(d.rd, posState)
	if err != nil {
		return nil, err
	}
	d.State.updateStateRep()
	op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance}
	return op, nil
}

// apply takes the operation and transforms the decoder dictionary accordingly.
func (d *decoder) apply(op operation) error {
	var err error
	switch x := op.(type) {
	case match:
		err = d.Dict.writeMatch(x.distance, x.n)
	case lit:
		err = d.Dict.WriteByte(x.b)
	default:
		panic("op is neither a match nor a literal")
	}
	return err
}

// decompress fills the dictionary unless no space for new data is
// available. If the end of the LZMA stream has been reached io.EOF will
// be returned.
func (d *decoder) decompress() error {
	if d.eos {
		return io.EOF
	}
	for d.Dict.Available() >= maxMatchLen {
		op, err := d.readOp()
		switch err {
		case nil:
			break
		case errEOS:
			d.eos = true
			if !d.rd.possiblyAtEnd() {
				return errDataAfterEOS
			}
			if d.size >= 0 && d.size != d.Decompressed() {
				return errSize
			}
			return io.EOF
		case io.EOF:
			d.eos = true
			return io.ErrUnexpectedEOF
		default:
			return err
		}
		if err = d.apply(op); err != nil {
			return err
		}
		if d.size >= 0 && d.Decompressed() >= d.size {
			d.eos = true
			if d.Decompressed() > d.size {
				return errSize
			}
			if !d.rd.possiblyAtEnd() {
				switch _, err = d.readOp(); err {
				case nil:
					return errSize
				case io.EOF:
					return io.ErrUnexpectedEOF
				case errEOS:
					break
				default:
					return err
				}
			}
			return io.EOF
		}
	}
	return nil
}

// Errors that may be returned while decoding data.
var (
	errDataAfterEOS = errors.New("lzma: data after end of stream marker")
	errSize         = errors.New("lzma: wrong uncompressed data size")
)

// Read reads data from the buffer. If no more data is available io.EOF is
// returned.
func (d *decoder) Read(p []byte) (n int, err error) {
	var k int
	for {
		// Read of decoder dict never returns an error.
		k, err = d.Dict.Read(p[n:])
		if err != nil {
			panic(fmt.Errorf("dictionary read error %s", err))
		}
		if k == 0 && d.eos {
			return n, io.EOF
		}
		n += k
		if n >= len(p) {
			return n, nil
		}
		if err = d.decompress(); err != nil && err != io.EOF {
			return n, err
		}
	}
}

// Decompressed returns the number of bytes decompressed by the decoder.
func (d *decoder) Decompressed() int64 {
	return d.Dict.pos() - d.start
}
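The decoder above is normally driven through the package's reader API rather than used directly. A minimal, illustrative sketch of that usage follows; the input file name and the panic-based error handling are assumptions made for the example and are not part of this change.

package main

import (
	"io"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	// Open an .lzma file (hypothetical path) and stream it through the
	// package reader, which internally runs the readOp/apply loop above.
	f, err := os.Open("data.lzma")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r, err := lzma.NewReader(f)
	if err != nil {
		panic(err)
	}
	// Copy decompressed bytes to stdout; the loop ends at io.EOF.
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}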
135
vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
generated
vendored
@ -1,135 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
)

// decoderDict provides the dictionary for the decoder. The whole
// dictionary is used as reader buffer.
type decoderDict struct {
	buf  buffer
	head int64
}

// newDecoderDict creates a new decoder dictionary. The whole dictionary
// will be used as reader buffer.
func newDecoderDict(dictCap int) (d *decoderDict, err error) {
	// lower limit supports easy test cases
	if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
		return nil, errors.New("lzma: dictCap out of range")
	}
	d = &decoderDict{buf: *newBuffer(dictCap)}
	return d, nil
}

// Reset clears the dictionary. The read buffer is not changed, so the
// buffered data can still be read.
func (d *decoderDict) Reset() {
	d.head = 0
}

// WriteByte writes a single byte into the dictionary. It is used to
// write literals into the dictionary.
func (d *decoderDict) WriteByte(c byte) error {
	if err := d.buf.WriteByte(c); err != nil {
		return err
	}
	d.head++
	return nil
}

// pos returns the position of the dictionary head.
func (d *decoderDict) pos() int64 { return d.head }

// dictLen returns the actual length of the dictionary.
func (d *decoderDict) dictLen() int {
	capacity := d.buf.Cap()
	if d.head >= int64(capacity) {
		return capacity
	}
	return int(d.head)
}

// byteAt returns a byte stored in the dictionary. If the distance is
// non-positive or exceeds the current length of the dictionary the zero
// byte is returned.
func (d *decoderDict) byteAt(dist int) byte {
	if !(0 < dist && dist <= d.dictLen()) {
		return 0
	}
	i := d.buf.front - dist
	if i < 0 {
		i += len(d.buf.data)
	}
	return d.buf.data[i]
}

// writeMatch writes the match at the top of the dictionary. The given
// distance must point in the current dictionary and the length must not
// exceed the maximum length 273 supported in LZMA.
//
// The error value ErrNoSpace indicates that no space is available in
// the dictionary for writing. You need to read from the dictionary
// first.
func (d *decoderDict) writeMatch(dist int64, length int) error {
	if !(0 < dist && dist <= int64(d.dictLen())) {
		return errors.New("writeMatch: distance out of range")
	}
	if !(0 < length && length <= maxMatchLen) {
		return errors.New("writeMatch: length out of range")
	}
	if length > d.buf.Available() {
		return ErrNoSpace
	}
	d.head += int64(length)

	i := d.buf.front - int(dist)
	if i < 0 {
		i += len(d.buf.data)
	}
	for length > 0 {
		var p []byte
		if i >= d.buf.front {
			p = d.buf.data[i:]
			i = 0
		} else {
			p = d.buf.data[i:d.buf.front]
			i = d.buf.front
		}
		if len(p) > length {
			p = p[:length]
		}
		if _, err := d.buf.Write(p); err != nil {
			panic(fmt.Errorf("d.buf.Write returned error %s", err))
		}
		length -= len(p)
	}
	return nil
}

// Write writes the given bytes into the dictionary and advances the
// head.
func (d *decoderDict) Write(p []byte) (n int, err error) {
	n, err = d.buf.Write(p)
	d.head += int64(n)
	return n, err
}

// Available returns the number of available bytes for writing into the
// decoder dictionary.
func (d *decoderDict) Available() int { return d.buf.Available() }

// Read reads data from the buffer contained in the decoder dictionary.
func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }

// Buffered returns the number of bytes currently buffered in the
// decoder dictionary.
func (d *decoderDict) buffered() int { return d.buf.Buffered() }

// Peek gets data from the buffer without advancing the rear index.
func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) }
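The byteAt/writeMatch pair above is plain ring-buffer indexing. Below is a compact, self-contained sketch of the wrap-around lookup; the buffer contents, the front position, and the byte values are invented for illustration and are not taken from this diff.

package main

import "fmt"

// byteAt returns the byte dist positions behind front in a circular
// buffer, mirroring decoderDict.byteAt's wrap-around indexing.
func byteAt(data []byte, front, dist int) byte {
	i := front - dist
	if i < 0 {
		i += len(data)
	}
	return data[i]
}

func main() {
	// The logical stream ends in ...a b c d e; with capacity 5 the bytes
	// wrap around, leaving this layout with the write cursor at index 2.
	data := []byte{'d', 'e', 'a', 'b', 'c'}
	front := 2
	for dist := 1; dist <= 5; dist++ {
		fmt.Printf("dist %d -> %c\n", dist, byteAt(data, front, dist))
	}
	// Output: e d c b a, i.e. the most recent byte first.
}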
49
vendor/github.com/ulikunitz/xz/lzma/directcodec.go
generated
vendored
@ -1,49 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import "fmt"

// directCodec allows the encoding and decoding of values with a fixed number
// of bits. The number of bits must be in the range [1,32].
type directCodec byte

// makeDirectCodec creates a directCodec. The function panics if the number of
// bits is not in the range [1,32].
func makeDirectCodec(bits int) directCodec {
	if !(1 <= bits && bits <= 32) {
		panic(fmt.Errorf("bits=%d out of range", bits))
	}
	return directCodec(bits)
}

// Bits returns the number of bits supported by this codec.
func (dc directCodec) Bits() int {
	return int(dc)
}

// Encode uses the range encoder to encode a value with the fixed number of
// bits. The most-significant bit is encoded first.
func (dc directCodec) Encode(e *rangeEncoder, v uint32) error {
	for i := int(dc) - 1; i >= 0; i-- {
		if err := e.DirectEncodeBit(v >> uint(i)); err != nil {
			return err
		}
	}
	return nil
}

// Decode uses the range decoder to decode a value with the given number of
// given bits. The most-significant bit is decoded first.
func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) {
	for i := int(dc) - 1; i >= 0; i-- {
		x, err := d.DirectDecodeBit()
		if err != nil {
			return 0, err
		}
		v = (v << 1) | x
	}
	return v, nil
}
156
vendor/github.com/ulikunitz/xz/lzma/distcodec.go
generated
vendored
@ -1,156 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

// Constants used by the distance codec.
const (
	// minimum supported distance
	minDistance = 1
	// maximum supported distance, value is used for the eos marker.
	maxDistance = 1 << 32
	// number of the supported len states
	lenStates = 4
	// start for the position models
	startPosModel = 4
	// first index with align bits support
	endPosModel = 14
	// bits for the position slots
	posSlotBits = 6
	// number of align bits
	alignBits = 4
	// maximum position slot
	maxPosSlot = 63
)

// distCodec provides encoding and decoding of distance values.
type distCodec struct {
	posSlotCodecs [lenStates]treeCodec
	posModel      [endPosModel - startPosModel]treeReverseCodec
	alignCodec    treeReverseCodec
}

// deepcopy initializes dc as deep copy of the source.
func (dc *distCodec) deepcopy(src *distCodec) {
	if dc == src {
		return
	}
	for i := range dc.posSlotCodecs {
		dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i])
	}
	for i := range dc.posModel {
		dc.posModel[i].deepcopy(&src.posModel[i])
	}
	dc.alignCodec.deepcopy(&src.alignCodec)
}

// distBits returns the number of bits required to encode dist.
func distBits(dist uint32) int {
	if dist < startPosModel {
		return 6
	}
	// slot s > 3, dist d
	// s = 2(bits(d)-1) + bit(d, bits(d)-2)
	// s>>1 = bits(d)-1
	// bits(d) = 32-nlz32(d)
	// s>>1=31-nlz32(d)
	// n = 5 + (s>>1) = 36 - nlz32(d)
	return 36 - nlz32(dist)
}

// newDistCodec creates a new distance codec.
func (dc *distCodec) init() {
	for i := range dc.posSlotCodecs {
		dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits)
	}
	for i := range dc.posModel {
		posSlot := startPosModel + i
		bits := (posSlot >> 1) - 1
		dc.posModel[i] = makeTreeReverseCodec(bits)
	}
	dc.alignCodec = makeTreeReverseCodec(alignBits)
}

// lenState converts the value l to a supported lenState value.
func lenState(l uint32) uint32 {
	if l >= lenStates {
		l = lenStates - 1
	}
	return l
}

// Encode encodes the distance using the parameter l. Dist can have values from
// the full range of uint32 values. To get the distance offset the actual match
// distance has to be decreased by 1. A distance offset of 0xffffffff (eos)
// indicates the end of the stream.
func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) {
	// Compute the posSlot using nlz32
	var posSlot uint32
	var bits uint32
	if dist < startPosModel {
		posSlot = dist
	} else {
		bits = uint32(30 - nlz32(dist))
		posSlot = startPosModel - 2 + (bits << 1)
		posSlot += (dist >> uint(bits)) & 1
	}

	if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil {
		return
	}

	switch {
	case posSlot < startPosModel:
		return nil
	case posSlot < endPosModel:
		tc := &dc.posModel[posSlot-startPosModel]
		return tc.Encode(dist, e)
	}
	dic := directCodec(bits - alignBits)
	if err = dic.Encode(e, dist>>alignBits); err != nil {
		return
	}
	return dc.alignCodec.Encode(dist, e)
}

// Decode decodes the distance offset using the parameter l. The dist value
// 0xffffffff (eos) indicates the end of the stream. Add one to the distance
// offset to get the actual match distance.
func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) {
	posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d)
	if err != nil {
		return
	}

	// posSlot equals distance
	if posSlot < startPosModel {
		return posSlot, nil
	}

	// posSlot uses the individual models
	bits := (posSlot >> 1) - 1
	dist = (2 | (posSlot & 1)) << bits
	var u uint32
	if posSlot < endPosModel {
		tc := &dc.posModel[posSlot-startPosModel]
		if u, err = tc.Decode(d); err != nil {
			return 0, err
		}
		dist += u
		return dist, nil
	}

	// posSlots use direct encoding and a single model for the four align
	// bits.
	dic := directCodec(bits - alignBits)
	if u, err = dic.Decode(d); err != nil {
		return 0, err
	}
	dist += u << alignBits
	if u, err = dc.alignCodec.Decode(d); err != nil {
		return 0, err
	}
	dist += u
	return dist, nil
}
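For a quick sanity check of the slot computation in Encode above, the standalone snippet below reproduces it using math/bits in place of the package-internal nlz32 helper; that substitution and the sample distances are assumptions made for illustration only.

package main

import (
	"fmt"
	"math/bits"
)

// posSlot mirrors the slot computation in distCodec.Encode: distance
// offsets below startPosModel (4) are their own slot; otherwise the slot
// packs the bit length and the bit just below the leading one.
func posSlot(dist uint32) uint32 {
	const startPosModel = 4
	if dist < startPosModel {
		return dist
	}
	nlz := uint32(bits.LeadingZeros32(dist)) // stand-in for nlz32
	b := 30 - nlz
	return startPosModel - 2 + (b << 1) + ((dist >> b) & 1)
}

func main() {
	for _, d := range []uint32{0, 3, 4, 5, 100, 1 << 20} {
		fmt.Printf("dist offset %d -> slot %d\n", d, posSlot(d))
	}
}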
268
vendor/github.com/ulikunitz/xz/lzma/encoder.go
generated
vendored
@ -1,268 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// opLenMargin provides the upper limit of the number of bytes required
|
|
||||||
// to encode a single operation.
|
|
||||||
const opLenMargin = 16
|
|
||||||
|
|
||||||
// compressFlags control the compression process.
|
|
||||||
type compressFlags uint32
|
|
||||||
|
|
||||||
// Values for compressFlags.
|
|
||||||
const (
|
|
||||||
// all data should be compressed, even if compression is not
|
|
||||||
// optimal.
|
|
||||||
all compressFlags = 1 << iota
|
|
||||||
)
|
|
||||||
|
|
||||||
// encoderFlags provide the flags for an encoder.
|
|
||||||
type encoderFlags uint32
|
|
||||||
|
|
||||||
// Flags for the encoder.
|
|
||||||
const (
|
|
||||||
// eosMarker requests an EOS marker to be written.
|
|
||||||
eosMarker encoderFlags = 1 << iota
|
|
||||||
)
|
|
||||||
|
|
||||||
// Encoder compresses data buffered in the encoder dictionary and writes
|
|
||||||
// it into a byte writer.
|
|
||||||
type encoder struct {
|
|
||||||
dict *encoderDict
|
|
||||||
state *state
|
|
||||||
re *rangeEncoder
|
|
||||||
start int64
|
|
||||||
// generate eos marker
|
|
||||||
marker bool
|
|
||||||
limit bool
|
|
||||||
margin int
|
|
||||||
}
|
|
||||||
|
|
||||||
// newEncoder creates a new encoder. If the byte writer must be
|
|
||||||
// limited use LimitedByteWriter provided by this package. The flags
|
|
||||||
// argument supports the eosMarker flag, controlling whether a
|
|
||||||
// terminating end-of-stream marker must be written.
|
|
||||||
func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict,
|
|
||||||
flags encoderFlags) (e *encoder, err error) {
|
|
||||||
|
|
||||||
re, err := newRangeEncoder(bw)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
e = &encoder{
|
|
||||||
dict: dict,
|
|
||||||
state: state,
|
|
||||||
re: re,
|
|
||||||
marker: flags&eosMarker != 0,
|
|
||||||
start: dict.Pos(),
|
|
||||||
margin: opLenMargin,
|
|
||||||
}
|
|
||||||
if e.marker {
|
|
||||||
e.margin += 5
|
|
||||||
}
|
|
||||||
return e, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes the bytes from p into the dictionary. If not enough
|
|
||||||
// space is available the data in the dictionary buffer will be
|
|
||||||
// compressed to make additional space available. If the limit of the
|
|
||||||
// underlying writer has been reached ErrLimit will be returned.
|
|
||||||
func (e *encoder) Write(p []byte) (n int, err error) {
|
|
||||||
for {
|
|
||||||
k, err := e.dict.Write(p[n:])
|
|
||||||
n += k
|
|
||||||
if err == ErrNoSpace {
|
|
||||||
if err = e.compress(0); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reopen reopens the encoder with a new byte writer.
|
|
||||||
func (e *encoder) Reopen(bw io.ByteWriter) error {
|
|
||||||
var err error
|
|
||||||
if e.re, err = newRangeEncoder(bw); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
e.start = e.dict.Pos()
|
|
||||||
e.limit = false
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeLiteral writes a literal into the LZMA stream
|
|
||||||
func (e *encoder) writeLiteral(l lit) error {
|
|
||||||
var err error
|
|
||||||
state, state2, _ := e.state.states(e.dict.Pos())
|
|
||||||
if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos())
|
|
||||||
match := e.dict.ByteAt(int(e.state.rep[0]) + 1)
|
|
||||||
err = e.state.litCodec.Encode(e.re, l.b, state, match, litState)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
e.state.updateStateLiteral()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// iverson implements the Iverson operator as proposed by Donald Knuth in his
|
|
||||||
// book Concrete Mathematics.
|
|
||||||
func iverson(ok bool) uint32 {
|
|
||||||
if ok {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeMatch writes a repetition operation into the operation stream
|
|
||||||
func (e *encoder) writeMatch(m match) error {
|
|
||||||
var err error
|
|
||||||
if !(minDistance <= m.distance && m.distance <= maxDistance) {
|
|
||||||
panic(fmt.Errorf("match distance %d out of range", m.distance))
|
|
||||||
}
|
|
||||||
dist := uint32(m.distance - minDistance)
|
|
||||||
if !(minMatchLen <= m.n && m.n <= maxMatchLen) &&
|
|
||||||
!(dist == e.state.rep[0] && m.n == 1) {
|
|
||||||
panic(fmt.Errorf(
|
|
||||||
"match length %d out of range; dist %d rep[0] %d",
|
|
||||||
m.n, dist, e.state.rep[0]))
|
|
||||||
}
|
|
||||||
state, state2, posState := e.state.states(e.dict.Pos())
|
|
||||||
if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
g := 0
|
|
||||||
for ; g < 4; g++ {
|
|
||||||
if e.state.rep[g] == dist {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b := iverson(g < 4)
|
|
||||||
if err = e.state.isRep[state].Encode(e.re, b); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
n := uint32(m.n - minMatchLen)
|
|
||||||
if b == 0 {
|
|
||||||
// simple match
|
|
||||||
e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] =
|
|
||||||
e.state.rep[2], e.state.rep[1], e.state.rep[0], dist
|
|
||||||
e.state.updateStateMatch()
|
|
||||||
if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return e.state.distCodec.Encode(e.re, dist, n)
|
|
||||||
}
|
|
||||||
b = iverson(g != 0)
|
|
||||||
if err = e.state.isRepG0[state].Encode(e.re, b); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if b == 0 {
|
|
||||||
// g == 0
|
|
||||||
b = iverson(m.n != 1)
|
|
||||||
if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if b == 0 {
|
|
||||||
e.state.updateStateShortRep()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// g in {1,2,3}
|
|
||||||
b = iverson(g != 1)
|
|
||||||
if err = e.state.isRepG1[state].Encode(e.re, b); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if b == 1 {
|
|
||||||
// g in {2,3}
|
|
||||||
b = iverson(g != 2)
|
|
||||||
err = e.state.isRepG2[state].Encode(e.re, b)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if b == 1 {
|
|
||||||
e.state.rep[3] = e.state.rep[2]
|
|
||||||
}
|
|
||||||
e.state.rep[2] = e.state.rep[1]
|
|
||||||
}
|
|
||||||
e.state.rep[1] = e.state.rep[0]
|
|
||||||
e.state.rep[0] = dist
|
|
||||||
}
|
|
||||||
e.state.updateStateRep()
|
|
||||||
return e.state.repLenCodec.Encode(e.re, n, posState)
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeOp writes a single operation to the range encoder. The function
|
|
||||||
// checks whether there is enough space available to close the LZMA
|
|
||||||
// stream.
|
|
||||||
func (e *encoder) writeOp(op operation) error {
|
|
||||||
if e.re.Available() < int64(e.margin) {
|
|
||||||
return ErrLimit
|
|
||||||
}
|
|
||||||
switch x := op.(type) {
|
|
||||||
case lit:
|
|
||||||
return e.writeLiteral(x)
|
|
||||||
case match:
|
|
||||||
return e.writeMatch(x)
|
|
||||||
default:
|
|
||||||
panic("unexpected operation")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// compress compressed data from the dictionary buffer. If the flag all
|
|
||||||
// is set, all data in the dictionary buffer will be compressed. The
|
|
||||||
// function returns ErrLimit if the underlying writer has reached its
|
|
||||||
// limit.
|
|
||||||
func (e *encoder) compress(flags compressFlags) error {
|
|
||||||
n := 0
|
|
||||||
if flags&all == 0 {
|
|
||||||
n = maxMatchLen - 1
|
|
||||||
}
|
|
||||||
d := e.dict
|
|
||||||
m := d.m
|
|
||||||
for d.Buffered() > n {
|
|
||||||
op := m.NextOp(e.state.rep)
|
|
||||||
if err := e.writeOp(op); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
d.Discard(op.Len())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// eosMatch is a pseudo operation that indicates the end of the stream.
|
|
||||||
var eosMatch = match{distance: maxDistance, n: minMatchLen}
|
|
||||||
|
|
||||||
// Close terminates the LZMA stream. If requested the end-of-stream
|
|
||||||
// marker will be written. If the byte writer limit has been or will be
|
|
||||||
// reached during compression of the remaining data in the buffer the
|
|
||||||
// LZMA stream will be closed and data will remain in the buffer.
|
|
||||||
func (e *encoder) Close() error {
|
|
||||||
err := e.compress(all)
|
|
||||||
if err != nil && err != ErrLimit {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if e.marker {
|
|
||||||
if err := e.writeMatch(eosMatch); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = e.re.Close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compressed returns the number bytes of the input data that been
|
|
||||||
// compressed.
|
|
||||||
func (e *encoder) Compressed() int64 {
|
|
||||||
return e.dict.Pos() - e.start
|
|
||||||
}
|
|
149
vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
generated
vendored
@ -1,149 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// matcher is an interface that supports the identification of the next
|
|
||||||
// operation.
|
|
||||||
type matcher interface {
|
|
||||||
io.Writer
|
|
||||||
SetDict(d *encoderDict)
|
|
||||||
NextOp(rep [4]uint32) operation
|
|
||||||
}
|
|
||||||
|
|
||||||
// encoderDict provides the dictionary of the encoder. It includes an
|
|
||||||
// addtional buffer atop of the actual dictionary.
|
|
||||||
type encoderDict struct {
|
|
||||||
buf buffer
|
|
||||||
m matcher
|
|
||||||
head int64
|
|
||||||
capacity int
|
|
||||||
// preallocated array
|
|
||||||
data [maxMatchLen]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// newEncoderDict creates the encoder dictionary. The argument bufSize
|
|
||||||
// defines the size of the additional buffer.
|
|
||||||
func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
|
|
||||||
if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
|
|
||||||
return nil, errors.New(
|
|
||||||
"lzma: dictionary capacity out of range")
|
|
||||||
}
|
|
||||||
if bufSize < 1 {
|
|
||||||
return nil, errors.New(
|
|
||||||
"lzma: buffer size must be larger than zero")
|
|
||||||
}
|
|
||||||
d = &encoderDict{
|
|
||||||
buf: *newBuffer(dictCap + bufSize),
|
|
||||||
capacity: dictCap,
|
|
||||||
m: m,
|
|
||||||
}
|
|
||||||
m.SetDict(d)
|
|
||||||
return d, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Discard discards n bytes. Note that n must not be larger than
|
|
||||||
// MaxMatchLen.
|
|
||||||
func (d *encoderDict) Discard(n int) {
|
|
||||||
p := d.data[:n]
|
|
||||||
k, _ := d.buf.Read(p)
|
|
||||||
if k < n {
|
|
||||||
panic(fmt.Errorf("lzma: can't discard %d bytes", n))
|
|
||||||
}
|
|
||||||
d.head += int64(n)
|
|
||||||
d.m.Write(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the data available in the encoder dictionary.
|
|
||||||
func (d *encoderDict) Len() int {
|
|
||||||
n := d.buf.Available()
|
|
||||||
if int64(n) > d.head {
|
|
||||||
return int(d.head)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// DictLen returns the actual length of data in the dictionary.
|
|
||||||
func (d *encoderDict) DictLen() int {
|
|
||||||
if d.head < int64(d.capacity) {
|
|
||||||
return int(d.head)
|
|
||||||
}
|
|
||||||
return d.capacity
|
|
||||||
}
|
|
||||||
|
|
||||||
// Available returns the number of bytes that can be written by a
|
|
||||||
// following Write call.
|
|
||||||
func (d *encoderDict) Available() int {
|
|
||||||
return d.buf.Available() - d.DictLen()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes data into the dictionary buffer. Note that the position
|
|
||||||
// of the dictionary head will not be moved. If there is not enough
|
|
||||||
// space in the buffer ErrNoSpace will be returned.
|
|
||||||
func (d *encoderDict) Write(p []byte) (n int, err error) {
|
|
||||||
m := d.Available()
|
|
||||||
if len(p) > m {
|
|
||||||
p = p[:m]
|
|
||||||
err = ErrNoSpace
|
|
||||||
}
|
|
||||||
var e error
|
|
||||||
if n, e = d.buf.Write(p); e != nil {
|
|
||||||
err = e
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pos returns the position of the head.
|
|
||||||
func (d *encoderDict) Pos() int64 { return d.head }
|
|
||||||
|
|
||||||
// ByteAt returns the byte at the given distance.
|
|
||||||
func (d *encoderDict) ByteAt(distance int) byte {
|
|
||||||
if !(0 < distance && distance <= d.Len()) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
i := d.buf.rear - distance
|
|
||||||
if i < 0 {
|
|
||||||
i += len(d.buf.data)
|
|
||||||
}
|
|
||||||
return d.buf.data[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyN copies the last n bytes from the dictionary into the provided
|
|
||||||
// writer. This is used for copying uncompressed data into an
|
|
||||||
// uncompressed segment.
|
|
||||||
func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) {
|
|
||||||
if n <= 0 {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
m := d.Len()
|
|
||||||
if n > m {
|
|
||||||
n = m
|
|
||||||
err = ErrNoSpace
|
|
||||||
}
|
|
||||||
i := d.buf.rear - n
|
|
||||||
var e error
|
|
||||||
if i < 0 {
|
|
||||||
i += len(d.buf.data)
|
|
||||||
if written, e = w.Write(d.buf.data[i:]); e != nil {
|
|
||||||
return written, e
|
|
||||||
}
|
|
||||||
i = 0
|
|
||||||
}
|
|
||||||
var k int
|
|
||||||
k, e = w.Write(d.buf.data[i:d.buf.rear])
|
|
||||||
written += k
|
|
||||||
if e != nil {
|
|
||||||
err = e
|
|
||||||
}
|
|
||||||
return written, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Buffered returns the number of bytes in the buffer.
|
|
||||||
func (d *encoderDict) Buffered() int { return d.buf.Buffered() }
|
|
BIN
vendor/github.com/ulikunitz/xz/lzma/fox.lzma
generated
vendored
Binary file not shown.
309
vendor/github.com/ulikunitz/xz/lzma/hashtable.go
generated
vendored
@ -1,309 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/ulikunitz/xz/internal/hash"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* For compression we need to find byte sequences that match the byte
|
|
||||||
* sequence at the dictionary head. A hash table is a simple method to
|
|
||||||
* provide this capability.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// maxMatches limits the number of matches requested from the Matches
|
|
||||||
// function. This controls the speed of the overall encoding.
|
|
||||||
const maxMatches = 16
|
|
||||||
|
|
||||||
// shortDists defines the number of short distances supported by the
|
|
||||||
// implementation.
|
|
||||||
const shortDists = 8
|
|
||||||
|
|
||||||
// The minimum is somehow arbitrary but the maximum is limited by the
|
|
||||||
// memory requirements of the hash table.
|
|
||||||
const (
|
|
||||||
minTableExponent = 9
|
|
||||||
maxTableExponent = 20
|
|
||||||
)
|
|
||||||
|
|
||||||
// newRoller contains the function used to create an instance of the
|
|
||||||
// hash.Roller.
|
|
||||||
var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) }
|
|
||||||
|
|
||||||
// hashTable stores the hash table including the rolling hash method.
|
|
||||||
//
|
|
||||||
// We implement chained hashing into a circular buffer. Each entry in
|
|
||||||
// the circular buffer stores the delta distance to the next position with a
|
|
||||||
// word that has the same hash value.
|
|
||||||
type hashTable struct {
|
|
||||||
dict *encoderDict
|
|
||||||
// actual hash table
|
|
||||||
t []int64
|
|
||||||
// circular list data with the offset to the next word
|
|
||||||
data []uint32
|
|
||||||
front int
|
|
||||||
// mask for computing the index for the hash table
|
|
||||||
mask uint64
|
|
||||||
// hash offset; initial value is -int64(wordLen)
|
|
||||||
hoff int64
|
|
||||||
// length of the hashed word
|
|
||||||
wordLen int
|
|
||||||
// hash roller for computing the hash values for the Write
|
|
||||||
// method
|
|
||||||
wr hash.Roller
|
|
||||||
// hash roller for computing arbitrary hashes
|
|
||||||
hr hash.Roller
|
|
||||||
// preallocated slices
|
|
||||||
p [maxMatches]int64
|
|
||||||
distances [maxMatches + shortDists]int
|
|
||||||
}
|
|
||||||
|
|
||||||
// hashTableExponent derives the hash table exponent from the dictionary
|
|
||||||
// capacity.
|
|
||||||
func hashTableExponent(n uint32) int {
|
|
||||||
e := 30 - nlz32(n)
|
|
||||||
switch {
|
|
||||||
case e < minTableExponent:
|
|
||||||
e = minTableExponent
|
|
||||||
case e > maxTableExponent:
|
|
||||||
e = maxTableExponent
|
|
||||||
}
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// newHashTable creates a new hash table for words of length wordLen
|
|
||||||
func newHashTable(capacity int, wordLen int) (t *hashTable, err error) {
|
|
||||||
if !(0 < capacity) {
|
|
||||||
return nil, errors.New(
|
|
||||||
"newHashTable: capacity must not be negative")
|
|
||||||
}
|
|
||||||
exp := hashTableExponent(uint32(capacity))
|
|
||||||
if !(1 <= wordLen && wordLen <= 4) {
|
|
||||||
return nil, errors.New("newHashTable: " +
|
|
||||||
"argument wordLen out of range")
|
|
||||||
}
|
|
||||||
n := 1 << uint(exp)
|
|
||||||
if n <= 0 {
|
|
||||||
panic("newHashTable: exponent is too large")
|
|
||||||
}
|
|
||||||
t = &hashTable{
|
|
||||||
t: make([]int64, n),
|
|
||||||
data: make([]uint32, capacity),
|
|
||||||
mask: (uint64(1) << uint(exp)) - 1,
|
|
||||||
hoff: -int64(wordLen),
|
|
||||||
wordLen: wordLen,
|
|
||||||
wr: newRoller(wordLen),
|
|
||||||
hr: newRoller(wordLen),
|
|
||||||
}
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *hashTable) SetDict(d *encoderDict) { t.dict = d }
|
|
||||||
|
|
||||||
// buffered returns the number of bytes that are currently hashed.
|
|
||||||
func (t *hashTable) buffered() int {
|
|
||||||
n := t.hoff + 1
|
|
||||||
switch {
|
|
||||||
case n <= 0:
|
|
||||||
return 0
|
|
||||||
case n >= int64(len(t.data)):
|
|
||||||
return len(t.data)
|
|
||||||
}
|
|
||||||
return int(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// addIndex adds n to an index ensuring that is stays inside the
|
|
||||||
// circular buffer for the hash chain.
|
|
||||||
func (t *hashTable) addIndex(i, n int) int {
|
|
||||||
i += n - len(t.data)
|
|
||||||
if i < 0 {
|
|
||||||
i += len(t.data)
|
|
||||||
}
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
|
|
||||||
// putDelta puts the delta instance at the current front of the circular
|
|
||||||
// chain buffer.
|
|
||||||
func (t *hashTable) putDelta(delta uint32) {
|
|
||||||
t.data[t.front] = delta
|
|
||||||
t.front = t.addIndex(t.front, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// putEntry puts a new entry into the hash table. If there is already a
|
|
||||||
// value stored it is moved into the circular chain buffer.
|
|
||||||
func (t *hashTable) putEntry(h uint64, pos int64) {
|
|
||||||
if pos < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
i := h & t.mask
|
|
||||||
old := t.t[i] - 1
|
|
||||||
t.t[i] = pos + 1
|
|
||||||
var delta int64
|
|
||||||
if old >= 0 {
|
|
||||||
delta = pos - old
|
|
||||||
if delta > 1<<32-1 || delta > int64(t.buffered()) {
|
|
||||||
delta = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.putDelta(uint32(delta))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteByte converts a single byte into a hash and puts them into the hash
|
|
||||||
// table.
|
|
||||||
func (t *hashTable) WriteByte(b byte) error {
|
|
||||||
h := t.wr.RollByte(b)
|
|
||||||
t.hoff++
|
|
||||||
t.putEntry(h, t.hoff)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write converts the bytes provided into hash tables and stores the
|
|
||||||
// abbreviated offsets into the hash table. The method will never return an
|
|
||||||
// error.
|
|
||||||
func (t *hashTable) Write(p []byte) (n int, err error) {
|
|
||||||
for _, b := range p {
|
|
||||||
// WriteByte doesn't generate an error.
|
|
||||||
t.WriteByte(b)
|
|
||||||
}
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getMatches the matches for a specific hash. The functions returns the
|
|
||||||
// number of positions found.
|
|
||||||
//
|
|
||||||
// TODO: Make a getDistances because that we are actually interested in.
|
|
||||||
func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
|
|
||||||
if t.hoff < 0 || len(positions) == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
buffered := t.buffered()
|
|
||||||
tailPos := t.hoff + 1 - int64(buffered)
|
|
||||||
rear := t.front - buffered
|
|
||||||
if rear >= 0 {
|
|
||||||
rear -= len(t.data)
|
|
||||||
}
|
|
||||||
// get the slot for the hash
|
|
||||||
pos := t.t[h&t.mask] - 1
|
|
||||||
delta := pos - tailPos
|
|
||||||
for {
|
|
||||||
if delta < 0 {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
positions[n] = tailPos + delta
|
|
||||||
n++
|
|
||||||
if n >= len(positions) {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
i := rear + int(delta)
|
|
||||||
if i < 0 {
|
|
||||||
i += len(t.data)
|
|
||||||
}
|
|
||||||
u := t.data[i]
|
|
||||||
if u == 0 {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
delta -= int64(u)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// hash computes the rolling hash for the word stored in p. For correct
|
|
||||||
// results its length must be equal to t.wordLen.
|
|
||||||
func (t *hashTable) hash(p []byte) uint64 {
|
|
||||||
var h uint64
|
|
||||||
for _, b := range p {
|
|
||||||
h = t.hr.RollByte(b)
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// Matches fills the positions slice with potential matches. The
|
|
||||||
// functions returns the number of positions filled into positions. The
|
|
||||||
// byte slice p must have word length of the hash table.
|
|
||||||
func (t *hashTable) Matches(p []byte, positions []int64) int {
|
|
||||||
if len(p) != t.wordLen {
|
|
||||||
panic(fmt.Errorf(
|
|
||||||
"byte slice must have length %d", t.wordLen))
|
|
||||||
}
|
|
||||||
h := t.hash(p)
|
|
||||||
return t.getMatches(h, positions)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextOp identifies the next operation using the hash table.
|
|
||||||
//
|
|
||||||
// TODO: Use all repetitions to find matches.
|
|
||||||
func (t *hashTable) NextOp(rep [4]uint32) operation {
|
|
||||||
// get positions
|
|
||||||
data := t.dict.data[:maxMatchLen]
|
|
||||||
n, _ := t.dict.buf.Peek(data)
|
|
||||||
data = data[:n]
|
|
||||||
var p []int64
|
|
||||||
if n < t.wordLen {
|
|
||||||
p = t.p[:0]
|
|
||||||
} else {
|
|
||||||
p = t.p[:maxMatches]
|
|
||||||
n = t.Matches(data[:t.wordLen], p)
|
|
||||||
p = p[:n]
|
|
||||||
}
|
|
||||||
|
|
||||||
// convert positions in potential distances
|
|
||||||
head := t.dict.head
|
|
||||||
dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
|
|
||||||
for _, pos := range p {
|
|
||||||
dis := int(head - pos)
|
|
||||||
if dis > shortDists {
|
|
||||||
dists = append(dists, dis)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// check distances
|
|
||||||
var m match
|
|
||||||
dictLen := t.dict.DictLen()
|
|
||||||
for _, dist := range dists {
|
|
||||||
if dist > dictLen {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Here comes a trick. We are only interested in matches
|
|
||||||
// that are longer than the matches we have been found
|
|
||||||
// before. So before we test the whole byte sequence at
|
|
||||||
// the given distance, we test the first byte that would
|
|
||||||
// make the match longer. If it doesn't match the byte
|
|
||||||
// to match, we don't to care any longer.
|
|
||||||
i := t.dict.buf.rear - dist + m.n
|
|
||||||
if i < 0 {
|
|
||||||
i += len(t.dict.buf.data)
|
|
||||||
}
|
|
||||||
if t.dict.buf.data[i] != data[m.n] {
|
|
||||||
// We can't get a longer match. Jump to the next
|
|
||||||
// distance.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
n := t.dict.buf.matchLen(dist, data)
|
|
||||||
switch n {
|
|
||||||
case 0:
|
|
||||||
continue
|
|
||||||
case 1:
|
|
||||||
if uint32(dist-minDistance) != rep[0] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if n > m.n {
|
|
||||||
m = match{int64(dist), n}
|
|
||||||
if n == len(data) {
|
|
||||||
// No better match will be found.
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.n == 0 {
|
|
||||||
return lit{data[0]}
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
167
vendor/github.com/ulikunitz/xz/lzma/header.go
generated
vendored
@ -1,167 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// uint32LE reads an uint32 integer from a byte slice
|
|
||||||
func uint32LE(b []byte) uint32 {
|
|
||||||
x := uint32(b[3]) << 24
|
|
||||||
x |= uint32(b[2]) << 16
|
|
||||||
x |= uint32(b[1]) << 8
|
|
||||||
x |= uint32(b[0])
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
// uint64LE converts the uint64 value stored as little endian to an uint64
|
|
||||||
// value.
|
|
||||||
func uint64LE(b []byte) uint64 {
|
|
||||||
x := uint64(b[7]) << 56
|
|
||||||
x |= uint64(b[6]) << 48
|
|
||||||
x |= uint64(b[5]) << 40
|
|
||||||
x |= uint64(b[4]) << 32
|
|
||||||
x |= uint64(b[3]) << 24
|
|
||||||
x |= uint64(b[2]) << 16
|
|
||||||
x |= uint64(b[1]) << 8
|
|
||||||
x |= uint64(b[0])
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUint32LE puts an uint32 integer into a byte slice that must have at least
|
|
||||||
// a length of 4 bytes.
|
|
||||||
func putUint32LE(b []byte, x uint32) {
|
|
||||||
b[0] = byte(x)
|
|
||||||
b[1] = byte(x >> 8)
|
|
||||||
b[2] = byte(x >> 16)
|
|
||||||
b[3] = byte(x >> 24)
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUint64LE puts the uint64 value into the byte slice as little endian
|
|
||||||
// value. The byte slice b must have at least place for 8 bytes.
|
|
||||||
func putUint64LE(b []byte, x uint64) {
|
|
||||||
b[0] = byte(x)
|
|
||||||
b[1] = byte(x >> 8)
|
|
||||||
b[2] = byte(x >> 16)
|
|
||||||
b[3] = byte(x >> 24)
|
|
||||||
b[4] = byte(x >> 32)
|
|
||||||
b[5] = byte(x >> 40)
|
|
||||||
b[6] = byte(x >> 48)
|
|
||||||
b[7] = byte(x >> 56)
|
|
||||||
}
|
|
||||||
|
|
||||||
// noHeaderSize defines the value of the length field in the LZMA header.
|
|
||||||
const noHeaderSize uint64 = 1<<64 - 1
|
|
||||||
|
|
||||||
// HeaderLen provides the length of the LZMA file header.
|
|
||||||
const HeaderLen = 13
|
|
||||||
|
|
||||||
// header represents the header of an LZMA file.
|
|
||||||
type header struct {
|
|
||||||
properties Properties
|
|
||||||
dictCap int
|
|
||||||
// uncompressed size; negative value if no size is given
|
|
||||||
size int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// marshalBinary marshals the header.
|
|
||||||
func (h *header) marshalBinary() (data []byte, err error) {
|
|
||||||
if err = h.properties.verify(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
|
|
||||||
return nil, fmt.Errorf("lzma: DictCap %d out of range",
|
|
||||||
h.dictCap)
|
|
||||||
}
|
|
||||||
|
|
||||||
data = make([]byte, 13)
|
|
||||||
|
|
||||||
// property byte
|
|
||||||
data[0] = h.properties.Code()
|
|
||||||
|
|
||||||
// dictionary capacity
|
|
||||||
putUint32LE(data[1:5], uint32(h.dictCap))
|
|
||||||
|
|
||||||
// uncompressed size
|
|
||||||
var s uint64
|
|
||||||
if h.size > 0 {
|
|
||||||
s = uint64(h.size)
|
|
||||||
} else {
|
|
||||||
s = noHeaderSize
|
|
||||||
}
|
|
||||||
putUint64LE(data[5:], s)
|
|
||||||
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// unmarshalBinary unmarshals the header.
|
|
||||||
func (h *header) unmarshalBinary(data []byte) error {
|
|
||||||
if len(data) != HeaderLen {
|
|
||||||
return errors.New("lzma.unmarshalBinary: data has wrong length")
|
|
||||||
}
|
|
||||||
|
|
||||||
// properties
|
|
||||||
var err error
|
|
||||||
if h.properties, err = PropertiesForCode(data[0]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// dictionary capacity
|
|
||||||
h.dictCap = int(uint32LE(data[1:]))
|
|
||||||
if h.dictCap < 0 {
|
|
||||||
return errors.New(
|
|
||||||
"LZMA header: dictionary capacity exceeds maximum " +
|
|
||||||
"integer")
|
|
||||||
}
|
|
||||||
|
|
||||||
// uncompressed size
|
|
||||||
s := uint64LE(data[5:])
|
|
||||||
if s == noHeaderSize {
|
|
||||||
h.size = -1
|
|
||||||
} else {
|
|
||||||
h.size = int64(s)
|
|
||||||
if h.size < 0 {
|
|
||||||
return errors.New(
|
|
||||||
"LZMA header: uncompressed size " +
|
|
||||||
"out of int64 range")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validDictCap checks whether the dictionary capacity is correct. This
|
|
||||||
// is used to weed out wrong file headers.
|
|
||||||
func validDictCap(dictcap int) bool {
|
|
||||||
if int64(dictcap) == MaxDictCap {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
for n := uint(10); n < 32; n++ {
|
|
||||||
if dictcap == 1<<n {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if dictcap == 1<<n+1<<(n-1) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidHeader checks for a valid LZMA file header. It allows only
|
|
||||||
// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
|
|
||||||
// there is an explicit size it must not exceed 256 GiB. The length of
|
|
||||||
// the data argument must be HeaderLen.
|
|
||||||
func ValidHeader(data []byte) bool {
|
|
||||||
var h header
|
|
||||||
if err := h.unmarshalBinary(data); err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !validDictCap(h.dictCap) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return h.size < 0 || h.size <= 1<<38
|
|
||||||
}
|
|
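As an illustration of the 13-byte classic LZMA header that marshalBinary above produces (one properties byte, little-endian 32-bit dictionary capacity, little-endian 64-bit uncompressed size, all ones when the size is unknown), here is a standalone sketch using encoding/binary; the property code and dictionary capacity are example choices, not values taken from this diff.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const (
		propsCode = 0x5d    // lc=3, lp=0, pb=2, a common default
		dictCap   = 1 << 23 // 8 MiB dictionary, example value
	)
	hdr := make([]byte, 13)
	hdr[0] = propsCode
	binary.LittleEndian.PutUint32(hdr[1:5], dictCap)
	binary.LittleEndian.PutUint64(hdr[5:], ^uint64(0)) // size unknown
	fmt.Printf("% x\n", hdr)
}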
398
vendor/github.com/ulikunitz/xz/lzma/header2.go
generated
vendored
@ -1,398 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// maximum size of compressed data in a chunk
|
|
||||||
maxCompressed = 1 << 16
|
|
||||||
// maximum size of uncompressed data in a chunk
|
|
||||||
maxUncompressed = 1 << 21
|
|
||||||
)
|
|
||||||
|
|
||||||
// chunkType represents the type of an LZMA2 chunk. Note that this
|
|
||||||
// value is an internal representation and no actual encoding of a LZMA2
|
|
||||||
// chunk header.
|
|
||||||
type chunkType byte
|
|
||||||
|
|
||||||
// Possible values for the chunk type.
|
|
||||||
const (
|
|
||||||
// end of stream
|
|
||||||
cEOS chunkType = iota
|
|
||||||
// uncompressed; reset dictionary
|
|
||||||
cUD
|
|
||||||
// uncompressed; no reset of dictionary
|
|
||||||
cU
|
|
||||||
// LZMA compressed; no reset
|
|
||||||
cL
|
|
||||||
// LZMA compressed; reset state
|
|
||||||
cLR
|
|
||||||
// LZMA compressed; reset state; new property value
|
|
||||||
cLRN
|
|
||||||
// LZMA compressed; reset state; new property value; reset dictionary
|
|
||||||
cLRND
|
|
||||||
)
|
|
||||||
|
|
||||||
// chunkTypeStrings provide a string representation for the chunk types.
|
|
||||||
var chunkTypeStrings = [...]string{
|
|
||||||
cEOS: "EOS",
|
|
||||||
cU: "U",
|
|
||||||
cUD: "UD",
|
|
||||||
cL: "L",
|
|
||||||
cLR: "LR",
|
|
||||||
cLRN: "LRN",
|
|
||||||
cLRND: "LRND",
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string representation of the chunk type.
|
|
||||||
func (c chunkType) String() string {
|
|
||||||
if !(cEOS <= c && c <= cLRND) {
|
|
||||||
return "unknown"
|
|
||||||
}
|
|
||||||
return chunkTypeStrings[c]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Actual encodings for the chunk types in the value. Note that the high
|
|
||||||
// uncompressed size bits are stored in the header byte additionally.
|
|
||||||
const (
|
|
||||||
hEOS = 0
|
|
||||||
hUD = 1
|
|
||||||
hU = 2
|
|
||||||
hL = 1 << 7
|
|
||||||
hLR = 1<<7 | 1<<5
|
|
||||||
hLRN = 1<<7 | 1<<6
|
|
||||||
hLRND = 1<<7 | 1<<6 | 1<<5
|
|
||||||
)
|
|
||||||
|
|
||||||
// errHeaderByte indicates an unsupported value for the chunk header
|
|
||||||
// byte. These bytes starts the variable-length chunk header.
|
|
||||||
var errHeaderByte = errors.New("lzma: unsupported chunk header byte")
|
|
||||||
|
|
||||||
// headerChunkType converts the header byte into a chunk type. It
|
|
||||||
// ignores the uncompressed size bits in the chunk header byte.
|
|
||||||
func headerChunkType(h byte) (c chunkType, err error) {
|
|
||||||
if h&hL == 0 {
|
|
||||||
// no compression
|
|
||||||
switch h {
|
|
||||||
case hEOS:
|
|
||||||
c = cEOS
|
|
||||||
case hUD:
|
|
||||||
c = cUD
|
|
||||||
case hU:
|
|
||||||
c = cU
|
|
||||||
default:
|
|
||||||
return 0, errHeaderByte
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch h & hLRND {
|
|
||||||
case hL:
|
|
||||||
c = cL
|
|
||||||
case hLR:
|
|
||||||
c = cLR
|
|
||||||
case hLRN:
|
|
||||||
c = cLRN
|
|
||||||
case hLRND:
|
|
||||||
c = cLRND
|
|
||||||
default:
|
|
||||||
return 0, errHeaderByte
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// uncompressedHeaderLen provides the length of an uncompressed header
|
|
||||||
const uncompressedHeaderLen = 3
|
|
||||||
|
|
||||||
// headerLen returns the length of the LZMA2 header for a given chunk
|
|
||||||
// type.
|
|
||||||
func headerLen(c chunkType) int {
|
|
||||||
switch c {
|
|
||||||
case cEOS:
|
|
||||||
return 1
|
|
||||||
case cU, cUD:
|
|
||||||
return uncompressedHeaderLen
|
|
||||||
case cL, cLR:
|
|
||||||
return 5
|
|
||||||
case cLRN, cLRND:
|
|
||||||
return 6
|
|
||||||
}
|
|
||||||
panic(fmt.Errorf("unsupported chunk type %d", c))
|
|
||||||
}
|
|
||||||
|
|
||||||
// chunkHeader represents the contents of a chunk header.
|
|
||||||
type chunkHeader struct {
|
|
||||||
ctype chunkType
|
|
||||||
uncompressed uint32
|
|
||||||
compressed uint16
|
|
||||||
props Properties
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string representation of the chunk header.
|
|
||||||
func (h *chunkHeader) String() string {
|
|
||||||
return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed,
|
|
||||||
h.compressed, &h.props)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary reads the content of the chunk header from the data
|
|
||||||
// slice. The slice must have the correct length.
|
|
||||||
func (h *chunkHeader) UnmarshalBinary(data []byte) error {
|
|
||||||
if len(data) == 0 {
|
|
||||||
return errors.New("no data")
|
|
||||||
}
|
|
||||||
c, err := headerChunkType(data[0])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
n := headerLen(c)
|
|
||||||
if len(data) < n {
|
|
||||||
return errors.New("incomplete data")
|
|
||||||
}
|
|
||||||
if len(data) > n {
|
|
||||||
return errors.New("invalid data length")
|
|
||||||
}
|
|
||||||
|
|
||||||
*h = chunkHeader{ctype: c}
|
|
||||||
if c == cEOS {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
h.uncompressed = uint32(uint16BE(data[1:3]))
|
|
||||||
if c <= cU {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
h.uncompressed |= uint32(data[0]&^hLRND) << 16
|
|
||||||
|
|
||||||
h.compressed = uint16BE(data[3:5])
|
|
||||||
if c <= cLR {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
h.props, err = PropertiesForCode(data[5])
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalBinary encodes the chunk header value. The function checks
|
|
||||||
// whether the content of the chunk header is correct.
|
|
||||||
func (h *chunkHeader) MarshalBinary() (data []byte, err error) {
|
|
||||||
if h.ctype > cLRND {
|
|
||||||
return nil, errors.New("invalid chunk type")
|
|
||||||
}
|
|
||||||
if err = h.props.verify(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
data = make([]byte, headerLen(h.ctype))
|
|
||||||
|
|
||||||
switch h.ctype {
|
|
||||||
case cEOS:
|
|
||||||
return data, nil
|
|
||||||
case cUD:
|
|
||||||
data[0] = hUD
|
|
||||||
case cU:
|
|
||||||
data[0] = hU
|
|
||||||
case cL:
|
|
||||||
data[0] = hL
|
|
||||||
case cLR:
|
|
||||||
data[0] = hLR
|
|
||||||
case cLRN:
|
|
||||||
data[0] = hLRN
|
|
||||||
case cLRND:
|
|
||||||
data[0] = hLRND
|
|
||||||
}
|
|
||||||
|
|
||||||
putUint16BE(data[1:3], uint16(h.uncompressed))
|
|
||||||
if h.ctype <= cU {
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
data[0] |= byte(h.uncompressed>>16) &^ hLRND
|
|
||||||
|
|
||||||
putUint16BE(data[3:5], h.compressed)
|
|
||||||
if h.ctype <= cLR {
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
data[5] = h.props.Code()
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// readChunkHeader reads the chunk header from the IO reader.
|
|
||||||
func readChunkHeader(r io.Reader) (h *chunkHeader, err error) {
|
|
||||||
p := make([]byte, 1, 6)
|
|
||||||
if _, err = io.ReadFull(r, p); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c, err := headerChunkType(p[0])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p = p[:headerLen(c)]
|
|
||||||
if _, err = io.ReadFull(r, p[1:]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h = new(chunkHeader)
|
|
||||||
if err = h.UnmarshalBinary(p); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return h, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// uint16BE converts a big-endian uint16 representation to an uint16
|
|
||||||
// value.
|
|
||||||
func uint16BE(p []byte) uint16 {
|
|
||||||
return uint16(p[0])<<8 | uint16(p[1])
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUint16BE puts the big-endian uint16 presentation into the given
|
|
||||||
// slice.
|
|
||||||
func putUint16BE(p []byte, x uint16) {
|
|
||||||
p[0] = byte(x >> 8)
|
|
||||||
p[1] = byte(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// chunkState is used to manage the state of the chunks
|
|
||||||
type chunkState byte
|
|
||||||
|
|
||||||
// start and stop define the initial and terminating state of the chunk
|
|
||||||
// state
|
|
||||||
const (
|
|
||||||
start chunkState = 'S'
|
|
||||||
stop = 'T'
|
|
||||||
)
|
|
||||||
|
|
||||||
// errors for the chunk state handling
|
|
||||||
var (
|
|
||||||
errChunkType = errors.New("lzma: unexpected chunk type")
|
|
||||||
errState = errors.New("lzma: wrong chunk state")
|
|
||||||
)
|
|
||||||
|
|
||||||
// next transitions state based on chunk type input
|
|
||||||
func (c *chunkState) next(ctype chunkType) error {
|
|
||||||
switch *c {
|
|
||||||
// start state
|
|
||||||
case 'S':
|
|
||||||
switch ctype {
|
|
||||||
case cEOS:
|
|
||||||
*c = 'T'
|
|
||||||
case cUD:
|
|
||||||
*c = 'R'
|
|
||||||
case cLRND:
|
|
||||||
*c = 'L'
|
|
||||||
default:
|
|
||||||
return errChunkType
|
|
||||||
}
|
|
||||||
// normal LZMA mode
|
|
||||||
case 'L':
|
|
||||||
switch ctype {
|
|
||||||
case cEOS:
|
|
||||||
*c = 'T'
|
|
||||||
case cUD:
|
|
||||||
*c = 'R'
|
|
||||||
case cU:
|
|
||||||
*c = 'U'
|
|
||||||
case cL, cLR, cLRN, cLRND:
|
|
||||||
break
|
|
||||||
default:
|
|
||||||
return errChunkType
|
|
||||||
}
|
|
||||||
// reset required
|
|
||||||
case 'R':
|
|
||||||
switch ctype {
|
|
||||||
case cEOS:
|
|
||||||
*c = 'T'
|
|
||||||
case cUD, cU:
|
|
||||||
break
|
|
||||||
case cLRN, cLRND:
|
|
||||||
*c = 'L'
|
|
||||||
default:
|
|
||||||
return errChunkType
|
|
||||||
}
|
|
||||||
// uncompressed
|
|
||||||
case 'U':
|
|
||||||
switch ctype {
|
|
||||||
case cEOS:
|
|
||||||
*c = 'T'
|
|
||||||
case cUD:
|
|
||||||
*c = 'R'
|
|
||||||
case cU:
|
|
||||||
break
|
|
||||||
case cL, cLR, cLRN, cLRND:
|
|
||||||
*c = 'L'
|
|
||||||
default:
|
|
||||||
return errChunkType
|
|
||||||
}
|
|
||||||
// terminal state
|
|
||||||
case 'T':
|
|
||||||
return errChunkType
|
|
||||||
default:
|
|
||||||
return errState
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// defaultChunkType returns the default chunk type for each chunk state.
|
|
||||||
func (c chunkState) defaultChunkType() chunkType {
|
|
||||||
switch c {
|
|
||||||
case 'S':
|
|
||||||
return cLRND
|
|
||||||
case 'L', 'U':
|
|
||||||
return cL
|
|
||||||
case 'R':
|
|
||||||
return cLRN
|
|
||||||
default:
|
|
||||||
// no error
|
|
||||||
return cEOS
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// maxDictCap defines the maximum dictionary capacity supported by the
|
|
||||||
// LZMA2 dictionary capacity encoding.
|
|
||||||
const maxDictCap = 1<<32 - 1
|
|
||||||
|
|
||||||
// maxDictCapCode defines the maximum dictionary capacity code.
|
|
||||||
const maxDictCapCode = 40
|
|
||||||
|
|
||||||
// The function decodes the dictionary capacity byte, but doesn't change
|
|
||||||
// for the correct range of the given byte.
|
|
||||||
func decodeDictCap(c byte) int64 {
|
|
||||||
return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeDictCap decodes the encoded dictionary capacity. The function
|
|
||||||
// returns an error if the code is out of range.
|
|
||||||
func DecodeDictCap(c byte) (n int64, err error) {
|
|
||||||
if c >= maxDictCapCode {
|
|
||||||
if c == maxDictCapCode {
|
|
||||||
return maxDictCap, nil
|
|
||||||
}
|
|
||||||
return 0, errors.New("lzma: invalid dictionary size code")
|
|
||||||
}
|
|
||||||
return decodeDictCap(c), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeDictCap encodes a dictionary capacity. The function returns the
|
|
||||||
// code for the capacity that is greater or equal n. If n exceeds the
|
|
||||||
// maximum support dictionary capacity, the maximum value is returned.
|
|
||||||
func EncodeDictCap(n int64) byte {
|
|
||||||
a, b := byte(0), byte(40)
|
|
||||||
for a < b {
|
|
||||||
c := a + (b-a)>>1
|
|
||||||
m := decodeDictCap(c)
|
|
||||||
if n <= m {
|
|
||||||
if n == m {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
b = c
|
|
||||||
} else {
|
|
||||||
a = c + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return a
|
|
||||||
}
|
|
129
vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
generated
vendored
129
vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
generated
vendored
@ -1,129 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import "errors"
|
|
||||||
|
|
||||||
// maxPosBits defines the number of bits of the position value that are used to
|
|
||||||
// to compute the posState value. The value is used to select the tree codec
|
|
||||||
// for length encoding and decoding.
|
|
||||||
const maxPosBits = 4
|
|
||||||
|
|
||||||
// minMatchLen and maxMatchLen give the minimum and maximum values for
|
|
||||||
// encoding and decoding length values. minMatchLen is also used as base
|
|
||||||
// for the encoded length values.
|
|
||||||
const (
|
|
||||||
minMatchLen = 2
|
|
||||||
maxMatchLen = minMatchLen + 16 + 256 - 1
|
|
||||||
)
|
|
||||||
|
|
||||||
// lengthCodec support the encoding of the length value.
|
|
||||||
type lengthCodec struct {
|
|
||||||
choice [2]prob
|
|
||||||
low [1 << maxPosBits]treeCodec
|
|
||||||
mid [1 << maxPosBits]treeCodec
|
|
||||||
high treeCodec
|
|
||||||
}
|
|
||||||
|
|
||||||
// deepcopy initializes the lc value as deep copy of the source value.
|
|
||||||
func (lc *lengthCodec) deepcopy(src *lengthCodec) {
|
|
||||||
if lc == src {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lc.choice = src.choice
|
|
||||||
for i := range lc.low {
|
|
||||||
lc.low[i].deepcopy(&src.low[i])
|
|
||||||
}
|
|
||||||
for i := range lc.mid {
|
|
||||||
lc.mid[i].deepcopy(&src.mid[i])
|
|
||||||
}
|
|
||||||
lc.high.deepcopy(&src.high)
|
|
||||||
}
|
|
||||||
|
|
||||||
// init initializes a new length codec.
|
|
||||||
func (lc *lengthCodec) init() {
|
|
||||||
for i := range lc.choice {
|
|
||||||
lc.choice[i] = probInit
|
|
||||||
}
|
|
||||||
for i := range lc.low {
|
|
||||||
lc.low[i] = makeTreeCodec(3)
|
|
||||||
}
|
|
||||||
for i := range lc.mid {
|
|
||||||
lc.mid[i] = makeTreeCodec(3)
|
|
||||||
}
|
|
||||||
lc.high = makeTreeCodec(8)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lBits gives the number of bits used for the encoding of the l value
|
|
||||||
// provided to the range encoder.
|
|
||||||
func lBits(l uint32) int {
|
|
||||||
switch {
|
|
||||||
case l < 8:
|
|
||||||
return 4
|
|
||||||
case l < 16:
|
|
||||||
return 5
|
|
||||||
default:
|
|
||||||
return 10
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode encodes the length offset. The length offset l can be compute by
|
|
||||||
// subtracting minMatchLen (2) from the actual length.
|
|
||||||
//
|
|
||||||
// l = length - minMatchLen
|
|
||||||
//
|
|
||||||
func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
|
|
||||||
) (err error) {
|
|
||||||
if l > maxMatchLen-minMatchLen {
|
|
||||||
return errors.New("lengthCodec.Encode: l out of range")
|
|
||||||
}
|
|
||||||
if l < 8 {
|
|
||||||
if err = lc.choice[0].Encode(e, 0); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return lc.low[posState].Encode(e, l)
|
|
||||||
}
|
|
||||||
if err = lc.choice[0].Encode(e, 1); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if l < 16 {
|
|
||||||
if err = lc.choice[1].Encode(e, 0); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return lc.mid[posState].Encode(e, l-8)
|
|
||||||
}
|
|
||||||
if err = lc.choice[1].Encode(e, 1); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err = lc.high.Encode(e, l-16); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode reads the length offset. Add minMatchLen to compute the actual length
|
|
||||||
// to the length offset l.
|
|
||||||
func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
|
|
||||||
) (l uint32, err error) {
|
|
||||||
var b uint32
|
|
||||||
if b, err = lc.choice[0].Decode(d); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if b == 0 {
|
|
||||||
l, err = lc.low[posState].Decode(d)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if b, err = lc.choice[1].Decode(d); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if b == 0 {
|
|
||||||
l, err = lc.mid[posState].Decode(d)
|
|
||||||
l += 8
|
|
||||||
return
|
|
||||||
}
|
|
||||||
l, err = lc.high.Decode(d)
|
|
||||||
l += 16
|
|
||||||
return
|
|
||||||
}
|
|
132
vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
generated
vendored
132
vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
generated
vendored
@ -1,132 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
// literalCodec supports the encoding of literal. It provides 768 probability
|
|
||||||
// values per literal state. The upper 512 probabilities are used with the
|
|
||||||
// context of a match bit.
|
|
||||||
type literalCodec struct {
|
|
||||||
probs []prob
|
|
||||||
}
|
|
||||||
|
|
||||||
// deepcopy initializes literal codec c as a deep copy of the source.
|
|
||||||
func (c *literalCodec) deepcopy(src *literalCodec) {
|
|
||||||
if c == src {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.probs = make([]prob, len(src.probs))
|
|
||||||
copy(c.probs, src.probs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// init initializes the literal codec.
|
|
||||||
func (c *literalCodec) init(lc, lp int) {
|
|
||||||
switch {
|
|
||||||
case !(minLC <= lc && lc <= maxLC):
|
|
||||||
panic("lc out of range")
|
|
||||||
case !(minLP <= lp && lp <= maxLP):
|
|
||||||
panic("lp out of range")
|
|
||||||
}
|
|
||||||
c.probs = make([]prob, 0x300<<uint(lc+lp))
|
|
||||||
for i := range c.probs {
|
|
||||||
c.probs[i] = probInit
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode encodes the byte s using a range encoder as well as the current LZMA
|
|
||||||
// encoder state, a match byte and the literal state.
|
|
||||||
func (c *literalCodec) Encode(e *rangeEncoder, s byte,
|
|
||||||
state uint32, match byte, litState uint32,
|
|
||||||
) (err error) {
|
|
||||||
k := litState * 0x300
|
|
||||||
probs := c.probs[k : k+0x300]
|
|
||||||
symbol := uint32(1)
|
|
||||||
r := uint32(s)
|
|
||||||
if state >= 7 {
|
|
||||||
m := uint32(match)
|
|
||||||
for {
|
|
||||||
matchBit := (m >> 7) & 1
|
|
||||||
m <<= 1
|
|
||||||
bit := (r >> 7) & 1
|
|
||||||
r <<= 1
|
|
||||||
i := ((1 + matchBit) << 8) | symbol
|
|
||||||
if err = probs[i].Encode(e, bit); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
symbol = (symbol << 1) | bit
|
|
||||||
if matchBit != bit {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if symbol >= 0x100 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for symbol < 0x100 {
|
|
||||||
bit := (r >> 7) & 1
|
|
||||||
r <<= 1
|
|
||||||
if err = probs[symbol].Encode(e, bit); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
symbol = (symbol << 1) | bit
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode decodes a literal byte using the range decoder as well as the LZMA
|
|
||||||
// state, a match byte, and the literal state.
|
|
||||||
func (c *literalCodec) Decode(d *rangeDecoder,
|
|
||||||
state uint32, match byte, litState uint32,
|
|
||||||
) (s byte, err error) {
|
|
||||||
k := litState * 0x300
|
|
||||||
probs := c.probs[k : k+0x300]
|
|
||||||
symbol := uint32(1)
|
|
||||||
if state >= 7 {
|
|
||||||
m := uint32(match)
|
|
||||||
for {
|
|
||||||
matchBit := (m >> 7) & 1
|
|
||||||
m <<= 1
|
|
||||||
i := ((1 + matchBit) << 8) | symbol
|
|
||||||
bit, err := d.DecodeBit(&probs[i])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
symbol = (symbol << 1) | bit
|
|
||||||
if matchBit != bit {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if symbol >= 0x100 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for symbol < 0x100 {
|
|
||||||
bit, err := d.DecodeBit(&probs[symbol])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
symbol = (symbol << 1) | bit
|
|
||||||
}
|
|
||||||
s = byte(symbol - 0x100)
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// minLC and maxLC define the range for LC values.
|
|
||||||
const (
|
|
||||||
minLC = 0
|
|
||||||
maxLC = 8
|
|
||||||
)
|
|
||||||
|
|
||||||
// minLC and maxLC define the range for LP values.
|
|
||||||
const (
|
|
||||||
minLP = 0
|
|
||||||
maxLP = 4
|
|
||||||
)
|
|
||||||
|
|
||||||
// minState and maxState define a range for the state values stored in
|
|
||||||
// the State values.
|
|
||||||
const (
|
|
||||||
minState = 0
|
|
||||||
maxState = 11
|
|
||||||
)
|
|
52
vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
generated
vendored
52
vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
generated
vendored
@ -1,52 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import "errors"
|
|
||||||
|
|
||||||
// MatchAlgorithm identifies an algorithm to find matches in the
|
|
||||||
// dictionary.
|
|
||||||
type MatchAlgorithm byte
|
|
||||||
|
|
||||||
// Supported matcher algorithms.
|
|
||||||
const (
|
|
||||||
HashTable4 MatchAlgorithm = iota
|
|
||||||
BinaryTree
|
|
||||||
)
|
|
||||||
|
|
||||||
// maStrings are used by the String method.
|
|
||||||
var maStrings = map[MatchAlgorithm]string{
|
|
||||||
HashTable4: "HashTable4",
|
|
||||||
BinaryTree: "BinaryTree",
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string representation of the Matcher.
|
|
||||||
func (a MatchAlgorithm) String() string {
|
|
||||||
if s, ok := maStrings[a]; ok {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return "unknown"
|
|
||||||
}
|
|
||||||
|
|
||||||
var errUnsupportedMatchAlgorithm = errors.New(
|
|
||||||
"lzma: unsupported match algorithm value")
|
|
||||||
|
|
||||||
// verify checks whether the matcher value is supported.
|
|
||||||
func (a MatchAlgorithm) verify() error {
|
|
||||||
if _, ok := maStrings[a]; !ok {
|
|
||||||
return errUnsupportedMatchAlgorithm
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
|
|
||||||
switch a {
|
|
||||||
case HashTable4:
|
|
||||||
return newHashTable(dictCap, 4)
|
|
||||||
case BinaryTree:
|
|
||||||
return newBinTree(dictCap)
|
|
||||||
}
|
|
||||||
return nil, errUnsupportedMatchAlgorithm
|
|
||||||
}
|
|
80
vendor/github.com/ulikunitz/xz/lzma/operation.go
generated
vendored
80
vendor/github.com/ulikunitz/xz/lzma/operation.go
generated
vendored
@ -1,80 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// operation represents an operation on the dictionary during encoding or
|
|
||||||
// decoding.
|
|
||||||
type operation interface {
|
|
||||||
Len() int
|
|
||||||
}
|
|
||||||
|
|
||||||
// rep represents a repetition at the given distance and the given length
|
|
||||||
type match struct {
|
|
||||||
// supports all possible distance values, including the eos marker
|
|
||||||
distance int64
|
|
||||||
// length
|
|
||||||
n int
|
|
||||||
}
|
|
||||||
|
|
||||||
// verify checks whether the match is valid. If that is not the case an
|
|
||||||
// error is returned.
|
|
||||||
func (m match) verify() error {
|
|
||||||
if !(minDistance <= m.distance && m.distance <= maxDistance) {
|
|
||||||
return errors.New("distance out of range")
|
|
||||||
}
|
|
||||||
if !(1 <= m.n && m.n <= maxMatchLen) {
|
|
||||||
return errors.New("length out of range")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// l return the l-value for the match, which is the difference of length
|
|
||||||
// n and 2.
|
|
||||||
func (m match) l() uint32 {
|
|
||||||
return uint32(m.n - minMatchLen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// dist returns the dist value for the match, which is one less of the
|
|
||||||
// distance stored in the match.
|
|
||||||
func (m match) dist() uint32 {
|
|
||||||
return uint32(m.distance - minDistance)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of bytes matched.
|
|
||||||
func (m match) Len() int {
|
|
||||||
return m.n
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string representation for the repetition.
|
|
||||||
func (m match) String() string {
|
|
||||||
return fmt.Sprintf("M{%d,%d}", m.distance, m.n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lit represents a single byte literal.
|
|
||||||
type lit struct {
|
|
||||||
b byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns 1 for the single byte literal.
|
|
||||||
func (l lit) Len() int {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string representation for the literal.
|
|
||||||
func (l lit) String() string {
|
|
||||||
var c byte
|
|
||||||
if unicode.IsPrint(rune(l.b)) {
|
|
||||||
c = l.b
|
|
||||||
} else {
|
|
||||||
c = '.'
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("L{%c/%02x}", c, l.b)
|
|
||||||
}
|
|
53
vendor/github.com/ulikunitz/xz/lzma/prob.go
generated
vendored
53
vendor/github.com/ulikunitz/xz/lzma/prob.go
generated
vendored
@ -1,53 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
// movebits defines the number of bits used for the updates of probability
|
|
||||||
// values.
|
|
||||||
const movebits = 5
|
|
||||||
|
|
||||||
// probbits defines the number of bits of a probability value.
|
|
||||||
const probbits = 11
|
|
||||||
|
|
||||||
// probInit defines 0.5 as initial value for prob values.
|
|
||||||
const probInit prob = 1 << (probbits - 1)
|
|
||||||
|
|
||||||
// Type prob represents probabilities. The type can also be used to encode and
|
|
||||||
// decode single bits.
|
|
||||||
type prob uint16
|
|
||||||
|
|
||||||
// Dec decreases the probability. The decrease is proportional to the
|
|
||||||
// probability value.
|
|
||||||
func (p *prob) dec() {
|
|
||||||
*p -= *p >> movebits
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inc increases the probability. The Increase is proportional to the
|
|
||||||
// difference of 1 and the probability value.
|
|
||||||
func (p *prob) inc() {
|
|
||||||
*p += ((1 << probbits) - *p) >> movebits
|
|
||||||
}
|
|
||||||
|
|
||||||
// Computes the new bound for a given range using the probability value.
|
|
||||||
func (p prob) bound(r uint32) uint32 {
|
|
||||||
return (r >> probbits) * uint32(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bits returns 1. One is the number of bits that can be encoded or decoded
|
|
||||||
// with a single prob value.
|
|
||||||
func (p prob) Bits() int {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode encodes the least-significant bit of v. Note that the p value will be
|
|
||||||
// changed.
|
|
||||||
func (p *prob) Encode(e *rangeEncoder, v uint32) error {
|
|
||||||
return e.EncodeBit(v, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode decodes a single bit. Note that the p value will change.
|
|
||||||
func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) {
|
|
||||||
return d.DecodeBit(p)
|
|
||||||
}
|
|
69
vendor/github.com/ulikunitz/xz/lzma/properties.go
generated
vendored
69
vendor/github.com/ulikunitz/xz/lzma/properties.go
generated
vendored
@ -1,69 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// maximum and minimum values for the LZMA properties.
|
|
||||||
const (
|
|
||||||
minPB = 0
|
|
||||||
maxPB = 4
|
|
||||||
)
|
|
||||||
|
|
||||||
// maxPropertyCode is the possible maximum of a properties code byte.
|
|
||||||
const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1
|
|
||||||
|
|
||||||
// Properties contains the parameters LC, LP and PB. The parameter LC
|
|
||||||
// defines the number of literal context bits; parameter LP the number
|
|
||||||
// of literal position bits and PB the number of position bits.
|
|
||||||
type Properties struct {
|
|
||||||
LC int
|
|
||||||
LP int
|
|
||||||
PB int
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the properties in a string representation.
|
|
||||||
func (p *Properties) String() string {
|
|
||||||
return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PropertiesForCode converts a properties code byte into a Properties value.
|
|
||||||
func PropertiesForCode(code byte) (p Properties, err error) {
|
|
||||||
if code > maxPropertyCode {
|
|
||||||
return p, errors.New("lzma: invalid properties code")
|
|
||||||
}
|
|
||||||
p.LC = int(code % 9)
|
|
||||||
code /= 9
|
|
||||||
p.LP = int(code % 5)
|
|
||||||
code /= 5
|
|
||||||
p.PB = int(code % 5)
|
|
||||||
return p, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// verify checks the properties for correctness.
|
|
||||||
func (p *Properties) verify() error {
|
|
||||||
if p == nil {
|
|
||||||
return errors.New("lzma: properties are nil")
|
|
||||||
}
|
|
||||||
if !(minLC <= p.LC && p.LC <= maxLC) {
|
|
||||||
return errors.New("lzma: lc out of range")
|
|
||||||
}
|
|
||||||
if !(minLP <= p.LP && p.LP <= maxLP) {
|
|
||||||
return errors.New("lzma: lp out of range")
|
|
||||||
}
|
|
||||||
if !(minPB <= p.PB && p.PB <= maxPB) {
|
|
||||||
return errors.New("lzma: pb out of range")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Code converts the properties to a byte. The function assumes that
|
|
||||||
// the properties components are all in range.
|
|
||||||
func (p Properties) Code() byte {
|
|
||||||
return byte((p.PB*5+p.LP)*9 + p.LC)
|
|
||||||
}
|
|
248
vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
generated
vendored
248
vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
generated
vendored
@ -1,248 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// rangeEncoder implements range encoding of single bits. The low value can
|
|
||||||
// overflow therefore we need uint64. The cache value is used to handle
|
|
||||||
// overflows.
|
|
||||||
type rangeEncoder struct {
|
|
||||||
lbw *LimitedByteWriter
|
|
||||||
nrange uint32
|
|
||||||
low uint64
|
|
||||||
cacheLen int64
|
|
||||||
cache byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// maxInt64 provides the maximal value of the int64 type
|
|
||||||
const maxInt64 = 1<<63 - 1
|
|
||||||
|
|
||||||
// newRangeEncoder creates a new range encoder.
|
|
||||||
func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) {
|
|
||||||
lbw, ok := bw.(*LimitedByteWriter)
|
|
||||||
if !ok {
|
|
||||||
lbw = &LimitedByteWriter{BW: bw, N: maxInt64}
|
|
||||||
}
|
|
||||||
return &rangeEncoder{
|
|
||||||
lbw: lbw,
|
|
||||||
nrange: 0xffffffff,
|
|
||||||
cacheLen: 1}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Available returns the number of bytes that still can be written. The
|
|
||||||
// method takes the bytes that will be currently written by Close into
|
|
||||||
// account.
|
|
||||||
func (e *rangeEncoder) Available() int64 {
|
|
||||||
return e.lbw.N - (e.cacheLen + 4)
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeByte writes a single byte to the underlying writer. An error is
|
|
||||||
// returned if the limit is reached. The written byte will be counted if
|
|
||||||
// the underlying writer doesn't return an error.
|
|
||||||
func (e *rangeEncoder) writeByte(c byte) error {
|
|
||||||
if e.Available() < 1 {
|
|
||||||
return ErrLimit
|
|
||||||
}
|
|
||||||
return e.lbw.WriteByte(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirectEncodeBit encodes the least-significant bit of b with probability 1/2.
|
|
||||||
func (e *rangeEncoder) DirectEncodeBit(b uint32) error {
|
|
||||||
e.nrange >>= 1
|
|
||||||
e.low += uint64(e.nrange) & (0 - (uint64(b) & 1))
|
|
||||||
|
|
||||||
// normalize
|
|
||||||
const top = 1 << 24
|
|
||||||
if e.nrange >= top {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
e.nrange <<= 8
|
|
||||||
return e.shiftLow()
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncodeBit encodes the least significant bit of b. The p value will be
|
|
||||||
// updated by the function depending on the bit encoded.
|
|
||||||
func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error {
|
|
||||||
bound := p.bound(e.nrange)
|
|
||||||
if b&1 == 0 {
|
|
||||||
e.nrange = bound
|
|
||||||
p.inc()
|
|
||||||
} else {
|
|
||||||
e.low += uint64(bound)
|
|
||||||
e.nrange -= bound
|
|
||||||
p.dec()
|
|
||||||
}
|
|
||||||
|
|
||||||
// normalize
|
|
||||||
const top = 1 << 24
|
|
||||||
if e.nrange >= top {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
e.nrange <<= 8
|
|
||||||
return e.shiftLow()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close writes a complete copy of the low value.
|
|
||||||
func (e *rangeEncoder) Close() error {
|
|
||||||
for i := 0; i < 5; i++ {
|
|
||||||
if err := e.shiftLow(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// shiftLow shifts the low value for 8 bit. The shifted byte is written into
|
|
||||||
// the byte writer. The cache value is used to handle overflows.
|
|
||||||
func (e *rangeEncoder) shiftLow() error {
|
|
||||||
if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 {
|
|
||||||
tmp := e.cache
|
|
||||||
for {
|
|
||||||
err := e.writeByte(tmp + byte(e.low>>32))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
tmp = 0xff
|
|
||||||
e.cacheLen--
|
|
||||||
if e.cacheLen <= 0 {
|
|
||||||
if e.cacheLen < 0 {
|
|
||||||
panic("negative cacheLen")
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
e.cache = byte(uint32(e.low) >> 24)
|
|
||||||
}
|
|
||||||
e.cacheLen++
|
|
||||||
e.low = uint64(uint32(e.low) << 8)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// rangeDecoder decodes single bits of the range encoding stream.
|
|
||||||
type rangeDecoder struct {
|
|
||||||
br io.ByteReader
|
|
||||||
nrange uint32
|
|
||||||
code uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// init initializes the range decoder, by reading from the byte reader.
|
|
||||||
func (d *rangeDecoder) init() error {
|
|
||||||
d.nrange = 0xffffffff
|
|
||||||
d.code = 0
|
|
||||||
|
|
||||||
b, err := d.br.ReadByte()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if b != 0 {
|
|
||||||
return errors.New("newRangeDecoder: first byte not zero")
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < 4; i++ {
|
|
||||||
if err = d.updateCode(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.code >= d.nrange {
|
|
||||||
return errors.New("newRangeDecoder: d.code >= d.nrange")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newRangeDecoder initializes a range decoder. It reads five bytes from the
|
|
||||||
// reader and therefore may return an error.
|
|
||||||
func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) {
|
|
||||||
d = &rangeDecoder{br: br, nrange: 0xffffffff}
|
|
||||||
|
|
||||||
b, err := d.br.ReadByte()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if b != 0 {
|
|
||||||
return nil, errors.New("newRangeDecoder: first byte not zero")
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < 4; i++ {
|
|
||||||
if err = d.updateCode(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.code >= d.nrange {
|
|
||||||
return nil, errors.New("newRangeDecoder: d.code >= d.nrange")
|
|
||||||
}
|
|
||||||
|
|
||||||
return d, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// possiblyAtEnd checks whether the decoder may be at the end of the stream.
|
|
||||||
func (d *rangeDecoder) possiblyAtEnd() bool {
|
|
||||||
return d.code == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirectDecodeBit decodes a bit with probability 1/2. The return value b will
|
|
||||||
// contain the bit at the least-significant position. All other bits will be
|
|
||||||
// zero.
|
|
||||||
func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) {
|
|
||||||
d.nrange >>= 1
|
|
||||||
d.code -= d.nrange
|
|
||||||
t := 0 - (d.code >> 31)
|
|
||||||
d.code += d.nrange & t
|
|
||||||
b = (t + 1) & 1
|
|
||||||
|
|
||||||
// d.code will stay less then d.nrange
|
|
||||||
|
|
||||||
// normalize
|
|
||||||
// assume d.code < d.nrange
|
|
||||||
const top = 1 << 24
|
|
||||||
if d.nrange >= top {
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
d.nrange <<= 8
|
|
||||||
// d.code < d.nrange will be maintained
|
|
||||||
return b, d.updateCode()
|
|
||||||
}
|
|
||||||
|
|
||||||
// decodeBit decodes a single bit. The bit will be returned at the
|
|
||||||
// least-significant position. All other bits will be zero. The probability
|
|
||||||
// value will be updated.
|
|
||||||
func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) {
|
|
||||||
bound := p.bound(d.nrange)
|
|
||||||
if d.code < bound {
|
|
||||||
d.nrange = bound
|
|
||||||
p.inc()
|
|
||||||
b = 0
|
|
||||||
} else {
|
|
||||||
d.code -= bound
|
|
||||||
d.nrange -= bound
|
|
||||||
p.dec()
|
|
||||||
b = 1
|
|
||||||
}
|
|
||||||
// normalize
|
|
||||||
// assume d.code < d.nrange
|
|
||||||
const top = 1 << 24
|
|
||||||
if d.nrange >= top {
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
d.nrange <<= 8
|
|
||||||
// d.code < d.nrange will be maintained
|
|
||||||
return b, d.updateCode()
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateCode reads a new byte into the code.
|
|
||||||
func (d *rangeDecoder) updateCode() error {
|
|
||||||
b, err := d.br.ReadByte()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
d.code = (d.code << 8) | uint32(b)
|
|
||||||
return nil
|
|
||||||
}
|
|
100
vendor/github.com/ulikunitz/xz/lzma/reader.go
generated
vendored
100
vendor/github.com/ulikunitz/xz/lzma/reader.go
generated
vendored
@ -1,100 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package lzma supports the decoding and encoding of LZMA streams.
|
|
||||||
// Reader and Writer support the classic LZMA format. Reader2 and
|
|
||||||
// Writer2 support the decoding and encoding of LZMA2 streams.
|
|
||||||
//
|
|
||||||
// The package is written completely in Go and doesn't rely on any external
|
|
||||||
// library.
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ReaderConfig stores the parameters for the reader of the classic LZMA
|
|
||||||
// format.
|
|
||||||
type ReaderConfig struct {
|
|
||||||
DictCap int
|
|
||||||
}
|
|
||||||
|
|
||||||
// fill converts the zero values of the configuration to the default values.
|
|
||||||
func (c *ReaderConfig) fill() {
|
|
||||||
if c.DictCap == 0 {
|
|
||||||
c.DictCap = 8 * 1024 * 1024
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify checks the reader configuration for errors. Zero values will
|
|
||||||
// be replaced by default values.
|
|
||||||
func (c *ReaderConfig) Verify() error {
|
|
||||||
c.fill()
|
|
||||||
if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
|
|
||||||
return errors.New("lzma: dictionary capacity is out of range")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reader provides a reader for LZMA files or streams.
|
|
||||||
type Reader struct {
|
|
||||||
lzma io.Reader
|
|
||||||
h header
|
|
||||||
d *decoder
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReader creates a new reader for an LZMA stream using the classic
|
|
||||||
// format. NewReader reads and checks the header of the LZMA stream.
|
|
||||||
func NewReader(lzma io.Reader) (r *Reader, err error) {
|
|
||||||
return ReaderConfig{}.NewReader(lzma)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReader creates a new reader for an LZMA stream in the classic
|
|
||||||
// format. The function reads and verifies the the header of the LZMA
|
|
||||||
// stream.
|
|
||||||
func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) {
|
|
||||||
if err = c.Verify(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
data := make([]byte, HeaderLen)
|
|
||||||
if _, err := io.ReadFull(lzma, data); err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
return nil, errors.New("lzma: unexpected EOF")
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r = &Reader{lzma: lzma}
|
|
||||||
if err = r.h.unmarshalBinary(data); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if r.h.dictCap < MinDictCap {
|
|
||||||
return nil, errors.New("lzma: dictionary capacity too small")
|
|
||||||
}
|
|
||||||
dictCap := r.h.dictCap
|
|
||||||
if c.DictCap > dictCap {
|
|
||||||
dictCap = c.DictCap
|
|
||||||
}
|
|
||||||
|
|
||||||
state := newState(r.h.properties)
|
|
||||||
dict, err := newDecoderDict(dictCap)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EOSMarker indicates that an EOS marker has been encountered.
|
|
||||||
func (r *Reader) EOSMarker() bool {
|
|
||||||
return r.d.eosMarker
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read returns uncompressed data.
|
|
||||||
func (r *Reader) Read(p []byte) (n int, err error) {
|
|
||||||
return r.d.Read(p)
|
|
||||||
}
|
|
232
vendor/github.com/ulikunitz/xz/lzma/reader2.go
generated
vendored
232
vendor/github.com/ulikunitz/xz/lzma/reader2.go
generated
vendored
@ -1,232 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/ulikunitz/xz/internal/xlog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reader2Config stores the parameters for the LZMA2 reader.
|
|
||||||
// format.
|
|
||||||
type Reader2Config struct {
|
|
||||||
DictCap int
|
|
||||||
}
|
|
||||||
|
|
||||||
// fill converts the zero values of the configuration to the default values.
|
|
||||||
func (c *Reader2Config) fill() {
|
|
||||||
if c.DictCap == 0 {
|
|
||||||
c.DictCap = 8 * 1024 * 1024
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify checks the reader configuration for errors. Zero configuration values
|
|
||||||
// will be replaced by default values.
|
|
||||||
func (c *Reader2Config) Verify() error {
|
|
||||||
c.fill()
|
|
||||||
if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
|
|
||||||
return errors.New("lzma: dictionary capacity is out of range")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reader2 supports the reading of LZMA2 chunk sequences. Note that the
|
|
||||||
// first chunk should have a dictionary reset and the first compressed
|
|
||||||
// chunk a properties reset. The chunk sequence may not be terminated by
|
|
||||||
// an end-of-stream chunk.
|
|
||||||
type Reader2 struct {
|
|
||||||
r io.Reader
|
|
||||||
err error
|
|
||||||
|
|
||||||
dict *decoderDict
|
|
||||||
ur *uncompressedReader
|
|
||||||
decoder *decoder
|
|
||||||
chunkReader io.Reader
|
|
||||||
|
|
||||||
cstate chunkState
|
|
||||||
ctype chunkType
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReader2 creates a reader for an LZMA2 chunk sequence.
|
|
||||||
func NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
|
|
||||||
return Reader2Config{}.NewReader2(lzma2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReader2 creates an LZMA2 reader using the given configuration.
|
|
||||||
func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
|
|
||||||
if err = c.Verify(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r = &Reader2{r: lzma2, cstate: start}
|
|
||||||
r.dict, err = newDecoderDict(c.DictCap)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err = r.startChunk(); err != nil {
|
|
||||||
r.err = err
|
|
||||||
}
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// uncompressed tests whether the chunk type specifies an uncompressed
|
|
||||||
// chunk.
|
|
||||||
func uncompressed(ctype chunkType) bool {
|
|
||||||
return ctype == cU || ctype == cUD
|
|
||||||
}
|
|
||||||
|
|
||||||
// startChunk parses a new chunk.
|
|
||||||
func (r *Reader2) startChunk() error {
|
|
||||||
r.chunkReader = nil
|
|
||||||
header, err := readChunkHeader(r.r)
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
xlog.Debugf("chunk header %v", header)
|
|
||||||
if err = r.cstate.next(header.ctype); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if r.cstate == stop {
|
|
||||||
return io.EOF
|
|
||||||
}
|
|
||||||
if header.ctype == cUD || header.ctype == cLRND {
|
|
||||||
r.dict.Reset()
|
|
||||||
}
|
|
||||||
size := int64(header.uncompressed) + 1
|
|
||||||
if uncompressed(header.ctype) {
|
|
||||||
if r.ur != nil {
|
|
||||||
r.ur.Reopen(r.r, size)
|
|
||||||
} else {
|
|
||||||
r.ur = newUncompressedReader(r.r, r.dict, size)
|
|
||||||
}
|
|
||||||
r.chunkReader = r.ur
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1))
|
|
||||||
if r.decoder == nil {
|
|
||||||
state := newState(header.props)
|
|
||||||
r.decoder, err = newDecoder(br, state, r.dict, size)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
r.chunkReader = r.decoder
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
switch header.ctype {
|
|
||||||
case cLR:
|
|
||||||
r.decoder.State.Reset()
|
|
||||||
case cLRN, cLRND:
|
|
||||||
r.decoder.State = newState(header.props)
|
|
||||||
}
|
|
||||||
err = r.decoder.Reopen(br, size)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
r.chunkReader = r.decoder
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads data from the LZMA2 chunk sequence.
|
|
||||||
func (r *Reader2) Read(p []byte) (n int, err error) {
|
|
||||||
if r.err != nil {
|
|
||||||
return 0, r.err
|
|
||||||
}
|
|
||||||
for n < len(p) {
|
|
||||||
var k int
|
|
||||||
k, err = r.chunkReader.Read(p[n:])
|
|
||||||
n += k
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
err = r.startChunk()
|
|
||||||
if err == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r.err = err
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if k == 0 {
|
|
||||||
r.err = errors.New("lzma: Reader2 doesn't get data")
|
|
||||||
return n, r.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EOS returns whether the LZMA2 stream has been terminated by an
|
|
||||||
// end-of-stream chunk.
|
|
||||||
func (r *Reader2) EOS() bool {
|
|
||||||
return r.cstate == stop
|
|
||||||
}
|
|
||||||
|
|
||||||
// uncompressedReader is used to read uncompressed chunks.
|
|
||||||
type uncompressedReader struct {
|
|
||||||
lr io.LimitedReader
|
|
||||||
Dict *decoderDict
|
|
||||||
eof bool
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// newUncompressedReader initializes a new uncompressedReader.
|
|
||||||
func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader {
|
|
||||||
ur := &uncompressedReader{
|
|
||||||
lr: io.LimitedReader{R: r, N: size},
|
|
||||||
Dict: dict,
|
|
||||||
}
|
|
||||||
return ur
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reopen reinitializes an uncompressed reader.
|
|
||||||
func (ur *uncompressedReader) Reopen(r io.Reader, size int64) {
|
|
||||||
ur.err = nil
|
|
||||||
ur.eof = false
|
|
||||||
ur.lr = io.LimitedReader{R: r, N: size}
|
|
||||||
}
|
|
||||||
|
|
||||||
// fill reads uncompressed data into the dictionary.
|
|
||||||
func (ur *uncompressedReader) fill() error {
|
|
||||||
if !ur.eof {
|
|
||||||
n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available()))
|
|
||||||
if err != io.EOF {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ur.eof = true
|
|
||||||
if n > 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ur.lr.N != 0 {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads uncompressed data from the limited reader.
|
|
||||||
func (ur *uncompressedReader) Read(p []byte) (n int, err error) {
|
|
||||||
if ur.err != nil {
|
|
||||||
return 0, ur.err
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
var k int
|
|
||||||
k, err = ur.Dict.Read(p[n:])
|
|
||||||
n += k
|
|
||||||
if n >= len(p) {
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
err = ur.fill()
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ur.err = err
|
|
||||||
return n, err
|
|
||||||
}
|
|
151
vendor/github.com/ulikunitz/xz/lzma/state.go
generated
vendored
151
vendor/github.com/ulikunitz/xz/lzma/state.go
generated
vendored
@ -1,151 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
// states defines the overall state count
|
|
||||||
const states = 12
|
|
||||||
|
|
||||||
// State maintains the full state of the operation encoding or decoding
|
|
||||||
// process.
|
|
||||||
type state struct {
|
|
||||||
rep [4]uint32
|
|
||||||
isMatch [states << maxPosBits]prob
|
|
||||||
isRepG0Long [states << maxPosBits]prob
|
|
||||||
isRep [states]prob
|
|
||||||
isRepG0 [states]prob
|
|
||||||
isRepG1 [states]prob
|
|
||||||
isRepG2 [states]prob
|
|
||||||
litCodec literalCodec
|
|
||||||
lenCodec lengthCodec
|
|
||||||
repLenCodec lengthCodec
|
|
||||||
distCodec distCodec
|
|
||||||
state uint32
|
|
||||||
posBitMask uint32
|
|
||||||
Properties Properties
|
|
||||||
}
|
|
||||||
|
|
||||||
// initProbSlice initializes a slice of probabilities.
|
|
||||||
func initProbSlice(p []prob) {
|
|
||||||
for i := range p {
|
|
||||||
p[i] = probInit
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset sets all state information to the original values.
|
|
||||||
func (s *state) Reset() {
|
|
||||||
p := s.Properties
|
|
||||||
*s = state{
|
|
||||||
Properties: p,
|
|
||||||
// dict: s.dict,
|
|
||||||
posBitMask: (uint32(1) << uint(p.PB)) - 1,
|
|
||||||
}
|
|
||||||
initProbSlice(s.isMatch[:])
|
|
||||||
initProbSlice(s.isRep[:])
|
|
||||||
initProbSlice(s.isRepG0[:])
|
|
||||||
initProbSlice(s.isRepG1[:])
|
|
||||||
initProbSlice(s.isRepG2[:])
|
|
||||||
initProbSlice(s.isRepG0Long[:])
|
|
||||||
s.litCodec.init(p.LC, p.LP)
|
|
||||||
s.lenCodec.init()
|
|
||||||
s.repLenCodec.init()
|
|
||||||
s.distCodec.init()
|
|
||||||
}
|
|
||||||
|
|
||||||
// initState initializes the state.
|
|
||||||
func initState(s *state, p Properties) {
|
|
||||||
*s = state{Properties: p}
|
|
||||||
s.Reset()
|
|
||||||
}
|
|
||||||
|
|
||||||
// newState creates a new state from the give Properties.
|
|
||||||
func newState(p Properties) *state {
|
|
||||||
s := &state{Properties: p}
|
|
||||||
s.Reset()
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// deepcopy initializes s as a deep copy of the source.
|
|
||||||
func (s *state) deepcopy(src *state) {
|
|
||||||
if s == src {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s.rep = src.rep
|
|
||||||
s.isMatch = src.isMatch
|
|
||||||
s.isRepG0Long = src.isRepG0Long
|
|
||||||
s.isRep = src.isRep
|
|
||||||
s.isRepG0 = src.isRepG0
|
|
||||||
s.isRepG1 = src.isRepG1
|
|
||||||
s.isRepG2 = src.isRepG2
|
|
||||||
s.litCodec.deepcopy(&src.litCodec)
|
|
||||||
s.lenCodec.deepcopy(&src.lenCodec)
|
|
||||||
s.repLenCodec.deepcopy(&src.repLenCodec)
|
|
||||||
s.distCodec.deepcopy(&src.distCodec)
|
|
||||||
s.state = src.state
|
|
||||||
s.posBitMask = src.posBitMask
|
|
||||||
s.Properties = src.Properties
|
|
||||||
}
|
|
||||||
|
|
||||||
// cloneState creates a new clone of the give state.
|
|
||||||
func cloneState(src *state) *state {
|
|
||||||
s := new(state)
|
|
||||||
s.deepcopy(src)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateStateLiteral updates the state for a literal.
|
|
||||||
func (s *state) updateStateLiteral() {
|
|
||||||
switch {
|
|
||||||
case s.state < 4:
|
|
||||||
s.state = 0
|
|
||||||
return
|
|
||||||
case s.state < 10:
|
|
||||||
s.state -= 3
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s.state -= 6
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateStateMatch updates the state for a match.
|
|
||||||
func (s *state) updateStateMatch() {
|
|
||||||
if s.state < 7 {
|
|
||||||
s.state = 7
|
|
||||||
} else {
|
|
||||||
s.state = 10
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateStateRep updates the state for a repetition.
|
|
||||||
func (s *state) updateStateRep() {
|
|
||||||
if s.state < 7 {
|
|
||||||
s.state = 8
|
|
||||||
} else {
|
|
||||||
s.state = 11
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateStateShortRep updates the state for a short repetition.
|
|
||||||
func (s *state) updateStateShortRep() {
|
|
||||||
if s.state < 7 {
|
|
||||||
s.state = 9
|
|
||||||
} else {
|
|
||||||
s.state = 11
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// states computes the states of the operation codec.
|
|
||||||
func (s *state) states(dictHead int64) (state1, state2, posState uint32) {
|
|
||||||
state1 = s.state
|
|
||||||
posState = uint32(dictHead) & s.posBitMask
|
|
||||||
state2 = (s.state << maxPosBits) | posState
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// litState computes the literal state.
|
|
||||||
func (s *state) litState(prev byte, dictHead int64) uint32 {
|
|
||||||
lp, lc := uint(s.Properties.LP), uint(s.Properties.LC)
|
|
||||||
litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) |
|
|
||||||
(uint32(prev) >> (8 - lc))
|
|
||||||
return litState
|
|
||||||
}
|
|
133
vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
generated
vendored
133
vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
generated
vendored
@ -1,133 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
// treeCodec encodes or decodes values with a fixed bit size. It is using a
|
|
||||||
// tree of probability value. The root of the tree is the most-significant bit.
|
|
||||||
type treeCodec struct {
|
|
||||||
probTree
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeTreeCodec makes a tree codec. The bits value must be inside the range
|
|
||||||
// [1,32].
|
|
||||||
func makeTreeCodec(bits int) treeCodec {
|
|
||||||
return treeCodec{makeProbTree(bits)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// deepcopy initializes tc as a deep copy of the source.
|
|
||||||
func (tc *treeCodec) deepcopy(src *treeCodec) {
|
|
||||||
tc.probTree.deepcopy(&src.probTree)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode uses the range encoder to encode a fixed-bit-size value.
|
|
||||||
func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
|
|
||||||
m := uint32(1)
|
|
||||||
for i := int(tc.bits) - 1; i >= 0; i-- {
|
|
||||||
b := (v >> uint(i)) & 1
|
|
||||||
if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m = (m << 1) | b
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may
|
|
||||||
// be caused by the range decoder.
|
|
||||||
func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
|
|
||||||
m := uint32(1)
|
|
||||||
for j := 0; j < int(tc.bits); j++ {
|
|
||||||
b, err := d.DecodeBit(&tc.probs[m])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
m = (m << 1) | b
|
|
||||||
}
|
|
||||||
return m - (1 << uint(tc.bits)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// treeReverseCodec is another tree codec, where the least-significant bit is
|
|
||||||
// the start of the probability tree.
|
|
||||||
type treeReverseCodec struct {
|
|
||||||
probTree
|
|
||||||
}
|
|
||||||
|
|
||||||
// deepcopy initializes the treeReverseCodec as a deep copy of the
|
|
||||||
// source.
|
|
||||||
func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
|
|
||||||
tc.probTree.deepcopy(&src.probTree)
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must
|
|
||||||
// be in the range [1,32].
|
|
||||||
func makeTreeReverseCodec(bits int) treeReverseCodec {
|
|
||||||
return treeReverseCodec{makeProbTree(bits)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode uses range encoder to encode a fixed-bit-size value. The range
|
|
||||||
// encoder may cause errors.
|
|
||||||
func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
|
|
||||||
m := uint32(1)
|
|
||||||
for i := uint(0); i < uint(tc.bits); i++ {
|
|
||||||
b := (v >> i) & 1
|
|
||||||
if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m = (m << 1) | b
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decodes uses the range decoder to decode a fixed-bit-size value. Errors
|
|
||||||
// returned by the range decoder will be returned.
|
|
||||||
func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
|
|
||||||
m := uint32(1)
|
|
||||||
for j := uint(0); j < uint(tc.bits); j++ {
|
|
||||||
b, err := d.DecodeBit(&tc.probs[m])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
m = (m << 1) | b
|
|
||||||
v |= b << j
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// probTree stores enough probability values to be used by the treeEncode and
|
|
||||||
// treeDecode methods of the range coder types.
|
|
||||||
type probTree struct {
|
|
||||||
probs []prob
|
|
||||||
bits byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// deepcopy initializes the probTree value as a deep copy of the source.
|
|
||||||
func (t *probTree) deepcopy(src *probTree) {
|
|
||||||
if t == src {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.probs = make([]prob, len(src.probs))
|
|
||||||
copy(t.probs, src.probs)
|
|
||||||
t.bits = src.bits
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeProbTree initializes a probTree structure.
|
|
||||||
func makeProbTree(bits int) probTree {
|
|
||||||
if !(1 <= bits && bits <= 32) {
|
|
||||||
panic("bits outside of range [1,32]")
|
|
||||||
}
|
|
||||||
t := probTree{
|
|
||||||
bits: byte(bits),
|
|
||||||
probs: make([]prob, 1<<uint(bits)),
|
|
||||||
}
|
|
||||||
for i := range t.probs {
|
|
||||||
t.probs[i] = probInit
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bits provides the number of bits for the values to de- or encode.
|
|
||||||
func (t *probTree) Bits() int {
|
|
||||||
return int(t.bits)
|
|
||||||
}
|
|
209
vendor/github.com/ulikunitz/xz/lzma/writer.go
generated
vendored
209
vendor/github.com/ulikunitz/xz/lzma/writer.go
generated
vendored
@ -1,209 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MinDictCap and MaxDictCap provide the range of supported dictionary
|
|
||||||
// capacities.
|
|
||||||
const (
|
|
||||||
MinDictCap = 1 << 12
|
|
||||||
MaxDictCap = 1<<32 - 1
|
|
||||||
)
|
|
||||||
|
|
||||||
// WriterConfig defines the configuration parameter for a writer.
|
|
||||||
type WriterConfig struct {
|
|
||||||
// Properties for the encoding. If the it is nil the value
|
|
||||||
// {LC: 3, LP: 0, PB: 2} will be chosen.
|
|
||||||
Properties *Properties
|
|
||||||
// The capacity of the dictionary. If DictCap is zero, the value
|
|
||||||
// 8 MiB will be chosen.
|
|
||||||
DictCap int
|
|
||||||
// Size of the lookahead buffer; value 0 indicates default size
|
|
||||||
// 4096
|
|
||||||
BufSize int
|
|
||||||
// Match algorithm
|
|
||||||
Matcher MatchAlgorithm
|
|
||||||
// SizeInHeader indicates that the header will contain an
|
|
||||||
// explicit size.
|
|
||||||
SizeInHeader bool
|
|
||||||
// Size of the data to be encoded. A positive value will imply
|
|
||||||
// than an explicit size will be set in the header.
|
|
||||||
Size int64
|
|
||||||
	// EOSMarker requests whether the EOSMarker needs to be written.
	// If no explicit size has been given the EOSMarker will be
	// set automatically.
	EOSMarker bool
}

// fill converts zero-value fields to their explicit default values.
func (c *WriterConfig) fill() {
	if c.Properties == nil {
		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
	}
	if c.DictCap == 0 {
		c.DictCap = 8 * 1024 * 1024
	}
	if c.BufSize == 0 {
		c.BufSize = 4096
	}
	if c.Size > 0 {
		c.SizeInHeader = true
	}
	if !c.SizeInHeader {
		c.EOSMarker = true
	}
}

// Verify checks WriterConfig for errors. Verify will replace zero
// values with default values.
func (c *WriterConfig) Verify() error {
	if c == nil {
		return errors.New("lzma: WriterConfig is nil")
	}
	c.fill()
	var err error
	if c.Properties == nil {
		return errors.New("lzma: WriterConfig has no Properties set")
	}
	if err = c.Properties.verify(); err != nil {
		return err
	}
	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
		return errors.New("lzma: dictionary capacity is out of range")
	}
	if !(maxMatchLen <= c.BufSize) {
		return errors.New("lzma: lookahead buffer size too small")
	}
	if c.SizeInHeader {
		if c.Size < 0 {
			return errors.New("lzma: negative size not supported")
		}
	} else if !c.EOSMarker {
		return errors.New("lzma: EOS marker is required")
	}
	if err = c.Matcher.verify(); err != nil {
		return err
	}

	return nil
}

// header returns the header structure for this configuration.
func (c *WriterConfig) header() header {
	h := header{
		properties: *c.Properties,
		dictCap:    c.DictCap,
		size:       -1,
	}
	if c.SizeInHeader {
		h.size = c.Size
	}
	return h
}

// Writer writes an LZMA stream in the classic format.
type Writer struct {
	h   header
	bw  io.ByteWriter
	buf *bufio.Writer
	e   *encoder
}

// NewWriter creates a new LZMA writer for the classic format. The
// method will write the header to the underlying stream.
func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	w = &Writer{h: c.header()}

	var ok bool
	w.bw, ok = lzma.(io.ByteWriter)
	if !ok {
		w.buf = bufio.NewWriter(lzma)
		w.bw = w.buf
	}
	state := newState(w.h.properties)
	m, err := c.Matcher.new(w.h.dictCap)
	if err != nil {
		return nil, err
	}
	dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
	if err != nil {
		return nil, err
	}
	var flags encoderFlags
	if c.EOSMarker {
		flags = eosMarker
	}
	if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
		return nil, err
	}

	if err = w.writeHeader(); err != nil {
		return nil, err
	}
	return w, nil
}

// NewWriter creates a new LZMA writer using the classic format. The
// function writes the header to the underlying stream.
func NewWriter(lzma io.Writer) (w *Writer, err error) {
	return WriterConfig{}.NewWriter(lzma)
}

// writeHeader writes the LZMA header into the stream.
func (w *Writer) writeHeader() error {
	data, err := w.h.marshalBinary()
	if err != nil {
		return err
	}
	_, err = w.bw.(io.Writer).Write(data)
	return err
}

// Write puts data into the Writer.
func (w *Writer) Write(p []byte) (n int, err error) {
	if w.h.size >= 0 {
		m := w.h.size
		m -= w.e.Compressed() + int64(w.e.dict.Buffered())
		if m < 0 {
			m = 0
		}
		if m < int64(len(p)) {
			p = p[:m]
			err = ErrNoSpace
		}
	}
	var werr error
	if n, werr = w.e.Write(p); werr != nil {
		err = werr
	}
	return n, err
}

// Close closes the writer stream. It ensures that all data from the
// buffer will be compressed and the LZMA stream will be finished.
func (w *Writer) Close() error {
	if w.h.size >= 0 {
		n := w.e.Compressed() + int64(w.e.dict.Buffered())
		if n != w.h.size {
			return errSize
		}
	}
	err := w.e.Close()
	if w.buf != nil {
		ferr := w.buf.Flush()
		if err == nil {
			err = ferr
		}
	}
	return err
}
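As an aside, a minimal sketch of how the classic-format writer above is typically driven. It only uses the WriterConfig.NewWriter API shown in this file plus lzma.NewReader from the package's reader.go (not part of this hunk); the sample string and buffer names are illustrative. Leaving Size unset keeps SizeInHeader false, so fill() enables the EOS marker and Close terminates the stream explicitly.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var compressed bytes.Buffer

	// Size is left unset, so fill() switches the EOS marker on and
	// Close terminates the stream explicitly.
	w, err := lzma.WriterConfig{DictCap: 8 * 1024 * 1024}.NewWriter(&compressed)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(w, strings.NewReader("hello, lzma")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Round-trip through the package's classic-format reader.
	r, err := lzma.NewReader(&compressed)
	if err != nil {
		log.Fatal(err)
	}
	var out bytes.Buffer
	if _, err := io.Copy(&out, r); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.String())
}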
305 vendor/github.com/ulikunitz/xz/lzma/writer2.go (generated, vendored)
@@ -1,305 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package lzma
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Writer2Config is used to create a Writer2 using parameters.
|
|
||||||
type Writer2Config struct {
|
|
||||||
// The properties for the encoding. If the it is nil the value
|
|
||||||
// {LC: 3, LP: 0, PB: 2} will be chosen.
|
|
||||||
Properties *Properties
|
|
||||||
// The capacity of the dictionary. If DictCap is zero, the value
|
|
||||||
// 8 MiB will be chosen.
|
|
||||||
DictCap int
|
|
||||||
// Size of the lookahead buffer; value 0 indicates default size
|
|
||||||
// 4096
|
|
||||||
BufSize int
|
|
||||||
// Match algorithm
|
|
||||||
Matcher MatchAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
// fill replaces zero values with default values.
|
|
||||||
func (c *Writer2Config) fill() {
|
|
||||||
if c.Properties == nil {
|
|
||||||
c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
|
|
||||||
}
|
|
||||||
if c.DictCap == 0 {
|
|
||||||
c.DictCap = 8 * 1024 * 1024
|
|
||||||
}
|
|
||||||
if c.BufSize == 0 {
|
|
||||||
c.BufSize = 4096
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify checks the Writer2Config for correctness. Zero values will be
|
|
||||||
// replaced by default values.
|
|
||||||
func (c *Writer2Config) Verify() error {
|
|
||||||
c.fill()
|
|
||||||
var err error
|
|
||||||
if c == nil {
|
|
||||||
return errors.New("lzma: WriterConfig is nil")
|
|
||||||
}
|
|
||||||
if c.Properties == nil {
|
|
||||||
return errors.New("lzma: WriterConfig has no Properties set")
|
|
||||||
}
|
|
||||||
if err = c.Properties.verify(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
|
|
||||||
return errors.New("lzma: dictionary capacity is out of range")
|
|
||||||
}
|
|
||||||
if !(maxMatchLen <= c.BufSize) {
|
|
||||||
return errors.New("lzma: lookahead buffer size too small")
|
|
||||||
}
|
|
||||||
if c.Properties.LC+c.Properties.LP > 4 {
|
|
||||||
return errors.New("lzma: sum of lc and lp exceeds 4")
|
|
||||||
}
|
|
||||||
if err = c.Matcher.verify(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writer2 supports the creation of an LZMA2 stream. But note that
|
|
||||||
// written data is buffered, so call Flush or Close to write data to the
|
|
||||||
// underlying writer. The Close method writes the end-of-stream marker
|
|
||||||
// to the stream. So you may be able to concatenate the output of two
|
|
||||||
// writers as long the output of the first writer has only been flushed
|
|
||||||
// but not closed.
|
|
||||||
//
|
|
||||||
// Any change to the fields Properties, DictCap must be done before the
|
|
||||||
// first call to Write, Flush or Close.
|
|
||||||
type Writer2 struct {
|
|
||||||
w io.Writer
|
|
||||||
|
|
||||||
start *state
|
|
||||||
encoder *encoder
|
|
||||||
|
|
||||||
cstate chunkState
|
|
||||||
ctype chunkType
|
|
||||||
|
|
||||||
buf bytes.Buffer
|
|
||||||
lbw LimitedByteWriter
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriter2 creates an LZMA2 chunk sequence writer with the default
|
|
||||||
// parameters and options.
|
|
||||||
func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
|
|
||||||
return Writer2Config{}.NewWriter2(lzma2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriter2 creates a new LZMA2 writer using the given configuration.
|
|
||||||
func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
|
|
||||||
if err = c.Verify(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
w = &Writer2{
|
|
||||||
w: lzma2,
|
|
||||||
start: newState(*c.Properties),
|
|
||||||
cstate: start,
|
|
||||||
ctype: start.defaultChunkType(),
|
|
||||||
}
|
|
||||||
w.buf.Grow(maxCompressed)
|
|
||||||
w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed}
|
|
||||||
m, err := c.Matcher.new(c.DictCap)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
d, err := newEncoderDict(c.DictCap, c.BufSize, m)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return w, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// written returns the number of bytes written to the current chunk
|
|
||||||
func (w *Writer2) written() int {
|
|
||||||
if w.encoder == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return int(w.encoder.Compressed()) + w.encoder.dict.Buffered()
|
|
||||||
}
|
|
||||||
|
|
||||||
// errClosed indicates that the writer is closed.
|
|
||||||
var errClosed = errors.New("lzma: writer closed")
|
|
||||||
|
|
||||||
// Writes data to LZMA2 stream. Note that written data will be buffered.
|
|
||||||
// Use Flush or Close to ensure that data is written to the underlying
|
|
||||||
// writer.
|
|
||||||
func (w *Writer2) Write(p []byte) (n int, err error) {
|
|
||||||
if w.cstate == stop {
|
|
||||||
return 0, errClosed
|
|
||||||
}
|
|
||||||
for n < len(p) {
|
|
||||||
m := maxUncompressed - w.written()
|
|
||||||
if m <= 0 {
|
|
||||||
panic("lzma: maxUncompressed reached")
|
|
||||||
}
|
|
||||||
var q []byte
|
|
||||||
if n+m < len(p) {
|
|
||||||
q = p[n : n+m]
|
|
||||||
} else {
|
|
||||||
q = p[n:]
|
|
||||||
}
|
|
||||||
k, err := w.encoder.Write(q)
|
|
||||||
n += k
|
|
||||||
if err != nil && err != ErrLimit {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if err == ErrLimit || k == m {
|
|
||||||
if err = w.flushChunk(); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeUncompressedChunk writes an uncompressed chunk to the LZMA2
|
|
||||||
// stream.
|
|
||||||
func (w *Writer2) writeUncompressedChunk() error {
|
|
||||||
u := w.encoder.Compressed()
|
|
||||||
if u <= 0 {
|
|
||||||
return errors.New("lzma: can't write empty uncompressed chunk")
|
|
||||||
}
|
|
||||||
if u > maxUncompressed {
|
|
||||||
panic("overrun of uncompressed data limit")
|
|
||||||
}
|
|
||||||
switch w.ctype {
|
|
||||||
case cLRND:
|
|
||||||
w.ctype = cUD
|
|
||||||
default:
|
|
||||||
w.ctype = cU
|
|
||||||
}
|
|
||||||
w.encoder.state = w.start
|
|
||||||
|
|
||||||
header := chunkHeader{
|
|
||||||
ctype: w.ctype,
|
|
||||||
uncompressed: uint32(u - 1),
|
|
||||||
}
|
|
||||||
hdata, err := header.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err = w.w.Write(hdata); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = w.encoder.dict.CopyN(w.w, int(u))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeCompressedChunk writes a compressed chunk to the underlying
|
|
||||||
// writer.
|
|
||||||
func (w *Writer2) writeCompressedChunk() error {
|
|
||||||
if w.ctype == cU || w.ctype == cUD {
|
|
||||||
panic("chunk type uncompressed")
|
|
||||||
}
|
|
||||||
|
|
||||||
u := w.encoder.Compressed()
|
|
||||||
if u <= 0 {
|
|
||||||
return errors.New("writeCompressedChunk: empty chunk")
|
|
||||||
}
|
|
||||||
if u > maxUncompressed {
|
|
||||||
panic("overrun of uncompressed data limit")
|
|
||||||
}
|
|
||||||
c := w.buf.Len()
|
|
||||||
if c <= 0 {
|
|
||||||
panic("no compressed data")
|
|
||||||
}
|
|
||||||
if c > maxCompressed {
|
|
||||||
panic("overrun of compressed data limit")
|
|
||||||
}
|
|
||||||
header := chunkHeader{
|
|
||||||
ctype: w.ctype,
|
|
||||||
uncompressed: uint32(u - 1),
|
|
||||||
compressed: uint16(c - 1),
|
|
||||||
props: w.encoder.state.Properties,
|
|
||||||
}
|
|
||||||
hdata, err := header.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err = w.w.Write(hdata); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = io.Copy(w.w, &w.buf)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// writes a single chunk to the underlying writer.
|
|
||||||
func (w *Writer2) writeChunk() error {
|
|
||||||
u := int(uncompressedHeaderLen + w.encoder.Compressed())
|
|
||||||
c := headerLen(w.ctype) + w.buf.Len()
|
|
||||||
if u < c {
|
|
||||||
return w.writeUncompressedChunk()
|
|
||||||
}
|
|
||||||
return w.writeCompressedChunk()
|
|
||||||
}
|
|
||||||
|
|
||||||
// flushChunk terminates the current chunk. The encoder will be reset
|
|
||||||
// to support the next chunk.
|
|
||||||
func (w *Writer2) flushChunk() error {
|
|
||||||
if w.written() == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
if err = w.encoder.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = w.writeChunk(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.buf.Reset()
|
|
||||||
w.lbw.N = maxCompressed
|
|
||||||
if err = w.encoder.Reopen(&w.lbw); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = w.cstate.next(w.ctype); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.ctype = w.cstate.defaultChunkType()
|
|
||||||
w.start = cloneState(w.encoder.state)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush writes all buffered data out to the underlying stream. This
|
|
||||||
// could result in multiple chunks to be created.
|
|
||||||
func (w *Writer2) Flush() error {
|
|
||||||
if w.cstate == stop {
|
|
||||||
return errClosed
|
|
||||||
}
|
|
||||||
for w.written() > 0 {
|
|
||||||
if err := w.flushChunk(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close terminates the LZMA2 stream with an EOS chunk.
|
|
||||||
func (w *Writer2) Close() error {
|
|
||||||
if w.cstate == stop {
|
|
||||||
return errClosed
|
|
||||||
}
|
|
||||||
if err := w.Flush(); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// write zero byte EOS chunk
|
|
||||||
_, err := w.w.Write([]byte{0})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.cstate = stop
|
|
||||||
return nil
|
|
||||||
}
|
|
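The Writer2 doc comment above notes that written data is buffered into LZMA2 chunks, that Flush keeps the stream open (and concatenable), and that Close appends the end-of-stream marker. A hedged sketch of that usage, assuming the package's NewWriter2 and NewReader2 helpers (NewReader2 is defined elsewhere in the package, not in this hunk):

package main

import (
	"bytes"
	"io"
	"log"
	"strings"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer

	w, err := lzma.NewWriter2(&buf) // LZMA2 chunk-sequence writer with defaults
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(w, strings.NewReader("first part ")); err != nil {
		log.Fatal(err)
	}
	// Flush writes the buffered data out as chunks but keeps the stream
	// open for more data; Close appends the end-of-stream chunk.
	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(w, strings.NewReader("second part")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	r, err := lzma.NewReader2(&buf)
	if err != nil {
		log.Fatal(err)
	}
	var out bytes.Buffer
	if _, err := io.Copy(&out, r); err != nil {
		log.Fatal(err)
	}
	log.Printf("decoded %q", out.String())
}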
117 vendor/github.com/ulikunitz/xz/lzmafilter.go (generated, vendored)
@@ -1,117 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
	"errors"
	"fmt"
	"io"

	"github.com/ulikunitz/xz/lzma"
)

// LZMA filter constants.
const (
	lzmaFilterID  = 0x21
	lzmaFilterLen = 3
)

// lzmaFilter declares the LZMA2 filter information stored in an xz
// block header.
type lzmaFilter struct {
	dictCap int64
}

// String returns a representation of the LZMA filter.
func (f lzmaFilter) String() string {
	return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
}

// id returns the ID for the LZMA2 filter.
func (f lzmaFilter) id() uint64 { return lzmaFilterID }

// MarshalBinary converts the lzmaFilter in its encoded representation.
func (f lzmaFilter) MarshalBinary() (data []byte, err error) {
	c := lzma.EncodeDictCap(f.dictCap)
	return []byte{lzmaFilterID, 1, c}, nil
}

// UnmarshalBinary unmarshals the given data representation of the LZMA2
// filter.
func (f *lzmaFilter) UnmarshalBinary(data []byte) error {
	if len(data) != lzmaFilterLen {
		return errors.New("xz: data for LZMA2 filter has wrong length")
	}
	if data[0] != lzmaFilterID {
		return errors.New("xz: wrong LZMA2 filter id")
	}
	if data[1] != 1 {
		return errors.New("xz: wrong LZMA2 filter size")
	}
	dc, err := lzma.DecodeDictCap(data[2])
	if err != nil {
		return errors.New("xz: wrong LZMA2 dictionary size property")
	}

	f.dictCap = dc
	return nil
}

// reader creates a new reader for the LZMA2 filter.
func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader,
	err error) {

	config := new(lzma.Reader2Config)
	if c != nil {
		config.DictCap = c.DictCap
	}
	dc := int(f.dictCap)
	if dc < 1 {
		return nil, errors.New("xz: LZMA2 filter parameter " +
			"dictionary capacity overflow")
	}
	if dc > config.DictCap {
		config.DictCap = dc
	}

	fr, err = config.NewReader2(r)
	if err != nil {
		return nil, err
	}
	return fr, nil
}

// writeCloser creates a io.WriteCloser for the LZMA2 filter.
func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig,
) (fw io.WriteCloser, err error) {
	config := new(lzma.Writer2Config)
	if c != nil {
		*config = lzma.Writer2Config{
			Properties: c.Properties,
			DictCap:    c.DictCap,
			BufSize:    c.BufSize,
			Matcher:    c.Matcher,
		}
	}

	dc := int(f.dictCap)
	if dc < 1 {
		return nil, errors.New("xz: LZMA2 filter parameter " +
			"dictionary capacity overflow")
	}
	if dc > config.DictCap {
		config.DictCap = dc
	}

	fw, err = config.NewWriter2(w)
	if err != nil {
		return nil, err
	}
	return fw, nil
}

// last returns true, because an LZMA2 filter must be the last filter in
// the filter list.
func (f lzmaFilter) last() bool { return true }
5 vendor/github.com/ulikunitz/xz/make-docs (generated, vendored)
@@ -1,5 +0,0 @@
#!/bin/sh

set -x
pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md
pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md
373 vendor/github.com/ulikunitz/xz/reader.go (generated, vendored)
@@ -1,373 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package xz supports the compression and decompression of xz files. It
|
|
||||||
// supports version 1.0.4 of the specification without the non-LZMA2
|
|
||||||
// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt
|
|
||||||
package xz
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/ulikunitz/xz/internal/xlog"
|
|
||||||
"github.com/ulikunitz/xz/lzma"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ReaderConfig defines the parameters for the xz reader. The
|
|
||||||
// SingleStream parameter requests the reader to assume that the
|
|
||||||
// underlying stream contains only a single stream.
|
|
||||||
type ReaderConfig struct {
|
|
||||||
DictCap int
|
|
||||||
SingleStream bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// fill replaces all zero values with their default values.
|
|
||||||
func (c *ReaderConfig) fill() {
|
|
||||||
if c.DictCap == 0 {
|
|
||||||
c.DictCap = 8 * 1024 * 1024
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify checks the reader parameters for Validity. Zero values will be
|
|
||||||
// replaced by default values.
|
|
||||||
func (c *ReaderConfig) Verify() error {
|
|
||||||
if c == nil {
|
|
||||||
return errors.New("xz: reader parameters are nil")
|
|
||||||
}
|
|
||||||
lc := lzma.Reader2Config{DictCap: c.DictCap}
|
|
||||||
if err := lc.Verify(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reader supports the reading of one or multiple xz streams.
|
|
||||||
type Reader struct {
|
|
||||||
ReaderConfig
|
|
||||||
|
|
||||||
xz io.Reader
|
|
||||||
sr *streamReader
|
|
||||||
}
|
|
||||||
|
|
||||||
// streamReader decodes a single xz stream
|
|
||||||
type streamReader struct {
|
|
||||||
ReaderConfig
|
|
||||||
|
|
||||||
xz io.Reader
|
|
||||||
br *blockReader
|
|
||||||
newHash func() hash.Hash
|
|
||||||
h header
|
|
||||||
index []record
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReader creates a new xz reader using the default parameters.
|
|
||||||
// The function reads and checks the header of the first XZ stream. The
|
|
||||||
// reader will process multiple streams including padding.
|
|
||||||
func NewReader(xz io.Reader) (r *Reader, err error) {
|
|
||||||
return ReaderConfig{}.NewReader(xz)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReader creates an xz stream reader. The created reader will be
|
|
||||||
// able to process multiple streams and padding unless a SingleStream
|
|
||||||
// has been set in the reader configuration c.
|
|
||||||
func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) {
|
|
||||||
if err = c.Verify(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r = &Reader{
|
|
||||||
ReaderConfig: c,
|
|
||||||
xz: xz,
|
|
||||||
}
|
|
||||||
if r.sr, err = c.newStreamReader(xz); err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var errUnexpectedData = errors.New("xz: unexpected data after stream")
|
|
||||||
|
|
||||||
// Read reads uncompressed data from the stream.
|
|
||||||
func (r *Reader) Read(p []byte) (n int, err error) {
|
|
||||||
for n < len(p) {
|
|
||||||
if r.sr == nil {
|
|
||||||
if r.SingleStream {
|
|
||||||
data := make([]byte, 1)
|
|
||||||
_, err = io.ReadFull(r.xz, data)
|
|
||||||
if err != io.EOF {
|
|
||||||
return n, errUnexpectedData
|
|
||||||
}
|
|
||||||
return n, io.EOF
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
r.sr, err = r.ReaderConfig.newStreamReader(r.xz)
|
|
||||||
if err != errPadding {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
k, err := r.sr.Read(p[n:])
|
|
||||||
n += k
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
r.sr = nil
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var errPadding = errors.New("xz: padding (4 zero bytes) encountered")
|
|
||||||
|
|
||||||
// newStreamReader creates a new xz stream reader using the given configuration
|
|
||||||
// parameters. NewReader reads and checks the header of the xz stream.
|
|
||||||
func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) {
|
|
||||||
if err = c.Verify(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
data := make([]byte, HeaderLen)
|
|
||||||
if _, err := io.ReadFull(xz, data[:4]); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) {
|
|
||||||
return nil, errPadding
|
|
||||||
}
|
|
||||||
if _, err = io.ReadFull(xz, data[4:]); err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r = &streamReader{
|
|
||||||
ReaderConfig: c,
|
|
||||||
xz: xz,
|
|
||||||
index: make([]record, 0, 4),
|
|
||||||
}
|
|
||||||
if err = r.h.UnmarshalBinary(data); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
xlog.Debugf("xz header %s", r.h)
|
|
||||||
if r.newHash, err = newHashFunc(r.h.flags); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// errIndex indicates an error with the xz file index.
|
|
||||||
var errIndex = errors.New("xz: error in xz file index")
|
|
||||||
|
|
||||||
// readTail reads the index body and the xz footer.
|
|
||||||
func (r *streamReader) readTail() error {
|
|
||||||
index, n, err := readIndexBody(r.xz)
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(index) != len(r.index) {
|
|
||||||
return fmt.Errorf("xz: index length is %d; want %d",
|
|
||||||
len(index), len(r.index))
|
|
||||||
}
|
|
||||||
for i, rec := range r.index {
|
|
||||||
if rec != index[i] {
|
|
||||||
return fmt.Errorf("xz: record %d is %v; want %v",
|
|
||||||
i, rec, index[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p := make([]byte, footerLen)
|
|
||||||
if _, err = io.ReadFull(r.xz, p); err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var f footer
|
|
||||||
if err = f.UnmarshalBinary(p); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
xlog.Debugf("xz footer %s", f)
|
|
||||||
if f.flags != r.h.flags {
|
|
||||||
return errors.New("xz: footer flags incorrect")
|
|
||||||
}
|
|
||||||
if f.indexSize != int64(n)+1 {
|
|
||||||
return errors.New("xz: index size in footer wrong")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads actual data from the xz stream.
|
|
||||||
func (r *streamReader) Read(p []byte) (n int, err error) {
|
|
||||||
for n < len(p) {
|
|
||||||
if r.br == nil {
|
|
||||||
bh, hlen, err := readBlockHeader(r.xz)
|
|
||||||
if err != nil {
|
|
||||||
if err == errIndexIndicator {
|
|
||||||
if err = r.readTail(); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
return n, io.EOF
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
xlog.Debugf("block %v", *bh)
|
|
||||||
r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh,
|
|
||||||
hlen, r.newHash())
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
k, err := r.br.Read(p[n:])
|
|
||||||
n += k
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
r.index = append(r.index, r.br.record())
|
|
||||||
r.br = nil
|
|
||||||
} else {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// countingReader is a reader that counts the bytes read.
|
|
||||||
type countingReader struct {
|
|
||||||
r io.Reader
|
|
||||||
n int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads data from the wrapped reader and adds it to the n field.
|
|
||||||
func (lr *countingReader) Read(p []byte) (n int, err error) {
|
|
||||||
n, err = lr.r.Read(p)
|
|
||||||
lr.n += int64(n)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// blockReader supports the reading of a block.
|
|
||||||
type blockReader struct {
|
|
||||||
lxz countingReader
|
|
||||||
header *blockHeader
|
|
||||||
headerLen int
|
|
||||||
n int64
|
|
||||||
hash hash.Hash
|
|
||||||
r io.Reader
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// newBlockReader creates a new block reader.
|
|
||||||
func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader,
|
|
||||||
hlen int, hash hash.Hash) (br *blockReader, err error) {
|
|
||||||
|
|
||||||
br = &blockReader{
|
|
||||||
lxz: countingReader{r: xz},
|
|
||||||
header: h,
|
|
||||||
headerLen: hlen,
|
|
||||||
hash: hash,
|
|
||||||
}
|
|
||||||
|
|
||||||
fr, err := c.newFilterReader(&br.lxz, h.filters)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
br.r = io.TeeReader(fr, br.hash)
|
|
||||||
|
|
||||||
return br, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// uncompressedSize returns the uncompressed size of the block.
|
|
||||||
func (br *blockReader) uncompressedSize() int64 {
|
|
||||||
return br.n
|
|
||||||
}
|
|
||||||
|
|
||||||
// compressedSize returns the compressed size of the block.
|
|
||||||
func (br *blockReader) compressedSize() int64 {
|
|
||||||
return br.lxz.n
|
|
||||||
}
|
|
||||||
|
|
||||||
// unpaddedSize computes the unpadded size for the block.
|
|
||||||
func (br *blockReader) unpaddedSize() int64 {
|
|
||||||
n := int64(br.headerLen)
|
|
||||||
n += br.compressedSize()
|
|
||||||
n += int64(br.hash.Size())
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// record returns the index record for the current block.
|
|
||||||
func (br *blockReader) record() record {
|
|
||||||
return record{br.unpaddedSize(), br.uncompressedSize()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// errBlockSize indicates that the size of the block in the block header
|
|
||||||
// is wrong.
|
|
||||||
var errBlockSize = errors.New("xz: wrong uncompressed size for block")
|
|
||||||
|
|
||||||
// Read reads data from the block.
|
|
||||||
func (br *blockReader) Read(p []byte) (n int, err error) {
|
|
||||||
n, err = br.r.Read(p)
|
|
||||||
br.n += int64(n)
|
|
||||||
|
|
||||||
u := br.header.uncompressedSize
|
|
||||||
if u >= 0 && br.uncompressedSize() > u {
|
|
||||||
return n, errors.New("xz: wrong uncompressed size for block")
|
|
||||||
}
|
|
||||||
c := br.header.compressedSize
|
|
||||||
if c >= 0 && br.compressedSize() > c {
|
|
||||||
return n, errors.New("xz: wrong compressed size for block")
|
|
||||||
}
|
|
||||||
if err != io.EOF {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if br.uncompressedSize() < u || br.compressedSize() < c {
|
|
||||||
return n, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
|
|
||||||
s := br.hash.Size()
|
|
||||||
k := padLen(br.lxz.n)
|
|
||||||
q := make([]byte, k+s, k+2*s)
|
|
||||||
if _, err = io.ReadFull(br.lxz.r, q); err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if !allZeros(q[:k]) {
|
|
||||||
return n, errors.New("xz: non-zero block padding")
|
|
||||||
}
|
|
||||||
checkSum := q[k:]
|
|
||||||
computedSum := br.hash.Sum(checkSum[s:])
|
|
||||||
if !bytes.Equal(checkSum, computedSum) {
|
|
||||||
return n, errors.New("xz: checksum error for block")
|
|
||||||
}
|
|
||||||
return n, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader,
|
|
||||||
err error) {
|
|
||||||
|
|
||||||
if err = verifyFilters(f); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fr = r
|
|
||||||
for i := len(f) - 1; i >= 0; i-- {
|
|
||||||
fr, err = f[i].reader(fr, c)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fr, nil
|
|
||||||
}
|
|
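For reference, a small sketch of how the Reader defined in the file above is normally used to decompress an xz stream; the input file name is hypothetical:

package main

import (
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	f, err := os.Open("example.xz") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader checks the stream header and then decodes block by block;
	// the index and footer are verified when the last block has been read.
	r, err := xz.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}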
386 vendor/github.com/ulikunitz/xz/writer.go (generated, vendored)
@@ -1,386 +0,0 @@
|
|||||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package xz
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/ulikunitz/xz/lzma"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WriterConfig describe the parameters for an xz writer.
|
|
||||||
type WriterConfig struct {
|
|
||||||
Properties *lzma.Properties
|
|
||||||
DictCap int
|
|
||||||
BufSize int
|
|
||||||
BlockSize int64
|
|
||||||
// checksum method: CRC32, CRC64 or SHA256
|
|
||||||
CheckSum byte
|
|
||||||
// match algorithm
|
|
||||||
Matcher lzma.MatchAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
// fill replaces zero values with default values.
|
|
||||||
func (c *WriterConfig) fill() {
|
|
||||||
if c.Properties == nil {
|
|
||||||
c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2}
|
|
||||||
}
|
|
||||||
if c.DictCap == 0 {
|
|
||||||
c.DictCap = 8 * 1024 * 1024
|
|
||||||
}
|
|
||||||
if c.BufSize == 0 {
|
|
||||||
c.BufSize = 4096
|
|
||||||
}
|
|
||||||
if c.BlockSize == 0 {
|
|
||||||
c.BlockSize = maxInt64
|
|
||||||
}
|
|
||||||
if c.CheckSum == 0 {
|
|
||||||
c.CheckSum = CRC64
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify checks the configuration for errors. Zero values will be
|
|
||||||
// replaced by default values.
|
|
||||||
func (c *WriterConfig) Verify() error {
|
|
||||||
if c == nil {
|
|
||||||
return errors.New("xz: writer configuration is nil")
|
|
||||||
}
|
|
||||||
c.fill()
|
|
||||||
lc := lzma.Writer2Config{
|
|
||||||
Properties: c.Properties,
|
|
||||||
DictCap: c.DictCap,
|
|
||||||
BufSize: c.BufSize,
|
|
||||||
Matcher: c.Matcher,
|
|
||||||
}
|
|
||||||
if err := lc.Verify(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if c.BlockSize <= 0 {
|
|
||||||
return errors.New("xz: block size out of range")
|
|
||||||
}
|
|
||||||
if err := verifyFlags(c.CheckSum); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// filters creates the filter list for the given parameters.
|
|
||||||
func (c *WriterConfig) filters() []filter {
|
|
||||||
return []filter{&lzmaFilter{int64(c.DictCap)}}
|
|
||||||
}
|
|
||||||
|
|
||||||
// maxInt64 defines the maximum 64-bit signed integer.
|
|
||||||
const maxInt64 = 1<<63 - 1
|
|
||||||
|
|
||||||
// verifyFilters checks the filter list for the length and the right
|
|
||||||
// sequence of filters.
|
|
||||||
func verifyFilters(f []filter) error {
|
|
||||||
if len(f) == 0 {
|
|
||||||
return errors.New("xz: no filters")
|
|
||||||
}
|
|
||||||
if len(f) > 4 {
|
|
||||||
return errors.New("xz: more than four filters")
|
|
||||||
}
|
|
||||||
for _, g := range f[:len(f)-1] {
|
|
||||||
if g.last() {
|
|
||||||
return errors.New("xz: last filter is not last")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !f[len(f)-1].last() {
|
|
||||||
return errors.New("xz: wrong last filter")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newFilterWriteCloser converts a filter list into a WriteCloser that
|
|
||||||
// can be used by a blockWriter.
|
|
||||||
func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) {
|
|
||||||
if err = verifyFilters(f); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
fw = nopWriteCloser(w)
|
|
||||||
for i := len(f) - 1; i >= 0; i-- {
|
|
||||||
fw, err = f[i].writeCloser(fw, c)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nopWCloser implements a WriteCloser with a Close method not doing
|
|
||||||
// anything.
|
|
||||||
type nopWCloser struct {
|
|
||||||
io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close returns nil and doesn't do anything else.
|
|
||||||
func (c nopWCloser) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nopWriteCloser converts the Writer into a WriteCloser with a Close
|
|
||||||
// function that does nothing beside returning nil.
|
|
||||||
func nopWriteCloser(w io.Writer) io.WriteCloser {
|
|
||||||
return nopWCloser{w}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writer compresses data written to it. It is an io.WriteCloser.
|
|
||||||
type Writer struct {
|
|
||||||
WriterConfig
|
|
||||||
|
|
||||||
xz io.Writer
|
|
||||||
bw *blockWriter
|
|
||||||
newHash func() hash.Hash
|
|
||||||
h header
|
|
||||||
index []record
|
|
||||||
closed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// newBlockWriter creates a new block writer writes the header out.
|
|
||||||
func (w *Writer) newBlockWriter() error {
|
|
||||||
var err error
|
|
||||||
w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = w.bw.writeHeader(w.xz); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// closeBlockWriter closes a block writer and records the sizes in the
|
|
||||||
// index.
|
|
||||||
func (w *Writer) closeBlockWriter() error {
|
|
||||||
var err error
|
|
||||||
if err = w.bw.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.index = append(w.index, w.bw.record())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriter creates a new xz writer using default parameters.
|
|
||||||
func NewWriter(xz io.Writer) (w *Writer, err error) {
|
|
||||||
return WriterConfig{}.NewWriter(xz)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriter creates a new Writer using the given configuration parameters.
|
|
||||||
func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
|
|
||||||
if err = c.Verify(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
w = &Writer{
|
|
||||||
WriterConfig: c,
|
|
||||||
xz: xz,
|
|
||||||
h: header{c.CheckSum},
|
|
||||||
index: make([]record, 0, 4),
|
|
||||||
}
|
|
||||||
if w.newHash, err = newHashFunc(c.CheckSum); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
data, err := w.h.MarshalBinary()
|
|
||||||
if _, err = xz.Write(data); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err = w.newBlockWriter(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return w, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write compresses the uncompressed data provided.
|
|
||||||
func (w *Writer) Write(p []byte) (n int, err error) {
|
|
||||||
if w.closed {
|
|
||||||
return 0, errClosed
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
k, err := w.bw.Write(p[n:])
|
|
||||||
n += k
|
|
||||||
if err != errNoSpace {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if err = w.closeBlockWriter(); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
if err = w.newBlockWriter(); err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the writer and adds the footer to the Writer. Close
|
|
||||||
// doesn't close the underlying writer.
|
|
||||||
func (w *Writer) Close() error {
|
|
||||||
if w.closed {
|
|
||||||
return errClosed
|
|
||||||
}
|
|
||||||
w.closed = true
|
|
||||||
var err error
|
|
||||||
if err = w.closeBlockWriter(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
f := footer{flags: w.h.flags}
|
|
||||||
if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
data, err := f.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err = w.xz.Write(data); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// countingWriter is a writer that counts all data written to it.
|
|
||||||
type countingWriter struct {
|
|
||||||
w io.Writer
|
|
||||||
n int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes data to the countingWriter.
|
|
||||||
func (cw *countingWriter) Write(p []byte) (n int, err error) {
|
|
||||||
n, err = cw.w.Write(p)
|
|
||||||
cw.n += int64(n)
|
|
||||||
if err == nil && cw.n < 0 {
|
|
||||||
return n, errors.New("xz: counter overflow")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// blockWriter is writes a single block.
|
|
||||||
type blockWriter struct {
|
|
||||||
cxz countingWriter
|
|
||||||
// mw combines io.WriteCloser w and the hash.
|
|
||||||
mw io.Writer
|
|
||||||
w io.WriteCloser
|
|
||||||
n int64
|
|
||||||
blockSize int64
|
|
||||||
closed bool
|
|
||||||
headerLen int
|
|
||||||
|
|
||||||
filters []filter
|
|
||||||
hash hash.Hash
|
|
||||||
}
|
|
||||||
|
|
||||||
// newBlockWriter creates a new block writer.
|
|
||||||
func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
|
|
||||||
bw = &blockWriter{
|
|
||||||
cxz: countingWriter{w: xz},
|
|
||||||
blockSize: c.BlockSize,
|
|
||||||
filters: c.filters(),
|
|
||||||
hash: hash,
|
|
||||||
}
|
|
||||||
bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
bw.mw = io.MultiWriter(bw.w, bw.hash)
|
|
||||||
return bw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeHeader writes the header. If the function is called after Close
|
|
||||||
// the commpressedSize and uncompressedSize fields will be filled.
|
|
||||||
func (bw *blockWriter) writeHeader(w io.Writer) error {
|
|
||||||
h := blockHeader{
|
|
||||||
compressedSize: -1,
|
|
||||||
uncompressedSize: -1,
|
|
||||||
filters: bw.filters,
|
|
||||||
}
|
|
||||||
if bw.closed {
|
|
||||||
h.compressedSize = bw.compressedSize()
|
|
||||||
h.uncompressedSize = bw.uncompressedSize()
|
|
||||||
}
|
|
||||||
data, err := h.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err = w.Write(data); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
bw.headerLen = len(data)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// compressed size returns the amount of data written to the underlying
|
|
||||||
// stream.
|
|
||||||
func (bw *blockWriter) compressedSize() int64 {
|
|
||||||
return bw.cxz.n
|
|
||||||
}
|
|
||||||
|
|
||||||
// uncompressedSize returns the number of data written to the
|
|
||||||
// blockWriter
|
|
||||||
func (bw *blockWriter) uncompressedSize() int64 {
|
|
||||||
return bw.n
|
|
||||||
}
|
|
||||||
|
|
||||||
// unpaddedSize returns the sum of the header length, the uncompressed
|
|
||||||
// size of the block and the hash size.
|
|
||||||
func (bw *blockWriter) unpaddedSize() int64 {
|
|
||||||
if bw.headerLen <= 0 {
|
|
||||||
panic("xz: block header not written")
|
|
||||||
}
|
|
||||||
n := int64(bw.headerLen)
|
|
||||||
n += bw.compressedSize()
|
|
||||||
n += int64(bw.hash.Size())
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// record returns the record for the current stream. Call Close before
|
|
||||||
// calling this method.
|
|
||||||
func (bw *blockWriter) record() record {
|
|
||||||
return record{bw.unpaddedSize(), bw.uncompressedSize()}
|
|
||||||
}
|
|
||||||
|
|
||||||
var errClosed = errors.New("xz: writer already closed")
|
|
||||||
|
|
||||||
var errNoSpace = errors.New("xz: no space")
|
|
||||||
|
|
||||||
// Write writes uncompressed data to the block writer.
|
|
||||||
func (bw *blockWriter) Write(p []byte) (n int, err error) {
|
|
||||||
if bw.closed {
|
|
||||||
return 0, errClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
t := bw.blockSize - bw.n
|
|
||||||
if int64(len(p)) > t {
|
|
||||||
err = errNoSpace
|
|
||||||
p = p[:t]
|
|
||||||
}
|
|
||||||
|
|
||||||
var werr error
|
|
||||||
n, werr = bw.mw.Write(p)
|
|
||||||
bw.n += int64(n)
|
|
||||||
if werr != nil {
|
|
||||||
return n, werr
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the writer.
|
|
||||||
func (bw *blockWriter) Close() error {
|
|
||||||
if bw.closed {
|
|
||||||
return errClosed
|
|
||||||
}
|
|
||||||
bw.closed = true
|
|
||||||
if err := bw.w.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s := bw.hash.Size()
|
|
||||||
k := padLen(bw.cxz.n)
|
|
||||||
p := make([]byte, k+s)
|
|
||||||
bw.hash.Sum(p[k:k])
|
|
||||||
if _, err := bw.cxz.w.Write(p); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
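Correspondingly, a minimal sketch of driving the xz Writer above with the zero-value configuration; the output file name is hypothetical:

package main

import (
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	out, err := os.Create("example.xz") // hypothetical output file
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// The zero WriterConfig is filled with defaults by fill() above
	// (8 MiB dictionary, CRC64 checksum, unlimited block size).
	w, err := xz.NewWriter(out)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(w, os.Stdin); err != nil {
		log.Fatal(err)
	}
	// Close finishes the current block and writes the index and footer,
	// but does not close the underlying file.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}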
12 vendor/gopkg.in/yaml.v2/.travis.yml (generated, vendored)
@@ -1,12 +0,0 @@
language: go

go:
    - 1.4
    - 1.5
    - 1.6
    - 1.7
    - 1.8
    - 1.9
    - tip

go_import_path: gopkg.in/yaml.v2
201 vendor/gopkg.in/yaml.v2/LICENSE (generated, vendored)
@@ -1,201 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright {yyyy} {name of copyright owner}
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
31 vendor/gopkg.in/yaml.v2/LICENSE.libyaml (generated, vendored)
@@ -1,31 +0,0 @@
|
|||||||
The following files were ported to Go from C files of libyaml, and thus
|
|
||||||
are still covered by their original copyright and license:
|
|
||||||
|
|
||||||
apic.go
|
|
||||||
emitterc.go
|
|
||||||
parserc.go
|
|
||||||
readerc.go
|
|
||||||
scannerc.go
|
|
||||||
writerc.go
|
|
||||||
yamlh.go
|
|
||||||
yamlprivateh.go
|
|
||||||
|
|
||||||
Copyright (c) 2006 Kirill Simonov
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|
||||||
this software and associated documentation files (the "Software"), to deal in
|
|
||||||
the Software without restriction, including without limitation the rights to
|
|
||||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
|
||||||
of the Software, and to permit persons to whom the Software is furnished to do
|
|
||||||
so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
13 vendor/gopkg.in/yaml.v2/NOTICE (generated, vendored)
@@ -1,13 +0,0 @@
Copyright 2011-2016 Canonical Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
133
vendor/gopkg.in/yaml.v2/README.md
generated
vendored
@ -1,133 +0,0 @@
# YAML support for the Go language

Introduction
------------

The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.

Compatibility
-------------

The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.

Installation and usage
----------------------

The import path for the package is *gopkg.in/yaml.v2*.

To install it, run:

    go get gopkg.in/yaml.v2

API documentation
-----------------

If opened in a browser, the import path itself leads to the API documentation:

  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)

API stability
-------------

The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).


License
-------

The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.


Example
-------

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

var data = `
a: Easy!
b:
  c: 2
  d: [3, 4]
`

// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
	A string
	B struct {
		RenamedC int   `yaml:"c"`
		D        []int `yaml:",flow"`
	}
}

func main() {
	t := T{}

	err := yaml.Unmarshal([]byte(data), &t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t:\n%v\n\n", t)

	d, err := yaml.Marshal(&t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t dump:\n%s\n\n", string(d))

	m := make(map[interface{}]interface{})

	err = yaml.Unmarshal([]byte(data), &m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m:\n%v\n\n", m)

	d, err = yaml.Marshal(&m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```

This example will generate the following output:

```
--- t:
{Easy! {2 [3 4]}}

--- t dump:
a: Easy!
b:
  c: 2
  d: [3, 4]


--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]

--- m dump:
a: Easy!
b:
  c: 2
  d:
  - 3
  - 4
```
739
vendor/gopkg.in/yaml.v2/apic.go
generated
vendored
@ -1,739 +0,0 @@
package yaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
|
|
||||||
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
|
|
||||||
|
|
||||||
// Check if we can move the queue at the beginning of the buffer.
|
|
||||||
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
|
|
||||||
if parser.tokens_head != len(parser.tokens) {
|
|
||||||
copy(parser.tokens, parser.tokens[parser.tokens_head:])
|
|
||||||
}
|
|
||||||
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
|
|
||||||
parser.tokens_head = 0
|
|
||||||
}
|
|
||||||
parser.tokens = append(parser.tokens, *token)
|
|
||||||
if pos < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
|
|
||||||
parser.tokens[parser.tokens_head+pos] = *token
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new parser object.
|
|
||||||
func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
|
||||||
*parser = yaml_parser_t{
|
|
||||||
raw_buffer: make([]byte, 0, input_raw_buffer_size),
|
|
||||||
buffer: make([]byte, 0, input_buffer_size),
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Destroy a parser object.
|
|
||||||
func yaml_parser_delete(parser *yaml_parser_t) {
|
|
||||||
*parser = yaml_parser_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// String read handler.
|
|
||||||
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
|
||||||
if parser.input_pos == len(parser.input) {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
n = copy(buffer, parser.input[parser.input_pos:])
|
|
||||||
parser.input_pos += n
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reader read handler.
|
|
||||||
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
|
||||||
return parser.input_reader.Read(buffer)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a string input.
|
|
||||||
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("must set the input source only once")
|
|
||||||
}
|
|
||||||
parser.read_handler = yaml_string_read_handler
|
|
||||||
parser.input = input
|
|
||||||
parser.input_pos = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a file input.
|
|
||||||
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("must set the input source only once")
|
|
||||||
}
|
|
||||||
parser.read_handler = yaml_reader_read_handler
|
|
||||||
parser.input_reader = r
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the source encoding.
|
|
||||||
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
|
||||||
if parser.encoding != yaml_ANY_ENCODING {
|
|
||||||
panic("must set the encoding only once")
|
|
||||||
}
|
|
||||||
parser.encoding = encoding
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new emitter object.
|
|
||||||
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
|
|
||||||
*emitter = yaml_emitter_t{
|
|
||||||
buffer: make([]byte, output_buffer_size),
|
|
||||||
raw_buffer: make([]byte, 0, output_raw_buffer_size),
|
|
||||||
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
|
|
||||||
events: make([]yaml_event_t, 0, initial_queue_size),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Destroy an emitter object.
|
|
||||||
func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
|
||||||
*emitter = yaml_emitter_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// String write handler.
|
|
||||||
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
|
||||||
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// yaml_writer_write_handler uses emitter.output_writer to write the
|
|
||||||
// emitted text.
|
|
||||||
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
|
||||||
_, err := emitter.output_writer.Write(buffer)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a string output.
|
|
||||||
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("must set the output target only once")
|
|
||||||
}
|
|
||||||
emitter.write_handler = yaml_string_write_handler
|
|
||||||
emitter.output_buffer = output_buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set a file output.
|
|
||||||
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("must set the output target only once")
|
|
||||||
}
|
|
||||||
emitter.write_handler = yaml_writer_write_handler
|
|
||||||
emitter.output_writer = w
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the output encoding.
|
|
||||||
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
|
||||||
if emitter.encoding != yaml_ANY_ENCODING {
|
|
||||||
panic("must set the output encoding only once")
|
|
||||||
}
|
|
||||||
emitter.encoding = encoding
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the canonical output style.
|
|
||||||
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
|
|
||||||
emitter.canonical = canonical
|
|
||||||
}
|
|
||||||
|
|
||||||
//// Set the indentation increment.
|
|
||||||
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
|
||||||
if indent < 2 || indent > 9 {
|
|
||||||
indent = 2
|
|
||||||
}
|
|
||||||
emitter.best_indent = indent
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the preferred line width.
|
|
||||||
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
|
||||||
if width < 0 {
|
|
||||||
width = -1
|
|
||||||
}
|
|
||||||
emitter.best_width = width
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set if unescaped non-ASCII characters are allowed.
|
|
||||||
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
|
|
||||||
emitter.unicode = unicode
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the preferred line break character.
|
|
||||||
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
|
|
||||||
emitter.line_break = line_break
|
|
||||||
}
|
|
||||||
|
|
||||||
///*
|
|
||||||
// * Destroy a token object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(void)
|
|
||||||
//yaml_token_delete(yaml_token_t *token)
|
|
||||||
//{
|
|
||||||
// assert(token); // Non-NULL token object expected.
|
|
||||||
//
|
|
||||||
// switch (token.type)
|
|
||||||
// {
|
|
||||||
// case YAML_TAG_DIRECTIVE_TOKEN:
|
|
||||||
// yaml_free(token.data.tag_directive.handle);
|
|
||||||
// yaml_free(token.data.tag_directive.prefix);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case YAML_ALIAS_TOKEN:
|
|
||||||
// yaml_free(token.data.alias.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case YAML_ANCHOR_TOKEN:
|
|
||||||
// yaml_free(token.data.anchor.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case YAML_TAG_TOKEN:
|
|
||||||
// yaml_free(token.data.tag.handle);
|
|
||||||
// yaml_free(token.data.tag.suffix);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case YAML_SCALAR_TOKEN:
|
|
||||||
// yaml_free(token.data.scalar.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// default:
|
|
||||||
// break;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// memset(token, 0, sizeof(yaml_token_t));
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
///*
|
|
||||||
// * Check if a string is a valid UTF-8 sequence.
|
|
||||||
// *
|
|
||||||
// * Check 'reader.c' for more details on UTF-8 encoding.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//static int
|
|
||||||
//yaml_check_utf8(yaml_char_t *start, size_t length)
|
|
||||||
//{
|
|
||||||
// yaml_char_t *end = start+length;
|
|
||||||
// yaml_char_t *pointer = start;
|
|
||||||
//
|
|
||||||
// while (pointer < end) {
|
|
||||||
// unsigned char octet;
|
|
||||||
// unsigned int width;
|
|
||||||
// unsigned int value;
|
|
||||||
// size_t k;
|
|
||||||
//
|
|
||||||
// octet = pointer[0];
|
|
||||||
// width = (octet & 0x80) == 0x00 ? 1 :
|
|
||||||
// (octet & 0xE0) == 0xC0 ? 2 :
|
|
||||||
// (octet & 0xF0) == 0xE0 ? 3 :
|
|
||||||
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
|
||||||
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
|
||||||
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
|
||||||
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
|
||||||
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
|
||||||
// if (!width) return 0;
|
|
||||||
// if (pointer+width > end) return 0;
|
|
||||||
// for (k = 1; k < width; k ++) {
|
|
||||||
// octet = pointer[k];
|
|
||||||
// if ((octet & 0xC0) != 0x80) return 0;
|
|
||||||
// value = (value << 6) + (octet & 0x3F);
|
|
||||||
// }
|
|
||||||
// if (!((width == 1) ||
|
|
||||||
// (width == 2 && value >= 0x80) ||
|
|
||||||
// (width == 3 && value >= 0x800) ||
|
|
||||||
// (width == 4 && value >= 0x10000))) return 0;
|
|
||||||
//
|
|
||||||
// pointer += width;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
|
|
||||||
// Create STREAM-START.
|
|
||||||
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
typ: yaml_STREAM_START_EVENT,
|
|
||||||
encoding: encoding,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create STREAM-END.
|
|
||||||
func yaml_stream_end_event_initialize(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
typ: yaml_STREAM_END_EVENT,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create DOCUMENT-START.
|
|
||||||
func yaml_document_start_event_initialize(
|
|
||||||
event *yaml_event_t,
|
|
||||||
version_directive *yaml_version_directive_t,
|
|
||||||
tag_directives []yaml_tag_directive_t,
|
|
||||||
implicit bool,
|
|
||||||
) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
typ: yaml_DOCUMENT_START_EVENT,
|
|
||||||
version_directive: version_directive,
|
|
||||||
tag_directives: tag_directives,
|
|
||||||
implicit: implicit,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create DOCUMENT-END.
|
|
||||||
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
typ: yaml_DOCUMENT_END_EVENT,
|
|
||||||
implicit: implicit,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
///*
|
|
||||||
// * Create ALIAS.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(int)
|
|
||||||
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
|
|
||||||
//{
|
|
||||||
// mark yaml_mark_t = { 0, 0, 0 }
|
|
||||||
// anchor_copy *yaml_char_t = NULL
|
|
||||||
//
|
|
||||||
// assert(event) // Non-NULL event object is expected.
|
|
||||||
// assert(anchor) // Non-NULL anchor is expected.
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
|
|
||||||
//
|
|
||||||
// anchor_copy = yaml_strdup(anchor)
|
|
||||||
// if (!anchor_copy)
|
|
||||||
// return 0
|
|
||||||
//
|
|
||||||
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
|
|
||||||
//
|
|
||||||
// return 1
|
|
||||||
//}
|
|
||||||
|
|
||||||
// Create SCALAR.
|
|
||||||
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
typ: yaml_SCALAR_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
tag: tag,
|
|
||||||
value: value,
|
|
||||||
implicit: plain_implicit,
|
|
||||||
quoted_implicit: quoted_implicit,
|
|
||||||
style: yaml_style_t(style),
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create SEQUENCE-START.
|
|
||||||
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
typ: yaml_SEQUENCE_START_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
tag: tag,
|
|
||||||
implicit: implicit,
|
|
||||||
style: yaml_style_t(style),
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create SEQUENCE-END.
|
|
||||||
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
typ: yaml_SEQUENCE_END_EVENT,
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create MAPPING-START.
|
|
||||||
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
typ: yaml_MAPPING_START_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
tag: tag,
|
|
||||||
implicit: implicit,
|
|
||||||
style: yaml_style_t(style),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create MAPPING-END.
|
|
||||||
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
typ: yaml_MAPPING_END_EVENT,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Destroy an event object.
|
|
||||||
func yaml_event_delete(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
///*
|
|
||||||
// * Create a document object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(int)
|
|
||||||
//yaml_document_initialize(document *yaml_document_t,
|
|
||||||
// version_directive *yaml_version_directive_t,
|
|
||||||
// tag_directives_start *yaml_tag_directive_t,
|
|
||||||
// tag_directives_end *yaml_tag_directive_t,
|
|
||||||
// start_implicit int, end_implicit int)
|
|
||||||
//{
|
|
||||||
// struct {
|
|
||||||
// error yaml_error_type_t
|
|
||||||
// } context
|
|
||||||
// struct {
|
|
||||||
// start *yaml_node_t
|
|
||||||
// end *yaml_node_t
|
|
||||||
// top *yaml_node_t
|
|
||||||
// } nodes = { NULL, NULL, NULL }
|
|
||||||
// version_directive_copy *yaml_version_directive_t = NULL
|
|
||||||
// struct {
|
|
||||||
// start *yaml_tag_directive_t
|
|
||||||
// end *yaml_tag_directive_t
|
|
||||||
// top *yaml_tag_directive_t
|
|
||||||
// } tag_directives_copy = { NULL, NULL, NULL }
|
|
||||||
// value yaml_tag_directive_t = { NULL, NULL }
|
|
||||||
// mark yaml_mark_t = { 0, 0, 0 }
|
|
||||||
//
|
|
||||||
// assert(document) // Non-NULL document object is expected.
|
|
||||||
// assert((tag_directives_start && tag_directives_end) ||
|
|
||||||
// (tag_directives_start == tag_directives_end))
|
|
||||||
// // Valid tag directives are expected.
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
|
|
||||||
//
|
|
||||||
// if (version_directive) {
|
|
||||||
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
|
|
||||||
// if (!version_directive_copy) goto error
|
|
||||||
// version_directive_copy.major = version_directive.major
|
|
||||||
// version_directive_copy.minor = version_directive.minor
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (tag_directives_start != tag_directives_end) {
|
|
||||||
// tag_directive *yaml_tag_directive_t
|
|
||||||
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
|
||||||
// goto error
|
|
||||||
// for (tag_directive = tag_directives_start
|
|
||||||
// tag_directive != tag_directives_end; tag_directive ++) {
|
|
||||||
// assert(tag_directive.handle)
|
|
||||||
// assert(tag_directive.prefix)
|
|
||||||
// if (!yaml_check_utf8(tag_directive.handle,
|
|
||||||
// strlen((char *)tag_directive.handle)))
|
|
||||||
// goto error
|
|
||||||
// if (!yaml_check_utf8(tag_directive.prefix,
|
|
||||||
// strlen((char *)tag_directive.prefix)))
|
|
||||||
// goto error
|
|
||||||
// value.handle = yaml_strdup(tag_directive.handle)
|
|
||||||
// value.prefix = yaml_strdup(tag_directive.prefix)
|
|
||||||
// if (!value.handle || !value.prefix) goto error
|
|
||||||
// if (!PUSH(&context, tag_directives_copy, value))
|
|
||||||
// goto error
|
|
||||||
// value.handle = NULL
|
|
||||||
// value.prefix = NULL
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
|
||||||
// tag_directives_copy.start, tag_directives_copy.top,
|
|
||||||
// start_implicit, end_implicit, mark, mark)
|
|
||||||
//
|
|
||||||
// return 1
|
|
||||||
//
|
|
||||||
//error:
|
|
||||||
// STACK_DEL(&context, nodes)
|
|
||||||
// yaml_free(version_directive_copy)
|
|
||||||
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
|
||||||
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
|
|
||||||
// yaml_free(value.handle)
|
|
||||||
// yaml_free(value.prefix)
|
|
||||||
// }
|
|
||||||
// STACK_DEL(&context, tag_directives_copy)
|
|
||||||
// yaml_free(value.handle)
|
|
||||||
// yaml_free(value.prefix)
|
|
||||||
//
|
|
||||||
// return 0
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
///*
|
|
||||||
// * Destroy a document object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(void)
|
|
||||||
//yaml_document_delete(document *yaml_document_t)
|
|
||||||
//{
|
|
||||||
// struct {
|
|
||||||
// error yaml_error_type_t
|
|
||||||
// } context
|
|
||||||
// tag_directive *yaml_tag_directive_t
|
|
||||||
//
|
|
||||||
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
|
|
||||||
//
|
|
||||||
// assert(document) // Non-NULL document object is expected.
|
|
||||||
//
|
|
||||||
// while (!STACK_EMPTY(&context, document.nodes)) {
|
|
||||||
// node yaml_node_t = POP(&context, document.nodes)
|
|
||||||
// yaml_free(node.tag)
|
|
||||||
// switch (node.type) {
|
|
||||||
// case YAML_SCALAR_NODE:
|
|
||||||
// yaml_free(node.data.scalar.value)
|
|
||||||
// break
|
|
||||||
// case YAML_SEQUENCE_NODE:
|
|
||||||
// STACK_DEL(&context, node.data.sequence.items)
|
|
||||||
// break
|
|
||||||
// case YAML_MAPPING_NODE:
|
|
||||||
// STACK_DEL(&context, node.data.mapping.pairs)
|
|
||||||
// break
|
|
||||||
// default:
|
|
||||||
// assert(0) // Should not happen.
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// STACK_DEL(&context, document.nodes)
|
|
||||||
//
|
|
||||||
// yaml_free(document.version_directive)
|
|
||||||
// for (tag_directive = document.tag_directives.start
|
|
||||||
// tag_directive != document.tag_directives.end
|
|
||||||
// tag_directive++) {
|
|
||||||
// yaml_free(tag_directive.handle)
|
|
||||||
// yaml_free(tag_directive.prefix)
|
|
||||||
// }
|
|
||||||
// yaml_free(document.tag_directives.start)
|
|
||||||
//
|
|
||||||
// memset(document, 0, sizeof(yaml_document_t))
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
///**
|
|
||||||
// * Get a document node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(yaml_node_t *)
|
|
||||||
//yaml_document_get_node(document *yaml_document_t, index int)
|
|
||||||
//{
|
|
||||||
// assert(document) // Non-NULL document object is expected.
|
|
||||||
//
|
|
||||||
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
|
||||||
// return document.nodes.start + index - 1
|
|
||||||
// }
|
|
||||||
// return NULL
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
///**
|
|
||||||
// * Get the root object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(yaml_node_t *)
|
|
||||||
//yaml_document_get_root_node(document *yaml_document_t)
|
|
||||||
//{
|
|
||||||
// assert(document) // Non-NULL document object is expected.
|
|
||||||
//
|
|
||||||
// if (document.nodes.top != document.nodes.start) {
|
|
||||||
// return document.nodes.start
|
|
||||||
// }
|
|
||||||
// return NULL
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
///*
|
|
||||||
// * Add a scalar node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(int)
|
|
||||||
//yaml_document_add_scalar(document *yaml_document_t,
|
|
||||||
// tag *yaml_char_t, value *yaml_char_t, length int,
|
|
||||||
// style yaml_scalar_style_t)
|
|
||||||
//{
|
|
||||||
// struct {
|
|
||||||
// error yaml_error_type_t
|
|
||||||
// } context
|
|
||||||
// mark yaml_mark_t = { 0, 0, 0 }
|
|
||||||
// tag_copy *yaml_char_t = NULL
|
|
||||||
// value_copy *yaml_char_t = NULL
|
|
||||||
// node yaml_node_t
|
|
||||||
//
|
|
||||||
// assert(document) // Non-NULL document object is expected.
|
|
||||||
// assert(value) // Non-NULL value is expected.
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
|
||||||
// tag_copy = yaml_strdup(tag)
|
|
||||||
// if (!tag_copy) goto error
|
|
||||||
//
|
|
||||||
// if (length < 0) {
|
|
||||||
// length = strlen((char *)value)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(value, length)) goto error
|
|
||||||
// value_copy = yaml_malloc(length+1)
|
|
||||||
// if (!value_copy) goto error
|
|
||||||
// memcpy(value_copy, value, length)
|
|
||||||
// value_copy[length] = '\0'
|
|
||||||
//
|
|
||||||
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start
|
|
||||||
//
|
|
||||||
//error:
|
|
||||||
// yaml_free(tag_copy)
|
|
||||||
// yaml_free(value_copy)
|
|
||||||
//
|
|
||||||
// return 0
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
///*
|
|
||||||
// * Add a sequence node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(int)
|
|
||||||
//yaml_document_add_sequence(document *yaml_document_t,
|
|
||||||
// tag *yaml_char_t, style yaml_sequence_style_t)
|
|
||||||
//{
|
|
||||||
// struct {
|
|
||||||
// error yaml_error_type_t
|
|
||||||
// } context
|
|
||||||
// mark yaml_mark_t = { 0, 0, 0 }
|
|
||||||
// tag_copy *yaml_char_t = NULL
|
|
||||||
// struct {
|
|
||||||
// start *yaml_node_item_t
|
|
||||||
// end *yaml_node_item_t
|
|
||||||
// top *yaml_node_item_t
|
|
||||||
// } items = { NULL, NULL, NULL }
|
|
||||||
// node yaml_node_t
|
|
||||||
//
|
|
||||||
// assert(document) // Non-NULL document object is expected.
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
|
||||||
// tag_copy = yaml_strdup(tag)
|
|
||||||
// if (!tag_copy) goto error
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
|
|
||||||
//
|
|
||||||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
|
||||||
// style, mark, mark)
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start
|
|
||||||
//
|
|
||||||
//error:
|
|
||||||
// STACK_DEL(&context, items)
|
|
||||||
// yaml_free(tag_copy)
|
|
||||||
//
|
|
||||||
// return 0
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
///*
|
|
||||||
// * Add a mapping node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(int)
|
|
||||||
//yaml_document_add_mapping(document *yaml_document_t,
|
|
||||||
// tag *yaml_char_t, style yaml_mapping_style_t)
|
|
||||||
//{
|
|
||||||
// struct {
|
|
||||||
// error yaml_error_type_t
|
|
||||||
// } context
|
|
||||||
// mark yaml_mark_t = { 0, 0, 0 }
|
|
||||||
// tag_copy *yaml_char_t = NULL
|
|
||||||
// struct {
|
|
||||||
// start *yaml_node_pair_t
|
|
||||||
// end *yaml_node_pair_t
|
|
||||||
// top *yaml_node_pair_t
|
|
||||||
// } pairs = { NULL, NULL, NULL }
|
|
||||||
// node yaml_node_t
|
|
||||||
//
|
|
||||||
// assert(document) // Non-NULL document object is expected.
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
|
||||||
// tag_copy = yaml_strdup(tag)
|
|
||||||
// if (!tag_copy) goto error
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
|
|
||||||
//
|
|
||||||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
|
||||||
// style, mark, mark)
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start
|
|
||||||
//
|
|
||||||
//error:
|
|
||||||
// STACK_DEL(&context, pairs)
|
|
||||||
// yaml_free(tag_copy)
|
|
||||||
//
|
|
||||||
// return 0
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
///*
|
|
||||||
// * Append an item to a sequence node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(int)
|
|
||||||
//yaml_document_append_sequence_item(document *yaml_document_t,
|
|
||||||
// sequence int, item int)
|
|
||||||
//{
|
|
||||||
// struct {
|
|
||||||
// error yaml_error_type_t
|
|
||||||
// } context
|
|
||||||
//
|
|
||||||
// assert(document) // Non-NULL document is required.
|
|
||||||
// assert(sequence > 0
|
|
||||||
// && document.nodes.start + sequence <= document.nodes.top)
|
|
||||||
// // Valid sequence id is required.
|
|
||||||
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
|
|
||||||
// // A sequence node is required.
|
|
||||||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
|
|
||||||
// // Valid item id is required.
|
|
||||||
//
|
|
||||||
// if (!PUSH(&context,
|
|
||||||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
|
||||||
// return 0
|
|
||||||
//
|
|
||||||
// return 1
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
///*
|
|
||||||
// * Append a pair of a key and a value to a mapping node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
//YAML_DECLARE(int)
|
|
||||||
//yaml_document_append_mapping_pair(document *yaml_document_t,
|
|
||||||
// mapping int, key int, value int)
|
|
||||||
//{
|
|
||||||
// struct {
|
|
||||||
// error yaml_error_type_t
|
|
||||||
// } context
|
|
||||||
//
|
|
||||||
// pair yaml_node_pair_t
|
|
||||||
//
|
|
||||||
// assert(document) // Non-NULL document is required.
|
|
||||||
// assert(mapping > 0
|
|
||||||
// && document.nodes.start + mapping <= document.nodes.top)
|
|
||||||
// // Valid mapping id is required.
|
|
||||||
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
|
|
||||||
// // A mapping node is required.
|
|
||||||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
|
|
||||||
// // Valid key id is required.
|
|
||||||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
|
|
||||||
// // Valid value id is required.
|
|
||||||
//
|
|
||||||
// pair.key = key
|
|
||||||
// pair.value = value
|
|
||||||
//
|
|
||||||
// if (!PUSH(&context,
|
|
||||||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
|
||||||
// return 0
|
|
||||||
//
|
|
||||||
// return 1
|
|
||||||
//}
|
|
||||||
//
|
|
||||||
//
|
|
775
vendor/gopkg.in/yaml.v2/decode.go
generated
vendored
@ -1,775 +0,0 @@
package yaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
documentNode = 1 << iota
|
|
||||||
mappingNode
|
|
||||||
sequenceNode
|
|
||||||
scalarNode
|
|
||||||
aliasNode
|
|
||||||
)
|
|
||||||
|
|
||||||
type node struct {
|
|
||||||
kind int
|
|
||||||
line, column int
|
|
||||||
tag string
|
|
||||||
// For an alias node, alias holds the resolved alias.
|
|
||||||
alias *node
|
|
||||||
value string
|
|
||||||
implicit bool
|
|
||||||
children []*node
|
|
||||||
anchors map[string]*node
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// Parser, produces a node tree out of a libyaml event stream.
|
|
||||||
|
|
||||||
type parser struct {
|
|
||||||
parser yaml_parser_t
|
|
||||||
event yaml_event_t
|
|
||||||
doc *node
|
|
||||||
doneInit bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newParser(b []byte) *parser {
|
|
||||||
p := parser{}
|
|
||||||
if !yaml_parser_initialize(&p.parser) {
|
|
||||||
panic("failed to initialize YAML emitter")
|
|
||||||
}
|
|
||||||
if len(b) == 0 {
|
|
||||||
b = []byte{'\n'}
|
|
||||||
}
|
|
||||||
yaml_parser_set_input_string(&p.parser, b)
|
|
||||||
return &p
|
|
||||||
}
|
|
||||||
|
|
||||||
func newParserFromReader(r io.Reader) *parser {
|
|
||||||
p := parser{}
|
|
||||||
if !yaml_parser_initialize(&p.parser) {
|
|
||||||
panic("failed to initialize YAML emitter")
|
|
||||||
}
|
|
||||||
yaml_parser_set_input_reader(&p.parser, r)
|
|
||||||
return &p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) init() {
|
|
||||||
if p.doneInit {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.expect(yaml_STREAM_START_EVENT)
|
|
||||||
p.doneInit = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) destroy() {
|
|
||||||
if p.event.typ != yaml_NO_EVENT {
|
|
||||||
yaml_event_delete(&p.event)
|
|
||||||
}
|
|
||||||
yaml_parser_delete(&p.parser)
|
|
||||||
}
|
|
||||||
|
|
||||||
// expect consumes an event from the event stream and
|
|
||||||
// checks that it's of the expected type.
|
|
||||||
func (p *parser) expect(e yaml_event_type_t) {
|
|
||||||
if p.event.typ == yaml_NO_EVENT {
|
|
||||||
if !yaml_parser_parse(&p.parser, &p.event) {
|
|
||||||
p.fail()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if p.event.typ == yaml_STREAM_END_EVENT {
|
|
||||||
failf("attempted to go past the end of stream; corrupted value?")
|
|
||||||
}
|
|
||||||
if p.event.typ != e {
|
|
||||||
p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
|
|
||||||
p.fail()
|
|
||||||
}
|
|
||||||
yaml_event_delete(&p.event)
|
|
||||||
p.event.typ = yaml_NO_EVENT
|
|
||||||
}
|
|
||||||
|
|
||||||
// peek peeks at the next event in the event stream,
|
|
||||||
// puts the results into p.event and returns the event type.
|
|
||||||
func (p *parser) peek() yaml_event_type_t {
|
|
||||||
if p.event.typ != yaml_NO_EVENT {
|
|
||||||
return p.event.typ
|
|
||||||
}
|
|
||||||
if !yaml_parser_parse(&p.parser, &p.event) {
|
|
||||||
p.fail()
|
|
||||||
}
|
|
||||||
return p.event.typ
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) fail() {
|
|
||||||
var where string
|
|
||||||
var line int
|
|
||||||
if p.parser.problem_mark.line != 0 {
|
|
||||||
line = p.parser.problem_mark.line
|
|
||||||
// Scanner errors don't iterate line before returning error
|
|
||||||
if p.parser.error == yaml_SCANNER_ERROR {
|
|
||||||
line++
|
|
||||||
}
|
|
||||||
} else if p.parser.context_mark.line != 0 {
|
|
||||||
line = p.parser.context_mark.line
|
|
||||||
}
|
|
||||||
if line != 0 {
|
|
||||||
where = "line " + strconv.Itoa(line) + ": "
|
|
||||||
}
|
|
||||||
var msg string
|
|
||||||
if len(p.parser.problem) > 0 {
|
|
||||||
msg = p.parser.problem
|
|
||||||
} else {
|
|
||||||
msg = "unknown problem parsing YAML content"
|
|
||||||
}
|
|
||||||
failf("%s%s", where, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) anchor(n *node, anchor []byte) {
|
|
||||||
if anchor != nil {
|
|
||||||
p.doc.anchors[string(anchor)] = n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) parse() *node {
|
|
||||||
p.init()
|
|
||||||
switch p.peek() {
|
|
||||||
case yaml_SCALAR_EVENT:
|
|
||||||
return p.scalar()
|
|
||||||
case yaml_ALIAS_EVENT:
|
|
||||||
return p.alias()
|
|
||||||
case yaml_MAPPING_START_EVENT:
|
|
||||||
return p.mapping()
|
|
||||||
case yaml_SEQUENCE_START_EVENT:
|
|
||||||
return p.sequence()
|
|
||||||
case yaml_DOCUMENT_START_EVENT:
|
|
||||||
return p.document()
|
|
||||||
case yaml_STREAM_END_EVENT:
|
|
||||||
// Happens when attempting to decode an empty buffer.
|
|
||||||
return nil
|
|
||||||
default:
|
|
||||||
panic("attempted to parse unknown event: " + p.event.typ.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) node(kind int) *node {
|
|
||||||
return &node{
|
|
||||||
kind: kind,
|
|
||||||
line: p.event.start_mark.line,
|
|
||||||
column: p.event.start_mark.column,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) document() *node {
|
|
||||||
n := p.node(documentNode)
|
|
||||||
n.anchors = make(map[string]*node)
|
|
||||||
p.doc = n
|
|
||||||
p.expect(yaml_DOCUMENT_START_EVENT)
|
|
||||||
n.children = append(n.children, p.parse())
|
|
||||||
p.expect(yaml_DOCUMENT_END_EVENT)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) alias() *node {
|
|
||||||
n := p.node(aliasNode)
|
|
||||||
n.value = string(p.event.anchor)
|
|
||||||
n.alias = p.doc.anchors[n.value]
|
|
||||||
if n.alias == nil {
|
|
||||||
failf("unknown anchor '%s' referenced", n.value)
|
|
||||||
}
|
|
||||||
p.expect(yaml_ALIAS_EVENT)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) scalar() *node {
|
|
||||||
n := p.node(scalarNode)
|
|
||||||
n.value = string(p.event.value)
|
|
||||||
n.tag = string(p.event.tag)
|
|
||||||
n.implicit = p.event.implicit
|
|
||||||
p.anchor(n, p.event.anchor)
|
|
||||||
p.expect(yaml_SCALAR_EVENT)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) sequence() *node {
|
|
||||||
n := p.node(sequenceNode)
|
|
||||||
p.anchor(n, p.event.anchor)
|
|
||||||
p.expect(yaml_SEQUENCE_START_EVENT)
|
|
||||||
for p.peek() != yaml_SEQUENCE_END_EVENT {
|
|
||||||
n.children = append(n.children, p.parse())
|
|
||||||
}
|
|
||||||
p.expect(yaml_SEQUENCE_END_EVENT)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) mapping() *node {
|
|
||||||
n := p.node(mappingNode)
|
|
||||||
p.anchor(n, p.event.anchor)
|
|
||||||
p.expect(yaml_MAPPING_START_EVENT)
|
|
||||||
for p.peek() != yaml_MAPPING_END_EVENT {
|
|
||||||
n.children = append(n.children, p.parse(), p.parse())
|
|
||||||
}
|
|
||||||
p.expect(yaml_MAPPING_END_EVENT)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// Decoder, unmarshals a node into a provided value.
|
|
||||||
|
|
||||||
type decoder struct {
|
|
||||||
doc *node
|
|
||||||
aliases map[*node]bool
|
|
||||||
mapType reflect.Type
|
|
||||||
terrors []string
|
|
||||||
strict bool
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
mapItemType = reflect.TypeOf(MapItem{})
|
|
||||||
durationType = reflect.TypeOf(time.Duration(0))
|
|
||||||
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
|
|
||||||
ifaceType = defaultMapType.Elem()
|
|
||||||
timeType = reflect.TypeOf(time.Time{})
|
|
||||||
ptrTimeType = reflect.TypeOf(&time.Time{})
|
|
||||||
)
|
|
||||||
|
|
||||||
func newDecoder(strict bool) *decoder {
|
|
||||||
d := &decoder{mapType: defaultMapType, strict: strict}
|
|
||||||
d.aliases = make(map[*node]bool)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
|
|
||||||
if n.tag != "" {
|
|
||||||
tag = n.tag
|
|
||||||
}
|
|
||||||
value := n.value
|
|
||||||
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
|
|
||||||
if len(value) > 10 {
|
|
||||||
value = " `" + value[:7] + "...`"
|
|
||||||
} else {
|
|
||||||
value = " `" + value + "`"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
|
|
||||||
terrlen := len(d.terrors)
|
|
||||||
err := u.UnmarshalYAML(func(v interface{}) (err error) {
|
|
||||||
defer handleErr(&err)
|
|
||||||
d.unmarshal(n, reflect.ValueOf(v))
|
|
||||||
if len(d.terrors) > terrlen {
|
|
||||||
issues := d.terrors[terrlen:]
|
|
||||||
d.terrors = d.terrors[:terrlen]
|
|
||||||
return &TypeError{issues}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if e, ok := err.(*TypeError); ok {
|
|
||||||
d.terrors = append(d.terrors, e.Errors...)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fail(err)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
|
|
||||||
// if a value is found to implement it.
|
|
||||||
// It returns the initialized and dereferenced out value, whether
|
|
||||||
// unmarshalling was already done by UnmarshalYAML, and if so whether
|
|
||||||
// its types unmarshalled appropriately.
|
|
||||||
//
|
|
||||||
// If n holds a null value, prepare returns before doing anything.
|
|
||||||
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
|
|
||||||
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
|
|
||||||
return out, false, false
|
|
||||||
}
|
|
||||||
again := true
|
|
||||||
for again {
|
|
||||||
again = false
|
|
||||||
if out.Kind() == reflect.Ptr {
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.New(out.Type().Elem()))
|
|
||||||
}
|
|
||||||
out = out.Elem()
|
|
||||||
again = true
|
|
||||||
}
|
|
||||||
if out.CanAddr() {
|
|
||||||
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
|
|
||||||
good = d.callUnmarshaler(n, u)
|
|
||||||
return out, true, good
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out, false, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
|
|
||||||
switch n.kind {
|
|
||||||
case documentNode:
|
|
||||||
return d.document(n, out)
|
|
||||||
case aliasNode:
|
|
||||||
return d.alias(n, out)
|
|
||||||
}
|
|
||||||
out, unmarshaled, good := d.prepare(n, out)
|
|
||||||
if unmarshaled {
|
|
||||||
return good
|
|
||||||
}
|
|
||||||
switch n.kind {
|
|
||||||
case scalarNode:
|
|
||||||
good = d.scalar(n, out)
|
|
||||||
case mappingNode:
|
|
||||||
good = d.mapping(n, out)
|
|
||||||
case sequenceNode:
|
|
||||||
good = d.sequence(n, out)
|
|
||||||
default:
|
|
||||||
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
|
|
||||||
}
|
|
||||||
return good
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
|
|
||||||
if len(n.children) == 1 {
|
|
||||||
d.doc = n
|
|
||||||
d.unmarshal(n.children[0], out)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
|
|
||||||
if d.aliases[n] {
|
|
||||||
// TODO this could actually be allowed in some circumstances.
|
|
||||||
failf("anchor '%s' value contains itself", n.value)
|
|
||||||
}
|
|
||||||
d.aliases[n] = true
|
|
||||||
good = d.unmarshal(n.alias, out)
|
|
||||||
delete(d.aliases, n)
|
|
||||||
return good
|
|
||||||
}
|
|
||||||
|
|
||||||
var zeroValue reflect.Value
|
|
||||||
|
|
||||||
func resetMap(out reflect.Value) {
|
|
||||||
for _, k := range out.MapKeys() {
|
|
||||||
out.SetMapIndex(k, zeroValue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) scalar(n *node, out reflect.Value) bool {
|
|
||||||
var tag string
|
|
||||||
var resolved interface{}
|
|
||||||
if n.tag == "" && !n.implicit {
|
|
||||||
tag = yaml_STR_TAG
|
|
||||||
resolved = n.value
|
|
||||||
} else {
|
|
||||||
tag, resolved = resolve(n.tag, n.value)
|
|
||||||
if tag == yaml_BINARY_TAG {
|
|
||||||
data, err := base64.StdEncoding.DecodeString(resolved.(string))
|
|
||||||
if err != nil {
|
|
||||||
failf("!!binary value contains invalid base64 data")
|
|
||||||
}
|
|
||||||
resolved = string(data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if resolved == nil {
|
|
||||||
if out.Kind() == reflect.Map && !out.CanAddr() {
|
|
||||||
resetMap(out)
|
|
||||||
} else {
|
|
||||||
out.Set(reflect.Zero(out.Type()))
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
|
|
||||||
// We've resolved to exactly the type we want, so use that.
|
|
||||||
out.Set(resolvedv)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Perhaps we can use the value as a TextUnmarshaler to
|
|
||||||
// set its value.
|
|
||||||
if out.CanAddr() {
|
|
||||||
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
|
|
||||||
if ok {
|
|
||||||
var text []byte
|
|
||||||
if tag == yaml_BINARY_TAG {
|
|
||||||
text = []byte(resolved.(string))
|
|
||||||
} else {
|
|
||||||
// We let any value be unmarshaled into TextUnmarshaler.
|
|
||||||
// That might be more lax than we'd like, but the
|
|
||||||
// TextUnmarshaler itself should bowl out any dubious values.
|
|
||||||
text = []byte(n.value)
|
|
||||||
}
|
|
||||||
err := u.UnmarshalText(text)
|
|
||||||
if err != nil {
|
|
||||||
fail(err)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
switch out.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
if tag == yaml_BINARY_TAG {
|
|
||||||
out.SetString(resolved.(string))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if resolved != nil {
|
|
||||||
out.SetString(n.value)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case reflect.Interface:
|
|
||||||
if resolved == nil {
|
|
||||||
out.Set(reflect.Zero(out.Type()))
|
|
||||||
} else if tag == yaml_TIMESTAMP_TAG {
|
|
||||||
// It looks like a timestamp but for backward compatibility
|
|
||||||
// reasons we set it as a string, so that code that unmarshals
|
|
||||||
// timestamp-like values into interface{} will continue to
|
|
||||||
// see a string and not a time.Time.
|
|
||||||
// TODO(v3) Drop this.
|
|
||||||
out.Set(reflect.ValueOf(n.value))
|
|
||||||
} else {
|
|
||||||
out.Set(reflect.ValueOf(resolved))
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
switch resolved := resolved.(type) {
|
|
||||||
case int:
|
|
||||||
if !out.OverflowInt(int64(resolved)) {
|
|
||||||
out.SetInt(int64(resolved))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case int64:
|
|
||||||
if !out.OverflowInt(resolved) {
|
|
||||||
out.SetInt(resolved)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case uint64:
|
|
||||||
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
|
||||||
out.SetInt(int64(resolved))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case float64:
|
|
||||||
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
|
||||||
out.SetInt(int64(resolved))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case string:
|
|
||||||
if out.Type() == durationType {
|
|
||||||
d, err := time.ParseDuration(resolved)
|
|
||||||
if err == nil {
|
|
||||||
out.SetInt(int64(d))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
switch resolved := resolved.(type) {
|
|
||||||
case int:
|
|
||||||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
|
||||||
out.SetUint(uint64(resolved))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case int64:
|
|
||||||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
|
||||||
out.SetUint(uint64(resolved))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case uint64:
|
|
||||||
if !out.OverflowUint(uint64(resolved)) {
|
|
||||||
out.SetUint(uint64(resolved))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case float64:
|
|
||||||
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
|
|
||||||
out.SetUint(uint64(resolved))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Bool:
|
|
||||||
switch resolved := resolved.(type) {
|
|
||||||
case bool:
|
|
||||||
out.SetBool(resolved)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
switch resolved := resolved.(type) {
|
|
||||||
case int:
|
|
||||||
out.SetFloat(float64(resolved))
|
|
||||||
return true
|
|
||||||
case int64:
|
|
||||||
out.SetFloat(float64(resolved))
|
|
||||||
return true
|
|
||||||
case uint64:
|
|
||||||
out.SetFloat(float64(resolved))
|
|
||||||
return true
|
|
||||||
case float64:
|
|
||||||
out.SetFloat(resolved)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
|
|
||||||
out.Set(resolvedv)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case reflect.Ptr:
|
|
||||||
if out.Type().Elem() == reflect.TypeOf(resolved) {
|
|
||||||
// TODO DOes this make sense? When is out a Ptr except when decoding a nil value?
|
|
||||||
elem := reflect.New(out.Type().Elem())
|
|
||||||
elem.Elem().Set(reflect.ValueOf(resolved))
|
|
||||||
out.Set(elem)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
d.terror(n, tag, out)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func settableValueOf(i interface{}) reflect.Value {
|
|
||||||
v := reflect.ValueOf(i)
|
|
||||||
sv := reflect.New(v.Type()).Elem()
|
|
||||||
sv.Set(v)
|
|
||||||
return sv
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
|
|
||||||
l := len(n.children)
|
|
||||||
|
|
||||||
var iface reflect.Value
|
|
||||||
switch out.Kind() {
|
|
||||||
case reflect.Slice:
|
|
||||||
out.Set(reflect.MakeSlice(out.Type(), l, l))
|
|
||||||
case reflect.Array:
|
|
||||||
if l != out.Len() {
|
|
||||||
failf("invalid array: want %d elements but got %d", out.Len(), l)
|
|
||||||
}
|
|
||||||
case reflect.Interface:
|
|
||||||
// No type hints. Will have to use a generic sequence.
|
|
||||||
iface = out
|
|
||||||
out = settableValueOf(make([]interface{}, l))
|
|
||||||
default:
|
|
||||||
d.terror(n, yaml_SEQ_TAG, out)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
et := out.Type().Elem()
|
|
||||||
|
|
||||||
j := 0
|
|
||||||
for i := 0; i < l; i++ {
|
|
||||||
e := reflect.New(et).Elem()
|
|
||||||
if ok := d.unmarshal(n.children[i], e); ok {
|
|
||||||
out.Index(j).Set(e)
|
|
||||||
j++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if out.Kind() != reflect.Array {
|
|
||||||
out.Set(out.Slice(0, j))
|
|
||||||
}
|
|
||||||
if iface.IsValid() {
|
|
||||||
iface.Set(out)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
|
||||||
switch out.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
return d.mappingStruct(n, out)
|
|
||||||
case reflect.Slice:
|
|
||||||
return d.mappingSlice(n, out)
|
|
||||||
case reflect.Map:
|
|
||||||
// okay
|
|
||||||
case reflect.Interface:
|
|
||||||
if d.mapType.Kind() == reflect.Map {
|
|
||||||
iface := out
|
|
||||||
out = reflect.MakeMap(d.mapType)
|
|
||||||
iface.Set(out)
|
|
||||||
} else {
|
|
||||||
slicev := reflect.New(d.mapType).Elem()
|
|
||||||
if !d.mappingSlice(n, slicev) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
out.Set(slicev)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
d.terror(n, yaml_MAP_TAG, out)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
outt := out.Type()
|
|
||||||
kt := outt.Key()
|
|
||||||
et := outt.Elem()
|
|
||||||
|
|
||||||
mapType := d.mapType
|
|
||||||
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
|
|
||||||
d.mapType = outt
|
|
||||||
}
|
|
||||||
|
|
||||||
if out.IsNil() {
|
|
||||||
out.Set(reflect.MakeMap(outt))
|
|
||||||
}
|
|
||||||
l := len(n.children)
|
|
||||||
for i := 0; i < l; i += 2 {
|
|
||||||
if isMerge(n.children[i]) {
|
|
||||||
d.merge(n.children[i+1], out)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
k := reflect.New(kt).Elem()
|
|
||||||
if d.unmarshal(n.children[i], k) {
|
|
||||||
kkind := k.Kind()
|
|
||||||
if kkind == reflect.Interface {
|
|
||||||
kkind = k.Elem().Kind()
|
|
||||||
}
|
|
||||||
if kkind == reflect.Map || kkind == reflect.Slice {
|
|
||||||
failf("invalid map key: %#v", k.Interface())
|
|
||||||
}
|
|
||||||
e := reflect.New(et).Elem()
|
|
||||||
if d.unmarshal(n.children[i+1], e) {
|
|
||||||
d.setMapIndex(n.children[i+1], out, k, e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
d.mapType = mapType
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
|
|
||||||
if d.strict && out.MapIndex(k) != zeroValue {
|
|
||||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
out.SetMapIndex(k, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
|
|
||||||
outt := out.Type()
|
|
||||||
if outt.Elem() != mapItemType {
|
|
||||||
d.terror(n, yaml_MAP_TAG, out)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
mapType := d.mapType
|
|
||||||
d.mapType = outt
|
|
||||||
|
|
||||||
var slice []MapItem
|
|
||||||
var l = len(n.children)
|
|
||||||
for i := 0; i < l; i += 2 {
|
|
||||||
if isMerge(n.children[i]) {
|
|
||||||
d.merge(n.children[i+1], out)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
item := MapItem{}
|
|
||||||
k := reflect.ValueOf(&item.Key).Elem()
|
|
||||||
if d.unmarshal(n.children[i], k) {
|
|
||||||
v := reflect.ValueOf(&item.Value).Elem()
|
|
||||||
if d.unmarshal(n.children[i+1], v) {
|
|
||||||
slice = append(slice, item)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
out.Set(reflect.ValueOf(slice))
|
|
||||||
d.mapType = mapType
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
|
||||||
sinfo, err := getStructInfo(out.Type())
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
name := settableValueOf("")
|
|
||||||
l := len(n.children)
|
|
||||||
|
|
||||||
var inlineMap reflect.Value
|
|
||||||
var elemType reflect.Type
|
|
||||||
if sinfo.InlineMap != -1 {
|
|
||||||
inlineMap = out.Field(sinfo.InlineMap)
|
|
||||||
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
|
|
||||||
elemType = inlineMap.Type().Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
var doneFields []bool
|
|
||||||
if d.strict {
|
|
||||||
doneFields = make([]bool, len(sinfo.FieldsList))
|
|
||||||
}
|
|
||||||
for i := 0; i < l; i += 2 {
|
|
||||||
ni := n.children[i]
|
|
||||||
if isMerge(ni) {
|
|
||||||
d.merge(n.children[i+1], out)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !d.unmarshal(ni, name) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
|
||||||
if d.strict {
|
|
||||||
if doneFields[info.Id] {
|
|
||||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
doneFields[info.Id] = true
|
|
||||||
}
|
|
||||||
var field reflect.Value
|
|
||||||
if info.Inline == nil {
|
|
||||||
field = out.Field(info.Num)
|
|
||||||
} else {
|
|
||||||
field = out.FieldByIndex(info.Inline)
|
|
||||||
}
|
|
||||||
d.unmarshal(n.children[i+1], field)
|
|
||||||
} else if sinfo.InlineMap != -1 {
|
|
||||||
if inlineMap.IsNil() {
|
|
||||||
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
|
|
||||||
}
|
|
||||||
value := reflect.New(elemType).Elem()
|
|
||||||
d.unmarshal(n.children[i+1], value)
|
|
||||||
d.setMapIndex(n.children[i+1], inlineMap, name, value)
|
|
||||||
} else if d.strict {
|
|
||||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func failWantMap() {
|
|
||||||
failf("map merge requires map or sequence of maps as the value")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *decoder) merge(n *node, out reflect.Value) {
|
|
||||||
switch n.kind {
|
|
||||||
case mappingNode:
|
|
||||||
d.unmarshal(n, out)
|
|
||||||
case aliasNode:
|
|
||||||
an, ok := d.doc.anchors[n.value]
|
|
||||||
if ok && an.kind != mappingNode {
|
|
||||||
failWantMap()
|
|
||||||
}
|
|
||||||
d.unmarshal(n, out)
|
|
||||||
case sequenceNode:
|
|
||||||
// Step backwards as earlier nodes take precedence.
|
|
||||||
for i := len(n.children) - 1; i >= 0; i-- {
|
|
||||||
ni := n.children[i]
|
|
||||||
if ni.kind == aliasNode {
|
|
||||||
an, ok := d.doc.anchors[ni.value]
|
|
||||||
if ok && an.kind != mappingNode {
|
|
||||||
failWantMap()
|
|
||||||
}
|
|
||||||
} else if ni.kind != mappingNode {
|
|
||||||
failWantMap()
|
|
||||||
}
|
|
||||||
d.unmarshal(ni, out)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
failWantMap()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func isMerge(n *node) bool {
|
|
||||||
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
|
|
||||||
}
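For reference, the merge handling above expands "<<" keys from a mapping anchor (or a sequence of them, later entries taking lower precedence). A minimal sketch through the public gopkg.in/yaml.v2 API; the document and key names are made up for illustration:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// doc uses an anchor plus the "<<" merge key, which the decoder expands by
// unmarshalling the anchored mapping into the same target value.
const doc = `
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  database: dev_db
`

func main() {
	var out map[string]map[string]string
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	// Keys from *defaults are merged in; explicit keys win over merged ones.
	fmt.Println(out["development"]["adapter"])  // postgres
	fmt.Println(out["development"]["database"]) // dev_db
}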
|
|
1685
vendor/gopkg.in/yaml.v2/emitterc.go
generated
vendored
File diff suppressed because it is too large
390
vendor/gopkg.in/yaml.v2/encode.go
generated
vendored
@ -1,390 +0,0 @@
package yaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// jsonNumber is the interface of the encoding/json.Number datatype.
|
|
||||||
// Repeating the interface here avoids a dependency on encoding/json, and also
|
|
||||||
// supports other libraries like jsoniter, which use a similar datatype with
|
|
||||||
// the same interface. Detecting this interface is useful when dealing with
|
|
||||||
// structures containing json.Number, which is a string under the hood. The
|
|
||||||
// encoder should prefer the use of Int64(), Float64() and string(), in that
|
|
||||||
// order, when encoding this type.
|
|
||||||
type jsonNumber interface {
|
|
||||||
Float64() (float64, error)
|
|
||||||
Int64() (int64, error)
|
|
||||||
String() string
|
|
||||||
}
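Because the interface is matched structurally, a plain encoding/json.Number (or jsoniter's equivalent) is converted to an int64, float64, or string before emission rather than being quoted as text. A small sketch of the observable effect, assuming only the public Marshal API:

package main

import (
	"encoding/json"
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// json.Number is a string under the hood, but it satisfies the
	// Int64/Float64/String interface detected by the encoder.
	v := map[string]json.Number{
		"count": json.Number("42"),
		"ratio": json.Number("0.5"),
	}
	out, err := yaml.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // count: 42 and ratio: 0.5, emitted as numbers, not quoted strings
}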
|
|
||||||
|
|
||||||
type encoder struct {
|
|
||||||
emitter yaml_emitter_t
|
|
||||||
event yaml_event_t
|
|
||||||
out []byte
|
|
||||||
flow bool
|
|
||||||
// doneInit holds whether the initial stream_start_event has been
|
|
||||||
// emitted.
|
|
||||||
doneInit bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newEncoder() *encoder {
|
|
||||||
e := &encoder{}
|
|
||||||
yaml_emitter_initialize(&e.emitter)
|
|
||||||
yaml_emitter_set_output_string(&e.emitter, &e.out)
|
|
||||||
yaml_emitter_set_unicode(&e.emitter, true)
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
func newEncoderWithWriter(w io.Writer) *encoder {
|
|
||||||
e := &encoder{}
|
|
||||||
yaml_emitter_initialize(&e.emitter)
|
|
||||||
yaml_emitter_set_output_writer(&e.emitter, w)
|
|
||||||
yaml_emitter_set_unicode(&e.emitter, true)
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) init() {
|
|
||||||
if e.doneInit {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
|
|
||||||
e.emit()
|
|
||||||
e.doneInit = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) finish() {
|
|
||||||
e.emitter.open_ended = false
|
|
||||||
yaml_stream_end_event_initialize(&e.event)
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) destroy() {
|
|
||||||
yaml_emitter_delete(&e.emitter)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) emit() {
|
|
||||||
// This will internally delete the e.event value.
|
|
||||||
e.must(yaml_emitter_emit(&e.emitter, &e.event))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) must(ok bool) {
|
|
||||||
if !ok {
|
|
||||||
msg := e.emitter.problem
|
|
||||||
if msg == "" {
|
|
||||||
msg = "unknown problem generating YAML content"
|
|
||||||
}
|
|
||||||
failf("%s", msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
|
|
||||||
e.init()
|
|
||||||
yaml_document_start_event_initialize(&e.event, nil, nil, true)
|
|
||||||
e.emit()
|
|
||||||
e.marshal(tag, in)
|
|
||||||
yaml_document_end_event_initialize(&e.event, true)
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) marshal(tag string, in reflect.Value) {
|
|
||||||
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
|
|
||||||
e.nilv()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
iface := in.Interface()
|
|
||||||
switch m := iface.(type) {
|
|
||||||
case jsonNumber:
|
|
||||||
integer, err := m.Int64()
|
|
||||||
if err == nil {
|
|
||||||
// In this case the json.Number is a valid int64
|
|
||||||
in = reflect.ValueOf(integer)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
float, err := m.Float64()
|
|
||||||
if err == nil {
|
|
||||||
// In this case the json.Number is a valid float64
|
|
||||||
in = reflect.ValueOf(float)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// fallback case - no number could be obtained
|
|
||||||
in = reflect.ValueOf(m.String())
|
|
||||||
case time.Time, *time.Time:
|
|
||||||
// Although time.Time implements TextMarshaler,
|
|
||||||
// we don't want to treat it as a string for YAML
|
|
||||||
// purposes because YAML has special support for
|
|
||||||
// timestamps.
|
|
||||||
case Marshaler:
|
|
||||||
v, err := m.MarshalYAML()
|
|
||||||
if err != nil {
|
|
||||||
fail(err)
|
|
||||||
}
|
|
||||||
if v == nil {
|
|
||||||
e.nilv()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
in = reflect.ValueOf(v)
|
|
||||||
case encoding.TextMarshaler:
|
|
||||||
text, err := m.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
fail(err)
|
|
||||||
}
|
|
||||||
in = reflect.ValueOf(string(text))
|
|
||||||
case nil:
|
|
||||||
e.nilv()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch in.Kind() {
|
|
||||||
case reflect.Interface:
|
|
||||||
e.marshal(tag, in.Elem())
|
|
||||||
case reflect.Map:
|
|
||||||
e.mapv(tag, in)
|
|
||||||
case reflect.Ptr:
|
|
||||||
if in.Type() == ptrTimeType {
|
|
||||||
e.timev(tag, in.Elem())
|
|
||||||
} else {
|
|
||||||
e.marshal(tag, in.Elem())
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
if in.Type() == timeType {
|
|
||||||
e.timev(tag, in)
|
|
||||||
} else {
|
|
||||||
e.structv(tag, in)
|
|
||||||
}
|
|
||||||
case reflect.Slice, reflect.Array:
|
|
||||||
if in.Type().Elem() == mapItemType {
|
|
||||||
e.itemsv(tag, in)
|
|
||||||
} else {
|
|
||||||
e.slicev(tag, in)
|
|
||||||
}
|
|
||||||
case reflect.String:
|
|
||||||
e.stringv(tag, in)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
if in.Type() == durationType {
|
|
||||||
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
|
|
||||||
} else {
|
|
||||||
e.intv(tag, in)
|
|
||||||
}
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
e.uintv(tag, in)
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
e.floatv(tag, in)
|
|
||||||
case reflect.Bool:
|
|
||||||
e.boolv(tag, in)
|
|
||||||
default:
|
|
||||||
panic("cannot marshal type: " + in.Type().String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) mapv(tag string, in reflect.Value) {
|
|
||||||
e.mappingv(tag, func() {
|
|
||||||
keys := keyList(in.MapKeys())
|
|
||||||
sort.Sort(keys)
|
|
||||||
for _, k := range keys {
|
|
||||||
e.marshal("", k)
|
|
||||||
e.marshal("", in.MapIndex(k))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) itemsv(tag string, in reflect.Value) {
|
|
||||||
e.mappingv(tag, func() {
|
|
||||||
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
|
|
||||||
for _, item := range slice {
|
|
||||||
e.marshal("", reflect.ValueOf(item.Key))
|
|
||||||
e.marshal("", reflect.ValueOf(item.Value))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) structv(tag string, in reflect.Value) {
|
|
||||||
sinfo, err := getStructInfo(in.Type())
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
e.mappingv(tag, func() {
|
|
||||||
for _, info := range sinfo.FieldsList {
|
|
||||||
var value reflect.Value
|
|
||||||
if info.Inline == nil {
|
|
||||||
value = in.Field(info.Num)
|
|
||||||
} else {
|
|
||||||
value = in.FieldByIndex(info.Inline)
|
|
||||||
}
|
|
||||||
if info.OmitEmpty && isZero(value) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
e.marshal("", reflect.ValueOf(info.Key))
|
|
||||||
e.flow = info.Flow
|
|
||||||
e.marshal("", value)
|
|
||||||
}
|
|
||||||
if sinfo.InlineMap >= 0 {
|
|
||||||
m := in.Field(sinfo.InlineMap)
|
|
||||||
if m.Len() > 0 {
|
|
||||||
e.flow = false
|
|
||||||
keys := keyList(m.MapKeys())
|
|
||||||
sort.Sort(keys)
|
|
||||||
for _, k := range keys {
|
|
||||||
if _, found := sinfo.FieldsMap[k.String()]; found {
|
|
||||||
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
|
|
||||||
}
|
|
||||||
e.marshal("", k)
|
|
||||||
e.flow = false
|
|
||||||
e.marshal("", m.MapIndex(k))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) mappingv(tag string, f func()) {
|
|
||||||
implicit := tag == ""
|
|
||||||
style := yaml_BLOCK_MAPPING_STYLE
|
|
||||||
if e.flow {
|
|
||||||
e.flow = false
|
|
||||||
style = yaml_FLOW_MAPPING_STYLE
|
|
||||||
}
|
|
||||||
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
|
|
||||||
e.emit()
|
|
||||||
f()
|
|
||||||
yaml_mapping_end_event_initialize(&e.event)
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) slicev(tag string, in reflect.Value) {
|
|
||||||
implicit := tag == ""
|
|
||||||
style := yaml_BLOCK_SEQUENCE_STYLE
|
|
||||||
if e.flow {
|
|
||||||
e.flow = false
|
|
||||||
style = yaml_FLOW_SEQUENCE_STYLE
|
|
||||||
}
|
|
||||||
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
|
|
||||||
e.emit()
|
|
||||||
n := in.Len()
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
e.marshal("", in.Index(i))
|
|
||||||
}
|
|
||||||
e.must(yaml_sequence_end_event_initialize(&e.event))
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
|
|
||||||
//
|
|
||||||
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
|
|
||||||
// in YAML 1.2 and by this package, but these should be marshalled quoted for
|
|
||||||
// the time being for compatibility with other parsers.
|
|
||||||
func isBase60Float(s string) (result bool) {
|
|
||||||
// Fast path.
|
|
||||||
if s == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
c := s[0]
|
|
||||||
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
// Do the full match.
|
|
||||||
return base60float.MatchString(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// From http://yaml.org/type/float.html, except the regular expression there
|
|
||||||
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
|
|
||||||
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
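The practical effect of isBase60Float: a plain string such as "1:20", which a YAML 1.1 parser would read as the sexagesimal number 80, is emitted double-quoted so other parsers round-trip it as a string. A minimal sketch using the public API:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"duration": "1:20", // looks like a YAML 1.1 base-60 float
		"plain":    "hello",
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// duration: "1:20"   <- quoted, so YAML 1.1 parsers keep it a string
	// plain: hello       <- resolves to !!str anyway, stays plain
}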
|
|
||||||
|
|
||||||
func (e *encoder) stringv(tag string, in reflect.Value) {
|
|
||||||
var style yaml_scalar_style_t
|
|
||||||
s := in.String()
|
|
||||||
canUsePlain := true
|
|
||||||
switch {
|
|
||||||
case !utf8.ValidString(s):
|
|
||||||
if tag == yaml_BINARY_TAG {
|
|
||||||
failf("explicitly tagged !!binary data must be base64-encoded")
|
|
||||||
}
|
|
||||||
if tag != "" {
|
|
||||||
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
|
|
||||||
}
|
|
||||||
// It can't be encoded directly as YAML so use a binary tag
|
|
||||||
// and encode it as base64.
|
|
||||||
tag = yaml_BINARY_TAG
|
|
||||||
s = encodeBase64(s)
|
|
||||||
case tag == "":
|
|
||||||
// Check to see if it would resolve to a specific
|
|
||||||
// tag when encoded unquoted. If it doesn't,
|
|
||||||
// there's no need to quote it.
|
|
||||||
rtag, _ := resolve("", s)
|
|
||||||
canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
|
|
||||||
}
|
|
||||||
// Note: it's possible for user code to emit invalid YAML
|
|
||||||
// if they explicitly specify a tag and a string containing
|
|
||||||
// text that's incompatible with that tag.
|
|
||||||
switch {
|
|
||||||
case strings.Contains(s, "\n"):
|
|
||||||
style = yaml_LITERAL_SCALAR_STYLE
|
|
||||||
case canUsePlain:
|
|
||||||
style = yaml_PLAIN_SCALAR_STYLE
|
|
||||||
default:
|
|
||||||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
|
||||||
}
|
|
||||||
e.emitScalar(s, "", tag, style)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) boolv(tag string, in reflect.Value) {
|
|
||||||
var s string
|
|
||||||
if in.Bool() {
|
|
||||||
s = "true"
|
|
||||||
} else {
|
|
||||||
s = "false"
|
|
||||||
}
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) intv(tag string, in reflect.Value) {
|
|
||||||
s := strconv.FormatInt(in.Int(), 10)
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) uintv(tag string, in reflect.Value) {
|
|
||||||
s := strconv.FormatUint(in.Uint(), 10)
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) timev(tag string, in reflect.Value) {
|
|
||||||
t := in.Interface().(time.Time)
|
|
||||||
s := t.Format(time.RFC3339Nano)
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) floatv(tag string, in reflect.Value) {
|
|
||||||
// Issue #352: When formatting, use the precision of the underlying value
|
|
||||||
precision := 64
|
|
||||||
if in.Kind() == reflect.Float32 {
|
|
||||||
precision = 32
|
|
||||||
}
|
|
||||||
|
|
||||||
s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
|
|
||||||
switch s {
|
|
||||||
case "+Inf":
|
|
||||||
s = ".inf"
|
|
||||||
case "-Inf":
|
|
||||||
s = "-.inf"
|
|
||||||
case "NaN":
|
|
||||||
s = ".nan"
|
|
||||||
}
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) nilv() {
|
|
||||||
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
|
|
||||||
implicit := tag == ""
|
|
||||||
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
|
|
||||||
e.emit()
|
|
||||||
}
|
|
5
vendor/gopkg.in/yaml.v2/go.mod
generated
vendored
@ -1,5 +0,0 @@
module "gopkg.in/yaml.v2"

require (
	"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
)
1095
vendor/gopkg.in/yaml.v2/parserc.go
generated
vendored
File diff suppressed because it is too large
412
vendor/gopkg.in/yaml.v2/readerc.go
generated
vendored
@ -1,412 +0,0 @@
package yaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Set the reader error and return 0.
|
|
||||||
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
|
|
||||||
parser.error = yaml_READER_ERROR
|
|
||||||
parser.problem = problem
|
|
||||||
parser.problem_offset = offset
|
|
||||||
parser.problem_value = value
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Byte order marks.
|
|
||||||
const (
|
|
||||||
bom_UTF8 = "\xef\xbb\xbf"
|
|
||||||
bom_UTF16LE = "\xff\xfe"
|
|
||||||
bom_UTF16BE = "\xfe\xff"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
|
|
||||||
// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
|
|
||||||
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
|
|
||||||
// Ensure that we had enough bytes in the raw buffer.
|
|
||||||
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
|
|
||||||
if !yaml_parser_update_raw_buffer(parser) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine the encoding.
|
|
||||||
buf := parser.raw_buffer
|
|
||||||
pos := parser.raw_buffer_pos
|
|
||||||
avail := len(buf) - pos
|
|
||||||
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
|
|
||||||
parser.encoding = yaml_UTF16LE_ENCODING
|
|
||||||
parser.raw_buffer_pos += 2
|
|
||||||
parser.offset += 2
|
|
||||||
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
|
|
||||||
parser.encoding = yaml_UTF16BE_ENCODING
|
|
||||||
parser.raw_buffer_pos += 2
|
|
||||||
parser.offset += 2
|
|
||||||
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
|
|
||||||
parser.encoding = yaml_UTF8_ENCODING
|
|
||||||
parser.raw_buffer_pos += 3
|
|
||||||
parser.offset += 3
|
|
||||||
} else {
|
|
||||||
parser.encoding = yaml_UTF8_ENCODING
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
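Since the reader detects a UTF-16 BOM and transcodes to UTF-8 internally, Unmarshal accepts UTF-16 encoded documents directly. A rough sketch that hand-builds a UTF-16LE byte stream; the utf16le helper name is made up for the example:

package main

import (
	"fmt"
	"unicode/utf16"

	yaml "gopkg.in/yaml.v2"
)

// utf16le is a hypothetical helper: BOM plus the string encoded as UTF-16LE.
func utf16le(s string) []byte {
	buf := []byte{0xFF, 0xFE} // UTF-16LE byte order mark
	for _, u := range utf16.Encode([]rune(s)) {
		buf = append(buf, byte(u), byte(u>>8))
	}
	return buf
}

func main() {
	var v map[string]int
	if err := yaml.Unmarshal(utf16le("answer: 42\n"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v["answer"]) // 42
}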
|
|
||||||
|
|
||||||
// Update the raw buffer.
|
|
||||||
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
|
|
||||||
size_read := 0
|
|
||||||
|
|
||||||
// Return if the raw buffer is full.
|
|
||||||
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return on EOF.
|
|
||||||
if parser.eof {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move the remaining bytes in the raw buffer to the beginning.
|
|
||||||
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
|
|
||||||
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
|
|
||||||
}
|
|
||||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
|
|
||||||
parser.raw_buffer_pos = 0
|
|
||||||
|
|
||||||
// Call the read handler to fill the buffer.
|
|
||||||
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
|
|
||||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
|
|
||||||
if err == io.EOF {
|
|
||||||
parser.eof = true
|
|
||||||
} else if err != nil {
|
|
||||||
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure that the buffer contains at least `length` characters.
|
|
||||||
// Return true on success, false on failure.
|
|
||||||
//
|
|
||||||
// The length is supposed to be significantly less than the buffer size.
|
|
||||||
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
|
|
||||||
if parser.read_handler == nil {
|
|
||||||
panic("read handler must be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
// [Go] This function was changed to guarantee the requested length size at EOF.
|
|
||||||
// The fact we need to do this is pretty awful, but the description above implies
|
|
||||||
// for that to be the case, and there are tests
|
|
||||||
|
|
||||||
// If the EOF flag is set and the raw buffer is empty, do nothing.
|
|
||||||
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
|
|
||||||
// [Go] ACTUALLY! Read the documentation of this function above.
|
|
||||||
// This is just broken. To return true, we need to have the
|
|
||||||
// given length in the buffer. Not doing that means every single
|
|
||||||
// check that calls this function to make sure the buffer has a
|
|
||||||
// given length is Go) panicking; or C) accessing invalid memory.
|
|
||||||
//return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return if the buffer contains enough characters.
|
|
||||||
if parser.unread >= length {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine the input encoding if it is not known yet.
|
|
||||||
if parser.encoding == yaml_ANY_ENCODING {
|
|
||||||
if !yaml_parser_determine_encoding(parser) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move the unread characters to the beginning of the buffer.
|
|
||||||
buffer_len := len(parser.buffer)
|
|
||||||
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
|
|
||||||
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
|
|
||||||
buffer_len -= parser.buffer_pos
|
|
||||||
parser.buffer_pos = 0
|
|
||||||
} else if parser.buffer_pos == buffer_len {
|
|
||||||
buffer_len = 0
|
|
||||||
parser.buffer_pos = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open the whole buffer for writing, and cut it before returning.
|
|
||||||
parser.buffer = parser.buffer[:cap(parser.buffer)]
|
|
||||||
|
|
||||||
// Fill the buffer until it has enough characters.
|
|
||||||
first := true
|
|
||||||
for parser.unread < length {
|
|
||||||
|
|
||||||
// Fill the raw buffer if necessary.
|
|
||||||
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
|
|
||||||
if !yaml_parser_update_raw_buffer(parser) {
|
|
||||||
parser.buffer = parser.buffer[:buffer_len]
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
first = false
|
|
||||||
|
|
||||||
// Decode the raw buffer.
|
|
||||||
inner:
|
|
||||||
for parser.raw_buffer_pos != len(parser.raw_buffer) {
|
|
||||||
var value rune
|
|
||||||
var width int
|
|
||||||
|
|
||||||
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
|
|
||||||
|
|
||||||
// Decode the next character.
|
|
||||||
switch parser.encoding {
|
|
||||||
case yaml_UTF8_ENCODING:
|
|
||||||
// Decode a UTF-8 character. Check RFC 3629
|
|
||||||
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
|
|
||||||
//
|
|
||||||
// The following table (taken from the RFC) is used for
|
|
||||||
// decoding.
|
|
||||||
//
|
|
||||||
// Char. number range | UTF-8 octet sequence
|
|
||||||
// (hexadecimal) | (binary)
|
|
||||||
// --------------------+------------------------------------
|
|
||||||
// 0000 0000-0000 007F | 0xxxxxxx
|
|
||||||
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
|
|
||||||
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
|
|
||||||
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
|
||||||
//
|
|
||||||
// Additionally, the characters in the range 0xD800-0xDFFF
|
|
||||||
// are prohibited as they are reserved for use with UTF-16
|
|
||||||
// surrogate pairs.
|
|
||||||
|
|
||||||
// Determine the length of the UTF-8 sequence.
|
|
||||||
octet := parser.raw_buffer[parser.raw_buffer_pos]
|
|
||||||
switch {
|
|
||||||
case octet&0x80 == 0x00:
|
|
||||||
width = 1
|
|
||||||
case octet&0xE0 == 0xC0:
|
|
||||||
width = 2
|
|
||||||
case octet&0xF0 == 0xE0:
|
|
||||||
width = 3
|
|
||||||
case octet&0xF8 == 0xF0:
|
|
||||||
width = 4
|
|
||||||
default:
|
|
||||||
// The leading octet is invalid.
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid leading UTF-8 octet",
|
|
||||||
parser.offset, int(octet))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the raw buffer contains an incomplete character.
|
|
||||||
if width > raw_unread {
|
|
||||||
if parser.eof {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"incomplete UTF-8 octet sequence",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
break inner
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode the leading octet.
|
|
||||||
switch {
|
|
||||||
case octet&0x80 == 0x00:
|
|
||||||
value = rune(octet & 0x7F)
|
|
||||||
case octet&0xE0 == 0xC0:
|
|
||||||
value = rune(octet & 0x1F)
|
|
||||||
case octet&0xF0 == 0xE0:
|
|
||||||
value = rune(octet & 0x0F)
|
|
||||||
case octet&0xF8 == 0xF0:
|
|
||||||
value = rune(octet & 0x07)
|
|
||||||
default:
|
|
||||||
value = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check and decode the trailing octets.
|
|
||||||
for k := 1; k < width; k++ {
|
|
||||||
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
|
|
||||||
|
|
||||||
// Check if the octet is valid.
|
|
||||||
if (octet & 0xC0) != 0x80 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid trailing UTF-8 octet",
|
|
||||||
parser.offset+k, int(octet))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode the octet.
|
|
||||||
value = (value << 6) + rune(octet&0x3F)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the length of the sequence against the value.
|
|
||||||
switch {
|
|
||||||
case width == 1:
|
|
||||||
case width == 2 && value >= 0x80:
|
|
||||||
case width == 3 && value >= 0x800:
|
|
||||||
case width == 4 && value >= 0x10000:
|
|
||||||
default:
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid length of a UTF-8 sequence",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the range of the value.
|
|
||||||
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid Unicode character",
|
|
||||||
parser.offset, int(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
|
|
||||||
var low, high int
|
|
||||||
if parser.encoding == yaml_UTF16LE_ENCODING {
|
|
||||||
low, high = 0, 1
|
|
||||||
} else {
|
|
||||||
low, high = 1, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// The UTF-16 encoding is not as simple as one might
|
|
||||||
// naively think. Check RFC 2781
|
|
||||||
// (http://www.ietf.org/rfc/rfc2781.txt).
|
|
||||||
//
|
|
||||||
// Normally, two subsequent bytes describe a Unicode
|
|
||||||
// character. However a special technique (called a
|
|
||||||
// surrogate pair) is used for specifying character
|
|
||||||
// values larger than 0xFFFF.
|
|
||||||
//
|
|
||||||
// A surrogate pair consists of two pseudo-characters:
|
|
||||||
// high surrogate area (0xD800-0xDBFF)
|
|
||||||
// low surrogate area (0xDC00-0xDFFF)
|
|
||||||
//
|
|
||||||
// The following formulas are used for decoding
|
|
||||||
// and encoding characters using surrogate pairs:
|
|
||||||
//
|
|
||||||
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
|
|
||||||
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
|
|
||||||
// W1 = 110110yyyyyyyyyy
|
|
||||||
// W2 = 110111xxxxxxxxxx
|
|
||||||
//
|
|
||||||
// where U is the character value, W1 is the high surrogate
|
|
||||||
// area, W2 is the low surrogate area.
|
|
||||||
|
|
||||||
// Check for incomplete UTF-16 character.
|
|
||||||
if raw_unread < 2 {
|
|
||||||
if parser.eof {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"incomplete UTF-16 character",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
break inner
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the character.
|
|
||||||
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
|
|
||||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
|
|
||||||
|
|
||||||
// Check for unexpected low surrogate area.
|
|
||||||
if value&0xFC00 == 0xDC00 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"unexpected low surrogate area",
|
|
||||||
parser.offset, int(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for a high surrogate area.
|
|
||||||
if value&0xFC00 == 0xD800 {
|
|
||||||
width = 4
|
|
||||||
|
|
||||||
// Check for incomplete surrogate pair.
|
|
||||||
if raw_unread < 4 {
|
|
||||||
if parser.eof {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"incomplete UTF-16 surrogate pair",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
break inner
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the next character.
|
|
||||||
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
|
|
||||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
|
|
||||||
|
|
||||||
// Check for a low surrogate area.
|
|
||||||
if value2&0xFC00 != 0xDC00 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"expected low surrogate area",
|
|
||||||
parser.offset+2, int(value2))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate the value of the surrogate pair.
|
|
||||||
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
|
|
||||||
} else {
|
|
||||||
width = 2
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic("impossible")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the character is in the allowed range:
|
|
||||||
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
|
|
||||||
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
|
|
||||||
// | [#x10000-#x10FFFF] (32 bit)
|
|
||||||
switch {
|
|
||||||
case value == 0x09:
|
|
||||||
case value == 0x0A:
|
|
||||||
case value == 0x0D:
|
|
||||||
case value >= 0x20 && value <= 0x7E:
|
|
||||||
case value == 0x85:
|
|
||||||
case value >= 0xA0 && value <= 0xD7FF:
|
|
||||||
case value >= 0xE000 && value <= 0xFFFD:
|
|
||||||
case value >= 0x10000 && value <= 0x10FFFF:
|
|
||||||
default:
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"control characters are not allowed",
|
|
||||||
parser.offset, int(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move the raw pointers.
|
|
||||||
parser.raw_buffer_pos += width
|
|
||||||
parser.offset += width
|
|
||||||
|
|
||||||
// Finally put the character into the buffer.
|
|
||||||
if value <= 0x7F {
|
|
||||||
// 0000 0000-0000 007F . 0xxxxxxx
|
|
||||||
parser.buffer[buffer_len+0] = byte(value)
|
|
||||||
buffer_len += 1
|
|
||||||
} else if value <= 0x7FF {
|
|
||||||
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
|
|
||||||
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
|
|
||||||
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
|
|
||||||
buffer_len += 2
|
|
||||||
} else if value <= 0xFFFF {
|
|
||||||
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
|
|
||||||
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
|
|
||||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
|
|
||||||
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
|
|
||||||
buffer_len += 3
|
|
||||||
} else {
|
|
||||||
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
|
||||||
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
|
|
||||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
|
|
||||||
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
|
|
||||||
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
|
|
||||||
buffer_len += 4
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.unread++
|
|
||||||
}
|
|
||||||
|
|
||||||
// On EOF, put NUL into the buffer and return.
|
|
||||||
if parser.eof {
|
|
||||||
parser.buffer[buffer_len] = 0
|
|
||||||
buffer_len++
|
|
||||||
parser.unread++
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// [Go] Read the documentation of this function above. To return true,
|
|
||||||
// we need to have the given length in the buffer. Not doing that means
|
|
||||||
// every single check that calls this function to make sure the buffer
|
|
||||||
// has a given length is Go) panicking; or C) accessing invalid memory.
|
|
||||||
// This happens here due to the EOF above breaking early.
|
|
||||||
for buffer_len < length {
|
|
||||||
parser.buffer[buffer_len] = 0
|
|
||||||
buffer_len++
|
|
||||||
}
|
|
||||||
parser.buffer = parser.buffer[:buffer_len]
|
|
||||||
return true
|
|
||||||
}
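The surrogate-pair formulas in the UTF-16 branch above can be checked with a concrete value; U+1F600, for instance, travels as the pair W1=0xD83D, W2=0xDE00. A tiny worked example of the same arithmetic:

package main

import "fmt"

func main() {
	// Same decoding formula as the reader above:
	// U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF)
	w1, w2 := rune(0xD83D), rune(0xDE00)
	u := 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
	fmt.Printf("U+%04X\n", u) // U+1F600
}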
|
|
258
vendor/gopkg.in/yaml.v2/resolve.go
generated
vendored
@ -1,258 +0,0 @@
package yaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/base64"
|
|
||||||
"math"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type resolveMapItem struct {
|
|
||||||
value interface{}
|
|
||||||
tag string
|
|
||||||
}
|
|
||||||
|
|
||||||
var resolveTable = make([]byte, 256)
|
|
||||||
var resolveMap = make(map[string]resolveMapItem)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
t := resolveTable
|
|
||||||
t[int('+')] = 'S' // Sign
|
|
||||||
t[int('-')] = 'S'
|
|
||||||
for _, c := range "0123456789" {
|
|
||||||
t[int(c)] = 'D' // Digit
|
|
||||||
}
|
|
||||||
for _, c := range "yYnNtTfFoO~" {
|
|
||||||
t[int(c)] = 'M' // In map
|
|
||||||
}
|
|
||||||
t[int('.')] = '.' // Float (potentially in map)
|
|
||||||
|
|
||||||
var resolveMapList = []struct {
|
|
||||||
v interface{}
|
|
||||||
tag string
|
|
||||||
l []string
|
|
||||||
}{
|
|
||||||
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
|
|
||||||
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
|
|
||||||
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
|
|
||||||
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
|
|
||||||
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
|
|
||||||
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
|
|
||||||
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
|
|
||||||
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
|
|
||||||
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
|
|
||||||
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
|
|
||||||
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
|
|
||||||
{"<<", yaml_MERGE_TAG, []string{"<<"}},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := resolveMap
|
|
||||||
for _, item := range resolveMapList {
|
|
||||||
for _, s := range item.l {
|
|
||||||
m[s] = resolveMapItem{item.v, item.tag}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
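This table is what makes unquoted scalars like "yes", "~" or ".inf" resolve to typed Go values when decoding into a generic target. A short sketch of the observable behaviour through the public API:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v map[string]interface{}
	doc := "a: yes\nb: ~\nc: .inf\nd: \"yes\"\n"
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v["a"], v["a"]) // bool true  (YAML 1.1 boolean)
	fmt.Printf("%v\n", v["b"])            // <nil>      (null)
	fmt.Printf("%v\n", v["c"])            // +Inf       (float64)
	fmt.Printf("%T\n", v["d"])            // string     (quoted, so !!str)
}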
|
|
||||||
|
|
||||||
const longTagPrefix = "tag:yaml.org,2002:"
|
|
||||||
|
|
||||||
func shortTag(tag string) string {
|
|
||||||
// TODO This can easily be made faster and produce less garbage.
|
|
||||||
if strings.HasPrefix(tag, longTagPrefix) {
|
|
||||||
return "!!" + tag[len(longTagPrefix):]
|
|
||||||
}
|
|
||||||
return tag
|
|
||||||
}
|
|
||||||
|
|
||||||
func longTag(tag string) string {
|
|
||||||
if strings.HasPrefix(tag, "!!") {
|
|
||||||
return longTagPrefix + tag[2:]
|
|
||||||
}
|
|
||||||
return tag
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolvableTag(tag string) bool {
|
|
||||||
switch tag {
|
|
||||||
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
|
|
||||||
|
|
||||||
func resolve(tag string, in string) (rtag string, out interface{}) {
|
|
||||||
if !resolvableTag(tag) {
|
|
||||||
return tag, in
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
switch tag {
|
|
||||||
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
|
|
||||||
return
|
|
||||||
case yaml_FLOAT_TAG:
|
|
||||||
if rtag == yaml_INT_TAG {
|
|
||||||
switch v := out.(type) {
|
|
||||||
case int64:
|
|
||||||
rtag = yaml_FLOAT_TAG
|
|
||||||
out = float64(v)
|
|
||||||
return
|
|
||||||
case int:
|
|
||||||
rtag = yaml_FLOAT_TAG
|
|
||||||
out = float64(v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Any data is accepted as a !!str or !!binary.
|
|
||||||
// Otherwise, the prefix is enough of a hint about what it might be.
|
|
||||||
hint := byte('N')
|
|
||||||
if in != "" {
|
|
||||||
hint = resolveTable[in[0]]
|
|
||||||
}
|
|
||||||
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
|
|
||||||
// Handle things we can lookup in a map.
|
|
||||||
if item, ok := resolveMap[in]; ok {
|
|
||||||
return item.tag, item.value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
|
|
||||||
// are purposefully unsupported here. They're still quoted on
|
|
||||||
// the way out for compatibility with other parsers, though.
|
|
||||||
|
|
||||||
switch hint {
|
|
||||||
case 'M':
|
|
||||||
// We've already checked the map above.
|
|
||||||
|
|
||||||
case '.':
|
|
||||||
// Not in the map, so maybe a normal float.
|
|
||||||
floatv, err := strconv.ParseFloat(in, 64)
|
|
||||||
if err == nil {
|
|
||||||
return yaml_FLOAT_TAG, floatv
|
|
||||||
}
|
|
||||||
|
|
||||||
case 'D', 'S':
|
|
||||||
// Int, float, or timestamp.
|
|
||||||
// Only try values as a timestamp if the value is unquoted or there's an explicit
|
|
||||||
// !!timestamp tag.
|
|
||||||
if tag == "" || tag == yaml_TIMESTAMP_TAG {
|
|
||||||
t, ok := parseTimestamp(in)
|
|
||||||
if ok {
|
|
||||||
return yaml_TIMESTAMP_TAG, t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
plain := strings.Replace(in, "_", "", -1)
|
|
||||||
intv, err := strconv.ParseInt(plain, 0, 64)
|
|
||||||
if err == nil {
|
|
||||||
if intv == int64(int(intv)) {
|
|
||||||
return yaml_INT_TAG, int(intv)
|
|
||||||
} else {
|
|
||||||
return yaml_INT_TAG, intv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
uintv, err := strconv.ParseUint(plain, 0, 64)
|
|
||||||
if err == nil {
|
|
||||||
return yaml_INT_TAG, uintv
|
|
||||||
}
|
|
||||||
if yamlStyleFloat.MatchString(plain) {
|
|
||||||
floatv, err := strconv.ParseFloat(plain, 64)
|
|
||||||
if err == nil {
|
|
||||||
return yaml_FLOAT_TAG, floatv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(plain, "0b") {
|
|
||||||
intv, err := strconv.ParseInt(plain[2:], 2, 64)
|
|
||||||
if err == nil {
|
|
||||||
if intv == int64(int(intv)) {
|
|
||||||
return yaml_INT_TAG, int(intv)
|
|
||||||
} else {
|
|
||||||
return yaml_INT_TAG, intv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
|
|
||||||
if err == nil {
|
|
||||||
return yaml_INT_TAG, uintv
|
|
||||||
}
|
|
||||||
} else if strings.HasPrefix(plain, "-0b") {
|
|
||||||
intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
|
|
||||||
if err == nil {
|
|
||||||
if true || intv == int64(int(intv)) {
|
|
||||||
return yaml_INT_TAG, int(intv)
|
|
||||||
} else {
|
|
||||||
return yaml_INT_TAG, intv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return yaml_STR_TAG, in
|
|
||||||
}
|
|
||||||
|
|
||||||
// encodeBase64 encodes s as base64 that is broken up into multiple lines
|
|
||||||
// as appropriate for the resulting length.
|
|
||||||
func encodeBase64(s string) string {
|
|
||||||
const lineLen = 70
|
|
||||||
encLen := base64.StdEncoding.EncodedLen(len(s))
|
|
||||||
lines := encLen/lineLen + 1
|
|
||||||
buf := make([]byte, encLen*2+lines)
|
|
||||||
in := buf[0:encLen]
|
|
||||||
out := buf[encLen:]
|
|
||||||
base64.StdEncoding.Encode(in, []byte(s))
|
|
||||||
k := 0
|
|
||||||
for i := 0; i < len(in); i += lineLen {
|
|
||||||
j := i + lineLen
|
|
||||||
if j > len(in) {
|
|
||||||
j = len(in)
|
|
||||||
}
|
|
||||||
k += copy(out[k:], in[i:j])
|
|
||||||
if lines > 1 {
|
|
||||||
out[k] = '\n'
|
|
||||||
k++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(out[:k])
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a subset of the formats allowed by the regular expression
|
|
||||||
// defined at http://yaml.org/type/timestamp.html.
|
|
||||||
var allowedTimestampFormats = []string{
|
|
||||||
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
|
|
||||||
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
|
|
||||||
"2006-1-2 15:4:5.999999999", // space separated with no time zone
|
|
||||||
"2006-1-2", // date only
|
|
||||||
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
|
|
||||||
// from the set of examples.
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTimestamp parses s as a timestamp string and
|
|
||||||
// returns the timestamp and reports whether it succeeded.
|
|
||||||
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
|
|
||||||
func parseTimestamp(s string) (time.Time, bool) {
|
|
||||||
// TODO write code to check all the formats supported by
|
|
||||||
// http://yaml.org/type/timestamp.html instead of using time.Parse.
|
|
||||||
|
|
||||||
// Quick check: all date formats start with YYYY-.
|
|
||||||
i := 0
|
|
||||||
for ; i < len(s); i++ {
|
|
||||||
if c := s[i]; c < '0' || c > '9' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if i != 4 || i == len(s) || s[i] != '-' {
|
|
||||||
return time.Time{}, false
|
|
||||||
}
|
|
||||||
for _, format := range allowedTimestampFormats {
|
|
||||||
if t, err := time.Parse(format, s); err == nil {
|
|
||||||
return t, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return time.Time{}, false
|
|
||||||
}
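parseTimestamp is what lets an unquoted scalar land directly in a time.Time field. A minimal sketch, assuming only the public Unmarshal API:

package main

import (
	"fmt"
	"time"

	yaml "gopkg.in/yaml.v2"
)

type event struct {
	At time.Time `yaml:"at"`
}

func main() {
	var e event
	// Unquoted, so the scalar is tried against the allowed timestamp formats.
	if err := yaml.Unmarshal([]byte("at: 2015-02-24T18:19:39Z\n"), &e); err != nil {
		panic(err)
	}
	fmt.Println(e.At.UTC().Format(time.RFC3339)) // 2015-02-24T18:19:39Z
}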
|
|
2696
vendor/gopkg.in/yaml.v2/scannerc.go
generated
vendored
File diff suppressed because it is too large
113
vendor/gopkg.in/yaml.v2/sorter.go
generated
vendored
@ -1,113 +0,0 @@
package yaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
type keyList []reflect.Value
|
|
||||||
|
|
||||||
func (l keyList) Len() int { return len(l) }
|
|
||||||
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
|
||||||
func (l keyList) Less(i, j int) bool {
|
|
||||||
a := l[i]
|
|
||||||
b := l[j]
|
|
||||||
ak := a.Kind()
|
|
||||||
bk := b.Kind()
|
|
||||||
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
|
|
||||||
a = a.Elem()
|
|
||||||
ak = a.Kind()
|
|
||||||
}
|
|
||||||
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
|
|
||||||
b = b.Elem()
|
|
||||||
bk = b.Kind()
|
|
||||||
}
|
|
||||||
af, aok := keyFloat(a)
|
|
||||||
bf, bok := keyFloat(b)
|
|
||||||
if aok && bok {
|
|
||||||
if af != bf {
|
|
||||||
return af < bf
|
|
||||||
}
|
|
||||||
if ak != bk {
|
|
||||||
return ak < bk
|
|
||||||
}
|
|
||||||
return numLess(a, b)
|
|
||||||
}
|
|
||||||
if ak != reflect.String || bk != reflect.String {
|
|
||||||
return ak < bk
|
|
||||||
}
|
|
||||||
ar, br := []rune(a.String()), []rune(b.String())
|
|
||||||
for i := 0; i < len(ar) && i < len(br); i++ {
|
|
||||||
if ar[i] == br[i] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
al := unicode.IsLetter(ar[i])
|
|
||||||
bl := unicode.IsLetter(br[i])
|
|
||||||
if al && bl {
|
|
||||||
return ar[i] < br[i]
|
|
||||||
}
|
|
||||||
if al || bl {
|
|
||||||
return bl
|
|
||||||
}
|
|
||||||
var ai, bi int
|
|
||||||
var an, bn int64
|
|
||||||
if ar[i] == '0' || br[i] == '0' {
|
|
||||||
for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
|
|
||||||
if ar[j] != '0' {
|
|
||||||
an = 1
|
|
||||||
bn = 1
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
|
|
||||||
an = an*10 + int64(ar[ai]-'0')
|
|
||||||
}
|
|
||||||
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
|
|
||||||
bn = bn*10 + int64(br[bi]-'0')
|
|
||||||
}
|
|
||||||
if an != bn {
|
|
||||||
return an < bn
|
|
||||||
}
|
|
||||||
if ai != bi {
|
|
||||||
return ai < bi
|
|
||||||
}
|
|
||||||
return ar[i] < br[i]
|
|
||||||
}
|
|
||||||
return len(ar) < len(br)
|
|
||||||
}
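The Less method above gives numbers priority over strings and compares embedded digit runs numerically, so marshalled map keys come out in a natural order rather than a plain lexical one. A quick sketch:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"item10": "c",
		"item2":  "b",
		"item1":  "a",
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item1: a
	// item2: b
	// item10: c   <- digit runs compared as numbers, so 10 sorts after 2
}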
|
|
||||||
|
|
||||||
// keyFloat returns a float value for v if it is a number/bool
|
|
||||||
// and whether it is a number/bool or not.
|
|
||||||
func keyFloat(v reflect.Value) (f float64, ok bool) {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return float64(v.Int()), true
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return v.Float(), true
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
return float64(v.Uint()), true
|
|
||||||
case reflect.Bool:
|
|
||||||
if v.Bool() {
|
|
||||||
return 1, true
|
|
||||||
}
|
|
||||||
return 0, true
|
|
||||||
}
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// numLess returns whether a < b.
|
|
||||||
// a and b must necessarily have the same kind.
|
|
||||||
func numLess(a, b reflect.Value) bool {
|
|
||||||
switch a.Kind() {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return a.Int() < b.Int()
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return a.Float() < b.Float()
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
return a.Uint() < b.Uint()
|
|
||||||
case reflect.Bool:
|
|
||||||
return !a.Bool() && b.Bool()
|
|
||||||
}
|
|
||||||
panic("not a number")
|
|
||||||
}
|
|
26
vendor/gopkg.in/yaml.v2/writerc.go
generated
vendored
@ -1,26 +0,0 @@
package yaml

// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
	emitter.error = yaml_WRITER_ERROR
	emitter.problem = problem
	return false
}

// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
	if emitter.write_handler == nil {
		panic("write handler not set")
	}

	// Check if the buffer is empty.
	if emitter.buffer_pos == 0 {
		return true
	}

	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
	}
	emitter.buffer_pos = 0
	return true
}
466
vendor/gopkg.in/yaml.v2/yaml.go
generated
vendored
@ -1,466 +0,0 @@
// Package yaml implements YAML support for the Go language.
|
|
||||||
//
|
|
||||||
// Source code and other details for the project are available at GitHub:
|
|
||||||
//
|
|
||||||
// https://github.com/go-yaml/yaml
|
|
||||||
//
|
|
||||||
package yaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MapSlice encodes and decodes as a YAML map.
|
|
||||||
// The order of keys is preserved when encoding and decoding.
|
|
||||||
type MapSlice []MapItem
|
|
||||||
|
|
||||||
// MapItem is an item in a MapSlice.
|
|
||||||
type MapItem struct {
|
|
||||||
Key, Value interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Unmarshaler interface may be implemented by types to customize their
|
|
||||||
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
|
|
||||||
// method receives a function that may be called to unmarshal the original
|
|
||||||
// YAML value into a field or variable. It is safe to call the unmarshal
|
|
||||||
// function parameter more than once if necessary.
|
|
||||||
type Unmarshaler interface {
|
|
||||||
UnmarshalYAML(unmarshal func(interface{}) error) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Marshaler interface may be implemented by types to customize their
|
|
||||||
// behavior when being marshaled into a YAML document. The returned value
|
|
||||||
// is marshaled in place of the original value implementing Marshaler.
|
|
||||||
//
|
|
||||||
// If an error is returned by MarshalYAML, the marshaling procedure stops
|
|
||||||
// and returns with the provided error.
|
|
||||||
type Marshaler interface {
|
|
||||||
MarshalYAML() (interface{}, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal decodes the first document found within the in byte slice
|
|
||||||
// and assigns decoded values into the out value.
|
|
||||||
//
|
|
||||||
// Maps and pointers (to a struct, string, int, etc) are accepted as out
|
|
||||||
// values. If an internal pointer within a struct is not initialized,
|
|
||||||
// the yaml package will initialize it if necessary for unmarshalling
|
|
||||||
// the provided data. The out parameter must not be nil.
|
|
||||||
//
|
|
||||||
// The type of the decoded values should be compatible with the respective
|
|
||||||
// values in out. If one or more values cannot be decoded due to a type
|
|
||||||
// mismatches, decoding continues partially until the end of the YAML
|
|
||||||
// content, and a *yaml.TypeError is returned with details for all
|
|
||||||
// missed values.
|
|
||||||
//
|
|
||||||
// Struct fields are only unmarshalled if they are exported (have an
|
|
||||||
// upper case first letter), and are unmarshalled using the field name
|
|
||||||
// lowercased as the default key. Custom keys may be defined via the
|
|
||||||
// "yaml" name in the field tag: the content preceding the first comma
|
|
||||||
// is used as the key, and the following comma-separated options are
|
|
||||||
// used to tweak the marshalling process (see Marshal).
|
|
||||||
// Conflicting names result in a runtime error.
|
|
||||||
//
|
|
||||||
// For example:
|
|
||||||
//
|
|
||||||
// type T struct {
|
|
||||||
// F int `yaml:"a,omitempty"`
|
|
||||||
// B int
|
|
||||||
// }
|
|
||||||
// var t T
|
|
||||||
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
|
||||||
//
|
|
||||||
// See the documentation of Marshal for the format of tags and a list of
|
|
||||||
// supported tag options.
|
|
||||||
//
|
|
||||||
func Unmarshal(in []byte, out interface{}) (err error) {
|
|
||||||
return unmarshal(in, out, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalStrict is like Unmarshal except that any fields that are found
|
|
||||||
// in the data that do not have corresponding struct members, or mapping
|
|
||||||
// keys that are duplicates, will result in
|
|
||||||
// an error.
|
|
||||||
func UnmarshalStrict(in []byte, out interface{}) (err error) {
|
|
||||||
return unmarshal(in, out, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Decoder reads and decodes YAML values from an input stream.
|
|
||||||
type Decoder struct {
|
|
||||||
strict bool
|
|
||||||
parser *parser
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDecoder returns a new decoder that reads from r.
|
|
||||||
//
|
|
||||||
// The decoder introduces its own buffering and may read
|
|
||||||
// data from r beyond the YAML values requested.
|
|
||||||
func NewDecoder(r io.Reader) *Decoder {
|
|
||||||
return &Decoder{
|
|
||||||
parser: newParserFromReader(r),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStrict sets whether strict decoding behaviour is enabled when
|
|
||||||
// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
|
|
||||||
func (dec *Decoder) SetStrict(strict bool) {
|
|
||||||
dec.strict = strict
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode reads the next YAML-encoded value from its input
|
|
||||||
// and stores it in the value pointed to by v.
|
|
||||||
//
|
|
||||||
// See the documentation for Unmarshal for details about the
|
|
||||||
// conversion of YAML into a Go value.
|
|
||||||
func (dec *Decoder) Decode(v interface{}) (err error) {
|
|
||||||
d := newDecoder(dec.strict)
|
|
||||||
defer handleErr(&err)
|
|
||||||
node := dec.parser.parse()
|
|
||||||
if node == nil {
|
|
||||||
return io.EOF
|
|
||||||
}
|
|
||||||
out := reflect.ValueOf(v)
|
|
||||||
if out.Kind() == reflect.Ptr && !out.IsNil() {
|
|
||||||
out = out.Elem()
|
|
||||||
}
|
|
||||||
d.unmarshal(node, out)
|
|
||||||
if len(d.terrors) > 0 {
|
|
||||||
return &TypeError{d.terrors}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
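Decode returns io.EOF once the stream is exhausted, so multi-document streams are read with the usual loop. A small sketch:

package main

import (
	"fmt"
	"io"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	stream := "name: one\n---\nname: two\n"
	dec := yaml.NewDecoder(strings.NewReader(stream))
	for {
		var doc struct {
			Name string `yaml:"name"`
		}
		err := dec.Decode(&doc)
		if err == io.EOF {
			break // no more documents in the stream
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc.Name) // one, then two
	}
}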
|
|
||||||
|
|
||||||
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
|
|
||||||
defer handleErr(&err)
|
|
||||||
d := newDecoder(strict)
|
|
||||||
p := newParser(in)
|
|
||||||
defer p.destroy()
|
|
||||||
node := p.parse()
|
|
||||||
if node != nil {
|
|
||||||
v := reflect.ValueOf(out)
|
|
||||||
if v.Kind() == reflect.Ptr && !v.IsNil() {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
d.unmarshal(node, v)
|
|
||||||
}
|
|
||||||
if len(d.terrors) > 0 {
|
|
||||||
return &TypeError{d.terrors}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal serializes the value provided into a YAML document. The structure
|
|
||||||
// of the generated document will reflect the structure of the value itself.
|
|
||||||
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
|
|
||||||
//
|
|
||||||
// Struct fields are only marshalled if they are exported (have an upper case
|
|
||||||
// first letter), and are marshalled using the field name lowercased as the
|
|
||||||
// default key. Custom keys may be defined via the "yaml" name in the field
|
|
||||||
// tag: the content preceding the first comma is used as the key, and the
|
|
||||||
// following comma-separated options are used to tweak the marshalling process.
|
|
||||||
// Conflicting names result in a runtime error.
|
|
||||||
//
|
|
||||||
// The field tag format accepted is:
|
|
||||||
//
|
|
||||||
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
|
||||||
//
|
|
||||||
// The following flags are currently supported:
|
|
||||||
//
|
|
||||||
// omitempty Only include the field if it's not set to the zero
|
|
||||||
// value for the type or to empty slices or maps.
|
|
||||||
// Zero valued structs will be omitted if all their public
|
|
||||||
// fields are zero, unless they implement an IsZero
|
|
||||||
// method (see the IsZeroer interface type), in which
|
|
||||||
// case the field will be included if that method returns true.
|
|
||||||
//
|
|
||||||
// flow Marshal using a flow style (useful for structs,
|
|
||||||
// sequences and maps).
|
|
||||||
//
|
|
||||||
// inline Inline the field, which must be a struct or a map,
|
|
||||||
// causing all of its fields or keys to be processed as if
|
|
||||||
// they were part of the outer struct. For maps, keys must
|
|
||||||
// not conflict with the yaml keys of other struct fields.
|
|
||||||
//
|
|
||||||
// In addition, if the key is "-", the field is ignored.
|
|
||||||
//
|
|
||||||
// For example:
|
|
||||||
//
|
|
||||||
// type T struct {
|
|
||||||
// F int `yaml:"a,omitempty"`
|
|
||||||
// B int
|
|
||||||
// }
|
|
||||||
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
|
||||||
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
|
|
||||||
//
|
|
||||||
func Marshal(in interface{}) (out []byte, err error) {
|
|
||||||
defer handleErr(&err)
|
|
||||||
e := newEncoder()
|
|
||||||
defer e.destroy()
|
|
||||||
e.marshalDoc("", reflect.ValueOf(in))
|
|
||||||
e.finish()
|
|
||||||
out = e.out
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Encoder writes YAML values to an output stream.
|
|
||||||
type Encoder struct {
|
|
||||||
encoder *encoder
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEncoder returns a new encoder that writes to w.
|
|
||||||
// The Encoder should be closed after use to flush all data
|
|
||||||
// to w.
|
|
||||||
func NewEncoder(w io.Writer) *Encoder {
|
|
||||||
return &Encoder{
|
|
||||||
encoder: newEncoderWithWriter(w),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode writes the YAML encoding of v to the stream.
|
|
||||||
// If multiple items are encoded to the stream, the
|
|
||||||
// second and subsequent document will be preceded
|
|
||||||
// with a "---" document separator, but the first will not.
|
|
||||||
//
|
|
||||||
// See the documentation for Marshal for details about the conversion of Go
|
|
||||||
// values to YAML.
|
|
||||||
func (e *Encoder) Encode(v interface{}) (err error) {
|
|
||||||
defer handleErr(&err)
|
|
||||||
e.encoder.marshalDoc("", reflect.ValueOf(v))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the encoder by writing any remaining data.
|
|
||||||
// It does not write a stream terminating string "...".
|
|
||||||
func (e *Encoder) Close() (err error) {
|
|
||||||
defer handleErr(&err)
|
|
||||||
e.encoder.finish()
|
|
||||||
return nil
|
|
||||||
}
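On the writer side, Encode may be called repeatedly to emit a multi-document stream, and Close flushes the emitter. A minimal sketch:

package main

import (
	"bytes"
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	// Each Encode call after the first is preceded by a "---" separator.
	if err := enc.Encode(map[string]int{"first": 1}); err != nil {
		panic(err)
	}
	if err := enc.Encode(map[string]int{"second": 2}); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flush remaining buffered output
		panic(err)
	}
	fmt.Print(buf.String())
	// first: 1
	// ---
	// second: 2
}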
|
|
||||||
|
|
||||||
func handleErr(err *error) {
|
|
||||||
if v := recover(); v != nil {
|
|
||||||
if e, ok := v.(yamlError); ok {
|
|
||||||
*err = e.err
|
|
||||||
} else {
|
|
||||||
panic(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type yamlError struct {
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func fail(err error) {
|
|
||||||
panic(yamlError{err})
|
|
||||||
}
|
|
||||||
|
|
||||||
func failf(format string, args ...interface{}) {
|
|
||||||
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
|
|
||||||
}
|
|
||||||
|
|
||||||
// A TypeError is returned by Unmarshal when one or more fields in
|
|
||||||
// the YAML document cannot be properly decoded into the requested
|
|
||||||
// types. When this error is returned, the value is still
|
|
||||||
// unmarshaled partially.
|
|
||||||
type TypeError struct {
|
|
||||||
Errors []string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *TypeError) Error() string {
|
|
||||||
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
|
|
||||||
}
|
|
||||||
|
|
||||||
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes

// The code in this section was copied from mgo/bson.

// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
    FieldsMap  map[string]fieldInfo
    FieldsList []fieldInfo

    // InlineMap is the number of the field in the struct that
    // contains an ,inline map, or -1 if there's none.
    InlineMap int
}

type fieldInfo struct {
    Key       string
    Num       int
    OmitEmpty bool
    Flow      bool
    // Id holds the unique field identifier, so we can cheaply
    // check for field duplicates without maintaining an extra map.
    Id int

    // Inline holds the field index if the field is part of an inlined struct.
    Inline []int
}

var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex

func getStructInfo(st reflect.Type) (*structInfo, error) {
    fieldMapMutex.RLock()
    sinfo, found := structMap[st]
    fieldMapMutex.RUnlock()
    if found {
        return sinfo, nil
    }

    n := st.NumField()
    fieldsMap := make(map[string]fieldInfo)
    fieldsList := make([]fieldInfo, 0, n)
    inlineMap := -1
    for i := 0; i != n; i++ {
        field := st.Field(i)
        if field.PkgPath != "" && !field.Anonymous {
            continue // Private field
        }

        info := fieldInfo{Num: i}

        tag := field.Tag.Get("yaml")
        if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
            tag = string(field.Tag)
        }
        if tag == "-" {
            continue
        }

        inline := false
        fields := strings.Split(tag, ",")
        if len(fields) > 1 {
            for _, flag := range fields[1:] {
                switch flag {
                case "omitempty":
                    info.OmitEmpty = true
                case "flow":
                    info.Flow = true
                case "inline":
                    inline = true
                default:
                    return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
                }
            }
            tag = fields[0]
        }

        if inline {
            switch field.Type.Kind() {
            case reflect.Map:
                if inlineMap >= 0 {
                    return nil, errors.New("Multiple ,inline maps in struct " + st.String())
                }
                if field.Type.Key() != reflect.TypeOf("") {
                    return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
                }
                inlineMap = info.Num
            case reflect.Struct:
                sinfo, err := getStructInfo(field.Type)
                if err != nil {
                    return nil, err
                }
                for _, finfo := range sinfo.FieldsList {
                    if _, found := fieldsMap[finfo.Key]; found {
                        msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
                        return nil, errors.New(msg)
                    }
                    if finfo.Inline == nil {
                        finfo.Inline = []int{i, finfo.Num}
                    } else {
                        finfo.Inline = append([]int{i}, finfo.Inline...)
                    }
                    finfo.Id = len(fieldsList)
                    fieldsMap[finfo.Key] = finfo
                    fieldsList = append(fieldsList, finfo)
                }
            default:
                //return nil, errors.New("Option ,inline needs a struct value or map field")
                return nil, errors.New("Option ,inline needs a struct value field")
            }
            continue
        }

        if tag != "" {
            info.Key = tag
        } else {
            info.Key = strings.ToLower(field.Name)
        }

        if _, found = fieldsMap[info.Key]; found {
            msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
            return nil, errors.New(msg)
        }

        info.Id = len(fieldsList)
        fieldsList = append(fieldsList, info)
        fieldsMap[info.Key] = info
    }

    sinfo = &structInfo{
        FieldsMap:  fieldsMap,
        FieldsList: fieldsList,
        InlineMap:  inlineMap,
    }

    fieldMapMutex.Lock()
    structMap[st] = sinfo
    fieldMapMutex.Unlock()
    return sinfo, nil
}

// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
    IsZero() bool
}

func isZero(v reflect.Value) bool {
    kind := v.Kind()
    if z, ok := v.Interface().(IsZeroer); ok {
        if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
            return true
        }
        return z.IsZero()
    }
    switch kind {
    case reflect.String:
        return len(v.String()) == 0
    case reflect.Interface, reflect.Ptr:
        return v.IsNil()
    case reflect.Slice:
        return v.Len() == 0
    case reflect.Map:
        return v.Len() == 0
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return v.Int() == 0
    case reflect.Float32, reflect.Float64:
        return v.Float() == 0
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return v.Uint() == 0
    case reflect.Bool:
        return !v.Bool()
    case reflect.Struct:
        vt := v.Type()
        for i := v.NumField() - 1; i >= 0; i-- {
            if vt.Field(i).PkgPath != "" {
                continue // Private field
            }
            if !isZero(v.Field(i)) {
                return false
            }
        }
        return true
    }
    return false
}
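A small sketch of the IsZeroer hook described above (hypothetical types, assuming yaml "gopkg.in/yaml.v2"): a custom type can opt into omitempty by implementing IsZero, the same way time.Time does.

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v2"
)

type Window struct {
    Lo, Hi int
}

// IsZero reports whether the window is unset; omitempty consults it.
func (w Window) IsZero() bool { return w.Lo == 0 && w.Hi == 0 }

type Job struct {
    Name   string `yaml:"name"`
    Window Window `yaml:"window,omitempty"`
}

func main() {
    out, _ := yaml.Marshal(Job{Name: "backup"})
    fmt.Print(string(out)) // prints only "name: backup" because Window.IsZero() is true
}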
738
vendor/gopkg.in/yaml.v2/yamlh.go
generated
vendored
@ -1,738 +0,0 @@
package yaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The version directive data.
|
|
||||||
type yaml_version_directive_t struct {
|
|
||||||
major int8 // The major version number.
|
|
||||||
minor int8 // The minor version number.
|
|
||||||
}
|
|
||||||
|
|
||||||
// The tag directive data.
|
|
||||||
type yaml_tag_directive_t struct {
|
|
||||||
handle []byte // The tag handle.
|
|
||||||
prefix []byte // The tag prefix.
|
|
||||||
}
|
|
||||||
|
|
||||||
type yaml_encoding_t int
|
|
||||||
|
|
||||||
// The stream encoding.
|
|
||||||
const (
|
|
||||||
// Let the parser choose the encoding.
|
|
||||||
yaml_ANY_ENCODING yaml_encoding_t = iota
|
|
||||||
|
|
||||||
yaml_UTF8_ENCODING // The default UTF-8 encoding.
|
|
||||||
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
|
|
||||||
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
|
|
||||||
)
|
|
||||||
|
|
||||||
type yaml_break_t int
|
|
||||||
|
|
||||||
// Line break types.
|
|
||||||
const (
|
|
||||||
// Let the parser choose the break type.
|
|
||||||
yaml_ANY_BREAK yaml_break_t = iota
|
|
||||||
|
|
||||||
yaml_CR_BREAK // Use CR for line breaks (Mac style).
|
|
||||||
yaml_LN_BREAK // Use LN for line breaks (Unix style).
|
|
||||||
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
|
|
||||||
)
|
|
||||||
|
|
||||||
type yaml_error_type_t int
|
|
||||||
|
|
||||||
// Many bad things could happen with the parser and emitter.
|
|
||||||
const (
|
|
||||||
// No error is produced.
|
|
||||||
yaml_NO_ERROR yaml_error_type_t = iota
|
|
||||||
|
|
||||||
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
|
|
||||||
yaml_READER_ERROR // Cannot read or decode the input stream.
|
|
||||||
yaml_SCANNER_ERROR // Cannot scan the input stream.
|
|
||||||
yaml_PARSER_ERROR // Cannot parse the input stream.
|
|
||||||
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
|
|
||||||
yaml_WRITER_ERROR // Cannot write to the output stream.
|
|
||||||
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
|
|
||||||
)
|
|
||||||
|
|
||||||
// The pointer position.
|
|
||||||
type yaml_mark_t struct {
|
|
||||||
index int // The position index.
|
|
||||||
line int // The position line.
|
|
||||||
column int // The position column.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Node Styles
|
|
||||||
|
|
||||||
type yaml_style_t int8
|
|
||||||
|
|
||||||
type yaml_scalar_style_t yaml_style_t
|
|
||||||
|
|
||||||
// Scalar styles.
|
|
||||||
const (
|
|
||||||
// Let the emitter choose the style.
|
|
||||||
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
|
|
||||||
|
|
||||||
yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
|
|
||||||
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
|
|
||||||
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
|
|
||||||
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
|
|
||||||
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
|
|
||||||
)
|
|
||||||
|
|
||||||
type yaml_sequence_style_t yaml_style_t
|
|
||||||
|
|
||||||
// Sequence styles.
|
|
||||||
const (
|
|
||||||
// Let the emitter choose the style.
|
|
||||||
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
|
|
||||||
|
|
||||||
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
|
|
||||||
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
|
|
||||||
)
|
|
||||||
|
|
||||||
type yaml_mapping_style_t yaml_style_t
|
|
||||||
|
|
||||||
// Mapping styles.
|
|
||||||
const (
|
|
||||||
// Let the emitter choose the style.
|
|
||||||
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
|
|
||||||
|
|
||||||
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
|
|
||||||
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
|
|
||||||
)
|
|
||||||
|
|
||||||
// Tokens
|
|
||||||
|
|
||||||
type yaml_token_type_t int
|
|
||||||
|
|
||||||
// Token types.
|
|
||||||
const (
|
|
||||||
// An empty token.
|
|
||||||
yaml_NO_TOKEN yaml_token_type_t = iota
|
|
||||||
|
|
||||||
yaml_STREAM_START_TOKEN // A STREAM-START token.
|
|
||||||
yaml_STREAM_END_TOKEN // A STREAM-END token.
|
|
||||||
|
|
||||||
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
|
|
||||||
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
|
|
||||||
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
|
|
||||||
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
|
|
||||||
|
|
||||||
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
|
|
||||||
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
|
|
||||||
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
|
|
||||||
|
|
||||||
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
|
|
||||||
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
|
|
||||||
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
|
|
||||||
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
|
|
||||||
|
|
||||||
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
|
|
||||||
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
|
|
||||||
yaml_KEY_TOKEN // A KEY token.
|
|
||||||
yaml_VALUE_TOKEN // A VALUE token.
|
|
||||||
|
|
||||||
yaml_ALIAS_TOKEN // An ALIAS token.
|
|
||||||
yaml_ANCHOR_TOKEN // An ANCHOR token.
|
|
||||||
yaml_TAG_TOKEN // A TAG token.
|
|
||||||
yaml_SCALAR_TOKEN // A SCALAR token.
|
|
||||||
)
|
|
||||||
|
|
||||||
func (tt yaml_token_type_t) String() string {
|
|
||||||
switch tt {
|
|
||||||
case yaml_NO_TOKEN:
|
|
||||||
return "yaml_NO_TOKEN"
|
|
||||||
case yaml_STREAM_START_TOKEN:
|
|
||||||
return "yaml_STREAM_START_TOKEN"
|
|
||||||
case yaml_STREAM_END_TOKEN:
|
|
||||||
return "yaml_STREAM_END_TOKEN"
|
|
||||||
case yaml_VERSION_DIRECTIVE_TOKEN:
|
|
||||||
return "yaml_VERSION_DIRECTIVE_TOKEN"
|
|
||||||
case yaml_TAG_DIRECTIVE_TOKEN:
|
|
||||||
return "yaml_TAG_DIRECTIVE_TOKEN"
|
|
||||||
case yaml_DOCUMENT_START_TOKEN:
|
|
||||||
return "yaml_DOCUMENT_START_TOKEN"
|
|
||||||
case yaml_DOCUMENT_END_TOKEN:
|
|
||||||
return "yaml_DOCUMENT_END_TOKEN"
|
|
||||||
case yaml_BLOCK_SEQUENCE_START_TOKEN:
|
|
||||||
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
|
|
||||||
case yaml_BLOCK_MAPPING_START_TOKEN:
|
|
||||||
return "yaml_BLOCK_MAPPING_START_TOKEN"
|
|
||||||
case yaml_BLOCK_END_TOKEN:
|
|
||||||
return "yaml_BLOCK_END_TOKEN"
|
|
||||||
case yaml_FLOW_SEQUENCE_START_TOKEN:
|
|
||||||
return "yaml_FLOW_SEQUENCE_START_TOKEN"
|
|
||||||
case yaml_FLOW_SEQUENCE_END_TOKEN:
|
|
||||||
return "yaml_FLOW_SEQUENCE_END_TOKEN"
|
|
||||||
case yaml_FLOW_MAPPING_START_TOKEN:
|
|
||||||
return "yaml_FLOW_MAPPING_START_TOKEN"
|
|
||||||
case yaml_FLOW_MAPPING_END_TOKEN:
|
|
||||||
return "yaml_FLOW_MAPPING_END_TOKEN"
|
|
||||||
case yaml_BLOCK_ENTRY_TOKEN:
|
|
||||||
return "yaml_BLOCK_ENTRY_TOKEN"
|
|
||||||
case yaml_FLOW_ENTRY_TOKEN:
|
|
||||||
return "yaml_FLOW_ENTRY_TOKEN"
|
|
||||||
case yaml_KEY_TOKEN:
|
|
||||||
return "yaml_KEY_TOKEN"
|
|
||||||
case yaml_VALUE_TOKEN:
|
|
||||||
return "yaml_VALUE_TOKEN"
|
|
||||||
case yaml_ALIAS_TOKEN:
|
|
||||||
return "yaml_ALIAS_TOKEN"
|
|
||||||
case yaml_ANCHOR_TOKEN:
|
|
||||||
return "yaml_ANCHOR_TOKEN"
|
|
||||||
case yaml_TAG_TOKEN:
|
|
||||||
return "yaml_TAG_TOKEN"
|
|
||||||
case yaml_SCALAR_TOKEN:
|
|
||||||
return "yaml_SCALAR_TOKEN"
|
|
||||||
}
|
|
||||||
return "<unknown token>"
|
|
||||||
}
|
|
||||||
|
|
||||||
// The token structure.
|
|
||||||
type yaml_token_t struct {
|
|
||||||
// The token type.
|
|
||||||
typ yaml_token_type_t
|
|
||||||
|
|
||||||
// The start/end of the token.
|
|
||||||
start_mark, end_mark yaml_mark_t
|
|
||||||
|
|
||||||
// The stream encoding (for yaml_STREAM_START_TOKEN).
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
// The alias/anchor/scalar value or tag/tag directive handle
|
|
||||||
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
|
|
||||||
value []byte
|
|
||||||
|
|
||||||
// The tag suffix (for yaml_TAG_TOKEN).
|
|
||||||
suffix []byte
|
|
||||||
|
|
||||||
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
|
|
||||||
prefix []byte
|
|
||||||
|
|
||||||
// The scalar style (for yaml_SCALAR_TOKEN).
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
|
|
||||||
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
|
|
||||||
major, minor int8
|
|
||||||
}
|
|
||||||
|
|
||||||
// Events
|
|
||||||
|
|
||||||
type yaml_event_type_t int8
|
|
||||||
|
|
||||||
// Event types.
|
|
||||||
const (
|
|
||||||
// An empty event.
|
|
||||||
yaml_NO_EVENT yaml_event_type_t = iota
|
|
||||||
|
|
||||||
yaml_STREAM_START_EVENT // A STREAM-START event.
|
|
||||||
yaml_STREAM_END_EVENT // A STREAM-END event.
|
|
||||||
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
|
|
||||||
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
|
|
||||||
yaml_ALIAS_EVENT // An ALIAS event.
|
|
||||||
yaml_SCALAR_EVENT // A SCALAR event.
|
|
||||||
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
|
|
||||||
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
|
|
||||||
yaml_MAPPING_START_EVENT // A MAPPING-START event.
|
|
||||||
yaml_MAPPING_END_EVENT // A MAPPING-END event.
|
|
||||||
)
|
|
||||||
|
|
||||||
var eventStrings = []string{
|
|
||||||
yaml_NO_EVENT: "none",
|
|
||||||
yaml_STREAM_START_EVENT: "stream start",
|
|
||||||
yaml_STREAM_END_EVENT: "stream end",
|
|
||||||
yaml_DOCUMENT_START_EVENT: "document start",
|
|
||||||
yaml_DOCUMENT_END_EVENT: "document end",
|
|
||||||
yaml_ALIAS_EVENT: "alias",
|
|
||||||
yaml_SCALAR_EVENT: "scalar",
|
|
||||||
yaml_SEQUENCE_START_EVENT: "sequence start",
|
|
||||||
yaml_SEQUENCE_END_EVENT: "sequence end",
|
|
||||||
yaml_MAPPING_START_EVENT: "mapping start",
|
|
||||||
yaml_MAPPING_END_EVENT: "mapping end",
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e yaml_event_type_t) String() string {
|
|
||||||
if e < 0 || int(e) >= len(eventStrings) {
|
|
||||||
return fmt.Sprintf("unknown event %d", e)
|
|
||||||
}
|
|
||||||
return eventStrings[e]
|
|
||||||
}
|
|
||||||
|
|
||||||
// The event structure.
|
|
||||||
type yaml_event_t struct {
|
|
||||||
|
|
||||||
// The event type.
|
|
||||||
typ yaml_event_type_t
|
|
||||||
|
|
||||||
// The start and end of the event.
|
|
||||||
start_mark, end_mark yaml_mark_t
|
|
||||||
|
|
||||||
// The document encoding (for yaml_STREAM_START_EVENT).
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
// The version directive (for yaml_DOCUMENT_START_EVENT).
|
|
||||||
version_directive *yaml_version_directive_t
|
|
||||||
|
|
||||||
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
|
|
||||||
anchor []byte
|
|
||||||
|
|
||||||
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
|
|
||||||
tag []byte
|
|
||||||
|
|
||||||
// The scalar value (for yaml_SCALAR_EVENT).
|
|
||||||
value []byte
|
|
||||||
|
|
||||||
// Is the document start/end indicator implicit, or the tag optional?
|
|
||||||
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
|
|
||||||
implicit bool
|
|
||||||
|
|
||||||
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
|
|
||||||
quoted_implicit bool
|
|
||||||
|
|
||||||
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
|
|
||||||
style yaml_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
|
|
||||||
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
|
|
||||||
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
|
|
||||||
|
|
||||||
// Nodes
|
|
||||||
|
|
||||||
const (
|
|
||||||
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
|
|
||||||
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
|
|
||||||
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
|
|
||||||
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
|
|
||||||
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
|
|
||||||
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
|
|
||||||
|
|
||||||
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
|
|
||||||
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
|
|
||||||
|
|
||||||
// Not in original libyaml.
|
|
||||||
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
|
|
||||||
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
|
|
||||||
|
|
||||||
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
|
|
||||||
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
|
|
||||||
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
|
|
||||||
)
|
|
||||||
|
|
||||||
type yaml_node_type_t int
|
|
||||||
|
|
||||||
// Node types.
|
|
||||||
const (
|
|
||||||
// An empty node.
|
|
||||||
yaml_NO_NODE yaml_node_type_t = iota
|
|
||||||
|
|
||||||
yaml_SCALAR_NODE // A scalar node.
|
|
||||||
yaml_SEQUENCE_NODE // A sequence node.
|
|
||||||
yaml_MAPPING_NODE // A mapping node.
|
|
||||||
)
|
|
||||||
|
|
||||||
// An element of a sequence node.
|
|
||||||
type yaml_node_item_t int
|
|
||||||
|
|
||||||
// An element of a mapping node.
|
|
||||||
type yaml_node_pair_t struct {
|
|
||||||
key int // The key of the element.
|
|
||||||
value int // The value of the element.
|
|
||||||
}
|
|
||||||
|
|
||||||
// The node structure.
|
|
||||||
type yaml_node_t struct {
|
|
||||||
typ yaml_node_type_t // The node type.
|
|
||||||
tag []byte // The node tag.
|
|
||||||
|
|
||||||
// The node data.
|
|
||||||
|
|
||||||
// The scalar parameters (for yaml_SCALAR_NODE).
|
|
||||||
scalar struct {
|
|
||||||
value []byte // The scalar value.
|
|
||||||
length int // The length of the scalar value.
|
|
||||||
style yaml_scalar_style_t // The scalar style.
|
|
||||||
}
|
|
||||||
|
|
||||||
// The sequence parameters (for YAML_SEQUENCE_NODE).
|
|
||||||
sequence struct {
|
|
||||||
items_data []yaml_node_item_t // The stack of sequence items.
|
|
||||||
style yaml_sequence_style_t // The sequence style.
|
|
||||||
}
|
|
||||||
|
|
||||||
// The mapping parameters (for yaml_MAPPING_NODE).
|
|
||||||
mapping struct {
|
|
||||||
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
|
|
||||||
pairs_start *yaml_node_pair_t // The beginning of the stack.
|
|
||||||
pairs_end *yaml_node_pair_t // The end of the stack.
|
|
||||||
pairs_top *yaml_node_pair_t // The top of the stack.
|
|
||||||
style yaml_mapping_style_t // The mapping style.
|
|
||||||
}
|
|
||||||
|
|
||||||
start_mark yaml_mark_t // The beginning of the node.
|
|
||||||
end_mark yaml_mark_t // The end of the node.
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// The document structure.
|
|
||||||
type yaml_document_t struct {
|
|
||||||
|
|
||||||
// The document nodes.
|
|
||||||
nodes []yaml_node_t
|
|
||||||
|
|
||||||
// The version directive.
|
|
||||||
version_directive *yaml_version_directive_t
|
|
||||||
|
|
||||||
// The list of tag directives.
|
|
||||||
tag_directives_data []yaml_tag_directive_t
|
|
||||||
tag_directives_start int // The beginning of the tag directives list.
|
|
||||||
tag_directives_end int // The end of the tag directives list.
|
|
||||||
|
|
||||||
start_implicit int // Is the document start indicator implicit?
|
|
||||||
end_implicit int // Is the document end indicator implicit?
|
|
||||||
|
|
||||||
// The start/end of the document.
|
|
||||||
start_mark, end_mark yaml_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
// The prototype of a read handler.
|
|
||||||
//
|
|
||||||
// The read handler is called when the parser needs to read more bytes from the
|
|
||||||
// source. The handler should write not more than size bytes to the buffer.
|
|
||||||
// The number of written bytes should be set to the size_read variable.
|
|
||||||
//
|
|
||||||
// [in,out] data A pointer to an application data specified by
|
|
||||||
// yaml_parser_set_input().
|
|
||||||
// [out] buffer The buffer to write the data from the source.
|
|
||||||
// [in] size The size of the buffer.
|
|
||||||
// [out] size_read The actual number of bytes read from the source.
|
|
||||||
//
|
|
||||||
// On success, the handler should return 1. If the handler failed,
|
|
||||||
// the returned value should be 0. On EOF, the handler should set the
|
|
||||||
// size_read to 0 and return 1.
|
|
||||||
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
|
|
||||||
|
|
||||||
// This structure holds information about a potential simple key.
|
|
||||||
type yaml_simple_key_t struct {
|
|
||||||
possible bool // Is a simple key possible?
|
|
||||||
required bool // Is a simple key required?
|
|
||||||
token_number int // The number of the token.
|
|
||||||
mark yaml_mark_t // The position mark.
|
|
||||||
}
|
|
||||||
|
|
||||||
// The states of the parser.
|
|
||||||
type yaml_parser_state_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
|
|
||||||
|
|
||||||
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
|
|
||||||
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
|
|
||||||
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
|
|
||||||
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
|
|
||||||
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
|
|
||||||
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
|
|
||||||
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
|
|
||||||
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
|
|
||||||
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
|
|
||||||
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
|
|
||||||
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
|
|
||||||
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
|
|
||||||
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
|
|
||||||
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
|
|
||||||
yaml_PARSE_END_STATE // Expect nothing.
|
|
||||||
)
|
|
||||||
|
|
||||||
func (ps yaml_parser_state_t) String() string {
|
|
||||||
switch ps {
|
|
||||||
case yaml_PARSE_STREAM_START_STATE:
|
|
||||||
return "yaml_PARSE_STREAM_START_STATE"
|
|
||||||
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
|
|
||||||
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
|
|
||||||
case yaml_PARSE_DOCUMENT_START_STATE:
|
|
||||||
return "yaml_PARSE_DOCUMENT_START_STATE"
|
|
||||||
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
|
|
||||||
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
|
|
||||||
case yaml_PARSE_DOCUMENT_END_STATE:
|
|
||||||
return "yaml_PARSE_DOCUMENT_END_STATE"
|
|
||||||
case yaml_PARSE_BLOCK_NODE_STATE:
|
|
||||||
return "yaml_PARSE_BLOCK_NODE_STATE"
|
|
||||||
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
|
|
||||||
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
|
|
||||||
case yaml_PARSE_FLOW_NODE_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_NODE_STATE"
|
|
||||||
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
|
|
||||||
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
|
|
||||||
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
|
|
||||||
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
|
|
||||||
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
|
|
||||||
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
|
|
||||||
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
|
|
||||||
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
|
|
||||||
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
|
|
||||||
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
|
|
||||||
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
|
|
||||||
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
|
|
||||||
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
|
|
||||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
|
|
||||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
|
|
||||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
|
|
||||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
|
|
||||||
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
|
|
||||||
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
|
|
||||||
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
|
|
||||||
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
|
|
||||||
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
|
|
||||||
case yaml_PARSE_END_STATE:
|
|
||||||
return "yaml_PARSE_END_STATE"
|
|
||||||
}
|
|
||||||
return "<unknown parser state>"
|
|
||||||
}
|
|
||||||
|
|
||||||
// This structure holds aliases data.
|
|
||||||
type yaml_alias_data_t struct {
|
|
||||||
anchor []byte // The anchor.
|
|
||||||
index int // The node id.
|
|
||||||
mark yaml_mark_t // The anchor mark.
|
|
||||||
}
|
|
||||||
|
|
||||||
// The parser structure.
|
|
||||||
//
|
|
||||||
// All members are internal. Manage the structure using the
|
|
||||||
// yaml_parser_ family of functions.
|
|
||||||
type yaml_parser_t struct {
|
|
||||||
|
|
||||||
// Error handling
|
|
||||||
|
|
||||||
error yaml_error_type_t // Error type.
|
|
||||||
|
|
||||||
problem string // Error description.
|
|
||||||
|
|
||||||
// The byte about which the problem occurred.
|
|
||||||
problem_offset int
|
|
||||||
problem_value int
|
|
||||||
problem_mark yaml_mark_t
|
|
||||||
|
|
||||||
// The error context.
|
|
||||||
context string
|
|
||||||
context_mark yaml_mark_t
|
|
||||||
|
|
||||||
// Reader stuff
|
|
||||||
|
|
||||||
read_handler yaml_read_handler_t // Read handler.
|
|
||||||
|
|
||||||
input_reader io.Reader // File input data.
|
|
||||||
input []byte // String input data.
|
|
||||||
input_pos int
|
|
||||||
|
|
||||||
eof bool // EOF flag
|
|
||||||
|
|
||||||
buffer []byte // The working buffer.
|
|
||||||
buffer_pos int // The current position of the buffer.
|
|
||||||
|
|
||||||
unread int // The number of unread characters in the buffer.
|
|
||||||
|
|
||||||
raw_buffer []byte // The raw buffer.
|
|
||||||
raw_buffer_pos int // The current position of the buffer.
|
|
||||||
|
|
||||||
encoding yaml_encoding_t // The input encoding.
|
|
||||||
|
|
||||||
offset int // The offset of the current position (in bytes).
|
|
||||||
mark yaml_mark_t // The mark of the current position.
|
|
||||||
|
|
||||||
// Scanner stuff
|
|
||||||
|
|
||||||
stream_start_produced bool // Have we started to scan the input stream?
|
|
||||||
stream_end_produced bool // Have we reached the end of the input stream?
|
|
||||||
|
|
||||||
flow_level int // The number of unclosed '[' and '{' indicators.
|
|
||||||
|
|
||||||
tokens []yaml_token_t // The tokens queue.
|
|
||||||
tokens_head int // The head of the tokens queue.
|
|
||||||
tokens_parsed int // The number of tokens fetched from the queue.
|
|
||||||
token_available bool // Does the tokens queue contain a token ready for dequeueing.
|
|
||||||
|
|
||||||
indent int // The current indentation level.
|
|
||||||
indents []int // The indentation levels stack.
|
|
||||||
|
|
||||||
simple_key_allowed bool // May a simple key occur at the current position?
|
|
||||||
simple_keys []yaml_simple_key_t // The stack of simple keys.
|
|
||||||
|
|
||||||
// Parser stuff
|
|
||||||
|
|
||||||
state yaml_parser_state_t // The current parser state.
|
|
||||||
states []yaml_parser_state_t // The parser states stack.
|
|
||||||
marks []yaml_mark_t // The stack of marks.
|
|
||||||
tag_directives []yaml_tag_directive_t // The list of TAG directives.
|
|
||||||
|
|
||||||
// Dumper stuff
|
|
||||||
|
|
||||||
aliases []yaml_alias_data_t // The alias data.
|
|
||||||
|
|
||||||
document *yaml_document_t // The currently parsed document.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Emitter Definitions
|
|
||||||
|
|
||||||
// The prototype of a write handler.
|
|
||||||
//
|
|
||||||
// The write handler is called when the emitter needs to flush the accumulated
|
|
||||||
// characters to the output. The handler should write @a size bytes of the
|
|
||||||
// @a buffer to the output.
|
|
||||||
//
|
|
||||||
// @param[in,out] data A pointer to an application data specified by
|
|
||||||
// yaml_emitter_set_output().
|
|
||||||
// @param[in] buffer The buffer with bytes to be written.
|
|
||||||
// @param[in] size The size of the buffer.
|
|
||||||
//
|
|
||||||
// @returns On success, the handler should return @c 1. If the handler failed,
|
|
||||||
// the returned value should be @c 0.
|
|
||||||
//
|
|
||||||
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
|
|
||||||
|
|
||||||
type yaml_emitter_state_t int
|
|
||||||
|
|
||||||
// The emitter states.
|
|
||||||
const (
|
|
||||||
// Expect STREAM-START.
|
|
||||||
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
|
|
||||||
|
|
||||||
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
|
|
||||||
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
|
|
||||||
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
|
|
||||||
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
|
|
||||||
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
|
|
||||||
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
|
|
||||||
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
|
|
||||||
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
|
|
||||||
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
|
|
||||||
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
|
|
||||||
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
|
|
||||||
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
|
|
||||||
yaml_EMIT_END_STATE // Expect nothing.
|
|
||||||
)
|
|
||||||
|
|
||||||
// The emitter structure.
|
|
||||||
//
|
|
||||||
// All members are internal. Manage the structure using the @c yaml_emitter_
|
|
||||||
// family of functions.
|
|
||||||
type yaml_emitter_t struct {
|
|
||||||
|
|
||||||
// Error handling
|
|
||||||
|
|
||||||
error yaml_error_type_t // Error type.
|
|
||||||
problem string // Error description.
|
|
||||||
|
|
||||||
// Writer stuff
|
|
||||||
|
|
||||||
write_handler yaml_write_handler_t // Write handler.
|
|
||||||
|
|
||||||
output_buffer *[]byte // String output data.
|
|
||||||
output_writer io.Writer // File output data.
|
|
||||||
|
|
||||||
buffer []byte // The working buffer.
|
|
||||||
buffer_pos int // The current position of the buffer.
|
|
||||||
|
|
||||||
raw_buffer []byte // The raw buffer.
|
|
||||||
raw_buffer_pos int // The current position of the buffer.
|
|
||||||
|
|
||||||
encoding yaml_encoding_t // The stream encoding.
|
|
||||||
|
|
||||||
// Emitter stuff
|
|
||||||
|
|
||||||
canonical bool // If the output is in the canonical style?
|
|
||||||
best_indent int // The number of indentation spaces.
|
|
||||||
best_width int // The preferred width of the output lines.
|
|
||||||
unicode bool // Allow unescaped non-ASCII characters?
|
|
||||||
line_break yaml_break_t // The preferred line break.
|
|
||||||
|
|
||||||
state yaml_emitter_state_t // The current emitter state.
|
|
||||||
states []yaml_emitter_state_t // The stack of states.
|
|
||||||
|
|
||||||
events []yaml_event_t // The event queue.
|
|
||||||
events_head int // The head of the event queue.
|
|
||||||
|
|
||||||
indents []int // The stack of indentation levels.
|
|
||||||
|
|
||||||
tag_directives []yaml_tag_directive_t // The list of tag directives.
|
|
||||||
|
|
||||||
indent int // The current indentation level.
|
|
||||||
|
|
||||||
flow_level int // The current flow level.
|
|
||||||
|
|
||||||
root_context bool // Is it the document root context?
|
|
||||||
sequence_context bool // Is it a sequence context?
|
|
||||||
mapping_context bool // Is it a mapping context?
|
|
||||||
simple_key_context bool // Is it a simple mapping key context?
|
|
||||||
|
|
||||||
line int // The current line.
|
|
||||||
column int // The current column.
|
|
||||||
whitespace bool // If the last character was a whitespace?
|
|
||||||
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
|
|
||||||
open_ended bool // If an explicit document end is required?
|
|
||||||
|
|
||||||
// Anchor analysis.
|
|
||||||
anchor_data struct {
|
|
||||||
anchor []byte // The anchor value.
|
|
||||||
alias bool // Is it an alias?
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tag analysis.
|
|
||||||
tag_data struct {
|
|
||||||
handle []byte // The tag handle.
|
|
||||||
suffix []byte // The tag suffix.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scalar analysis.
|
|
||||||
scalar_data struct {
|
|
||||||
value []byte // The scalar value.
|
|
||||||
multiline bool // Does the scalar contain line breaks?
|
|
||||||
flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
|
|
||||||
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
|
|
||||||
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
|
|
||||||
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
|
|
||||||
style yaml_scalar_style_t // The output style.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dumper stuff
|
|
||||||
|
|
||||||
opened bool // If the stream was already opened?
|
|
||||||
closed bool // If the stream was already closed?
|
|
||||||
|
|
||||||
// The information associated with the document nodes.
|
|
||||||
anchors *struct {
|
|
||||||
references int // The number of references.
|
|
||||||
anchor int // The anchor id.
|
|
||||||
serialized bool // If the node has been emitted?
|
|
||||||
}
|
|
||||||
|
|
||||||
last_anchor_id int // The last assigned anchor id.
|
|
||||||
|
|
||||||
document *yaml_document_t // The currently emitted document.
|
|
||||||
}
|
|
173
vendor/gopkg.in/yaml.v2/yamlprivateh.go
generated
vendored
@ -1,173 +0,0 @@
package yaml

const (
    // The size of the input raw buffer.
    input_raw_buffer_size = 512

    // The size of the input buffer.
    // It should be possible to decode the whole raw buffer.
    input_buffer_size = input_raw_buffer_size * 3

    // The size of the output buffer.
    output_buffer_size = 128

    // The size of the output raw buffer.
    // It should be possible to encode the whole output buffer.
    output_raw_buffer_size = (output_buffer_size*2 + 2)

    // The size of other stacks and queues.
    initial_stack_size  = 16
    initial_queue_size  = 16
    initial_string_size = 16
)

// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
    return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}

// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
    return b[i] >= '0' && b[i] <= '9'
}

// Get the value of a digit.
func as_digit(b []byte, i int) int {
    return int(b[i]) - '0'
}

// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
    return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}

// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
    bi := b[i]
    if bi >= 'A' && bi <= 'F' {
        return int(bi) - 'A' + 10
    }
    if bi >= 'a' && bi <= 'f' {
        return int(bi) - 'a' + 10
    }
    return int(bi) - '0'
}

// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
    return b[i] <= 0x7F
}

// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
    return ((b[i] == 0x0A) || // . == #x0A
        (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
        (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
        (b[i] > 0xC2 && b[i] < 0xED) ||
        (b[i] == 0xED && b[i+1] < 0xA0) ||
        (b[i] == 0xEE) ||
        (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
            !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
            !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}

// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
    return b[i] == 0x00
}

// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
    return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}

// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
    return b[i] == ' '
}

// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
    return b[i] == '\t'
}

// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
    //return is_space(b, i) || is_tab(b, i)
    return b[i] == ' ' || b[i] == '\t'
}

// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
    return (b[i] == '\r' || // CR (#xD)
        b[i] == '\n' || // LF (#xA)
        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}

func is_crlf(b []byte, i int) bool {
    return b[i] == '\r' && b[i+1] == '\n'
}

// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
    //return is_break(b, i) || is_z(b, i)
    return ( // is_break:
    b[i] == '\r' || // CR (#xD)
        b[i] == '\n' || // LF (#xA)
        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
        // is_z:
        b[i] == 0)
}

// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
    //return is_space(b, i) || is_breakz(b, i)
    return ( // is_space:
    b[i] == ' ' ||
        // is_breakz:
        b[i] == '\r' || // CR (#xD)
        b[i] == '\n' || // LF (#xA)
        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
        b[i] == 0)
}

// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
    //return is_blank(b, i) || is_breakz(b, i)
    return ( // is_blank:
    b[i] == ' ' || b[i] == '\t' ||
        // is_breakz:
        b[i] == '\r' || // CR (#xD)
        b[i] == '\n' || // LF (#xA)
        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
        b[i] == 0)
}

// Determine the width of the character.
func width(b byte) int {
    // Don't replace these by a switch without first
    // confirming that it is being inlined.
    if b&0x80 == 0x00 {
        return 1
    }
    if b&0xE0 == 0xC0 {
        return 2
    }
    if b&0xF0 == 0xE0 {
        return 3
    }
    if b&0xF8 == 0xF0 {
        return 4
    }
    return 0

}
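An illustrative sketch (not part of this file): the width() table above is the standard UTF-8 leading-byte classification, and the standard library's unicode/utf8 package gives the same answer. The helper name leadWidth is hypothetical.

package main

import (
    "fmt"
    "unicode/utf8"
)

// leadWidth mirrors width(): classify a UTF-8 sequence by its first byte.
func leadWidth(b byte) int {
    switch {
    case b&0x80 == 0x00:
        return 1
    case b&0xE0 == 0xC0:
        return 2
    case b&0xF0 == 0xE0:
        return 3
    case b&0xF8 == 0xF0:
        return 4
    }
    return 0 // continuation byte or invalid lead byte
}

func main() {
    for _, s := range []string{"a", "é", "→", "🙂"} {
        b := []byte(s)
        r := []rune(s)[0]
        fmt.Println(s, leadWidth(b[0]), utf8.RuneLen(r)) // both columns agree: 1, 2, 3, 4
    }
}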
7
vendor/modules.txt
vendored
@ -1,7 +0,0 @@
# github.com/ulikunitz/xz v0.5.5
github.com/ulikunitz/xz
github.com/ulikunitz/xz/internal/xlog
github.com/ulikunitz/xz/lzma
github.com/ulikunitz/xz/internal/hash
# gopkg.in/yaml.v2 v2.2.2
gopkg.in/yaml.v2
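With the vendor tree removed, builds resolve these dependencies from go.mod and the module cache instead of this manifest; if vendoring were wanted again, vendor/ and vendor/modules.txt would be regenerated with `go mod vendor` (typically after `go mod tidy`) rather than edited by hand.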