vendor
25  vendor/github.com/ulikunitz/xz/.gitignore  generated vendored Normal file
@@ -0,0 +1,25 @@
# .gitignore

TODO.html
README.html

lzma/writer.txt
lzma/reader.txt

cmd/gxz/gxz
cmd/xb/xb

# test executables
*.test

# profile files
*.out

# vim swap file
.*.swp

# executables on windows
*.exe

# default compression test file
enwik8*

26  vendor/github.com/ulikunitz/xz/LICENSE  generated vendored Normal file
@@ -0,0 +1,26 @@
Copyright (c) 2014-2016 Ulrich Kunitz
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* My name, Ulrich Kunitz, may not be used to endorse or promote products
  derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

71  vendor/github.com/ulikunitz/xz/README.md  generated vendored Normal file
@@ -0,0 +1,71 @@
# Package xz

This Go language package supports the reading and writing of xz
compressed streams. It also includes a gxz command for compressing and
decompressing data. The package is completely written in Go and doesn't
have any dependency on C code.

The package is currently under development. There might be bugs and the
APIs are not considered stable. At this time the package cannot compete
with the xz tool regarding compression speed and size. The algorithms
there have been developed over a long time and are highly optimized.
However, there are a number of improvements planned and I'm very
optimistic about parallel compression and decompression. Stay tuned!

# Using the API

The following example program shows how to use the API.

    package main

    import (
        "bytes"
        "io"
        "log"
        "os"

        "github.com/ulikunitz/xz"
    )

    func main() {
        const text = "The quick brown fox jumps over the lazy dog.\n"
        var buf bytes.Buffer
        // compress text
        w, err := xz.NewWriter(&buf)
        if err != nil {
            log.Fatalf("xz.NewWriter error %s", err)
        }
        if _, err := io.WriteString(w, text); err != nil {
            log.Fatalf("WriteString error %s", err)
        }
        if err := w.Close(); err != nil {
            log.Fatalf("w.Close error %s", err)
        }
        // decompress buffer and write output to stdout
        r, err := xz.NewReader(&buf)
        if err != nil {
            log.Fatalf("NewReader error %s", err)
        }
        if _, err = io.Copy(os.Stdout, r); err != nil {
            log.Fatalf("io.Copy error %s", err)
        }
    }

# Using the gxz compression tool

The package includes a gxz command line utility for compression and
decompression.

Use the following command for installation:

    $ go get github.com/ulikunitz/xz/cmd/gxz

To test it, call the following command.

    $ gxz bigfile

After some time a much smaller file bigfile.xz will replace bigfile.
To decompress it, use the following command.

    $ gxz -d bigfile.xz

315  vendor/github.com/ulikunitz/xz/TODO.md  generated vendored Normal file
@@ -0,0 +1,315 @@
# TODO list

## Release v0.6

1. Review encoder and check for lzma improvements under xz.
2. Fix binary tree matcher.
3. Compare compression ratio with the xz tool using comparable
   parameters and optimize the parameters.
4. Do some optimizations
   - rename operation action and make it a simple type of size 8
   - make maxMatches, wordSize parameters
   - stop searching after a certain length is found (parameter sweetLen)

## Release v0.7

1. Optimize code.
2. Do statistical analysis to get linear presets.
3. Test sync.Pool compatibility for xz and lzma Writer and Reader.
4. Fuzz optimized code.

## Release v0.8

1. Support parallel go routines for writing and reading xz files.
2. Support a ReaderAt interface for xz files with small block sizes.
3. Improve compatibility between gxz and xz.
4. Provide a manual page for gxz.

## Release v0.9

1. Improve documentation.
2. Fuzz again.

## Release v1.0

1. Fully functioning gxz.
2. Add godoc URL to README.md (godoc.org).
3. Resolve all issues.
4. Define release candidates.
5. Public announcement.

## Package lzma

### Release v0.6

- Rewrite Encoder into a simple greedy one-op-at-a-time encoder,
  including
  + a simple scan at the dictionary head for the same byte
  + use of the killer byte (requiring matches to get longer, the first
    test should be the byte that would make the match longer)

## Optimizations

- There may be a lot of false sharing in lzma.State; check whether this
  can be improved by reorganizing its internal structure.
- Check whether batching encoding and decoding improves speed.

### DAG optimizations

- Use the full buffer to create minimal bit-length above range encoder.
- Might be too slow (see v0.4)

### Different match finders

- hashes with 2, 3 characters additional to 4 characters
- binary trees with 2-7 characters (uint64 as key, use uint32 as
  pointers into an array)
- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
  into an array with bit-stealing for the colors)

## Release Procedure

- execute goch -l for all packages; probably with a lower param like 0.5.
- check orthography with gospell
- Write release notes in doc/relnotes.
- Update README.md
- xb copyright . in the xz directory to ensure all new files have a
  Copyright header
- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
  the version files
- Execute tests for Linux/amd64, Linux/x86 and Windows/amd64.
- Update TODO.md - write a short log entry
- git checkout master && git merge dev
- git tag -a <version>
- git push

## Log

### 2017-06-05

Release v0.5.4 fixes issue #15, another problem with the padding size
check for the xz block header. I removed the check completely.

### 2017-02-15

Release v0.5.3 fixes issue #12 regarding the decompression of an empty
XZ stream. Many thanks to Tomasz Kłak, who reported the issue.

### 2016-12-02

Release v0.5.2 became necessary to allow the decoding of xz files with
4-byte padding in the block header. Many thanks to Greg, who reported
the issue.

### 2016-07-23

Release v0.5.1 became necessary to fix problems with 32-bit platforms.
Many thanks to Bruno Brigas, who reported the issue.

### 2016-07-04

Release v0.5 provides improvements to the compressor and support for
the decompression of xz files with multiple xz streams.

### 2016-01-31

Another compression rate increase by checking the byte at the length of
the best match first, before checking the whole prefix. This makes the
compressor even faster. We now have a large time budget to beat the
compression ratio of the xz tool. For enwik8 we now have over 40 seconds
to reduce the compressed file size by another 7 MiB.
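
(Editor's sketch, added for illustration — not part of the original log.
The names `data`, `pos`, `cand` and `bestLen` are hypothetical, and
bounds checks are assumed to be handled by the caller.)

    // beatsBest reports whether the candidate match at cand can beat
    // the current best match of length bestLen for the position pos.
    func beatsBest(data []byte, pos, cand, bestLen int) bool {
        // Check the decisive byte first: if the candidate cannot
        // extend the match beyond bestLen, one comparison rejects it.
        if data[cand+bestLen] != data[pos+bestLen] {
            return false
        }
        // Only now compare the whole prefix.
        for i := 0; i < bestLen; i++ {
            if data[cand+i] != data[pos+i] {
                return false
            }
        }
        return true
    }
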
### 2016-01-30

I simplified the encoder. Speed and compression rate increased
dramatically. A high compression rate also affects the decompression
speed. The approach with the buffer and optimizing for operation
compression rate has not been successful. Going for the maximum length
appears to be the best approach.

### 2016-01-28

The release v0.4 is ready. It provides a working xz implementation,
which is rather slow, but works and is interoperable with the xz tool.
It is an important milestone.

### 2016-01-10

I have the first working implementation of an xz reader and writer. I'm
happy about reaching this milestone.

### 2015-12-02

I'm now ready to implement xz because I have a working LZMA2
implementation. I decided today that v0.4 will use the slow encoder
using the operations buffer to be able to go back, if I intend to do so.

### 2015-10-21

I have restarted the work on the library. While trying to implement
LZMA2, I discovered that I need to resimplify the encoder and decoder
functions. The option approach is too complicated. Using a limited byte
writer, not caring about written bytes at all, and not trying to handle
uncompressed data simplifies the LZMA encoder and decoder considerably.
Processing uncompressed data and handling limits is a feature of the
LZMA2 format, not of LZMA.

I learned an interesting method from the LZO format. If the last copy is
too far away, they move the head 2 bytes instead of 1 byte to reduce
processing times.

### 2015-08-26

I have now reimplemented the lzma package. The code is reasonably fast,
but can still be optimized. The next step is to implement LZMA2 and then
xz.

### 2015-07-05

Created release v0.3. The version is the foundation for a full xz
implementation that is the target of v0.4.

### 2015-06-11

The gflag package has been developed because I couldn't use flag and
pflag for fully compatible support of gzip's and lzma's options. It
seems to work quite nicely now.

### 2015-06-05

The overflow issue was interesting to research. However, Henry S. Warren
Jr.'s book Hacker's Delight was very helpful, as usual, and had the
issue explained perfectly. Fefe's information on his website was based
on the C FAQ and quite bad, because it didn't address the issue of
-MININT == MININT.
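
(Editor's illustration, added: the wrap-around mentioned here is easy
to reproduce in Go, where signed integer overflow wraps in two's
complement.)

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        x := int64(math.MinInt64)
        // Negating the minimum value overflows back onto itself.
        fmt.Println(-x == x) // true
    }
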
### 2015-06-04

It has been a productive day. I improved the interface of lzma.Reader
and lzma.Writer and fixed the error handling.

### 2015-06-01

By computing the bit length of the LZMA operations I was able to
improve the greedy algorithm implementation. Using an 8 MByte buffer,
the compression rate was not as good as for xz but already better than
gzip's default.

Compression is currently slow, but this is something we will be able to
improve over time.

### 2015-05-26

Checked the license of ogier/pflag. The lzmago binary should include
the license terms for the pflag library.

I added the endorsement clause, as used by Google for the Go sources,
to the LICENSE file.

### 2015-05-22

The package lzb now contains the basic implementation for creating or
reading LZMA byte streams. It allows the support for the implementation
of the DAG-shortest-path algorithm for the compression function.

### 2015-04-23

Yesterday I completed the lzbase classes. I'm a little bit concerned
that using the components may require too much code, but on the other
hand there is a lot of flexibility.

### 2015-04-22

Implemented Reader and Writer during the Bayern game against Porto. The
second half gave me enough time.

### 2015-04-21

While showering this morning I discovered that the design for OpEncoder
and OpDecoder doesn't work, because encoding/decoding might depend on
the current status of the dictionary. This is not exactly the right way
to start the day.

Therefore we need to keep the Reader and Writer design. This time around
we simplify it by ignoring size limits. These can be added by wrappers
around the Reader and Writer interfaces. The Parameters type isn't
needed anymore.

However, I will implement a ReaderState and WriterState type to use
static typing to ensure the right State object is combined with the
right lzbase.Reader and lzbase.Writer.

As a start I have implemented ReaderState and WriterState to ensure
that the state for reading is only used by readers and WriterState is
only used by writers.

### 2015-04-20

Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.

### 2015-04-08

Came up with a new simplified design for lzbase. I have already
implemented the type State that replaces OpCodec.

### 2015-04-06

The new lzma package is now fully usable and lzmago is using it now. The
old lzma package has been completely removed.

### 2015-04-05

Implemented lzma.Reader and tested it.

### 2015-04-04

Implemented baseReader by adapting code from lzma.Reader.

### 2015-04-03

The opCodec was copied to lzma2 yesterday. opCodec has a high number of
dependencies on other files in lzma2. Therefore I had to copy almost
all files from lzma.

### 2015-03-31

Removed only a TODO item.

However, Francesco Campoy's presentation "Go for Javaneros
(Javaïstes?)" presents the idea that, using an embedded field E, all
the methods of E will be defined on T. If E is an interface, T
satisfies E.

https://talks.golang.org/2014/go4java.slide#51

I have never used this, but it seems to be a cool idea.

### 2015-03-30

Finished the type writerDict and wrote a simple test.

### 2015-03-25

I started to implement the writerDict.

### 2015-03-24

After thinking long about the LZMA2 code and several false starts, I
now have a plan to create a self-sufficient lzma2 package that supports
the classic LZMA format as well as LZMA2. The core idea is to support a
baseReader and baseWriter type that support the basic LZMA stream
without any headers. Both types must support the reuse of dictionaries
and the opCodec.

### 2015-01-10

1. Implemented simple lzmago tool
2. Tested tool against large 4.4G file
   - compression worked correctly; tested decompression with lzma
   - decompression hits a full buffer condition
3. Fixed a bug in the compressor and wrote a test for it
4. Executed full cycle for 4.4 GB file; performance can be improved ;-)

### 2015-01-11

- Release v0.2 because of the working LZMA encoder and decoder

74  vendor/github.com/ulikunitz/xz/bits.go  generated vendored Normal file
@@ -0,0 +1,74 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
    "errors"
    "io"
)

// putUint32LE puts the little-endian representation of x into the first
// four bytes of p.
func putUint32LE(p []byte, x uint32) {
    p[0] = byte(x)
    p[1] = byte(x >> 8)
    p[2] = byte(x >> 16)
    p[3] = byte(x >> 24)
}

// putUint64LE puts the little-endian representation of x into the first
// eight bytes of p.
func putUint64LE(p []byte, x uint64) {
    p[0] = byte(x)
    p[1] = byte(x >> 8)
    p[2] = byte(x >> 16)
    p[3] = byte(x >> 24)
    p[4] = byte(x >> 32)
    p[5] = byte(x >> 40)
    p[6] = byte(x >> 48)
    p[7] = byte(x >> 56)
}

// uint32LE converts a little-endian representation to a uint32 value.
func uint32LE(p []byte) uint32 {
    return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
        uint32(p[3])<<24
}

// putUvarint puts a uvarint representation of x into the byte slice.
func putUvarint(p []byte, x uint64) int {
    i := 0
    for x >= 0x80 {
        p[i] = byte(x) | 0x80
        x >>= 7
        i++
    }
    p[i] = byte(x)
    return i + 1
}

// errOverflowU64 indicates that a decoded uvarint overflows a 64-bit
// unsigned integer.
var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")

// readUvarint reads a uvarint from the given byte reader.
func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
    var s uint
    i := 0
    for {
        b, err := r.ReadByte()
        if err != nil {
            return x, i, err
        }
        i++
        if b < 0x80 {
            if i > 10 || i == 10 && b > 1 {
                return x, i, errOverflowU64
            }
            return x | uint64(b)<<s, i, nil
        }
        x |= uint64(b&0x7f) << s
        s += 7
    }
}
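
Editor's note: the uvarint format above — seven payload bits per byte,
with the high bit set on continuation bytes — is the same
variable-length encoding the standard library implements in
encoding/binary, so a stdlib round trip illustrates it (a sketch, not
part of the vendored file):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    func main() {
        p := make([]byte, binary.MaxVarintLen64)
        n := binary.PutUvarint(p, 0x87654321)
        x, err := binary.ReadUvarint(bytes.NewReader(p[:n]))
        // A 32-bit value needs ceil(32/7) = 5 bytes.
        fmt.Println(x == 0x87654321, n, err) // true 5 <nil>
    }
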
33  vendor/github.com/ulikunitz/xz/bits_test.go  generated vendored Normal file
@@ -0,0 +1,33 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
    "bytes"
    "testing"
)

func TestUvarint(t *testing.T) {
    tests := []uint64{0, 0x80, 0x100, 0xffffffff, 0x100000000, 1<<64 - 1}
    p := make([]byte, 10)
    for _, u := range tests {
        p = p[:10]
        n := putUvarint(p, u)
        if n < 1 {
            t.Fatalf("putUvarint returned %d", n)
        }
        r := bytes.NewReader(p[:n])
        x, m, err := readUvarint(r)
        if err != nil {
            t.Fatalf("readUvarint returned %s", err)
        }
        if m != n {
            t.Fatalf("readUvarint read %d bytes; want %d", m, n)
        }
        if x != u {
            t.Fatalf("readUvarint returned 0x%x; want 0x%x", x, u)
        }
    }
}

54  vendor/github.com/ulikunitz/xz/crc.go  generated vendored Normal file
@@ -0,0 +1,54 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
    "hash"
    "hash/crc32"
    "hash/crc64"
)

// crc32Hash implements the hash.Hash32 interface with Sum returning the
// crc32 value in little-endian encoding.
type crc32Hash struct {
    hash.Hash32
}

// Sum returns the crc32 value as little endian.
func (h crc32Hash) Sum(b []byte) []byte {
    p := make([]byte, 4)
    putUint32LE(p, h.Hash32.Sum32())
    b = append(b, p...)
    return b
}

// newCRC32 returns a CRC-32 hash that returns the 32-bit value in
// little-endian encoding using the IEEE polynomial.
func newCRC32() hash.Hash {
    return crc32Hash{Hash32: crc32.NewIEEE()}
}

// crc64Hash implements the hash.Hash64 interface with Sum returning the
// CRC-64 value in little-endian encoding.
type crc64Hash struct {
    hash.Hash64
}

// Sum returns the CRC-64 value in little-endian encoding.
func (h crc64Hash) Sum(b []byte) []byte {
    p := make([]byte, 8)
    putUint64LE(p, h.Hash64.Sum64())
    b = append(b, p...)
    return b
}

// crc64Table is used to create a CRC-64 hash.
var crc64Table = crc64.MakeTable(crc64.ECMA)

// newCRC64 returns a CRC-64 hash that returns the 64-bit value in
// little-endian encoding using the ECMA polynomial.
func newCRC64() hash.Hash {
    return crc64Hash{Hash64: crc64.New(crc64Table)}
}
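
Editor's note: a minimal in-package sketch (added; it assumes the
unexported newCRC32 defined above) of why the wrappers exist — xz
serializes checksums little-endian, while the stdlib Sum methods append
big-endian bytes:

    // exampleCRC32LE returns the CRC-32 of "hello" in little-endian
    // order, the byte order used inside xz streams.
    func exampleCRC32LE() []byte {
        h := newCRC32()
        h.Write([]byte("hello"))
        return h.Sum(nil) // 4 bytes, least-significant first
    }
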
40  vendor/github.com/ulikunitz/xz/example.go  generated vendored Normal file
@@ -0,0 +1,40 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
    "bytes"
    "io"
    "log"
    "os"

    "github.com/ulikunitz/xz"
)

func main() {
    const text = "The quick brown fox jumps over the lazy dog.\n"
    var buf bytes.Buffer
    // compress text
    w, err := xz.NewWriter(&buf)
    if err != nil {
        log.Fatalf("xz.NewWriter error %s", err)
    }
    if _, err := io.WriteString(w, text); err != nil {
        log.Fatalf("WriteString error %s", err)
    }
    if err := w.Close(); err != nil {
        log.Fatalf("w.Close error %s", err)
    }
    // decompress buffer and write output to stdout
    r, err := xz.NewReader(&buf)
    if err != nil {
        log.Fatalf("NewReader error %s", err)
    }
    if _, err = io.Copy(os.Stdout, r); err != nil {
        log.Fatalf("io.Copy error %s", err)
    }
}

728  vendor/github.com/ulikunitz/xz/format.go  generated vendored Normal file
@@ -0,0 +1,728 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
    "bytes"
    "crypto/sha256"
    "errors"
    "fmt"
    "hash"
    "hash/crc32"
    "io"

    "github.com/ulikunitz/xz/lzma"
)

// allZeros checks whether a given byte slice has only zeros.
func allZeros(p []byte) bool {
    for _, c := range p {
        if c != 0 {
            return false
        }
    }
    return true
}

// padLen returns the length of the padding required for the given
// argument.
func padLen(n int64) int {
    k := int(n % 4)
    if k > 0 {
        k = 4 - k
    }
    return k
}

/*** Header ***/

// headerMagic stores the magic bytes for the header.
var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}

// HeaderLen provides the length of the xz file header.
const HeaderLen = 12

// Constants for the checksum methods supported by xz.
const (
    CRC32  byte = 0x1
    CRC64       = 0x4
    SHA256      = 0xa
)

// errInvalidFlags indicates that flags are invalid.
var errInvalidFlags = errors.New("xz: invalid flags")

// verifyFlags returns the error errInvalidFlags if the value is
// invalid.
func verifyFlags(flags byte) error {
    switch flags {
    case CRC32, CRC64, SHA256:
        return nil
    default:
        return errInvalidFlags
    }
}

// flagstrings maps flag values to strings.
var flagstrings = map[byte]string{
    CRC32:  "CRC-32",
    CRC64:  "CRC-64",
    SHA256: "SHA-256",
}

// flagString returns the string representation for the given flags.
func flagString(flags byte) string {
    s, ok := flagstrings[flags]
    if !ok {
        return "invalid"
    }
    return s
}

// newHashFunc returns a function that creates hash instances for the
// hash method encoded in flags.
func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
    switch flags {
    case CRC32:
        newHash = newCRC32
    case CRC64:
        newHash = newCRC64
    case SHA256:
        newHash = sha256.New
    default:
        err = errInvalidFlags
    }
    return
}

// header provides the actual content of the xz file header: the flags.
type header struct {
    flags byte
}

// Errors returned by readHeader.
var errHeaderMagic = errors.New("xz: invalid header magic bytes")

// ValidHeader checks whether data is a correct xz file header. The
// length of data must be HeaderLen.
func ValidHeader(data []byte) bool {
    var h header
    err := h.UnmarshalBinary(data)
    return err == nil
}

// String returns a string representation of the flags.
func (h header) String() string {
    return flagString(h.flags)
}

// UnmarshalBinary reads header from the provided data slice.
func (h *header) UnmarshalBinary(data []byte) error {
    // header length
    if len(data) != HeaderLen {
        return errors.New("xz: wrong file header length")
    }

    // magic header
    if !bytes.Equal(headerMagic, data[:6]) {
        return errHeaderMagic
    }

    // checksum
    crc := crc32.NewIEEE()
    crc.Write(data[6:8])
    if uint32LE(data[8:]) != crc.Sum32() {
        return errors.New("xz: invalid checksum for file header")
    }

    // stream flags
    if data[6] != 0 {
        return errInvalidFlags
    }
    flags := data[7]
    if err := verifyFlags(flags); err != nil {
        return err
    }

    h.flags = flags
    return nil
}

// MarshalBinary generates the xz file header.
func (h *header) MarshalBinary() (data []byte, err error) {
    if err = verifyFlags(h.flags); err != nil {
        return nil, err
    }

    data = make([]byte, 12)
    copy(data, headerMagic)
    data[7] = h.flags

    crc := crc32.NewIEEE()
    crc.Write(data[6:8])
    putUint32LE(data[8:], crc.Sum32())

    return data, nil
}

/*** Footer ***/

// footerLen defines the length of the footer.
const footerLen = 12

// footerMagic contains the footer magic bytes.
var footerMagic = []byte{'Y', 'Z'}

// footer represents the content of the xz file footer.
type footer struct {
    indexSize int64
    flags     byte
}

// String prints a string representation of the footer structure.
func (f footer) String() string {
    return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize)
}

// Minimum and maximum for the size of the index (backward size).
const (
    minIndexSize = 4
    maxIndexSize = (1 << 32) * 4
)

// MarshalBinary converts footer values into an xz file footer. Note
// that the footer value is checked for correctness.
func (f *footer) MarshalBinary() (data []byte, err error) {
    if err = verifyFlags(f.flags); err != nil {
        return nil, err
    }
    if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) {
        return nil, errors.New("xz: index size out of range")
    }
    if f.indexSize%4 != 0 {
        return nil, errors.New(
            "xz: index size not aligned to four bytes")
    }

    data = make([]byte, footerLen)

    // backward size (index size)
    s := (f.indexSize / 4) - 1
    putUint32LE(data[4:], uint32(s))
    // flags
    data[9] = f.flags
    // footer magic
    copy(data[10:], footerMagic)

    // CRC-32
    crc := crc32.NewIEEE()
    crc.Write(data[4:10])
    putUint32LE(data, crc.Sum32())

    return data, nil
}

// UnmarshalBinary sets the footer value by unmarshalling an xz file
// footer.
func (f *footer) UnmarshalBinary(data []byte) error {
    if len(data) != footerLen {
        return errors.New("xz: wrong footer length")
    }

    // magic bytes
    if !bytes.Equal(data[10:], footerMagic) {
        return errors.New("xz: footer magic invalid")
    }

    // CRC-32
    crc := crc32.NewIEEE()
    crc.Write(data[4:10])
    if uint32LE(data) != crc.Sum32() {
        return errors.New("xz: footer checksum error")
    }

    var g footer
    // backward size (index size)
    g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4

    // flags
    if data[8] != 0 {
        return errInvalidFlags
    }
    g.flags = data[9]
    if err := verifyFlags(g.flags); err != nil {
        return err
    }

    *f = g
    return nil
}

/*** Block Header ***/

// blockHeader represents the content of an xz block header.
type blockHeader struct {
    compressedSize   int64
    uncompressedSize int64
    filters          []filter
}

// String converts the block header into a string.
func (h blockHeader) String() string {
    var buf bytes.Buffer
    first := true
    if h.compressedSize >= 0 {
        fmt.Fprintf(&buf, "compressed size %d", h.compressedSize)
        first = false
    }
    if h.uncompressedSize >= 0 {
        if !first {
            buf.WriteString(" ")
        }
        fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize)
        first = false
    }
    for _, f := range h.filters {
        if !first {
            buf.WriteString(" ")
        }
        fmt.Fprintf(&buf, "filter %s", f)
        first = false
    }
    return buf.String()
}

// Masks for the block flags.
const (
    filterCountMask         = 0x03
    compressedSizePresent   = 0x40
    uncompressedSizePresent = 0x80
    reservedBlockFlags      = 0x3C
)

// errIndexIndicator signals that an index indicator (0x00) has been found
// instead of an expected block header indicator.
var errIndexIndicator = errors.New("xz: found index indicator")

// readBlockHeader reads the block header.
func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) {
    var buf bytes.Buffer
    buf.Grow(20)

    // block header size
    z, err := io.CopyN(&buf, r, 1)
    n = int(z)
    if err != nil {
        return nil, n, err
    }
    s := buf.Bytes()[0]
    if s == 0 {
        return nil, n, errIndexIndicator
    }

    // read complete header
    headerLen := (int(s) + 1) * 4
    buf.Grow(headerLen - 1)
    z, err = io.CopyN(&buf, r, int64(headerLen-1))
    n += int(z)
    if err != nil {
        return nil, n, err
    }

    // unmarshal block header
    h = new(blockHeader)
    if err = h.UnmarshalBinary(buf.Bytes()); err != nil {
        return nil, n, err
    }

    return h, n, nil
}

// readSizeInBlockHeader reads the uncompressed or compressed size
// fields in the block header. The present value informs the function
// whether the respective field is actually present in the header.
func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) {
    if !present {
        return -1, nil
    }
    x, _, err := readUvarint(r)
    if err != nil {
        return 0, err
    }
    if x >= 1<<63 {
        return 0, errors.New("xz: size overflow in block header")
    }
    return int64(x), nil
}

// UnmarshalBinary unmarshals the block header.
func (h *blockHeader) UnmarshalBinary(data []byte) error {
    // Check header length
    s := data[0]
    if data[0] == 0 {
        return errIndexIndicator
    }
    headerLen := (int(s) + 1) * 4
    if len(data) != headerLen {
        return fmt.Errorf("xz: data length %d; want %d", len(data),
            headerLen)
    }
    n := headerLen - 4

    // Check CRC-32
    crc := crc32.NewIEEE()
    crc.Write(data[:n])
    if crc.Sum32() != uint32LE(data[n:]) {
        return errors.New("xz: checksum error for block header")
    }

    // Block header flags
    flags := data[1]
    if flags&reservedBlockFlags != 0 {
        return errors.New("xz: reserved block header flags set")
    }

    r := bytes.NewReader(data[2:n])

    // Compressed size
    var err error
    h.compressedSize, err = readSizeInBlockHeader(
        r, flags&compressedSizePresent != 0)
    if err != nil {
        return err
    }

    // Uncompressed size
    h.uncompressedSize, err = readSizeInBlockHeader(
        r, flags&uncompressedSizePresent != 0)
    if err != nil {
        return err
    }

    h.filters, err = readFilters(r, int(flags&filterCountMask)+1)
    if err != nil {
        return err
    }

    // Check padding
    // Since headerLen is a multiple of 4 we don't need to check
    // alignment.
    k := r.Len()
    // The standard spec says that the padding should have not more
    // than 3 bytes. However we found paddings of 4 or 5 in the
    // wild. See https://github.com/ulikunitz/xz/pull/11 and
    // https://github.com/ulikunitz/xz/issues/15
    //
    // The only reasonable approach seems to be to ignore the
    // padding size. We still check that all padding bytes are zero.
    if !allZeros(data[n-k : n]) {
        return errPadding
    }
    return nil
}

// MarshalBinary marshals the binary header.
func (h *blockHeader) MarshalBinary() (data []byte, err error) {
    if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) {
        return nil, errors.New("xz: filter count wrong")
    }
    for i, f := range h.filters {
        if i < len(h.filters)-1 {
            if f.id() == lzmaFilterID {
                return nil, errors.New(
                    "xz: LZMA2 filter is not the last")
            }
        } else {
            // last filter
            if f.id() != lzmaFilterID {
                return nil, errors.New("xz: " +
                    "last filter must be the LZMA2 filter")
            }
        }
    }

    var buf bytes.Buffer
    // header size must be set at the end
    buf.WriteByte(0)

    // flags
    flags := byte(len(h.filters) - 1)
    if h.compressedSize >= 0 {
        flags |= compressedSizePresent
    }
    if h.uncompressedSize >= 0 {
        flags |= uncompressedSizePresent
    }
    buf.WriteByte(flags)

    p := make([]byte, 10)
    if h.compressedSize >= 0 {
        k := putUvarint(p, uint64(h.compressedSize))
        buf.Write(p[:k])
    }
    if h.uncompressedSize >= 0 {
        k := putUvarint(p, uint64(h.uncompressedSize))
        buf.Write(p[:k])
    }

    for _, f := range h.filters {
        fp, err := f.MarshalBinary()
        if err != nil {
            return nil, err
        }
        buf.Write(fp)
    }

    // padding
    for i := padLen(int64(buf.Len())); i > 0; i-- {
        buf.WriteByte(0)
    }

    // crc placeholder
    buf.Write(p[:4])

    data = buf.Bytes()
    if len(data)%4 != 0 {
        panic("data length not aligned")
    }
    s := len(data)/4 - 1
    if !(1 < s && s <= 255) {
        panic("wrong block header size")
    }
    data[0] = byte(s)

    crc := crc32.NewIEEE()
    crc.Write(data[:len(data)-4])
    putUint32LE(data[len(data)-4:], crc.Sum32())

    return data, nil
}

// Constants used for marshalling and unmarshalling filters in the xz
// block header.
const (
    minFilters    = 1
    maxFilters    = 4
    minReservedID = 1 << 62
)

// filter represents a filter in the block header.
type filter interface {
    id() uint64
    UnmarshalBinary(data []byte) error
    MarshalBinary() (data []byte, err error)
    reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error)
    writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error)
    // last reports whether the filter must be the last filter.
    last() bool
}

// readFilter reads a block filter from the block header. At this point
// in time only the LZMA2 filter is supported.
func readFilter(r io.Reader) (f filter, err error) {
    br := lzma.ByteReader(r)

    // index
    id, _, err := readUvarint(br)
    if err != nil {
        return nil, err
    }

    var data []byte
    switch id {
    case lzmaFilterID:
        data = make([]byte, lzmaFilterLen)
        data[0] = lzmaFilterID
        if _, err = io.ReadFull(r, data[1:]); err != nil {
            return nil, err
        }
        f = new(lzmaFilter)
    default:
        if id >= minReservedID {
            return nil, errors.New(
                "xz: reserved filter id in block stream header")
        }
        return nil, errors.New("xz: invalid filter id")
    }
    if err = f.UnmarshalBinary(data); err != nil {
        return nil, err
    }
    return f, err
}

// readFilters reads count filters. At this point in time only the count
// 1 is supported.
func readFilters(r io.Reader, count int) (filters []filter, err error) {
    if count != 1 {
        return nil, errors.New("xz: unsupported filter count")
    }
    f, err := readFilter(r)
    if err != nil {
        return nil, err
    }
    return []filter{f}, err
}

// writeFilters writes the filters.
func writeFilters(w io.Writer, filters []filter) (n int, err error) {
    for _, f := range filters {
        p, err := f.MarshalBinary()
        if err != nil {
            return n, err
        }
        k, err := w.Write(p)
        n += k
        if err != nil {
            return n, err
        }
    }
    return n, nil
}

/*** Index ***/

// record describes a block in the xz file index.
type record struct {
    unpaddedSize     int64
    uncompressedSize int64
}

// readRecord reads an index record.
func readRecord(r io.ByteReader) (rec record, n int, err error) {
    u, k, err := readUvarint(r)
    n += k
    if err != nil {
        return rec, n, err
    }
    rec.unpaddedSize = int64(u)
    if rec.unpaddedSize < 0 {
        return rec, n, errors.New("xz: unpadded size negative")
    }

    u, k, err = readUvarint(r)
    n += k
    if err != nil {
        return rec, n, err
    }
    rec.uncompressedSize = int64(u)
    if rec.uncompressedSize < 0 {
        return rec, n, errors.New("xz: uncompressed size negative")
    }

    return rec, n, nil
}

// MarshalBinary converts an index record into its binary encoding.
func (rec *record) MarshalBinary() (data []byte, err error) {
    // maximum length of a uvarint is 10
    p := make([]byte, 20)
    n := putUvarint(p, uint64(rec.unpaddedSize))
    n += putUvarint(p[n:], uint64(rec.uncompressedSize))
    return p[:n], nil
}

// writeIndex writes the index, a sequence of records.
func writeIndex(w io.Writer, index []record) (n int64, err error) {
    crc := crc32.NewIEEE()
    mw := io.MultiWriter(w, crc)

    // index indicator
    k, err := mw.Write([]byte{0})
    n += int64(k)
    if err != nil {
        return n, err
    }

    // number of records
    p := make([]byte, 10)
    k = putUvarint(p, uint64(len(index)))
    k, err = mw.Write(p[:k])
    n += int64(k)
    if err != nil {
        return n, err
    }

    // list of records
    for _, rec := range index {
        p, err := rec.MarshalBinary()
        if err != nil {
            return n, err
        }
        k, err = mw.Write(p)
        n += int64(k)
        if err != nil {
            return n, err
        }
    }

    // index padding
    k, err = mw.Write(make([]byte, padLen(int64(n))))
    n += int64(k)
    if err != nil {
        return n, err
    }

    // crc32 checksum
    putUint32LE(p, crc.Sum32())
    k, err = w.Write(p[:4])
    n += int64(k)

    return n, err
}

// readIndexBody reads the index from the reader. It assumes that the
// index indicator has already been read.
func readIndexBody(r io.Reader) (records []record, n int64, err error) {
    crc := crc32.NewIEEE()
    // index indicator
    crc.Write([]byte{0})

    br := lzma.ByteReader(io.TeeReader(r, crc))

    // number of records
    u, k, err := readUvarint(br)
    n += int64(k)
    if err != nil {
        return nil, n, err
    }
    recLen := int(u)
    if recLen < 0 || uint64(recLen) != u {
        return nil, n, errors.New("xz: record number overflow")
    }

    // list of records
    records = make([]record, recLen)
    for i := range records {
        records[i], k, err = readRecord(br)
        n += int64(k)
        if err != nil {
            return nil, n, err
        }
    }

    p := make([]byte, padLen(int64(n+1)), 4)
    k, err = io.ReadFull(br.(io.Reader), p)
    n += int64(k)
    if err != nil {
        return nil, n, err
    }
    if !allZeros(p) {
        return nil, n, errors.New("xz: non-zero byte in index padding")
    }

    // crc32
    s := crc.Sum32()
    p = p[:4]
    k, err = io.ReadFull(br.(io.Reader), p)
    n += int64(k)
    if err != nil {
        return records, n, err
    }
    if uint32LE(p) != s {
        return nil, n, errors.New("xz: wrong checksum for index")
    }

    return records, n, nil
}
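
Editor's sketch (added): the exported ValidHeader and HeaderLen shown
above are enough to probe whether a file starts with an xz stream; the
file name is illustrative.

    package main

    import (
        "fmt"
        "io"
        "os"

        "github.com/ulikunitz/xz"
    )

    func main() {
        f, err := os.Open("bigfile.xz") // illustrative name
        if err != nil {
            panic(err)
        }
        defer f.Close()
        buf := make([]byte, xz.HeaderLen)
        if _, err := io.ReadFull(f, buf); err != nil {
            panic(err)
        }
        fmt.Println(xz.ValidHeader(buf)) // true for a valid xz file
    }
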
142  vendor/github.com/ulikunitz/xz/format_test.go  generated vendored Normal file
@@ -0,0 +1,142 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
    "bytes"
    "testing"
)

func TestHeader(t *testing.T) {
    h := header{flags: CRC32}
    data, err := h.MarshalBinary()
    if err != nil {
        t.Fatalf("MarshalBinary error %s", err)
    }
    var g header
    if err = g.UnmarshalBinary(data); err != nil {
        t.Fatalf("UnmarshalBinary error %s", err)
    }
    if g != h {
        t.Fatalf("unmarshalled %#v; want %#v", g, h)
    }
}

func TestFooter(t *testing.T) {
    f := footer{indexSize: 64, flags: CRC32}
    data, err := f.MarshalBinary()
    if err != nil {
        t.Fatalf("MarshalBinary error %s", err)
    }
    var g footer
    if err = g.UnmarshalBinary(data); err != nil {
        t.Fatalf("UnmarshalBinary error %s", err)
    }
    if g != f {
        t.Fatalf("unmarshalled %#v; want %#v", g, f)
    }
}

func TestRecord(t *testing.T) {
    r := record{1234567, 10000}
    p, err := r.MarshalBinary()
    if err != nil {
        t.Fatalf("MarshalBinary error %s", err)
    }
    n := len(p)
    buf := bytes.NewReader(p)
    g, m, err := readRecord(buf)
    if err != nil {
        t.Fatalf("readRecord error %s", err)
    }
    if m != n {
        t.Fatalf("read %d bytes; wrote %d", m, n)
    }
    if g.unpaddedSize != r.unpaddedSize {
        t.Fatalf("got unpaddedSize %d; want %d", g.unpaddedSize,
            r.unpaddedSize)
    }
    if g.uncompressedSize != r.uncompressedSize {
        t.Fatalf("got uncompressedSize %d; want %d", g.uncompressedSize,
            r.uncompressedSize)
    }
}

func TestIndex(t *testing.T) {
    records := []record{{1234, 1}, {2345, 2}}

    var buf bytes.Buffer
    n, err := writeIndex(&buf, records)
    if err != nil {
        t.Fatalf("writeIndex error %s", err)
    }
    if n != int64(buf.Len()) {
        t.Fatalf("writeIndex returned %d; want %d", n, buf.Len())
    }

    // indicator
    c, err := buf.ReadByte()
    if err != nil {
        t.Fatalf("buf.ReadByte error %s", err)
    }
    if c != 0 {
        t.Fatalf("indicator %d; want %d", c, 0)
    }

    g, m, err := readIndexBody(&buf)
    if err != nil {
        for i, r := range g {
            t.Logf("records[%d] %v", i, r)
        }
        t.Fatalf("readIndexBody error %s", err)
    }
    if m != n-1 {
        t.Fatalf("readIndexBody returned %d; want %d", m, n-1)
    }
    for i, rec := range records {
        if g[i] != rec {
            t.Errorf("records[%d] is %v; want %v", i, g[i], rec)
        }
    }
}

func TestBlockHeader(t *testing.T) {
    h := blockHeader{
        compressedSize:   1234,
        uncompressedSize: -1,
        filters:          []filter{&lzmaFilter{4096}},
    }
    data, err := h.MarshalBinary()
    if err != nil {
        t.Fatalf("MarshalBinary error %s", err)
    }

    r := bytes.NewReader(data)
    g, n, err := readBlockHeader(r)
    if err != nil {
        t.Fatalf("readBlockHeader error %s", err)
    }
    if n != len(data) {
        t.Fatalf("readBlockHeader returns %d bytes; want %d", n,
            len(data))
    }
    if g.compressedSize != h.compressedSize {
        t.Errorf("got compressedSize %d; want %d",
            g.compressedSize, h.compressedSize)
    }
    if g.uncompressedSize != h.uncompressedSize {
        t.Errorf("got uncompressedSize %d; want %d",
            g.uncompressedSize, h.uncompressedSize)
    }
    if len(g.filters) != len(h.filters) {
        t.Errorf("got len(filters) %d; want %d",
            len(g.filters), len(h.filters))
    }
    glf := g.filters[0].(*lzmaFilter)
    hlf := h.filters[0].(*lzmaFilter)
    if glf.dictCap != hlf.dictCap {
        t.Errorf("got dictCap %d; want %d", glf.dictCap, hlf.dictCap)
    }
}

BIN  vendor/github.com/ulikunitz/xz/fox.xz  generated vendored Normal file
Binary file not shown.
181  vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go  generated vendored Normal file
@@ -0,0 +1,181 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

// CyclicPoly provides a cyclic polynomial rolling hash.
type CyclicPoly struct {
    h uint64
    p []uint64
    i int
}

// ror rotates the unsigned 64-bit integer to the right. The argument s
// must be less than 64.
func ror(x uint64, s uint) uint64 {
    return (x >> s) | (x << (64 - s))
}

// NewCyclicPoly creates a new instance of the CyclicPoly structure. The
// argument n gives the number of bytes for which a hash will be executed.
// This number must be positive; the method panics if this isn't the case.
func NewCyclicPoly(n int) *CyclicPoly {
    if n < 1 {
        panic("argument n must be positive")
    }
    return &CyclicPoly{p: make([]uint64, 0, n)}
}

// Len returns the length of the byte sequence for which a hash is generated.
func (r *CyclicPoly) Len() int {
    return cap(r.p)
}

// RollByte hashes the next byte and returns a hash value. The complete
// hash becomes available after at least Len() bytes have been hashed.
func (r *CyclicPoly) RollByte(x byte) uint64 {
    y := hash[x]
    if len(r.p) < cap(r.p) {
        r.h = ror(r.h, 1) ^ y
        r.p = append(r.p, y)
    } else {
        r.h ^= ror(r.p[r.i], uint(cap(r.p)-1))
        r.h = ror(r.h, 1) ^ y
        r.p[r.i] = y
        r.i = (r.i + 1) % cap(r.p)
    }
    return r.h
}
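
// Editor's note (comment added; not in the original source): for a
// window b_1..b_n with table values y_i = hash[b_i], RollByte maintains
// the cyclic-polynomial (Buzhash) invariant
//
//     h = ror(y_1, n-1) ^ ror(y_2, n-2) ^ ... ^ y_n
//
// Rolling first XORs out the oldest term, which after n-1 single-bit
// rotations sits at ror(y_oldest, n-1), then rotates the rest by one
// and XORs in the new byte's table value.
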
// Stores the hash for the individual bytes.
var hash = [256]uint64{
    0x2e4fc3f904065142, 0xc790984cfbc99527,
    0x879f95eb8c62f187, 0x3b61be86b5021ef2,
    0x65a896a04196f0a5, 0xc5b307b80470b59e,
    0xd3bff376a70df14b, 0xc332f04f0b3f1701,
    0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53,
    0x1906a10c2c1c0208, 0xfb0c712a03421c0d,
    0x38be311a65c9552b, 0xfee7ee4ca6445c7e,
    0x71aadeded184f21e, 0xd73426fccda23b2d,
    0x29773fb5fb9600b5, 0xce410261cd32981a,
    0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c,
    0xc13e35fc9c73a887, 0xf30ed5c201e76dbc,
    0xa5f10b3910482cea, 0x2945d59be02dfaad,
    0x06ee334ff70571b5, 0xbabf9d8070f44380,
    0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7,
    0x26183cb9f7b1664c, 0xea71dac7da068f21,
    0xea92eca5bd1d0bb7, 0x415595862defcd75,
    0x248a386023c60648, 0x9cf021ab284b3c8a,
    0xfc9372df02870f6c, 0x2b92d693eeb3b3fc,
    0x73e799d139dc6975, 0x7b15ae312486363c,
    0xb70e5454a2239c80, 0x208e3fb31d3b2263,
    0x01f563cabb930f44, 0x2ac4533d2a3240d8,
    0x84231ed1064f6f7c, 0xa9f020977c2a6d19,
    0x213c227271c20122, 0x09fe8a9a0a03d07a,
    0x4236dc75bcaf910c, 0x460a8b2bead8f17e,
    0xd9b27be1aa07055f, 0xd202d5dc4b11c33e,
    0x70adb010543bea12, 0xcdae938f7ea6f579,
    0x3f3d870208672f4d, 0x8e6ccbce9d349536,
    0xe4c0871a389095ae, 0xf5f2a49152bca080,
    0x9a43f9b97269934e, 0xc17b3753cb6f475c,
    0xd56d941e8e206bd4, 0xac0a4f3e525eda00,
    0xa06d5a011912a550, 0x5537ed19537ad1df,
    0xa32fe713d611449d, 0x2a1d05b47c3b579f,
    0x991d02dbd30a2a52, 0x39e91e7e28f93eb0,
    0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97,
    0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44,
    0x0b63d5d801708420, 0x8f227ca8f37ffaec,
    0x0256278670887c24, 0x107e14877dbf540b,
    0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61,
    0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001,
    0x31f601d5d31c48c4, 0x72ff3c0928bcaec7,
    0xd99264421147eb03, 0x535a2d6d38aefcfe,
    0x6ba8b4454a916237, 0xfa39366eaae4719c,
    0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4,
    0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8,
    0xd61c2503fe639144, 0x30ce625441eb92d3,
    0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5,
    0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf,
    0xc7ea4872c96b83ae, 0x6dd5d376f4392382,
    0x1be88681aaa9792f, 0xfef465ee1b6c10d9,
    0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9,
    0x7808e902b3857d0b, 0x171c9c4ea4607972,
    0x58d66274850146df, 0x42b311c10d3981d1,
    0x647fa8c621c41a4c, 0xf472771c66ddfedc,
    0x338d27e3f847b46b, 0x6402ce3da97545ce,
    0x5162db616fc38638, 0x9c83be97bc22a50e,
    0x2d3d7478a78d5e72, 0xe621a9b938fd5397,
    0x9454614eb0f81c45, 0x395fb6e742ed39b6,
    0x77dd9179d06037bf, 0xc478d0fee4d2656d,
    0x35d9d6cb772007af, 0x83a56e92c883f0f6,
    0x27937453250c00a1, 0x27bd6ebc3a46a97d,
    0x9f543bf784342d51, 0xd158f38c48b0ed52,
    0x8dd8537c045f66b4, 0x846a57230226f6d5,
    0x6b13939e0c4e7cdf, 0xfca25425d8176758,
    0x92e5fc6cd52788e6, 0x9992e13d7a739170,
    0x518246f7a199e8ea, 0xf104c2a71b9979c7,
    0x86b3ffaabea4768f, 0x6388061cf3e351ad,
    0x09d9b5295de5bbb5, 0x38bf1638c2599e92,
    0x1d759846499e148d, 0x4c0ff015e5f96ef4,
    0xa41a94cfa270f565, 0x42d76f9cb2326c0b,
    0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a,
    0x337523aabbe6cf8d, 0x646bb14001d42b12,
    0xc178729d138adc74, 0xf900ef4491f24086,
    0xee1a90d334bb5ac4, 0x9755c92247301a50,
    0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9,
    0x0fa8084cf91ac6ff, 0x10d226cf136e6189,
    0xd302057a07d4fb21, 0x5f03800e20a0fcc3,
    0x80118d4ae46bd210, 0x58ab61a522843733,
    0x51edd575c5432a4b, 0x94ee6ff67f9197f7,
    0x765669e0e5e8157b, 0xa5347830737132f0,
    0x3ba485a69f01510c, 0x0b247d7b957a01c3,
    0x1b3d63449fd807dc, 0x0fdc4721c30ad743,
    0x8b535ed3829b2b14, 0xee41d0cad65d232c,
    0xe6a99ed97a6a982f, 0x65ac6194c202003d,
    0x692accf3a70573eb, 0xcc3c02c3e200d5af,
    0x0d419e8b325914a3, 0x320f160f42c25e40,
    0x00710d647a51fe7a, 0x3c947692330aed60,
    0x9288aa280d355a7a, 0xa1806a9b791d1696,
    0x5d60e38496763da1, 0x6c69e22e613fd0f4,
    0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba,
    0x460c17992cbaece1, 0xf7822c5444d3297f,
    0x344a9790c69b74aa, 0xb80a42e6cae09dce,
    0x1b1361eaf2b1e757, 0xd84c1e758e236f01,
    0x88e0b7be347627cc, 0x45246009b7a99490,
    0x8011c6dd3fe50472, 0xc341d682bffb99d7,
    0x2511be93808e2d15, 0xd5bc13d7fd739840,
    0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157,
    0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0,
    0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc,
    0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e,
    0xa559cce0d9199aac, 0xde39d47ef3723380,
    0xe5b69d848ce42e35, 0xefa24296f8e79f52,
    0x70190b59db9a5afc, 0x26f166cdb211e7bf,
    0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017,
    0xb9059b05e9420d90, 0x2f0da855c9388754,
    0x611d5e9ab77949cc, 0x2912038ac01163f4,
    0x0231df50402b2fba, 0x45660fc4f3245f58,
    0xb91cc97c7c8dac50, 0xb72d2aafe4953427,
    0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2,
    0x1310e1c1a48d21c3, 0xad48a7810cdd8544,
    0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de,
    0xe70cfc8fe1ee9626, 0xef4711b0d8dda442,
    0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93,
    0x9b37db9d0335a39c, 0x494b6f870f5cfebc,
    0x6d1b3c1149dda943, 0x372c943a518c1093,
    0xad27af45e77c09c4, 0x3b6f92b646044604,
    0xac2917909f5fcf4f, 0x2069a60e977e5557,
    0x353a469e71014de5, 0x24be356281f55c15,
    0x2b6d710ba8e9adea, 0x404ad1751c749c29,
    0xed7311bf23d7f185, 0xba4f6976b4acc43e,
    0x32d7198d2bc39000, 0xee667019014d6e01,
    0x494ef3e128d14c83, 0x1f95a152baecd6be,
    0x201648dff1f483a5, 0x68c28550c8384af6,
    0x5fc834a6824a7f48, 0x7cd06cb7365eaf28,
    0xd82bbd95e9b30909, 0x234f0d1694c53f6d,
    0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e,
    0xf8f6b97f5585080a, 0x74236084be57b95b,
    0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b,
    0x4378ffe93e1528c5, 0x94ca92a17118e2d2,
}

30  vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly_test.go  generated vendored Normal file
@@ -0,0 +1,30 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

import "testing"

func TestCyclicPolySimple(t *testing.T) {
    p := []byte("abcde")
    r := NewCyclicPoly(4)
    h2 := Hashes(r, p)
    for i, h := range h2 {
        w := Hashes(r, p[i:i+4])[0]
        t.Logf("%d h=%#016x w=%#016x", i, h, w)
        if h != w {
            t.Errorf("rolling hash %d: %#016x; want %#016x",
                i, h, w)
        }
    }
}

func BenchmarkCyclicPoly(b *testing.B) {
    p := makeBenchmarkBytes(4096)
    r := NewCyclicPoly(4)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        Hashes(r, p)
    }
}

14
vendor/github.com/ulikunitz/xz/internal/hash/doc.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package hash provides rolling hashes.

Rolling hashes are used to maintain the positions of n-byte
sequences in the dictionary buffer.

The package currently provides the Rabin-Karp rolling hash and a Cyclic
Polynomial hash. Both support the Hashes method to be used with an interface.
*/
package hash
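
The Hashes helper mentioned in the package comment turns a Roller into per-window hash values: for an n-byte roller and input p it yields len(p)-n+1 values, one per window. A minimal sketch of that contract, assuming it sits inside the internal hash package (the package is internal, so it cannot be imported from outside the xz module); the example function name is hypothetical:

    package hash

    import "fmt"

    func ExampleHashes() {
        // "abcdef" contains three 4-byte windows: abcd, bcde, cdef,
        // so Hashes returns exactly three values.
        r := NewCyclicPoly(4)
        h := Hashes(r, []byte("abcdef"))
        fmt.Println(len(h))
        // Output: 3
    }
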
66
vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

// A is the default constant for the Rabin-Karp rolling hash. This is a
// random prime.
const A = 0x97b548add41d5da1

// RabinKarp supports the computation of a rolling hash.
type RabinKarp struct {
	A uint64
	// a^n
	aOldest uint64
	h       uint64
	p       []byte
	i       int
}

// NewRabinKarp creates a new RabinKarp value. The argument n defines the
// length of the byte sequence to be hashed. The default constant will be
// used.
func NewRabinKarp(n int) *RabinKarp {
	return NewRabinKarpConst(n, A)
}

// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
// length of the byte sequence to be hashed. The argument a provides the
// constant used to compute the hash.
func NewRabinKarpConst(n int, a uint64) *RabinKarp {
	if n <= 0 {
		panic("number of bytes n must be positive")
	}
	aOldest := uint64(1)
	// There are faster methods. For the small n required by the LZMA
	// compressor O(n) is sufficient.
	for i := 0; i < n; i++ {
		aOldest *= a
	}
	return &RabinKarp{
		A: a, aOldest: aOldest,
		p: make([]byte, 0, n),
	}
}

// Len returns the length of the byte sequence.
func (r *RabinKarp) Len() int {
	return cap(r.p)
}

// RollByte computes the hash after x has been added.
func (r *RabinKarp) RollByte(x byte) uint64 {
	if len(r.p) < cap(r.p) {
		r.h += uint64(x)
		r.h *= r.A
		r.p = append(r.p, x)
	} else {
		r.h -= uint64(r.p[r.i]) * r.aOldest
		r.h += uint64(x)
		r.h *= r.A
		r.p[r.i] = x
		r.i = (r.i + 1) % cap(r.p)
	}
	return r.h
}
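
RollByte maintains the invariant h = p[0]·a^n + p[1]·a^(n-1) + ... + p[n-1]·a over the current window: subtracting the oldest byte times aOldest = a^n removes its contribution before the final multiply shifts everything up by one power. The comment in NewRabinKarpConst alludes to faster ways of computing a^n; the usual choice is exponentiation by squaring, which needs only O(log n) multiplications in the wrapping uint64 ring. A sketch of that alternative, with the helper name powUint64 being hypothetical, not part of the vendored code:

    // powUint64 computes a^n over uint64 (wrapping arithmetic) by
    // square-and-multiply: each loop step halves n and squares a.
    func powUint64(a uint64, n int) uint64 {
        r := uint64(1)
        for ; n > 0; n >>= 1 {
            if n&1 == 1 {
                r *= a
            }
            a *= a
        }
        return r
    }
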
42
vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp_test.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

import (
	"math/rand"
	"testing"
)

func TestRabinKarpSimple(t *testing.T) {
	p := []byte("abcde")
	r := NewRabinKarp(4)
	h2 := Hashes(r, p)
	for i, h := range h2 {
		w := Hashes(r, p[i:i+4])[0]
		t.Logf("%d h=%#016x w=%#016x", i, h, w)
		if h != w {
			t.Errorf("rolling hash %d: %#016x; want %#016x",
				i, h, w)
		}
	}
}

func makeBenchmarkBytes(n int) []byte {
	rnd := rand.New(rand.NewSource(42))
	p := make([]byte, n)
	for i := range p {
		p[i] = byte(rnd.Uint32())
	}
	return p
}

func BenchmarkRabinKarp(b *testing.B) {
	p := makeBenchmarkBytes(4096)
	r := NewRabinKarp(4)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Hashes(r, p)
	}
}
29
vendor/github.com/ulikunitz/xz/internal/hash/roller.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

// Roller provides an interface for rolling hashes. The hash value will become
// valid after RollByte has been called Len times.
type Roller interface {
	Len() int
	RollByte(x byte) uint64
}

// Hashes computes all hash values for the array p. Note that the state of the
// roller is changed.
func Hashes(r Roller, p []byte) []uint64 {
	n := r.Len()
	if len(p) < n {
		return nil
	}
	h := make([]uint64, len(p)-n+1)
	for i := 0; i < n-1; i++ {
		r.RollByte(p[i])
	}
	for i := range h {
		h[i] = r.RollByte(p[i+n-1])
	}
	return h
}
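
Any type with Len and RollByte can be fed to Hashes. A toy Roller, hypothetical and in-package, that hashes a window by summing its bytes illustrates the contract; the return value only becomes meaningful once Len bytes have been rolled, which is exactly the warm-up loop Hashes performs:

    // sumRoller hashes an n-byte window as the (wrapping) sum of its
    // bytes. It is far too weak for real matching, but it satisfies
    // the Roller interface.
    type sumRoller struct {
        n   int
        buf []byte
        i   int
        sum uint64
    }

    func (r *sumRoller) Len() int { return r.n }

    func (r *sumRoller) RollByte(x byte) uint64 {
        if len(r.buf) < r.n {
            // still filling the first window
            r.buf = append(r.buf, x)
            r.sum += uint64(x)
        } else {
            // drop the oldest byte, add the new one
            r.sum += uint64(x) - uint64(r.buf[r.i])
            r.buf[r.i] = x
            r.i = (r.i + 1) % r.n
        }
        return r.sum
    }

Hashes(&sumRoller{n: 4, buf: make([]byte, 0, 4)}, p) then yields one sum per 4-byte window of p.
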
17590
vendor/github.com/ulikunitz/xz/internal/randtxt/englm3.go
generated
vendored
Normal file
File diff suppressed because it is too large
82
vendor/github.com/ulikunitz/xz/internal/randtxt/groupreader.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package randtxt

import (
	"bufio"
	"io"
	"unicode"
)

// GroupReader groups the incoming text into groups of 5 characters;
// the number of groups per line can be controlled.
type GroupReader struct {
	R             io.ByteReader
	GroupsPerLine int
	off           int64
	eof           bool
}

// NewGroupReader creates a new group reader.
func NewGroupReader(r io.Reader) *GroupReader {
	return &GroupReader{R: bufio.NewReader(r)}
}

// Read formats the data provided by the internal reader in groups of 5
// characters. If GroupsPerLine hasn't been initialized, 8 groups per
// line will be produced.
func (r *GroupReader) Read(p []byte) (n int, err error) {
	if r.eof {
		return 0, io.EOF
	}
	groupsPerLine := r.GroupsPerLine
	if groupsPerLine < 1 {
		groupsPerLine = 8
	}
	lineLen := int64(groupsPerLine * 6)
	var c byte
	for i := range p {
		switch {
		case r.off%lineLen == lineLen-1:
			if i+1 == len(p) && len(p) > 1 {
				return i, nil
			}
			c = '\n'
		case r.off%6 == 5:
			if i+1 == len(p) && len(p) > 1 {
				return i, nil
			}
			c = ' '
		default:
			c, err = r.R.ReadByte()
			if err == io.EOF {
				r.eof = true
				if i > 0 {
					switch p[i-1] {
					case ' ':
						p[i-1] = '\n'
						fallthrough
					case '\n':
						return i, io.EOF
					}
				}
				p[i] = '\n'
				return i + 1, io.EOF
			}
			if err != nil {
				return i, err
			}
			switch {
			case c == ' ':
				c = '_'
			case !unicode.IsPrint(rune(c)):
				c = '-'
			}
		}
		p[i] = c
		r.off++
	}
	return len(p), nil
}
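
GroupReader is what makes the randtxt test output look like classic cipher groups: spaces in the input become '_', non-printable bytes become '-', every 5 bytes form a group, groups are separated by a single space, and GroupsPerLine groups end each line. A hedged in-package sketch (randtxt is internal, and demoGroupReader is a hypothetical name):

    package randtxt

    import (
        "fmt"
        "io/ioutil"
        "strings"
    )

    func demoGroupReader() {
        gr := NewGroupReader(strings.NewReader("HELLO WORLD THIS IS A TEST"))
        gr.GroupsPerLine = 2
        p, err := ioutil.ReadAll(gr)
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        // Two 5-character groups per line, e.g. "HELLO _WORL" on the
        // first line.
        fmt.Print(string(p))
    }
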
185
vendor/github.com/ulikunitz/xz/internal/randtxt/probs.go
generated
vendored
Normal file
@@ -0,0 +1,185 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package randtxt supports the generation of random text using a
// trigram model for the English language.
package randtxt

import (
	"math"
	"math/rand"
	"sort"
)

// ngram stores an entry from the language model.
type ngram struct {
	s   string
	lgP float64
	lgQ float64
}

// ngrams represents a slice of ngram values and is used to represent a
// language model.
type ngrams []ngram

func (s ngrams) Len() int           { return len(s) }
func (s ngrams) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s ngrams) Less(i, j int) bool { return s[i].s < s[j].s }

// Sort sorts the language model by ngram.
func (s ngrams) Sort() { sort.Sort(s) }

// Search looks for an ngram or the position where it would be
// inserted.
func (s ngrams) Search(g string) int {
	return sort.Search(len(s), func(k int) bool { return s[k].s >= g })
}

// prob represents a string, usually an ngram, and a probability value.
type prob struct {
	s string
	p float64
}

// probs is a slice of prob values that can be sorted and searched.
type probs []prob

func (s probs) Len() int           { return len(s) }
func (s probs) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s probs) Less(i, j int) bool { return s[i].s < s[j].s }

// SortByNgram sorts the probs slice by ngram, field s.
func (s probs) SortByNgram() { sort.Sort(s) }

// SortByProb sorts the probs slice by probability, field p.
func (s probs) SortByProb() { sort.Sort(byProb{s}) }

// SearchNgram searches for an ngram or the position where it would be
// inserted.
func (s probs) SearchNgram(g string) int {
	return sort.Search(len(s), func(k int) bool { return s[k].s >= g })
}

// SearchProb searches ngrams for a specific probability or where it
// would be inserted.
func (s probs) SearchProb(p float64) int {
	return sort.Search(len(s), func(k int) bool { return s[k].p >= p })
}

// byProb is used to sort a probs slice by probability, field p.
type byProb struct {
	probs
}

func (s byProb) Less(i, j int) bool {
	return s.probs[i].p < s.probs[j].p
}

// cdf can be used to set up a cumulative distribution function
// represented by a probs slice. We should have returned an actual
// function.
func cdf(n int, p func(i int) prob) probs {
	prs := make(probs, n)
	sum := 0.0
	for i := range prs {
		pr := p(i)
		sum += pr.p
		prs[i] = pr
	}
	q := 1.0 / sum
	x := 0.0
	for i, pr := range prs {
		x += pr.p * q
		if x > 1.0 {
			x = 1.0
		}
		prs[i].p = x
	}
	if !sort.IsSorted(byProb{prs}) {
		panic("cdf not sorted")
	}
	return prs
}

// pCDFOfLM converts a language model into a cumulative distribution
// function represented by probs.
func pCDFOfLM(lm ngrams) probs {
	return cdf(len(lm), func(i int) prob {
		return prob{lm[i].s, math.Exp2(lm[i].lgP)}
	})
}

// cCDF converts an ngrams slice into a cumulative distribution function
// using the conditional probability lgQ.
func cCDF(s ngrams) probs {
	return cdf(len(s), func(i int) prob {
		return prob{s[i].s, math.Exp2(s[i].lgQ)}
	})
}

// comap contains a map of conditional distribution functions for the
// last character.
type comap map[string]probs

// comapOfLM converts a language model into a map of conditional
// distribution functions.
func comapOfLM(lm ngrams) comap {
	if !sort.IsSorted(lm) {
		panic("lm is not sorted")
	}
	m := make(comap, 26*26)
	for i := 0; i < len(lm); {
		j := i
		g := lm[i].s
		g2 := g[:2]
		z := g2 + "Z"
		i = lm.Search(z)
		if i >= len(lm) || lm[i].s != z {
			panic("unexpected search result")
		}
		i++
		m[g2] = cCDF(lm[j:i])
	}
	return m
}

// trigram returns the trigram with prefix g2 using a probability value
// in the range [0.0,1.0).
func (c comap) trigram(g2 string, p float64) string {
	prs := c[g2]
	i := prs.SearchProb(p)
	return prs[i].s
}

var (
	// CDF for normal probabilities
	pcdf = pCDFOfLM(englm3)
	// map of two letter conditionals
	cmap = comapOfLM(englm3)
)

// Reader generates a stream of text of uppercase letters with trigrams
// distributed according to a language model of the English language.
type Reader struct {
	rnd *rand.Rand
	g3  string
}

// NewReader creates a new reader. The argument src must create a uniformly
// distributed stream of random values.
func NewReader(src rand.Source) *Reader {
	rnd := rand.New(src)
	i := pcdf.SearchProb(rnd.Float64())
	return &Reader{rnd, pcdf[i].s}
}

// Read reads random text. The Read function will always return len(p)
// bytes and will never return an error.
func (r *Reader) Read(p []byte) (n int, err error) {
	for i := range p {
		r.g3 = cmap.trigram(r.g3[1:], r.rnd.Float64())
		p[i] = r.g3[2]
	}
	return len(p), nil
}
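
cdf normalizes the raw probabilities and stores running totals, so sampling reduces to one binary search: draw u uniformly from [0,1) and take the first entry whose cumulative value is at least u, which is exactly what SearchProb does. A small in-package sketch under that reading (demoCDF is a hypothetical name):

    func demoCDF() {
        // Unnormalized weights 2:1:1 become the cumulative values
        // 0.5, 0.75, 1.0.
        w := probs{{"A", 2}, {"B", 1}, {"C", 1}}
        c := cdf(len(w), func(i int) prob { return w[i] })
        rnd := rand.New(rand.NewSource(1))
        counts := map[string]int{}
        for i := 0; i < 1000; i++ {
            counts[c[c.SearchProb(rnd.Float64())].s]++
        }
        fmt.Println(counts) // roughly map[A:500 B:250 C:250]
    }
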
37
vendor/github.com/ulikunitz/xz/internal/randtxt/probs_test.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package randtxt

import (
	"bufio"
	"io"
	"math/rand"
	"testing"
)

func TestReader(t *testing.T) {
	lr := io.LimitReader(NewReader(rand.NewSource(13)), 195)
	pretty := NewGroupReader(lr)
	scanner := bufio.NewScanner(pretty)
	for scanner.Scan() {
		t.Log(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		t.Fatalf("scanner error %s", err)
	}
}

func TestComap(t *testing.T) {
	prs := cmap["TH"]
	for _, p := range prs[3:6] {
		t.Logf("%v", p)
	}
	p := 0.2
	x := cmap.trigram("TH", p)
	if x != "THE" {
		t.Fatalf("cmap.trigram(%q, %.1f) returned %q; want %q",
			"TH", p, x, "THE")
	}
}
457
vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
generated
vendored
Normal file
@@ -0,0 +1,457 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package xlog provides a simple logging package that allows disabling
// certain message categories. It defines a type, Logger, with multiple
// methods for formatting output. The package has also a predefined
// 'standard' Logger accessible through the helper functions Print[f|ln],
// Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln] that are easier
// to use than creating a Logger manually. That logger writes to
// standard error and prints the date and time of each logged message,
// which can be configured using the function SetFlags.
//
// The Fatal functions call os.Exit(1) after the message is output
// unless suppressed by the flags. The Panic functions call panic
// after writing the log message unless suppressed.
package xlog

import (
	"fmt"
	"io"
	"os"
	"runtime"
	"sync"
	"time"
)

// The flags define what information is prefixed to each log entry
// generated by the Logger. The Lno* versions allow the suppression of
// specific output. The bits are or'ed together to control what will be
// printed. There is no control over the order of the items printed and
// the format. The full format is:
//
//	2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message
//
const (
	Ldate         = 1 << iota // the date: 2009-01-23
	Ltime                     // the time: 01:23:23
	Lmicroseconds             // microsecond resolution: 01:23:23.123123
	Llongfile                 // full file name and line number: /a/b/c/d.go:23
	Lshortfile                // final file name element and line number: d.go:23
	Lnopanic                  // suppresses output from Panic[f|ln] but not the panic call
	Lnofatal                  // suppresses output from Fatal[f|ln] but not the exit
	Lnowarn                   // suppresses output from Warn[f|ln]
	Lnoprint                  // suppresses output from Print[f|ln]
	Lnodebug                  // suppresses output from Debug[f|ln]
	// initial values for the standard logger
	Lstdflags = Ldate | Ltime | Lnodebug
)

// A Logger represents an active logging object that generates lines of
// output to an io.Writer. Each logging operation if not suppressed
// makes a single call to the Writer's Write method. A Logger can be
// used simultaneously from multiple goroutines; it guarantees to
// serialize access to the Writer.
type Logger struct {
	mu sync.Mutex // ensures atomic writes; and protects the following
	// fields
	prefix string    // prefix to write at beginning of each line
	flag   int       // properties
	out    io.Writer // destination for output
	buf    []byte    // for accumulating text to write
}

// New creates a new Logger. The out argument sets the destination to
// which the log output will be written. The prefix appears at the
// beginning of each log line. The flag argument defines the logging
// properties.
func New(out io.Writer, prefix string, flag int) *Logger {
	return &Logger{out: out, prefix: prefix, flag: flag}
}

// std is the standard logger used by the package scope functions.
var std = New(os.Stderr, "", Lstdflags)

// itoa converts the integer to ASCII. A negative width will avoid
// zero-padding. The function supports only non-negative integers.
func itoa(buf *[]byte, i int, wid int) {
	var u = uint(i)
	if u == 0 && wid <= 1 {
		*buf = append(*buf, '0')
		return
	}
	var b [32]byte
	bp := len(b)
	for ; u > 0 || wid > 0; u /= 10 {
		bp--
		wid--
		b[bp] = byte(u%10) + '0'
	}
	*buf = append(*buf, b[bp:]...)
}

// formatHeader puts the header into the buf field of the buffer.
func (l *Logger) formatHeader(t time.Time, file string, line int) {
	l.buf = append(l.buf, l.prefix...)
	if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
		if l.flag&Ldate != 0 {
			year, month, day := t.Date()
			itoa(&l.buf, year, 4)
			l.buf = append(l.buf, '-')
			itoa(&l.buf, int(month), 2)
			l.buf = append(l.buf, '-')
			itoa(&l.buf, day, 2)
			l.buf = append(l.buf, ' ')
		}
		if l.flag&(Ltime|Lmicroseconds) != 0 {
			hour, min, sec := t.Clock()
			itoa(&l.buf, hour, 2)
			l.buf = append(l.buf, ':')
			itoa(&l.buf, min, 2)
			l.buf = append(l.buf, ':')
			itoa(&l.buf, sec, 2)
			if l.flag&Lmicroseconds != 0 {
				l.buf = append(l.buf, '.')
				itoa(&l.buf, t.Nanosecond()/1e3, 6)
			}
			l.buf = append(l.buf, ' ')
		}
	}
	if l.flag&(Lshortfile|Llongfile) != 0 {
		if l.flag&Lshortfile != 0 {
			short := file
			for i := len(file) - 1; i > 0; i-- {
				if file[i] == '/' {
					short = file[i+1:]
					break
				}
			}
			file = short
		}
		l.buf = append(l.buf, file...)
		l.buf = append(l.buf, ':')
		itoa(&l.buf, line, -1)
		l.buf = append(l.buf, ": "...)
	}
}

func (l *Logger) output(calldepth int, now time.Time, s string) error {
	var file string
	var line int
	if l.flag&(Lshortfile|Llongfile) != 0 {
		l.mu.Unlock()
		var ok bool
		_, file, line, ok = runtime.Caller(calldepth)
		if !ok {
			file = "???"
			line = 0
		}
		l.mu.Lock()
	}
	l.buf = l.buf[:0]
	l.formatHeader(now, file, line)
	l.buf = append(l.buf, s...)
	if len(s) == 0 || s[len(s)-1] != '\n' {
		l.buf = append(l.buf, '\n')
	}
	_, err := l.out.Write(l.buf)
	return err
}

// Output writes the string s with the header controlled by the flags to
// the l.out writer. A newline will be appended if s doesn't end in a
// newline. Calldepth is used to recover the PC, although all current
// calls of Output use the call depth 2. Access to the function is serialized.
func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error {
	now := time.Now()
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&noflag != 0 {
		return nil
	}
	s := fmt.Sprint(v...)
	return l.output(calldepth+1, now, s)
}

// Outputf works like Output but formats the output like Printf.
func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error {
	now := time.Now()
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&noflag != 0 {
		return nil
	}
	s := fmt.Sprintf(format, v...)
	return l.output(calldepth+1, now, s)
}

// Outputln works like Output but formats the output like Println.
func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error {
	now := time.Now()
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&noflag != 0 {
		return nil
	}
	s := fmt.Sprintln(v...)
	return l.output(calldepth+1, now, s)
}

// Panic prints the message like Print and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func (l *Logger) Panic(v ...interface{}) {
	l.Output(2, Lnopanic, v...)
	s := fmt.Sprint(v...)
	panic(s)
}

// Panic prints the message like Print and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func Panic(v ...interface{}) {
	std.Output(2, Lnopanic, v...)
	s := fmt.Sprint(v...)
	panic(s)
}

// Panicf prints the message like Printf and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func (l *Logger) Panicf(format string, v ...interface{}) {
	l.Outputf(2, Lnopanic, format, v...)
	s := fmt.Sprintf(format, v...)
	panic(s)
}

// Panicf prints the message like Printf and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func Panicf(format string, v ...interface{}) {
	std.Outputf(2, Lnopanic, format, v...)
	s := fmt.Sprintf(format, v...)
	panic(s)
}

// Panicln prints the message like Println and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func (l *Logger) Panicln(v ...interface{}) {
	l.Outputln(2, Lnopanic, v...)
	s := fmt.Sprintln(v...)
	panic(s)
}

// Panicln prints the message like Println and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func Panicln(v ...interface{}) {
	std.Outputln(2, Lnopanic, v...)
	s := fmt.Sprintln(v...)
	panic(s)
}

// Fatal prints the message like Print and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func (l *Logger) Fatal(v ...interface{}) {
	l.Output(2, Lnofatal, v...)
	os.Exit(1)
}

// Fatal prints the message like Print and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func Fatal(v ...interface{}) {
	std.Output(2, Lnofatal, v...)
	os.Exit(1)
}

// Fatalf prints the message like Printf and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func (l *Logger) Fatalf(format string, v ...interface{}) {
	l.Outputf(2, Lnofatal, format, v...)
	os.Exit(1)
}

// Fatalf prints the message like Printf and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func Fatalf(format string, v ...interface{}) {
	std.Outputf(2, Lnofatal, format, v...)
	os.Exit(1)
}

// Fatalln prints the message like Println and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func (l *Logger) Fatalln(v ...interface{}) {
	l.Outputln(2, Lnofatal, v...)
	os.Exit(1)
}

// Fatalln prints the message like Println and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func Fatalln(v ...interface{}) {
	std.Outputln(2, Lnofatal, v...)
	os.Exit(1)
}

// Warn prints the message like Print. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warn(v ...interface{}) {
	l.Output(2, Lnowarn, v...)
}

// Warn prints the message like Print. The printing might be suppressed
// by the flag Lnowarn.
func Warn(v ...interface{}) {
	std.Output(2, Lnowarn, v...)
}

// Warnf prints the message like Printf. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warnf(format string, v ...interface{}) {
	l.Outputf(2, Lnowarn, format, v...)
}

// Warnf prints the message like Printf. The printing might be suppressed
// by the flag Lnowarn.
func Warnf(format string, v ...interface{}) {
	std.Outputf(2, Lnowarn, format, v...)
}

// Warnln prints the message like Println. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warnln(v ...interface{}) {
	l.Outputln(2, Lnowarn, v...)
}

// Warnln prints the message like Println. The printing might be suppressed
// by the flag Lnowarn.
func Warnln(v ...interface{}) {
	std.Outputln(2, Lnowarn, v...)
}

// Print prints the message like fmt.Print. The printing might be suppressed
// by the flag Lnoprint.
func (l *Logger) Print(v ...interface{}) {
	l.Output(2, Lnoprint, v...)
}

// Print prints the message like fmt.Print. The printing might be suppressed
// by the flag Lnoprint.
func Print(v ...interface{}) {
	std.Output(2, Lnoprint, v...)
}

// Printf prints the message like fmt.Printf. The printing might be suppressed
// by the flag Lnoprint.
func (l *Logger) Printf(format string, v ...interface{}) {
	l.Outputf(2, Lnoprint, format, v...)
}

// Printf prints the message like fmt.Printf. The printing might be suppressed
// by the flag Lnoprint.
func Printf(format string, v ...interface{}) {
	std.Outputf(2, Lnoprint, format, v...)
}

// Println prints the message like fmt.Println. The printing might be
// suppressed by the flag Lnoprint.
func (l *Logger) Println(v ...interface{}) {
	l.Outputln(2, Lnoprint, v...)
}

// Println prints the message like fmt.Println. The printing might be
// suppressed by the flag Lnoprint.
func Println(v ...interface{}) {
	std.Outputln(2, Lnoprint, v...)
}

// Debug prints the message like Print. The printing might be suppressed
// by the flag Lnodebug.
func (l *Logger) Debug(v ...interface{}) {
	l.Output(2, Lnodebug, v...)
}

// Debug prints the message like Print. The printing might be suppressed
// by the flag Lnodebug.
func Debug(v ...interface{}) {
	std.Output(2, Lnodebug, v...)
}

// Debugf prints the message like Printf. The printing might be suppressed
// by the flag Lnodebug.
func (l *Logger) Debugf(format string, v ...interface{}) {
	l.Outputf(2, Lnodebug, format, v...)
}

// Debugf prints the message like Printf. The printing might be suppressed
// by the flag Lnodebug.
func Debugf(format string, v ...interface{}) {
	std.Outputf(2, Lnodebug, format, v...)
}

// Debugln prints the message like Println. The printing might be suppressed
// by the flag Lnodebug.
func (l *Logger) Debugln(v ...interface{}) {
	l.Outputln(2, Lnodebug, v...)
}

// Debugln prints the message like Println. The printing might be suppressed
// by the flag Lnodebug.
func Debugln(v ...interface{}) {
	std.Outputln(2, Lnodebug, v...)
}

// Flags returns the current flags used by the logger.
func (l *Logger) Flags() int {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.flag
}

// Flags returns the current flags used by the standard logger.
func Flags() int {
	return std.Flags()
}

// SetFlags sets the flags of the logger.
func (l *Logger) SetFlags(flag int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.flag = flag
}

// SetFlags sets the flags for the standard logger.
func SetFlags(flag int) {
	std.SetFlags(flag)
}

// Prefix returns the prefix used by the logger.
func (l *Logger) Prefix() string {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.prefix
}

// Prefix returns the prefix used by the standard logger of the package.
func Prefix() string {
	return std.Prefix()
}

// SetPrefix sets the prefix for the logger.
func (l *Logger) SetPrefix(prefix string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.prefix = prefix
}

// SetPrefix sets the prefix of the standard logger of the package.
func SetPrefix(prefix string) {
	std.SetPrefix(prefix)
}

// SetOutput sets the output of the logger.
func (l *Logger) SetOutput(w io.Writer) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.out = w
}

// SetOutput sets the output for the standard logger of the package.
func SetOutput(w io.Writer) {
	std.SetOutput(w)
}
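
The Lno* bits invert the usual logging control: a category prints unless its suppression bit is set, and Lstdflags ships with Lnodebug already set. A hypothetical usage sketch; it only compiles from within this module, because the package sits under internal/:

    package main

    import "github.com/ulikunitz/xz/internal/xlog"

    func main() {
        xlog.Print("always printed")
        xlog.Debug("suppressed: Lstdflags includes Lnodebug")
        // Clear the suppression bit to enable debug output.
        xlog.SetFlags(xlog.Flags() &^ xlog.Lnodebug)
        xlog.Debug("printed now")
    }
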
523
vendor/github.com/ulikunitz/xz/lzma/bintree.go
generated
vendored
Normal file
@@ -0,0 +1,523 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"unicode"
)

// node represents a node in the binary tree.
type node struct {
	// x is the search value
	x uint32
	// p parent node
	p uint32
	// l left child
	l uint32
	// r right child
	r uint32
}

// wordLen is the number of bytes represented by the x field of a node.
const wordLen = 4

// binTree supports the identification of the next operation based on a
// binary tree.
//
// Nodes will be identified by their index into the ring buffer.
type binTree struct {
	dict *encoderDict
	// ring buffer of nodes
	node []node
	// absolute offset of the entry for the next node. The position
	// is 4 bytes larger.
	hoff int64
	// front position in the node ring buffer
	front uint32
	// index of the root node
	root uint32
	// current x value
	x uint32
	// preallocated array
	data []byte
}

// null represents the nonexistent index. We can't use zero because it
// would always exist or we would need to decrease the index for each
// reference.
const null uint32 = 1<<32 - 1

// newBinTree initializes the binTree structure. The capacity defines
// the size of the buffer and defines the maximum distance for which
// matches will be found.
func newBinTree(capacity int) (t *binTree, err error) {
	if capacity < 1 {
		return nil, errors.New(
			"newBinTree: capacity must be larger than zero")
	}
	if int64(capacity) >= int64(null) {
		return nil, errors.New(
			"newBinTree: capacity must be less than 2^32-1")
	}
	t = &binTree{
		node: make([]node, capacity),
		hoff: -int64(wordLen),
		root: null,
		data: make([]byte, maxMatchLen),
	}
	return t, nil
}

func (t *binTree) SetDict(d *encoderDict) { t.dict = d }

// WriteByte writes a single byte into the binary tree.
func (t *binTree) WriteByte(c byte) error {
	t.x = (t.x << 8) | uint32(c)
	t.hoff++
	if t.hoff < 0 {
		return nil
	}
	v := t.front
	if int64(v) < t.hoff {
		// We are overwriting old nodes stored in the tree.
		t.remove(v)
	}
	t.node[v].x = t.x
	t.add(v)
	t.front++
	if int64(t.front) >= int64(len(t.node)) {
		t.front = 0
	}
	return nil
}

// Write writes a sequence of bytes into the binTree structure.
func (t *binTree) Write(p []byte) (n int, err error) {
	for _, c := range p {
		t.WriteByte(c)
	}
	return len(p), nil
}

// add puts the node v into the tree. The node must not be part of the
// tree before.
func (t *binTree) add(v uint32) {
	vn := &t.node[v]
	// Set left and right to null indices.
	vn.l, vn.r = null, null
	// If the binary tree is empty make v the root.
	if t.root == null {
		t.root = v
		vn.p = null
		return
	}
	x := vn.x
	p := t.root
	// Search for the right leaf link and add the new node.
	for {
		pn := &t.node[p]
		if x <= pn.x {
			if pn.l == null {
				pn.l = v
				vn.p = p
				return
			}
			p = pn.l
		} else {
			if pn.r == null {
				pn.r = v
				vn.p = p
				return
			}
			p = pn.r
		}
	}
}

// parent returns the parent node index of v and the pointer to v value
// in the parent.
func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
	if t.root == v {
		return null, &t.root
	}
	p = t.node[v].p
	if t.node[p].l == v {
		ptr = &t.node[p].l
	} else {
		ptr = &t.node[p].r
	}
	return
}

// Remove node v.
func (t *binTree) remove(v uint32) {
	vn := &t.node[v]
	p, ptr := t.parent(v)
	l, r := vn.l, vn.r
	if l == null {
		// Move the right child up.
		*ptr = r
		if r != null {
			t.node[r].p = p
		}
		return
	}
	if r == null {
		// Move the left child up.
		*ptr = l
		t.node[l].p = p
		return
	}

	// Search the in-order predecessor u.
	un := &t.node[l]
	ur := un.r
	if ur == null {
		// In-order predecessor is l. Move it up.
		un.r = r
		t.node[r].p = l
		un.p = p
		*ptr = l
		return
	}
	var u uint32
	for {
		// Look for the max value in the tree where l is root.
		u = ur
		ur = t.node[u].r
		if ur == null {
			break
		}
	}
	// replace u with ul
	un = &t.node[u]
	ul := un.l
	up := un.p
	t.node[up].r = ul
	if ul != null {
		t.node[ul].p = up
	}

	// replace v by u
	un.l, un.r = l, r
	t.node[l].p = u
	t.node[r].p = u
	*ptr = u
	un.p = p
}

// search looks for the node that has the value x or for the nodes that
// brace it. The node highest in the tree with the value x will be
// returned. All other nodes with the same value live in the left
// subtree of the returned node.
func (t *binTree) search(v uint32, x uint32) (a, b uint32) {
	a, b = null, null
	if v == null {
		return
	}
	for {
		vn := &t.node[v]
		if x <= vn.x {
			if x == vn.x {
				return v, v
			}
			b = v
			if vn.l == null {
				return
			}
			v = vn.l
		} else {
			a = v
			if vn.r == null {
				return
			}
			v = vn.r
		}
	}
}

// max returns the node with maximum value in the subtree with v as
// root.
func (t *binTree) max(v uint32) uint32 {
	if v == null {
		return null
	}
	for {
		r := t.node[v].r
		if r == null {
			return v
		}
		v = r
	}
}

// min returns the node with the minimum value in the subtree with v as
// root.
func (t *binTree) min(v uint32) uint32 {
	if v == null {
		return null
	}
	for {
		l := t.node[v].l
		if l == null {
			return v
		}
		v = l
	}
}

// pred returns the in-order predecessor of node v.
func (t *binTree) pred(v uint32) uint32 {
	if v == null {
		return null
	}
	u := t.max(t.node[v].l)
	if u != null {
		return u
	}
	for {
		p := t.node[v].p
		if p == null {
			return null
		}
		if t.node[p].r == v {
			return p
		}
		v = p
	}
}

// succ returns the in-order successor of node v.
func (t *binTree) succ(v uint32) uint32 {
	if v == null {
		return null
	}
	u := t.min(t.node[v].r)
	if u != null {
		return u
	}
	for {
		p := t.node[v].p
		if p == null {
			return null
		}
		if t.node[p].l == v {
			return p
		}
		v = p
	}
}

// xval converts the first four bytes of a into a 32-bit unsigned
// integer in big-endian order.
func xval(a []byte) uint32 {
	var x uint32
	switch len(a) {
	default:
		x |= uint32(a[3])
		fallthrough
	case 3:
		x |= uint32(a[2]) << 8
		fallthrough
	case 2:
		x |= uint32(a[1]) << 16
		fallthrough
	case 1:
		x |= uint32(a[0]) << 24
	case 0:
	}
	return x
}

// dumpX converts value x into a four-letter string.
func dumpX(x uint32) string {
	a := make([]byte, 4)
	for i := 0; i < 4; i++ {
		c := byte(x >> uint((3-i)*8))
		if unicode.IsGraphic(rune(c)) {
			a[i] = c
		} else {
			a[i] = '.'
		}
	}
	return string(a)
}

// dumpNode writes a representation of the node v into the io.Writer.
func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
	if v == null {
		return
	}

	vn := &t.node[v]

	t.dumpNode(w, vn.r, indent+2)

	for i := 0; i < indent; i++ {
		fmt.Fprint(w, " ")
	}
	if vn.p == null {
		fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x))
	} else {
		fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p)
	}

	t.dumpNode(w, vn.l, indent+2)
}

// dump prints a representation of the binary tree into the writer.
func (t *binTree) dump(w io.Writer) error {
	bw := bufio.NewWriter(w)
	t.dumpNode(bw, t.root, 0)
	return bw.Flush()
}

func (t *binTree) distance(v uint32) int {
	dist := int(t.front) - int(v)
	if dist <= 0 {
		dist += len(t.node)
	}
	return dist
}

type matchParams struct {
	rep [4]uint32
	// length when match will be accepted
	nAccept int
	// nodes to check
	check int
	// finish if length gets shorter
	stopShorter bool
}

func (t *binTree) match(m match, distIter func() (int, bool), p matchParams,
) (r match, checked int, accepted bool) {
	buf := &t.dict.buf
	for {
		if checked >= p.check {
			return m, checked, true
		}
		dist, ok := distIter()
		if !ok {
			return m, checked, false
		}
		checked++
		if m.n > 0 {
			i := buf.rear - dist + m.n - 1
			if i < 0 {
				i += len(buf.data)
			} else if i >= len(buf.data) {
				i -= len(buf.data)
			}
			if buf.data[i] != t.data[m.n-1] {
				if p.stopShorter {
					return m, checked, false
				}
				continue
			}
		}
		n := buf.matchLen(dist, t.data)
		switch n {
		case 0:
			if p.stopShorter {
				return m, checked, false
			}
			continue
		case 1:
			if uint32(dist-minDistance) != p.rep[0] {
				continue
			}
		}
		if n < m.n || (n == m.n && int64(dist) >= m.distance) {
			continue
		}
		m = match{int64(dist), n}
		if n >= p.nAccept {
			return m, checked, true
		}
	}
}

func (t *binTree) NextOp(rep [4]uint32) operation {
	// retrieve maxMatchLen data
	n, _ := t.dict.buf.Peek(t.data[:maxMatchLen])
	if n == 0 {
		panic("no data in buffer")
	}
	t.data = t.data[:n]

	var (
		m                  match
		x, u, v            uint32
		iterPred, iterSucc func() (int, bool)
	)
	p := matchParams{
		rep:     rep,
		nAccept: maxMatchLen,
		check:   32,
	}
	i := 4
	iterSmall := func() (dist int, ok bool) {
		i--
		if i <= 0 {
			return 0, false
		}
		return i, true
	}
	m, checked, accepted := t.match(m, iterSmall, p)
	if accepted {
		goto end
	}
	p.check -= checked
	x = xval(t.data)
	u, v = t.search(t.root, x)
	if u == v && len(t.data) == 4 {
		iter := func() (dist int, ok bool) {
			if u == null {
				return 0, false
			}
			dist = t.distance(u)
			u, v = t.search(t.node[u].l, x)
			if u != v {
				u = null
			}
			return dist, true
		}
		m, _, _ = t.match(m, iter, p)
		goto end
	}
	p.stopShorter = true
	iterSucc = func() (dist int, ok bool) {
		if v == null {
			return 0, false
		}
		dist = t.distance(v)
		v = t.succ(v)
		return dist, true
	}
	m, checked, accepted = t.match(m, iterSucc, p)
	if accepted {
		goto end
	}
	p.check -= checked
	iterPred = func() (dist int, ok bool) {
		if u == null {
			return 0, false
		}
		dist = t.distance(u)
		u = t.pred(u)
		return dist, true
	}
	m, _, _ = t.match(m, iterPred, p)
end:
	if m.n == 0 {
		return lit{t.data[0]}
	}
	return m
}
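
The tree's search keys are the big-endian packing of four consecutive bytes, which makes tree order agree with lexicographic order of the byte sequences. A hypothetical in-package check of xval (the function name demoXval is not part of the vendored code):

    func demoXval() {
        fmt.Printf("%#08x\n", xval([]byte("abcd"))) // 0x61626364
        // Shorter slices fill from the high byte down with zero fill,
        // so a prefix never sorts after its extensions:
        fmt.Printf("%#08x\n", xval([]byte("ab"))) // 0x61620000
    }
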
107
vendor/github.com/ulikunitz/xz/lzma/bintree_test.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bytes"
	"io"
	"math/rand"
	"strings"
	"testing"

	"github.com/ulikunitz/xz/internal/randtxt"
)

func TestBinTree_Find(t *testing.T) {
	bt, err := newBinTree(30)
	if err != nil {
		t.Fatal(err)
	}
	const s = "Klopp feiert mit Liverpool seinen hoechsten SiegSieg"
	n, err := io.WriteString(bt, s)
	if err != nil {
		t.Fatalf("WriteString error %s", err)
	}
	if n != len(s) {
		t.Fatalf("WriteString returned %d; want %d", n, len(s))
	}

	/* dump info writes the complete tree
	if err = bt.dump(os.Stdout); err != nil {
		t.Fatalf("bt.dump error %s", err)
	}
	*/

	tests := []string{"Sieg", "Sieb", "Simu"}
	for _, c := range tests {
		x := xval([]byte(c))
		a, b := bt.search(bt.root, x)
		t.Logf("%q: a, b == %d, %d", c, a, b)
	}
}

func TestBinTree_PredSucc(t *testing.T) {
	bt, err := newBinTree(30)
	if err != nil {
		t.Fatal(err)
	}
	const s = "Klopp feiert mit Liverpool seinen hoechsten Sieg."
	n, err := io.WriteString(bt, s)
	if err != nil {
		t.Fatalf("WriteString error %s", err)
	}
	if n != len(s) {
		t.Fatalf("WriteString returned %d; want %d", n, len(s))
	}
	for v := bt.min(bt.root); v != null; v = bt.succ(v) {
		t.Log(dumpX(bt.node[v].x))
	}
	t.Log("")
	for v := bt.max(bt.root); v != null; v = bt.pred(v) {
		t.Log(dumpX(bt.node[v].x))
	}
}

func TestBinTree_Cycle(t *testing.T) {
	buf := new(bytes.Buffer)
	w, err := Writer2Config{
		DictCap: 4096,
		Matcher: BinaryTree,
	}.NewWriter2(buf)
	if err != nil {
		t.Fatalf("NewWriter error %s", err)
	}
	// const txtlen = 1024
	const txtlen = 10000
	io.CopyN(buf, randtxt.NewReader(rand.NewSource(42)), txtlen)
	txt := buf.String()
	buf.Reset()
	n, err := io.Copy(w, strings.NewReader(txt))
	if err != nil {
		t.Fatalf("Compressing copy error %s", err)
	}
	if n != txtlen {
		t.Fatalf("Compressing data length %d; want %d", n, txtlen)
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close error %s", err)
	}
	t.Logf("buf.Len() %d", buf.Len())
	r, err := Reader2Config{DictCap: 4096}.NewReader2(buf)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	out := new(bytes.Buffer)
	n, err = io.Copy(out, r)
	if err != nil {
		t.Fatalf("Decompressing copy error %s after %d bytes", err, n)
	}
	if n != txtlen {
		t.Fatalf("Decompression data length %d; want %d", n, txtlen)
	}
	if txt != out.String() {
		t.Fatal("decompressed data differs from original")
	}
}
45
vendor/github.com/ulikunitz/xz/lzma/bitops.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

/* Naming conventions follow the CodeReviewComments in the Go Wiki. */

// ntz32Const is used by the functions NTZ and NLZ.
const ntz32Const = 0x04d7651f

// ntz32Table is a helper table for the de Bruijn algorithm by Danny Dubé.
// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26.
var ntz32Table = [32]int8{
	0, 1, 2, 24, 3, 19, 6, 25,
	22, 4, 20, 10, 16, 7, 12, 26,
	31, 23, 18, 5, 21, 9, 15, 11,
	30, 17, 8, 14, 29, 13, 28, 27,
}

// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
func ntz32(x uint32) int {
	if x == 0 {
		return 32
	}
	x = (x & -x) * ntz32Const
	return int(ntz32Table[x>>27])
}

// nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
func nlz32(x uint32) int {
	// Smear left most bit to the right
	x |= x >> 1
	x |= x >> 2
	x |= x >> 4
	x |= x >> 8
	x |= x >> 16
	// Use ntz mechanism to calculate nlz.
	x++
	if x == 0 {
		return 0
	}
	x *= ntz32Const
	return 32 - int(ntz32Table[x>>27])
}
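
Both functions are branch-light de Bruijn lookups: x & -x isolates the lowest set bit, multiplying by the de Bruijn constant moves a unique 5-bit pattern into the top bits, and the table maps that pattern back to the bit position. Since Go 1.9 the math/bits package offers the same operations, which makes a quick cross-check easy; a hypothetical in-package sketch (assuming a Go 1.9+ toolchain):

    func demoBitops() {
        for _, x := range []uint32{0, 1, 0x80000000, 0xdeadbeef} {
            fmt.Println(
                ntz32(x) == bits.TrailingZeros32(x), // expect true
                nlz32(x) == bits.LeadingZeros32(x),  // expect true
            )
        }
    }
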
39
vendor/github.com/ulikunitz/xz/lzma/breader.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"io"
)

// breader provides the ReadByte function for a Reader. It doesn't read
// more data from the reader than absolutely necessary.
type breader struct {
	io.Reader
	// helper slice to save allocations
	p []byte
}

// ByteReader converts an io.Reader into an io.ByteReader.
func ByteReader(r io.Reader) io.ByteReader {
	br, ok := r.(io.ByteReader)
	if !ok {
		return &breader{r, make([]byte, 1)}
	}
	return br
}

// ReadByte reads a single byte from the underlying reader.
func (r *breader) ReadByte() (c byte, err error) {
	n, err := r.Reader.Read(r.p)
	if n < 1 {
		if err == nil {
			err = errors.New("breader.ReadByte: no data")
		}
		return 0, err
	}
	return r.p[0], nil
}
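
ByteReader is a cheap adapter: readers that already implement io.ByteReader pass through untouched, everything else gets wrapped with a reusable one-byte buffer. A hypothetical in-package sketch (demoByteReader is not part of the vendored code):

    func demoByteReader() {
        // *strings.Reader already implements io.ByteReader and would
        // be passed through; io.LimitReader's result does not and so
        // exercises the wrapping path.
        br := ByteReader(io.LimitReader(strings.NewReader("xyz"), 3))
        c, err := br.ReadByte()
        fmt.Println(string(c), err) // x <nil>
    }
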
171
vendor/github.com/ulikunitz/xz/lzma/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
)

// buffer provides a circular buffer of bytes. If the front index equals
// the rear index the buffer is empty. As a consequence front cannot
// equal rear for a full buffer. So a full buffer has a length that is
// one byte less than the length of the data slice.
type buffer struct {
	data  []byte
	front int
	rear  int
}

// newBuffer creates a buffer with the given size.
func newBuffer(size int) *buffer {
	return &buffer{data: make([]byte, size+1)}
}

// Cap returns the capacity of the buffer.
func (b *buffer) Cap() int {
	return len(b.data) - 1
}

// Reset resets the buffer. The front and rear index are set to zero.
func (b *buffer) Reset() {
	b.front = 0
	b.rear = 0
}

// Buffered returns the number of bytes buffered.
func (b *buffer) Buffered() int {
	delta := b.front - b.rear
	if delta < 0 {
		delta += len(b.data)
	}
	return delta
}

// Available returns the number of bytes available for writing.
func (b *buffer) Available() int {
	delta := b.rear - 1 - b.front
	if delta < 0 {
		delta += len(b.data)
	}
	return delta
}

// addIndex adds a non-negative integer to the index i and returns the
// resulting index. The function takes care of wrapping the index as
// well as potential overflow situations.
func (b *buffer) addIndex(i int, n int) int {
	// subtraction of len(b.data) prevents overflow
	i += n - len(b.data)
	if i < 0 {
		i += len(b.data)
	}
	return i
}

// Read reads bytes from the buffer into p and returns the number of
// bytes read. The function never returns an error but might return less
// data than requested.
func (b *buffer) Read(p []byte) (n int, err error) {
	n, err = b.Peek(p)
	b.rear = b.addIndex(b.rear, n)
	return n, err
}

// Peek reads bytes from the buffer into p without changing the buffer.
// Peek will never return an error but might return less data than
// requested.
func (b *buffer) Peek(p []byte) (n int, err error) {
	m := b.Buffered()
	n = len(p)
	if m < n {
		n = m
		p = p[:n]
	}
	k := copy(p, b.data[b.rear:])
	if k < n {
		copy(p[k:], b.data)
	}
	return n, nil
}

// Discard skips the next n bytes to read from the buffer, returning the
// bytes discarded.
//
// If Discard skips fewer than n bytes, it returns an error.
func (b *buffer) Discard(n int) (discarded int, err error) {
	if n < 0 {
		return 0, errors.New("buffer.Discard: negative argument")
	}
	m := b.Buffered()
	if m < n {
		n = m
		err = errors.New(
			"buffer.Discard: discarded fewer bytes than requested")
	}
	b.rear = b.addIndex(b.rear, n)
	return n, err
}

// ErrNoSpace indicates that there is insufficient space for the Write
// operation.
var ErrNoSpace = errors.New("insufficient space")

// Write puts data into the buffer. If fewer bytes are written than
// requested ErrNoSpace is returned.
func (b *buffer) Write(p []byte) (n int, err error) {
	m := b.Available()
	n = len(p)
	if m < n {
		n = m
		p = p[:m]
		err = ErrNoSpace
	}
	k := copy(b.data[b.front:], p)
	if k < n {
		copy(b.data, p[k:])
	}
	b.front = b.addIndex(b.front, n)
	return n, err
}

// WriteByte writes a single byte into the buffer. The error ErrNoSpace
// is returned if no single byte is available in the buffer for writing.
func (b *buffer) WriteByte(c byte) error {
	if b.Available() < 1 {
		return ErrNoSpace
	}
	b.data[b.front] = c
	b.front = b.addIndex(b.front, 1)
	return nil
}

// prefixLen returns the length of the common prefix of a and b.
func prefixLen(a, b []byte) int {
	if len(a) > len(b) {
		a, b = b, a
	}
	for i, c := range a {
		if b[i] != c {
			return i
		}
	}
	return len(a)
}

// matchLen returns the length of the common prefix for the given
// distance from the rear and the byte slice p.
func (b *buffer) matchLen(distance int, p []byte) int {
	var n int
	i := b.rear - distance
	if i < 0 {
		if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
			return n
		}
		p = p[n:]
		i = 0
	}
	n += prefixLen(p, b.data[i:])
	return n
}
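
One slot of the backing slice is sacrificed to distinguish a full buffer from an empty one, so newBuffer(4) allocates 5 bytes and holds at most 4. A hypothetical in-package walk-through of the wraparound behaviour (demoBuffer is not part of the vendored code):

    func demoBuffer() {
        b := newBuffer(4)
        b.Write([]byte("abcd")) // buffer full: Available() == 0
        p := make([]byte, 2)
        b.Read(p) // consumes "ab", frees two slots
        b.Write([]byte("ef")) // wraps around the slice boundary
        q := make([]byte, 4)
        n, _ := b.Read(q)
        fmt.Println(string(q[:n])) // cdef
    }
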
230
vendor/github.com/ulikunitz/xz/lzma/buffer_test.go
generated
vendored
Normal file
230
vendor/github.com/ulikunitz/xz/lzma/buffer_test.go
generated
vendored
Normal file
@ -0,0 +1,230 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bytes"
	"io"
	"testing"
)

func TestBuffer_Write(t *testing.T) {
	buf := newBuffer(10)
	b := []byte("1234567890")
	for i := range b {
		n, err := buf.Write(b[i : i+1])
		if err != nil {
			t.Fatalf("buf.Write(b[%d:%d]) error %s", i, i+1, err)
		}
		if n != 1 {
			t.Fatalf("buf.Write(b[%d:%d]) returned %d; want %d",
				i, i+1, n, 1)
		}
	}
	const c = 8
	n, err := buf.Discard(c)
	if err != nil {
		t.Fatalf("Discard error %s", err)
	}
	if n != c {
		t.Fatalf("Discard returned %d; want %d", n, c)
	}
	n, err = buf.Write(b)
	if err == nil {
		t.Fatalf("Write exceeding buffer length returned no error; n %d", n)
	}
	if n != c {
		t.Fatalf("Write exceeding buffer length returned %d; want %d", n, c)
	}
	n, err = buf.Discard(4)
	if err != nil {
		t.Fatalf("Discard error %s", err)
	}
	if n != 4 {
		t.Fatalf("Discard returned %d; want %d", n, 4)
	}
	n, err = buf.Write(b[:3])
	if err != nil {
		t.Fatalf("buf.Write(b[:3]) error %s; n %d", err, n)
	}
	if n != 3 {
		t.Fatalf("buf.Write(b[:3]) returned %d; want %d", n, 3)
	}
}

func TestBuffer_Buffered_Available(t *testing.T) {
	buf := newBuffer(19)
	b := []byte("0123456789")
	var err error
	if _, err = buf.Write(b); err != nil {
		t.Fatalf("buf.Write(b) error %s", err)
	}
	if n := buf.Buffered(); n != 10 {
		t.Fatalf("buf.Buffered() returns %d; want %d", n, 10)
	}
	if _, err = buf.Discard(8); err != nil {
		t.Fatalf("buf.Discard(8) error %s", err)
	}
	if _, err = buf.Write(b[:7]); err != nil {
		t.Fatalf("buf.Write(b[:7]) error %s", err)
	}
	if n := buf.Buffered(); n != 9 {
		t.Fatalf("buf.Buffered() returns %d; want %d", n, 9)
	}
}

func TestBuffer_Read(t *testing.T) {
	buf := newBuffer(10)
	b := []byte("0123456789")
	var err error
	if _, err = buf.Write(b); err != nil {
		t.Fatalf("buf.Write(b) error %s", err)
	}
	p := make([]byte, 8)
	n, err := buf.Read(p)
	if err != nil {
		t.Fatalf("buf.Read(p) error %s", err)
	}
	if n != len(p) {
		t.Fatalf("buf.Read(p) returned %d; want %d", n, len(p))
	}
	if !bytes.Equal(p, b[:8]) {
		t.Fatalf("buf.Read(p) put %s into p; want %s", p, b[:8])
	}
	if _, err = buf.Write(b[:7]); err != nil {
		t.Fatalf("buf.Write(b[:7]) error %s", err)
	}
	q := make([]byte, 7)
	n, err = buf.Read(q)
	if err != nil {
		t.Fatalf("buf.Read(q) error %s", err)
	}
	if n != len(q) {
		t.Fatalf("buf.Read(q) returns %d; want %d", n, len(q))
	}
	c := []byte("8901234")
	if !bytes.Equal(q, c) {
		t.Fatalf("buf.Read(q) put %s into q; want %s", q, c)
	}
	if _, err := buf.Write(b[7:]); err != nil {
		t.Fatalf("buf.Write(b[7:]) error %s", err)
	}
	if _, err := buf.Write(b[:2]); err != nil {
		t.Fatalf("buf.Write(b[:2]) error %s", err)
	}
	t.Logf("buf.rear %d buf.front %d", buf.rear, buf.front)
	r := make([]byte, 2)
	n, err = buf.Read(r)
	if err != nil {
		t.Fatalf("buf.Read(r) error %s", err)
	}
	if n != len(r) {
		t.Fatalf("buf.Read(r) returns %d; want %d", n, len(r))
	}
	d := []byte("56")
	if !bytes.Equal(r, d) {
		t.Fatalf("buf.Read(r) put %s into r; want %s", r, d)
	}
}

func TestBuffer_Discard(t *testing.T) {
	buf := newBuffer(10)
	b := []byte("0123456789")
	var err error
	if _, err = buf.Write(b); err != nil {
		t.Fatalf("buf.Write(b) error %s", err)
	}
	n, err := buf.Discard(11)
	if err == nil {
		t.Fatalf("buf.Discard(11) didn't return error")
	}
	if n != 10 {
		t.Fatalf("buf.Discard(11) returned %d; want %d", n, 10)
	}
	if _, err := buf.Write(b); err != nil {
		t.Fatalf("buf.Write(b) #2 error %s", err)
	}
	n, err = buf.Discard(10)
	if err != nil {
		t.Fatalf("buf.Discard(10) error %s", err)
	}
	if n != 10 {
		t.Fatalf("buf.Discard(10) returned %d; want %d", n, 10)
	}
	if _, err := buf.Write(b[:4]); err != nil {
		t.Fatalf("buf.Write(b[:4]) error %s", err)
	}
	n, err = buf.Discard(1)
	if err != nil {
		t.Fatalf("buf.Discard(1) error %s", err)
	}
	if n != 1 {
		t.Fatalf("buf.Discard(1) returned %d; want %d", n, 1)
	}
}

func TestBuffer_Discard_error(t *testing.T) {
	buf := newBuffer(10)
	n, err := buf.Discard(-1)
	if err == nil {
		t.Fatal("buf.Discard(-1) didn't return an error")
	}
	if n != 0 {
		t.Fatalf("buf.Discard(-1) returned %d; want %d", n, 0)
	}
}

func TestPrefixLen(t *testing.T) {
	tests := []struct {
		a, b []byte
		k    int
	}{
		{[]byte("abcde"), []byte("abc"), 3},
		{[]byte("abc"), []byte("uvw"), 0},
		{[]byte(""), []byte("uvw"), 0},
		{[]byte("abcde"), []byte("abcuvw"), 3},
	}
	for _, c := range tests {
		k := prefixLen(c.a, c.b)
		if k != c.k {
			t.Errorf("prefixLen(%q,%q) returned %d; want %d",
				c.a, c.b, k, c.k)
		}
		k = prefixLen(c.b, c.a)
		if k != c.k {
			t.Errorf("prefixLen(%q,%q) returned %d; want %d",
				c.b, c.a, k, c.k)
		}
	}
}

func TestMatchLen(t *testing.T) {
	buf := newBuffer(13)
	const s = "abcaba"
	_, err := io.WriteString(buf, s)
	if err != nil {
		t.Fatalf("WriteString error %s", err)
	}
	_, err = io.WriteString(buf, s)
	if err != nil {
		t.Fatalf("WriteString error %s", err)
	}
	if _, err = buf.Discard(12); err != nil {
		t.Fatalf("buf.Discard(12) error %s", err)
	}
	_, err = io.WriteString(buf, s)
	if err != nil {
		t.Fatalf("WriteString error %s", err)
	}
	tests := []struct{ d, n int }{{1, 1}, {3, 2}, {6, 6}, {5, 0}, {2, 0}}
	for _, c := range tests {
		n := buf.matchLen(c.d, []byte(s))
		if n != c.n {
			t.Errorf(
				"matchLen(%d,[]byte(%q)) returned %d; want %d",
				c.d, s, n, c.n)
		}
	}
}
37
vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"io"
)

// ErrLimit indicates that the limit of the LimitedByteWriter has been
// reached.
var ErrLimit = errors.New("limit reached")

// LimitedByteWriter provides a byte writer that can be written to until a
// limit is reached. The field N provides the number of remaining
// bytes.
type LimitedByteWriter struct {
	BW io.ByteWriter
	N  int64
}

// WriteByte writes a single byte to the limited byte writer. It returns
// ErrLimit if the limit has been reached. If the byte is successfully
// written the field N of the LimitedByteWriter will be decremented by
// one.
func (l *LimitedByteWriter) WriteByte(c byte) error {
	if l.N <= 0 {
		return ErrLimit
	}
	if err := l.BW.WriteByte(c); err != nil {
		return err
	}
	l.N--
	return nil
}
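A short usage sketch: LimitedByteWriter wraps any io.ByteWriter and fails with ErrLimit once N bytes have been written. bytes.Buffer already implements io.ByteWriter, so it serves as the wrapped writer here; the import path is assumed to be the vendored github.com/ulikunitz/xz/lzma package.

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ulikunitz/xz/lzma"
    )

    func main() {
        var buf bytes.Buffer // bytes.Buffer implements io.ByteWriter
        w := &lzma.LimitedByteWriter{BW: &buf, N: 3}
        for _, c := range []byte("hello") {
            if err := w.WriteByte(c); err != nil {
                fmt.Println("stopped:", err) // ErrLimit after 3 bytes
                break
            }
        }
        fmt.Printf("wrote %q, remaining N=%d\n", buf.Bytes(), w.N)
    }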
277
vendor/github.com/ulikunitz/xz/lzma/decoder.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
	"io"
)

// decoder decodes a raw LZMA stream without any header.
type decoder struct {
	// dictionary; the rear pointer of the buffer will be used for
	// reading the data.
	Dict *decoderDict
	// decoder state
	State *state
	// range decoder
	rd *rangeDecoder
	// start stores the head value of the dictionary for the LZMA
	// stream
	start int64
	// size of uncompressed data
	size int64
	// end-of-stream encountered
	eos bool
	// EOS marker found
	eosMarker bool
}

// newDecoder creates a new decoder instance. The parameter size provides
// the expected byte size of the decompressed data. If the size is
// unknown use a negative value. In that case the decoder will look for
// a terminating end-of-stream marker.
func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) {
	rd, err := newRangeDecoder(br)
	if err != nil {
		return nil, err
	}
	d = &decoder{
		State: state,
		Dict:  dict,
		rd:    rd,
		size:  size,
		start: dict.pos(),
	}
	return d, nil
}

// Reopen restarts the decoder with a new byte reader and a new size. Reopen
// resets the Decompressed counter to zero.
func (d *decoder) Reopen(br io.ByteReader, size int64) error {
	var err error
	if d.rd, err = newRangeDecoder(br); err != nil {
		return err
	}
	d.start = d.Dict.pos()
	d.size = size
	d.eos = false
	return nil
}

// decodeLiteral decodes a single literal from the LZMA stream.
func (d *decoder) decodeLiteral() (op operation, err error) {
	litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head)
	match := d.Dict.byteAt(int(d.State.rep[0]) + 1)
	s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState)
	if err != nil {
		return nil, err
	}
	return lit{s}, nil
}

// errEOS indicates that an EOS marker has been found.
var errEOS = errors.New("EOS marker found")

// readOp decodes the next operation from the compressed stream. It
// returns the operation. If an explicit end-of-stream marker is
// identified the errEOS error is returned.
func (d *decoder) readOp() (op operation, err error) {
	// Value of the end of stream (EOS) marker
	const eosDist = 1<<32 - 1

	state, state2, posState := d.State.states(d.Dict.head)

	b, err := d.State.isMatch[state2].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	if b == 0 {
		// literal
		op, err := d.decodeLiteral()
		if err != nil {
			return nil, err
		}
		d.State.updateStateLiteral()
		return op, nil
	}
	b, err = d.State.isRep[state].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	if b == 0 {
		// simple match
		d.State.rep[3], d.State.rep[2], d.State.rep[1] =
			d.State.rep[2], d.State.rep[1], d.State.rep[0]

		d.State.updateStateMatch()
		// The length decoder returns the length offset.
		n, err := d.State.lenCodec.Decode(d.rd, posState)
		if err != nil {
			return nil, err
		}
		// The dist decoder returns the distance offset. The actual
		// distance is 1 higher.
		d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n)
		if err != nil {
			return nil, err
		}
		if d.State.rep[0] == eosDist {
			d.eosMarker = true
			return nil, errEOS
		}
		op = match{n: int(n) + minMatchLen,
			distance: int64(d.State.rep[0]) + minDistance}
		return op, nil
	}
	b, err = d.State.isRepG0[state].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	dist := d.State.rep[0]
	if b == 0 {
		// rep match 0
		b, err = d.State.isRepG0Long[state2].Decode(d.rd)
		if err != nil {
			return nil, err
		}
		if b == 0 {
			d.State.updateStateShortRep()
			op = match{n: 1, distance: int64(dist) + minDistance}
			return op, nil
		}
	} else {
		b, err = d.State.isRepG1[state].Decode(d.rd)
		if err != nil {
			return nil, err
		}
		if b == 0 {
			dist = d.State.rep[1]
		} else {
			b, err = d.State.isRepG2[state].Decode(d.rd)
			if err != nil {
				return nil, err
			}
			if b == 0 {
				dist = d.State.rep[2]
			} else {
				dist = d.State.rep[3]
				d.State.rep[3] = d.State.rep[2]
			}
			d.State.rep[2] = d.State.rep[1]
		}
		d.State.rep[1] = d.State.rep[0]
		d.State.rep[0] = dist
	}
	n, err := d.State.repLenCodec.Decode(d.rd, posState)
	if err != nil {
		return nil, err
	}
	d.State.updateStateRep()
	op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance}
	return op, nil
}

// apply takes the operation and transforms the decoder dictionary accordingly.
func (d *decoder) apply(op operation) error {
	var err error
	switch x := op.(type) {
	case match:
		err = d.Dict.writeMatch(x.distance, x.n)
	case lit:
		err = d.Dict.WriteByte(x.b)
	default:
		panic("op is neither a match nor a literal")
	}
	return err
}

// decompress fills the dictionary unless no space for new data is
// available. If the end of the LZMA stream has been reached io.EOF will
// be returned.
func (d *decoder) decompress() error {
	if d.eos {
		return io.EOF
	}
	for d.Dict.Available() >= maxMatchLen {
		op, err := d.readOp()
		switch err {
		case nil:
			break
		case errEOS:
			d.eos = true
			if !d.rd.possiblyAtEnd() {
				return errDataAfterEOS
			}
			if d.size >= 0 && d.size != d.Decompressed() {
				return errSize
			}
			return io.EOF
		case io.EOF:
			d.eos = true
			return io.ErrUnexpectedEOF
		default:
			return err
		}
		if err = d.apply(op); err != nil {
			return err
		}
		if d.size >= 0 && d.Decompressed() >= d.size {
			d.eos = true
			if d.Decompressed() > d.size {
				return errSize
			}
			if !d.rd.possiblyAtEnd() {
				switch _, err = d.readOp(); err {
				case nil:
					return errSize
				case io.EOF:
					return io.ErrUnexpectedEOF
				case errEOS:
					break
				default:
					return err
				}
			}
			return io.EOF
		}
	}
	return nil
}

// Errors that may be returned while decoding data.
var (
	errDataAfterEOS = errors.New("lzma: data after end of stream marker")
	errSize         = errors.New("lzma: wrong uncompressed data size")
)

// Read reads data from the buffer. If no more data is available io.EOF is
// returned.
func (d *decoder) Read(p []byte) (n int, err error) {
	var k int
	for {
		// Read of decoder dict never returns an error.
		k, err = d.Dict.Read(p[n:])
		if err != nil {
			panic(fmt.Errorf("dictionary read error %s", err))
		}
		if k == 0 && d.eos {
			return n, io.EOF
		}
		n += k
		if n >= len(p) {
			return n, nil
		}
		if err = d.decompress(); err != nil && err != io.EOF {
			return n, err
		}
	}
}

// Decompressed returns the number of bytes decompressed by the decoder.
func (d *decoder) Decompressed() int64 {
	return d.Dict.pos() - d.start
}
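readOp keeps the four most recent match distances in rep[0..3] and rotates them like a small MRU cache: a rep match on slot g moves that distance to the front. A standalone sketch of just that rotation (illustrative helper, not the package's API):

    package main

    import "fmt"

    // promote moves rep[g] to the front of the four-slot distance cache,
    // mirroring the rep[1..3] handling in decoder.readOp.
    func promote(rep [4]uint32, g int) [4]uint32 {
        dist := rep[g]
        switch g {
        case 3:
            rep[3] = rep[2]
            fallthrough
        case 2:
            rep[2] = rep[1]
            fallthrough
        case 1:
            rep[1] = rep[0]
        }
        rep[0] = dist
        return rep
    }

    func main() {
        rep := [4]uint32{10, 20, 30, 40}
        fmt.Println(promote(rep, 2)) // [30 10 20 40]
        fmt.Println(promote(rep, 0)) // [10 20 30 40] (no change)
    }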
59
vendor/github.com/ulikunitz/xz/lzma/decoder_test.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bufio"
	"io"
	"io/ioutil"
	"os"
	"testing"
)

func TestDecoder(t *testing.T) {
	filename := "fox.lzma"
	want := "The quick brown fox jumps over the lazy dog.\n"
	for i := 0; i < 2; i++ {
		f, err := os.Open(filename)
		if err != nil {
			t.Fatalf("os.Open(%q) error %s", filename, err)
		}
		p := make([]byte, 13)
		_, err = io.ReadFull(f, p)
		if err != nil {
			t.Fatalf("io.ReadFull error %s", err)
		}
		props, err := PropertiesForCode(p[0])
		if err != nil {
			t.Fatalf("PropertiesForCode(p[0]) error %s", err)
		}
		state := newState(props)
		const capacity = 0x800000
		dict, err := newDecoderDict(capacity)
		if err != nil {
			t.Fatalf("newDecoderDict: error %s", err)
		}
		size := int64(-1)
		if i > 0 {
			size = int64(len(want))
		}
		br := bufio.NewReader(f)
		r, err := newDecoder(br, state, dict, size)
		if err != nil {
			t.Fatalf("newDecoder error %s", err)
		}
		bytes, err := ioutil.ReadAll(r)
		if err != nil {
			t.Fatalf("[%d] ReadAll error %s", i, err)
		}
		if err = f.Close(); err != nil {
			t.Fatalf("Close error %s", err)
		}
		got := string(bytes)
		if got != want {
			t.Fatalf("read %q; but want %q", got, want)
		}
	}
}
135
vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
)

// decoderDict provides the dictionary for the decoder. The whole
// dictionary is used as reader buffer.
type decoderDict struct {
	buf  buffer
	head int64
}

// newDecoderDict creates a new decoder dictionary. The whole dictionary
// will be used as reader buffer.
func newDecoderDict(dictCap int) (d *decoderDict, err error) {
	// lower limit supports easy test cases
	if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
		return nil, errors.New("lzma: dictCap out of range")
	}
	d = &decoderDict{buf: *newBuffer(dictCap)}
	return d, nil
}

// Reset clears the dictionary. The read buffer is not changed, so the
// buffered data can still be read.
func (d *decoderDict) Reset() {
	d.head = 0
}

// WriteByte writes a single byte into the dictionary. It is used to
// write literals into the dictionary.
func (d *decoderDict) WriteByte(c byte) error {
	if err := d.buf.WriteByte(c); err != nil {
		return err
	}
	d.head++
	return nil
}

// pos returns the position of the dictionary head.
func (d *decoderDict) pos() int64 { return d.head }

// dictLen returns the actual length of the dictionary.
func (d *decoderDict) dictLen() int {
	capacity := d.buf.Cap()
	if d.head >= int64(capacity) {
		return capacity
	}
	return int(d.head)
}

// byteAt returns a byte stored in the dictionary. If the distance is
// non-positive or exceeds the current length of the dictionary the zero
// byte is returned.
func (d *decoderDict) byteAt(dist int) byte {
	if !(0 < dist && dist <= d.dictLen()) {
		return 0
	}
	i := d.buf.front - dist
	if i < 0 {
		i += len(d.buf.data)
	}
	return d.buf.data[i]
}

// writeMatch writes the match at the top of the dictionary. The given
// distance must point into the current dictionary and the length must not
// exceed the maximum length 273 supported in LZMA.
//
// The error value ErrNoSpace indicates that no space is available in
// the dictionary for writing. You need to read from the dictionary
// first.
func (d *decoderDict) writeMatch(dist int64, length int) error {
	if !(0 < dist && dist <= int64(d.dictLen())) {
		return errors.New("writeMatch: distance out of range")
	}
	if !(0 < length && length <= maxMatchLen) {
		return errors.New("writeMatch: length out of range")
	}
	if length > d.buf.Available() {
		return ErrNoSpace
	}
	d.head += int64(length)

	i := d.buf.front - int(dist)
	if i < 0 {
		i += len(d.buf.data)
	}
	for length > 0 {
		var p []byte
		if i >= d.buf.front {
			p = d.buf.data[i:]
			i = 0
		} else {
			p = d.buf.data[i:d.buf.front]
			i = d.buf.front
		}
		if len(p) > length {
			p = p[:length]
		}
		if _, err := d.buf.Write(p); err != nil {
			panic(fmt.Errorf("d.buf.Write returned error %s", err))
		}
		length -= len(p)
	}
	return nil
}

// Write writes the given bytes into the dictionary and advances the
// head.
func (d *decoderDict) Write(p []byte) (n int, err error) {
	n, err = d.buf.Write(p)
	d.head += int64(n)
	return n, err
}

// Available returns the number of available bytes for writing into the
// decoder dictionary.
func (d *decoderDict) Available() int { return d.buf.Available() }

// Read reads data from the buffer contained in the decoder dictionary.
func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }

// buffered returns the number of bytes currently buffered in the
// decoder dictionary.
func (d *decoderDict) buffered() int { return d.buf.Buffered() }

// peek gets data from the buffer without advancing the rear index.
func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) }
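writeMatch above must handle matches whose distance is smaller than their length: the copy source overlaps the data being produced, so bytes just written become input for the bytes that follow. A minimal linear-buffer sketch of that overlapped LZ77 copy (illustrative names, no wraparound handling):

    package main

    import "fmt"

    // lzCopy appends length bytes to buf, copying from dist bytes back.
    // Because dist may be smaller than length, the copy proceeds byte by
    // byte, re-reading bytes it has just written.
    func lzCopy(buf []byte, dist, length int) []byte {
        src := len(buf) - dist
        for i := 0; i < length; i++ {
            buf = append(buf, buf[src+i])
        }
        return buf
    }

    func main() {
        buf := []byte("ab")
        // distance 2, length 6 repeats "ab" three more times.
        buf = lzCopy(buf, 2, 6)
        fmt.Println(string(buf)) // "abababab"
    }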
33
vendor/github.com/ulikunitz/xz/lzma/decoderdict_test.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"fmt"
	"testing"
)

func peek(d *decoderDict) []byte {
	p := make([]byte, d.buffered())
	k, err := d.peek(p)
	if err != nil {
		panic(fmt.Errorf("peek: "+
			"Read returned unexpected error %s", err))
	}
	if k != len(p) {
		panic(fmt.Errorf("peek: "+
			"Read returned %d; wanted %d", k, len(p)))
	}
	return p
}

func TestNewDecoderDict(t *testing.T) {
	if _, err := newDecoderDict(0); err == nil {
		t.Fatalf("no error for zero dictionary capacity")
	}
	if _, err := newDecoderDict(8); err != nil {
		t.Fatalf("error %s", err)
	}
}
49
vendor/github.com/ulikunitz/xz/lzma/directcodec.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import "fmt"

// directCodec allows the encoding and decoding of values with a fixed number
// of bits. The number of bits must be in the range [1,32].
type directCodec byte

// makeDirectCodec creates a directCodec. The function panics if the number of
// bits is not in the range [1,32].
func makeDirectCodec(bits int) directCodec {
	if !(1 <= bits && bits <= 32) {
		panic(fmt.Errorf("bits=%d out of range", bits))
	}
	return directCodec(bits)
}

// Bits returns the number of bits supported by this codec.
func (dc directCodec) Bits() int {
	return int(dc)
}

// Encode uses the range encoder to encode a value with the fixed number of
// bits. The most-significant bit is encoded first.
func (dc directCodec) Encode(e *rangeEncoder, v uint32) error {
	for i := int(dc) - 1; i >= 0; i-- {
		if err := e.DirectEncodeBit(v >> uint(i)); err != nil {
			return err
		}
	}
	return nil
}

// Decode uses the range decoder to decode a value with the given number of
// bits. The most-significant bit is decoded first.
func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) {
	for i := int(dc) - 1; i >= 0; i-- {
		x, err := d.DirectDecodeBit()
		if err != nil {
			return 0, err
		}
		v = (v << 1) | x
	}
	return v, nil
}
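The direct codec simply streams the value MSB-first, one bit per call. The round trip can be seen without a range coder by collecting the bits into a slice (purely illustrative; the real encoder hands the shifted value to the range coder, which uses only its low bit):

    package main

    import "fmt"

    // encodeBits extracts the low n bits of v, most-significant first,
    // the same order directCodec.Encode feeds bits to DirectEncodeBit.
    func encodeBits(v uint32, n int) []uint32 {
        bits := make([]uint32, 0, n)
        for i := n - 1; i >= 0; i-- {
            bits = append(bits, (v>>uint(i))&1)
        }
        return bits
    }

    // decodeBits rebuilds the value the same way directCodec.Decode does.
    func decodeBits(bits []uint32) (v uint32) {
        for _, x := range bits {
            v = (v << 1) | x
        }
        return v
    }

    func main() {
        bits := encodeBits(0b1011, 4)
        fmt.Println(bits)             // [1 0 1 1]
        fmt.Println(decodeBits(bits)) // 11
    }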
156
vendor/github.com/ulikunitz/xz/lzma/distcodec.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

// Constants used by the distance codec.
const (
	// minimum supported distance
	minDistance = 1
	// maximum supported distance; the value is used for the eos marker
	maxDistance = 1 << 32
	// number of supported len states
	lenStates = 4
	// start for the position models
	startPosModel = 4
	// first index with align bits support
	endPosModel = 14
	// bits for the position slots
	posSlotBits = 6
	// number of align bits
	alignBits = 4
	// maximum position slot
	maxPosSlot = 63
)

// distCodec provides encoding and decoding of distance values.
type distCodec struct {
	posSlotCodecs [lenStates]treeCodec
	posModel      [endPosModel - startPosModel]treeReverseCodec
	alignCodec    treeReverseCodec
}

// deepcopy initializes dc as a deep copy of the source.
func (dc *distCodec) deepcopy(src *distCodec) {
	if dc == src {
		return
	}
	for i := range dc.posSlotCodecs {
		dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i])
	}
	for i := range dc.posModel {
		dc.posModel[i].deepcopy(&src.posModel[i])
	}
	dc.alignCodec.deepcopy(&src.alignCodec)
}

// distBits returns the number of bits required to encode dist.
func distBits(dist uint32) int {
	if dist < startPosModel {
		return 6
	}
	// slot s > 3, dist d
	// s = 2(bits(d)-1) + bit(d, bits(d)-2)
	// s>>1 = bits(d)-1
	// bits(d) = 32-nlz32(d)
	// s>>1 = 31-nlz32(d)
	// n = 5 + (s>>1) = 36 - nlz32(d)
	return 36 - nlz32(dist)
}

// init initializes the distance codec.
func (dc *distCodec) init() {
	for i := range dc.posSlotCodecs {
		dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits)
	}
	for i := range dc.posModel {
		posSlot := startPosModel + i
		bits := (posSlot >> 1) - 1
		dc.posModel[i] = makeTreeReverseCodec(bits)
	}
	dc.alignCodec = makeTreeReverseCodec(alignBits)
}

// lenState converts the value l to a supported lenState value.
func lenState(l uint32) uint32 {
	if l >= lenStates {
		l = lenStates - 1
	}
	return l
}

// Encode encodes the distance using the parameter l. Dist can have values from
// the full range of uint32 values. To get the distance offset the actual match
// distance has to be decreased by 1. A distance offset of 0xffffffff (eos)
// indicates the end of the stream.
func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) {
	// Compute the posSlot using nlz32
	var posSlot uint32
	var bits uint32
	if dist < startPosModel {
		posSlot = dist
	} else {
		bits = uint32(30 - nlz32(dist))
		posSlot = startPosModel - 2 + (bits << 1)
		posSlot += (dist >> uint(bits)) & 1
	}

	if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil {
		return
	}

	switch {
	case posSlot < startPosModel:
		return nil
	case posSlot < endPosModel:
		tc := &dc.posModel[posSlot-startPosModel]
		return tc.Encode(dist, e)
	}
	dic := directCodec(bits - alignBits)
	if err = dic.Encode(e, dist>>alignBits); err != nil {
		return
	}
	return dc.alignCodec.Encode(dist, e)
}

// Decode decodes the distance offset using the parameter l. The dist value
// 0xffffffff (eos) indicates the end of the stream. Add one to the distance
// offset to get the actual match distance.
func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) {
	posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d)
	if err != nil {
		return
	}

	// posSlot equals distance
	if posSlot < startPosModel {
		return posSlot, nil
	}

	// posSlot uses the individual models
	bits := (posSlot >> 1) - 1
	dist = (2 | (posSlot & 1)) << bits
	var u uint32
	if posSlot < endPosModel {
		tc := &dc.posModel[posSlot-startPosModel]
		if u, err = tc.Decode(d); err != nil {
			return 0, err
		}
		dist += u
		return dist, nil
	}

	// posSlots use direct encoding and a single model for the four align
	// bits.
	dic := directCodec(bits - alignBits)
	if u, err = dic.Decode(d); err != nil {
		return 0, err
	}
	dist += u << alignBits
	if u, err = dc.alignCodec.Decode(d); err != nil {
		return 0, err
	}
	dist += u
	return dist, nil
}
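The slot computation in Encode packs a distance offset into a 6-bit position slot: the slot records the bit length of the distance plus the bit just below the leading one. A standalone sketch of the same computation, using math/bits in place of the package's nlz32 helper (names here are illustrative):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // posSlot mirrors the slot computation in distCodec.Encode. For
    // dist >= 4 (startPosModel) the slot is
    // s = 2*(bits(d)-1) + bit(d, bits(d)-2).
    func posSlot(dist uint32) uint32 {
        if dist < 4 { // startPosModel
            return dist
        }
        nbits := uint32(30 - bits.LeadingZeros32(dist)) // bits(d) - 2
        slot := 4 - 2 + (nbits << 1)                    // startPosModel - 2 + 2*nbits
        return slot + ((dist >> nbits) & 1)
    }

    func main() {
        for _, d := range []uint32{0, 3, 4, 5, 7, 8, 1 << 20} {
            fmt.Printf("dist offset %8d -> slot %d\n", d, posSlot(d))
        }
    }

For example, offsets 4 and 5 share slot 4, offsets 6 and 7 share slot 5, and offset 8 starts slot 6, matching the band structure the position models are built over.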
268
vendor/github.com/ulikunitz/xz/lzma/encoder.go
generated
vendored
Normal file
@ -0,0 +1,268 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"fmt"
	"io"
)

// opLenMargin provides the upper limit of the number of bytes required
// to encode a single operation.
const opLenMargin = 10

// compressFlags control the compression process.
type compressFlags uint32

// Values for compressFlags.
const (
	// all data should be compressed, even if compression is not
	// optimal.
	all compressFlags = 1 << iota
)

// encoderFlags provide the flags for an encoder.
type encoderFlags uint32

// Flags for the encoder.
const (
	// eosMarker requests an EOS marker to be written.
	eosMarker encoderFlags = 1 << iota
)

// encoder compresses data buffered in the encoder dictionary and writes
// it into a byte writer.
type encoder struct {
	dict  *encoderDict
	state *state
	re    *rangeEncoder
	start int64
	// generate eos marker
	marker bool
	limit  bool
	margin int
}

// newEncoder creates a new encoder. If the byte writer must be
// limited use LimitedByteWriter provided by this package. The flags
// argument supports the eosMarker flag, controlling whether a
// terminating end-of-stream marker must be written.
func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict,
	flags encoderFlags) (e *encoder, err error) {

	re, err := newRangeEncoder(bw)
	if err != nil {
		return nil, err
	}
	e = &encoder{
		dict:   dict,
		state:  state,
		re:     re,
		marker: flags&eosMarker != 0,
		start:  dict.Pos(),
		margin: opLenMargin,
	}
	if e.marker {
		e.margin += 5
	}
	return e, nil
}

// Write writes the bytes from p into the dictionary. If not enough
// space is available the data in the dictionary buffer will be
// compressed to make additional space available. If the limit of the
// underlying writer has been reached ErrLimit will be returned.
func (e *encoder) Write(p []byte) (n int, err error) {
	for {
		k, err := e.dict.Write(p[n:])
		n += k
		if err == ErrNoSpace {
			if err = e.compress(0); err != nil {
				return n, err
			}
			continue
		}
		return n, err
	}
}

// Reopen reopens the encoder with a new byte writer.
func (e *encoder) Reopen(bw io.ByteWriter) error {
	var err error
	if e.re, err = newRangeEncoder(bw); err != nil {
		return err
	}
	e.start = e.dict.Pos()
	e.limit = false
	return nil
}

// writeLiteral writes a literal into the LZMA stream.
func (e *encoder) writeLiteral(l lit) error {
	var err error
	state, state2, _ := e.state.states(e.dict.Pos())
	if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil {
		return err
	}
	litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos())
	match := e.dict.ByteAt(int(e.state.rep[0]) + 1)
	err = e.state.litCodec.Encode(e.re, l.b, state, match, litState)
	if err != nil {
		return err
	}
	e.state.updateStateLiteral()
	return nil
}

// iverson implements the Iverson operator as proposed by Donald Knuth in
// his book Concrete Mathematics.
func iverson(ok bool) uint32 {
	if ok {
		return 1
	}
	return 0
}

// writeMatch writes a match operation into the LZMA stream.
func (e *encoder) writeMatch(m match) error {
	var err error
	if !(minDistance <= m.distance && m.distance <= maxDistance) {
		panic(fmt.Errorf("match distance %d out of range", m.distance))
	}
	dist := uint32(m.distance - minDistance)
	if !(minMatchLen <= m.n && m.n <= maxMatchLen) &&
		!(dist == e.state.rep[0] && m.n == 1) {
		panic(fmt.Errorf(
			"match length %d out of range; dist %d rep[0] %d",
			m.n, dist, e.state.rep[0]))
	}
	state, state2, posState := e.state.states(e.dict.Pos())
	if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil {
		return err
	}
	g := 0
	for ; g < 4; g++ {
		if e.state.rep[g] == dist {
			break
		}
	}
	b := iverson(g < 4)
	if err = e.state.isRep[state].Encode(e.re, b); err != nil {
		return err
	}
	n := uint32(m.n - minMatchLen)
	if b == 0 {
		// simple match
		e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] =
			e.state.rep[2], e.state.rep[1], e.state.rep[0], dist
		e.state.updateStateMatch()
		if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil {
			return err
		}
		return e.state.distCodec.Encode(e.re, dist, n)
	}
	b = iverson(g != 0)
	if err = e.state.isRepG0[state].Encode(e.re, b); err != nil {
		return err
	}
	if b == 0 {
		// g == 0
		b = iverson(m.n != 1)
		if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil {
			return err
		}
		if b == 0 {
			e.state.updateStateShortRep()
			return nil
		}
	} else {
		// g in {1,2,3}
		b = iverson(g != 1)
		if err = e.state.isRepG1[state].Encode(e.re, b); err != nil {
			return err
		}
		if b == 1 {
			// g in {2,3}
			b = iverson(g != 2)
			err = e.state.isRepG2[state].Encode(e.re, b)
			if err != nil {
				return err
			}
			if b == 1 {
				e.state.rep[3] = e.state.rep[2]
			}
			e.state.rep[2] = e.state.rep[1]
		}
		e.state.rep[1] = e.state.rep[0]
		e.state.rep[0] = dist
	}
	e.state.updateStateRep()
	return e.state.repLenCodec.Encode(e.re, n, posState)
}

// writeOp writes a single operation to the range encoder. The function
// checks whether there is enough space available to close the LZMA
// stream.
func (e *encoder) writeOp(op operation) error {
	if e.re.Available() < int64(e.margin) {
		return ErrLimit
	}
	switch x := op.(type) {
	case lit:
		return e.writeLiteral(x)
	case match:
		return e.writeMatch(x)
	default:
		panic("unexpected operation")
	}
}

// compress compresses data from the dictionary buffer. If the flag all
// is set, all data in the dictionary buffer will be compressed. The
// function returns ErrLimit if the underlying writer has reached its
// limit.
func (e *encoder) compress(flags compressFlags) error {
	n := 0
	if flags&all == 0 {
		n = maxMatchLen - 1
	}
	d := e.dict
	m := d.m
	for d.Buffered() > n {
		op := m.NextOp(e.state.rep)
		if err := e.writeOp(op); err != nil {
			return err
		}
		d.Discard(op.Len())
	}
	return nil
}

// eosMatch is a pseudo operation that indicates the end of the stream.
var eosMatch = match{distance: maxDistance, n: minMatchLen}

// Close terminates the LZMA stream. If requested the end-of-stream
// marker will be written. If the byte writer limit has been or will be
// reached during compression of the remaining data in the buffer the
// LZMA stream will be closed and data will remain in the buffer.
func (e *encoder) Close() error {
	err := e.compress(all)
	if err != nil && err != ErrLimit {
		return err
	}
	if e.marker {
		if err := e.writeMatch(eosMatch); err != nil {
			return err
		}
	}
	err = e.re.Close()
	return err
}

// Compressed returns the number of bytes of the input data that have
// been compressed.
func (e *encoder) Compressed() int64 {
	return e.dict.Pos() - e.start
}
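encoder.Write implements a simple backpressure loop: write as much as fits into the dictionary buffer, and when it signals ErrNoSpace, compress to drain it and retry. The same pattern in isolation, with a hypothetical fixed-size buffer standing in for the encoder dictionary (illustrative only):

    package main

    import (
        "errors"
        "fmt"
    )

    var errNoSpace = errors.New("insufficient space")

    // fixedBuf stands in for the encoder dictionary: it accepts bytes
    // until full and then reports errNoSpace, like encoderDict.Write.
    type fixedBuf struct {
        data []byte
        cap  int
    }

    func (b *fixedBuf) Write(p []byte) (int, error) {
        free := b.cap - len(b.data)
        if len(p) > free {
            b.data = append(b.data, p[:free]...)
            return free, errNoSpace
        }
        b.data = append(b.data, p...)
        return len(p), nil
    }

    // drain stands in for encoder.compress: it consumes buffered data.
    func (b *fixedBuf) drain() { b.data = b.data[:0] }

    // write mirrors encoder.Write: retry after draining on errNoSpace.
    func write(b *fixedBuf, p []byte) (n int, err error) {
        for {
            k, err := b.Write(p[n:])
            n += k
            if err == errNoSpace {
                b.drain() // in the encoder this compresses the dictionary
                continue
            }
            return n, err
        }
    }

    func main() {
        b := &fixedBuf{cap: 4}
        n, err := write(b, []byte("abcdefghij"))
        fmt.Println(n, err) // 10 <nil>: all bytes accepted across three fills
    }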
151
vendor/github.com/ulikunitz/xz/lzma/encoder_test.go
generated
vendored
Normal file
@ -0,0 +1,151 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bytes"
	"io"
	"io/ioutil"
	"math/rand"
	"testing"

	"github.com/ulikunitz/xz/internal/randtxt"
)

var testString = `LZMA decoder test example
=========================
! LZMA ! Decoder ! TEST !
=========================
! TEST ! LZMA ! Decoder !
=========================
---- Test Line 1 --------
=========================
---- Test Line 2 --------
=========================
=== End of test file ====
=========================
`

func cycle(t *testing.T, n int) {
	t.Logf("cycle(t,%d)", n)
	if n > len(testString) {
		t.Fatalf("cycle: n=%d larger than len(testString)=%d", n,
			len(testString))
	}
	const dictCap = MinDictCap
	m, err := newHashTable(dictCap, 4)
	if err != nil {
		t.Fatal(err)
	}
	encoderDict, err := newEncoderDict(dictCap, dictCap+1024, m)
	if err != nil {
		t.Fatal(err)
	}
	props := Properties{2, 0, 2}
	if err := props.verify(); err != nil {
		t.Fatalf("properties error %s", err)
	}
	state := newState(props)
	var buf bytes.Buffer
	w, err := newEncoder(&buf, state, encoderDict, eosMarker)
	if err != nil {
		t.Fatalf("newEncoder error %s", err)
	}
	orig := []byte(testString)[:n]
	t.Logf("len(orig) %d", len(orig))
	k, err := w.Write(orig)
	if err != nil {
		t.Fatalf("w.Write error %s", err)
	}
	if k != len(orig) {
		t.Fatalf("w.Write returned %d; want %d", k, len(orig))
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close error %s", err)
	}
	t.Logf("buf.Len() %d len(orig) %d", buf.Len(), len(orig))
	decoderDict, err := newDecoderDict(dictCap)
	if err != nil {
		t.Fatalf("newDecoderDict error %s", err)
	}
	state.Reset()
	r, err := newDecoder(&buf, state, decoderDict, -1)
	if err != nil {
		t.Fatalf("newDecoder error %s", err)
	}
	decoded, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatalf("ReadAll(lr) error %s", err)
	}
	t.Logf("decoded: %s", decoded)
	if len(orig) != len(decoded) {
		t.Fatalf("length decoded is %d; want %d", len(decoded),
			len(orig))
	}
	if !bytes.Equal(orig, decoded) {
		t.Fatalf("decoded file differs from original")
	}
}

func TestEncoderCycle1(t *testing.T) {
	cycle(t, len(testString))
}

func TestEncoderCycle2(t *testing.T) {
	buf := new(bytes.Buffer)
	const txtlen = 50000
	io.CopyN(buf, randtxt.NewReader(rand.NewSource(42)), txtlen)
	txt := buf.String()
	buf.Reset()
	const dictCap = MinDictCap
	m, err := newHashTable(dictCap, 4)
	if err != nil {
		t.Fatal(err)
	}
	encoderDict, err := newEncoderDict(dictCap, dictCap+1024, m)
	if err != nil {
		t.Fatal(err)
	}
	props := Properties{3, 0, 2}
	if err := props.verify(); err != nil {
		t.Fatalf("properties error %s", err)
	}
	state := newState(props)
	lbw := &LimitedByteWriter{BW: buf, N: 100}
	w, err := newEncoder(lbw, state, encoderDict, 0)
	if err != nil {
		t.Fatalf("newEncoder error %s", err)
	}
	_, err = io.WriteString(w, txt)
	if err != nil && err != ErrLimit {
		t.Fatalf("WriteString error %s", err)
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close error %s", err)
	}
	n := w.Compressed()
	txt = txt[:n]
	decoderDict, err := newDecoderDict(dictCap)
	if err != nil {
		t.Fatalf("newDecoderDict error %s", err)
	}
	state.Reset()
	r, err := newDecoder(buf, state, decoderDict, n)
	if err != nil {
		t.Fatalf("newDecoder error %s", err)
	}
	out := new(bytes.Buffer)
	if _, err = io.Copy(out, r); err != nil {
		t.Fatalf("decompress copy error %s", err)
	}
	got := out.String()
	t.Logf("%s", got)
	if len(got) != int(n) {
		t.Fatalf("len(got) %d; want %d", len(got), n)
	}
	if got != txt {
		t.Fatalf("got and txt differ")
	}
}
149
vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
generated
vendored
Normal file
@ -0,0 +1,149 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
	"io"
)

// matcher is an interface that supports the identification of the next
// operation.
type matcher interface {
	io.Writer
	SetDict(d *encoderDict)
	NextOp(rep [4]uint32) operation
}

// encoderDict provides the dictionary of the encoder. It includes an
// additional buffer on top of the actual dictionary.
type encoderDict struct {
	buf      buffer
	m        matcher
	head     int64
	capacity int
	// preallocated array
	data [maxMatchLen]byte
}

// newEncoderDict creates the encoder dictionary. The argument bufSize
// defines the size of the additional buffer.
func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
	if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
		return nil, errors.New(
			"lzma: dictionary capacity out of range")
	}
	if bufSize < 1 {
		return nil, errors.New(
			"lzma: buffer size must be larger than zero")
	}
	d = &encoderDict{
		buf:      *newBuffer(dictCap + bufSize),
		capacity: dictCap,
		m:        m,
	}
	m.SetDict(d)
	return d, nil
}

// Discard discards n bytes. Note that n must not be larger than
// maxMatchLen.
func (d *encoderDict) Discard(n int) {
	p := d.data[:n]
	k, _ := d.buf.Read(p)
	if k < n {
		panic(fmt.Errorf("lzma: can't discard %d bytes", n))
	}
	d.head += int64(n)
	d.m.Write(p)
}

// Len returns the data available in the encoder dictionary.
func (d *encoderDict) Len() int {
	n := d.buf.Available()
	if int64(n) > d.head {
		return int(d.head)
	}
	return n
}

// DictLen returns the actual length of data in the dictionary.
func (d *encoderDict) DictLen() int {
	if d.head < int64(d.capacity) {
		return int(d.head)
	}
	return d.capacity
}

// Available returns the number of bytes that can be written by a
// following Write call.
func (d *encoderDict) Available() int {
	return d.buf.Available() - d.DictLen()
}

// Write writes data into the dictionary buffer. Note that the position
// of the dictionary head will not be moved. If there is not enough
// space in the buffer ErrNoSpace will be returned.
func (d *encoderDict) Write(p []byte) (n int, err error) {
	m := d.Available()
	if len(p) > m {
		p = p[:m]
		err = ErrNoSpace
	}
	var e error
	if n, e = d.buf.Write(p); e != nil {
		err = e
	}
	return n, err
}

// Pos returns the position of the head.
func (d *encoderDict) Pos() int64 { return d.head }

// ByteAt returns the byte at the given distance.
func (d *encoderDict) ByteAt(distance int) byte {
	if !(0 < distance && distance <= d.Len()) {
		return 0
	}
	i := d.buf.rear - distance
	if i < 0 {
		i += len(d.buf.data)
	}
	return d.buf.data[i]
}

// CopyN copies the last n bytes from the dictionary into the provided
// writer. This is used for copying uncompressed data into an
// uncompressed segment.
func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) {
	if n <= 0 {
		return 0, nil
	}
	m := d.Len()
	if n > m {
		n = m
		err = ErrNoSpace
	}
	i := d.buf.rear - n
	var e error
	if i < 0 {
		i += len(d.buf.data)
		if written, e = w.Write(d.buf.data[i:]); e != nil {
			return written, e
		}
		i = 0
	}
	var k int
	k, e = w.Write(d.buf.data[i:d.buf.rear])
	written += k
	if e != nil {
		err = e
	}
	return written, err
}

// Buffered returns the number of bytes in the buffer.
func (d *encoderDict) Buffered() int { return d.buf.Buffered() }
BIN
vendor/github.com/ulikunitz/xz/lzma/fox.lzma
generated
vendored
Normal file
Binary file not shown.
309
vendor/github.com/ulikunitz/xz/lzma/hashtable.go
generated
vendored
Normal file
@ -0,0 +1,309 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lzma
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ulikunitz/xz/internal/hash"
|
||||
)
|
||||
|
||||
/* For compression we need to find byte sequences that match the byte
|
||||
* sequence at the dictionary head. A hash table is a simple method to
|
||||
* provide this capability.
|
||||
*/
|
||||
|
||||
// maxMatches limits the number of matches requested from the Matches
|
||||
// function. This controls the speed of the overall encoding.
|
||||
const maxMatches = 16
|
||||
|
||||
// shortDists defines the number of short distances supported by the
|
||||
// implementation.
|
||||
const shortDists = 8
|
||||
|
||||
// The minimum is somehow arbitrary but the maximum is limited by the
|
||||
// memory requirements of the hash table.
|
||||
const (
|
||||
minTableExponent = 9
|
||||
maxTableExponent = 20
|
||||
)
|
||||
|
||||
// newRoller contains the function used to create an instance of the
|
||||
// hash.Roller.
|
||||
var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) }
|
||||
|
||||
// hashTable stores the hash table including the rolling hash method.
|
||||
//
|
||||
// We implement chained hashing into a circular buffer. Each entry in
|
||||
// the circular buffer stores the delta distance to the next position with a
|
||||
// word that has the same hash value.
|
||||
type hashTable struct {
|
||||
dict *encoderDict
|
||||
// actual hash table
|
||||
t []int64
|
||||
// circular list data with the offset to the next word
|
||||
data []uint32
|
||||
front int
|
||||
// mask for computing the index for the hash table
|
||||
mask uint64
|
||||
// hash offset; initial value is -int64(wordLen)
|
||||
hoff int64
|
||||
// length of the hashed word
|
||||
wordLen int
|
||||
// hash roller for computing the hash values for the Write
|
||||
// method
|
||||
wr hash.Roller
|
||||
// hash roller for computing arbitrary hashes
|
||||
hr hash.Roller
|
||||
// preallocated slices
|
||||
p [maxMatches]int64
|
||||
distances [maxMatches + shortDists]int
|
||||
}
|
||||
|
||||
// hashTableExponent derives the hash table exponent from the dictionary
|
||||
// capacity.
|
||||
func hashTableExponent(n uint32) int {
|
||||
e := 30 - nlz32(n)
|
||||
switch {
|
||||
case e < minTableExponent:
|
||||
e = minTableExponent
|
||||
case e > maxTableExponent:
|
||||
e = maxTableExponent
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// newHashTable creates a new hash table for words of length wordLen
|
||||
func newHashTable(capacity int, wordLen int) (t *hashTable, err error) {
|
||||
if !(0 < capacity) {
|
||||
return nil, errors.New(
|
||||
"newHashTable: capacity must not be negative")
|
||||
}
|
||||
exp := hashTableExponent(uint32(capacity))
|
||||
if !(1 <= wordLen && wordLen <= 4) {
|
||||
return nil, errors.New("newHashTable: " +
|
||||
"argument wordLen out of range")
|
||||
}
|
||||
n := 1 << uint(exp)
|
||||
if n <= 0 {
|
||||
panic("newHashTable: exponent is too large")
|
||||
}
|
||||
t = &hashTable{
|
||||
t: make([]int64, n),
|
||||
data: make([]uint32, capacity),
|
||||
mask: (uint64(1) << uint(exp)) - 1,
|
||||
hoff: -int64(wordLen),
|
||||
wordLen: wordLen,
|
||||
wr: newRoller(wordLen),
|
||||
hr: newRoller(wordLen),
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (t *hashTable) SetDict(d *encoderDict) { t.dict = d }
|
||||
|
||||
// buffered returns the number of bytes that are currently hashed.
|
||||
func (t *hashTable) buffered() int {
|
||||
n := t.hoff + 1
|
||||
switch {
|
||||
case n <= 0:
|
||||
return 0
|
||||
case n >= int64(len(t.data)):
|
||||
return len(t.data)
|
||||
}
|
||||
return int(n)
|
||||
}
|
||||
|
||||
// addIndex adds n to an index ensuring that is stays inside the
|
||||
// circular buffer for the hash chain.
|
||||
func (t *hashTable) addIndex(i, n int) int {
|
||||
i += n - len(t.data)
|
||||
if i < 0 {
|
||||
		i += len(t.data)
	}
	return i
}

// putDelta puts the delta instance at the current front of the circular
// chain buffer.
func (t *hashTable) putDelta(delta uint32) {
	t.data[t.front] = delta
	t.front = t.addIndex(t.front, 1)
}

// putEntry puts a new entry into the hash table. If there is already a
// value stored it is moved into the circular chain buffer.
func (t *hashTable) putEntry(h uint64, pos int64) {
	if pos < 0 {
		return
	}
	i := h & t.mask
	old := t.t[i] - 1
	t.t[i] = pos + 1
	var delta int64
	if old >= 0 {
		delta = pos - old
		if delta > 1<<32-1 || delta > int64(t.buffered()) {
			delta = 0
		}
	}
	t.putDelta(uint32(delta))
}

// WriteByte converts a single byte into a hash and puts it into the hash
// table.
func (t *hashTable) WriteByte(b byte) error {
	h := t.wr.RollByte(b)
	t.hoff++
	t.putEntry(h, t.hoff)
	return nil
}

// Write converts the bytes provided into hashes and stores the
// abbreviated offsets in the hash table. The method will never return an
// error.
func (t *hashTable) Write(p []byte) (n int, err error) {
	for _, b := range p {
		// WriteByte doesn't generate an error.
		t.WriteByte(b)
	}
	return len(p), nil
}

// getMatches fills the positions slice with the matches for a specific
// hash. The function returns the number of positions found.
//
// TODO: Make a getDistances because that is what we are actually
// interested in.
func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
	if t.hoff < 0 || len(positions) == 0 {
		return 0
	}
	buffered := t.buffered()
	tailPos := t.hoff + 1 - int64(buffered)
	rear := t.front - buffered
	if rear >= 0 {
		rear -= len(t.data)
	}
	// get the slot for the hash
	pos := t.t[h&t.mask] - 1
	delta := pos - tailPos
	for {
		if delta < 0 {
			return n
		}
		positions[n] = tailPos + delta
		n++
		if n >= len(positions) {
			return n
		}
		i := rear + int(delta)
		if i < 0 {
			i += len(t.data)
		}
		u := t.data[i]
		if u == 0 {
			return n
		}
		delta -= int64(u)
	}
}

// hash computes the rolling hash for the word stored in p. For correct
// results its length must be equal to t.wordLen.
func (t *hashTable) hash(p []byte) uint64 {
	var h uint64
	for _, b := range p {
		h = t.hr.RollByte(b)
	}
	return h
}

// Matches fills the positions slice with potential matches. The
// function returns the number of positions filled into positions. The
// byte slice p must have the word length of the hash table.
func (t *hashTable) Matches(p []byte, positions []int64) int {
	if len(p) != t.wordLen {
		panic(fmt.Errorf(
			"byte slice must have length %d", t.wordLen))
	}
	h := t.hash(p)
	return t.getMatches(h, positions)
}

// NextOp identifies the next operation using the hash table.
//
// TODO: Use all repetitions to find matches.
func (t *hashTable) NextOp(rep [4]uint32) operation {
	// get positions
	data := t.dict.data[:maxMatchLen]
	n, _ := t.dict.buf.Peek(data)
	data = data[:n]
	var p []int64
	if n < t.wordLen {
		p = t.p[:0]
	} else {
		p = t.p[:maxMatches]
		n = t.Matches(data[:t.wordLen], p)
		p = p[:n]
	}

	// convert positions into potential distances
	head := t.dict.head
	dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
	for _, pos := range p {
		dis := int(head - pos)
		if dis > shortDists {
			dists = append(dists, dis)
		}
	}

	// check distances
	var m match
	dictLen := t.dict.DictLen()
	for _, dist := range dists {
		if dist > dictLen {
			continue
		}

		// Here comes a trick. We are only interested in matches
		// that are longer than the matches we have found before.
		// So before we test the whole byte sequence at the given
		// distance, we test the first byte that would make the
		// match longer. If it doesn't match, we don't need to
		// care about this distance any longer.
		i := t.dict.buf.rear - dist + m.n
		if i < 0 {
			i += len(t.dict.buf.data)
		}
		if t.dict.buf.data[i] != data[m.n] {
			// We can't get a longer match. Jump to the next
			// distance.
			continue
		}

		n := t.dict.buf.matchLen(dist, data)
		switch n {
		case 0:
			continue
		case 1:
			if uint32(dist-minDistance) != rep[0] {
				continue
			}
		}
		if n > m.n {
			m = match{int64(dist), n}
			if n == len(data) {
				// No better match will be found.
				break
			}
		}
	}

	if m.n == 0 {
		return lit{data[0]}
	}
	return m
}
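A minimal standalone sketch of the delta-chain idea that getMatches walks above, assuming positions are recovered by subtracting stored uint32 deltas until a zero delta ends the chain; walkChain is a hypothetical helper for illustration, not part of the package:

    package main

    import "fmt"

    // walkChain recovers positions of earlier occurrences of a word from
    // the newest position and the stored deltas, mirroring the loop in
    // getMatches. A zero delta terminates the chain.
    func walkChain(newest int64, deltas []uint32) []int64 {
        positions := []int64{newest}
        pos := newest
        for _, d := range deltas {
            if d == 0 {
                break
            }
            pos -= int64(d)
            positions = append(positions, pos)
        }
        return positions
    }

    func main() {
        // word seen at positions 3, 7 and 12: deltas 5 and 4, then 0
        fmt.Println(walkChain(12, []uint32{5, 4, 0})) // [12 7 3]
    }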
47
vendor/github.com/ulikunitz/xz/lzma/hashtable_test.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"fmt"
	"testing"
)

func TestHashTable(t *testing.T) {
	ht, err := newHashTable(32, 2)
	if err != nil {
		t.Fatalf("newHashTable: error %s", err)
	}
	// 01234567890123456
	s := "abcabcdefghijklmn"
	n, err := ht.Write([]byte(s))
	if err != nil {
		t.Fatalf("ht.Write: error %s", err)
	}
	if n != len(s) {
		t.Fatalf("ht.Write returned %d; want %d", n, len(s))
	}
	tests := []struct {
		s string
		w string
	}{
		{"ab", "[3 0]"},
		{"bc", "[4 1]"},
		{"ca", "[2]"},
		{"xx", "[]"},
		{"gh", "[9]"},
		{"mn", "[15]"},
	}
	distances := make([]int64, 20)
	for _, c := range tests {
		distances := distances[:20]
		k := ht.Matches([]byte(c.s), distances)
		distances = distances[:k]
		o := fmt.Sprintf("%v", distances)
		if o != c.w {
			t.Errorf("%s: offsets %s; want %s", c.s, o, c.w)
		}
	}
}
167
vendor/github.com/ulikunitz/xz/lzma/header.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
)

// uint32LE reads a uint32 integer from a byte slice.
func uint32LE(b []byte) uint32 {
	x := uint32(b[3]) << 24
	x |= uint32(b[2]) << 16
	x |= uint32(b[1]) << 8
	x |= uint32(b[0])
	return x
}

// uint64LE converts the uint64 value stored as little endian to a uint64
// value.
func uint64LE(b []byte) uint64 {
	x := uint64(b[7]) << 56
	x |= uint64(b[6]) << 48
	x |= uint64(b[5]) << 40
	x |= uint64(b[4]) << 32
	x |= uint64(b[3]) << 24
	x |= uint64(b[2]) << 16
	x |= uint64(b[1]) << 8
	x |= uint64(b[0])
	return x
}

// putUint32LE puts a uint32 integer into a byte slice that must have at least
// a length of 4 bytes.
func putUint32LE(b []byte, x uint32) {
	b[0] = byte(x)
	b[1] = byte(x >> 8)
	b[2] = byte(x >> 16)
	b[3] = byte(x >> 24)
}

// putUint64LE puts the uint64 value into the byte slice as little endian
// value. The byte slice b must have room for at least 8 bytes.
func putUint64LE(b []byte, x uint64) {
	b[0] = byte(x)
	b[1] = byte(x >> 8)
	b[2] = byte(x >> 16)
	b[3] = byte(x >> 24)
	b[4] = byte(x >> 32)
	b[5] = byte(x >> 40)
	b[6] = byte(x >> 48)
	b[7] = byte(x >> 56)
}

// noHeaderSize defines the value of the length field in the LZMA header.
const noHeaderSize uint64 = 1<<64 - 1

// HeaderLen provides the length of the LZMA file header.
const HeaderLen = 13

// header represents the header of an LZMA file.
type header struct {
	properties Properties
	dictCap    int
	// uncompressed size; negative value if no size is given
	size int64
}

// marshalBinary marshals the header.
func (h *header) marshalBinary() (data []byte, err error) {
	if err = h.properties.verify(); err != nil {
		return nil, err
	}
	if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
		return nil, fmt.Errorf("lzma: DictCap %d out of range",
			h.dictCap)
	}

	data = make([]byte, 13)

	// property byte
	data[0] = h.properties.Code()

	// dictionary capacity
	putUint32LE(data[1:5], uint32(h.dictCap))

	// uncompressed size
	var s uint64
	if h.size > 0 {
		s = uint64(h.size)
	} else {
		s = noHeaderSize
	}
	putUint64LE(data[5:], s)

	return data, nil
}

// unmarshalBinary unmarshals the header.
func (h *header) unmarshalBinary(data []byte) error {
	if len(data) != HeaderLen {
		return errors.New("lzma.unmarshalBinary: data has wrong length")
	}

	// properties
	var err error
	if h.properties, err = PropertiesForCode(data[0]); err != nil {
		return err
	}

	// dictionary capacity
	h.dictCap = int(uint32LE(data[1:]))
	if h.dictCap < 0 {
		return errors.New(
			"LZMA header: dictionary capacity exceeds maximum " +
				"integer")
	}

	// uncompressed size
	s := uint64LE(data[5:])
	if s == noHeaderSize {
		h.size = -1
	} else {
		h.size = int64(s)
		if h.size < 0 {
			return errors.New(
				"LZMA header: uncompressed size " +
					"out of int64 range")
		}
	}

	return nil
}

// validDictCap checks whether the dictionary capacity is correct. This
// is used to weed out wrong file headers.
func validDictCap(dictcap int) bool {
	if int64(dictcap) == MaxDictCap {
		return true
	}
	for n := uint(10); n < 32; n++ {
		if dictcap == 1<<n {
			return true
		}
		if dictcap == 1<<n+1<<(n-1) {
			return true
		}
	}
	return false
}

// ValidHeader checks for a valid LZMA file header. It allows only
// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
// there is an explicit size it must not exceed 256 GiB. The length of
// the data argument must be HeaderLen.
func ValidHeader(data []byte) bool {
	var h header
	if err := h.unmarshalBinary(data); err != nil {
		return false
	}
	if !validDictCap(h.dictCap) {
		return false
	}
	return h.size < 0 || h.size <= 1<<38
}
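To make the header layout concrete, here is a small sketch that builds the classic 13-byte header exactly as marshalBinary lays it out: one properties byte, a little-endian uint32 dictionary capacity, and a little-endian uint64 uncompressed size that is all 0xff bytes when the size is unknown. The parameter values (LC 3, LP 0, PB 2, 8 MiB dictionary) are illustrative, not mandated by the package:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        var hdr [13]byte
        hdr[0] = (2*5+0)*9 + 3                          // PB 2, LP 0, LC 3 -> 0x5d
        binary.LittleEndian.PutUint32(hdr[1:5], 8<<20)  // dictionary capacity 8 MiB
        binary.LittleEndian.PutUint64(hdr[5:], 1<<64-1) // unknown uncompressed size
        fmt.Printf("% x\n", hdr)
        // 5d 00 00 80 00 ff ff ff ff ff ff ff ff
    }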
398
vendor/github.com/ulikunitz/xz/lzma/header2.go
generated
vendored
Normal file
@ -0,0 +1,398 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
	"io"
)

const (
	// maximum size of compressed data in a chunk
	maxCompressed = 1 << 16
	// maximum size of uncompressed data in a chunk
	maxUncompressed = 1 << 21
)

// chunkType represents the type of an LZMA2 chunk. Note that this
// value is an internal representation and not the actual encoding of an
// LZMA2 chunk header.
type chunkType byte

// Possible values for the chunk type.
const (
	// end of stream
	cEOS chunkType = iota
	// uncompressed; reset dictionary
	cUD
	// uncompressed; no reset of dictionary
	cU
	// LZMA compressed; no reset
	cL
	// LZMA compressed; reset state
	cLR
	// LZMA compressed; reset state; new property value
	cLRN
	// LZMA compressed; reset state; new property value; reset dictionary
	cLRND
)

// chunkTypeStrings provides a string representation for the chunk types.
var chunkTypeStrings = [...]string{
	cEOS:  "EOS",
	cU:    "U",
	cUD:   "UD",
	cL:    "L",
	cLR:   "LR",
	cLRN:  "LRN",
	cLRND: "LRND",
}

// String returns a string representation of the chunk type.
func (c chunkType) String() string {
	if !(cEOS <= c && c <= cLRND) {
		return "unknown"
	}
	return chunkTypeStrings[c]
}

// Actual encodings for the chunk types in the value. Note that the high
// uncompressed size bits are stored in the header byte additionally.
const (
	hEOS  = 0
	hUD   = 1
	hU    = 2
	hL    = 1 << 7
	hLR   = 1<<7 | 1<<5
	hLRN  = 1<<7 | 1<<6
	hLRND = 1<<7 | 1<<6 | 1<<5
)

// errHeaderByte indicates an unsupported value for the chunk header
// byte. This byte starts the variable-length chunk header.
var errHeaderByte = errors.New("lzma: unsupported chunk header byte")

// headerChunkType converts the header byte into a chunk type. It
// ignores the uncompressed size bits in the chunk header byte.
func headerChunkType(h byte) (c chunkType, err error) {
	if h&hL == 0 {
		// no compression
		switch h {
		case hEOS:
			c = cEOS
		case hUD:
			c = cUD
		case hU:
			c = cU
		default:
			return 0, errHeaderByte
		}
		return
	}
	switch h & hLRND {
	case hL:
		c = cL
	case hLR:
		c = cLR
	case hLRN:
		c = cLRN
	case hLRND:
		c = cLRND
	default:
		return 0, errHeaderByte
	}
	return
}

// uncompressedHeaderLen provides the length of an uncompressed header.
const uncompressedHeaderLen = 3

// headerLen returns the length of the LZMA2 header for a given chunk
// type.
func headerLen(c chunkType) int {
	switch c {
	case cEOS:
		return 1
	case cU, cUD:
		return uncompressedHeaderLen
	case cL, cLR:
		return 5
	case cLRN, cLRND:
		return 6
	}
	panic(fmt.Errorf("unsupported chunk type %d", c))
}

// chunkHeader represents the contents of a chunk header.
type chunkHeader struct {
	ctype        chunkType
	uncompressed uint32
	compressed   uint16
	props        Properties
}

// String returns a string representation of the chunk header.
func (h *chunkHeader) String() string {
	return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed,
		h.compressed, &h.props)
}

// UnmarshalBinary reads the content of the chunk header from the data
// slice. The slice must have the correct length.
func (h *chunkHeader) UnmarshalBinary(data []byte) error {
	if len(data) == 0 {
		return errors.New("no data")
	}
	c, err := headerChunkType(data[0])
	if err != nil {
		return err
	}

	n := headerLen(c)
	if len(data) < n {
		return errors.New("incomplete data")
	}
	if len(data) > n {
		return errors.New("invalid data length")
	}

	*h = chunkHeader{ctype: c}
	if c == cEOS {
		return nil
	}

	h.uncompressed = uint32(uint16BE(data[1:3]))
	if c <= cU {
		return nil
	}
	h.uncompressed |= uint32(data[0]&^hLRND) << 16

	h.compressed = uint16BE(data[3:5])
	if c <= cLR {
		return nil
	}

	h.props, err = PropertiesForCode(data[5])
	return err
}

// MarshalBinary encodes the chunk header value. The function checks
// whether the content of the chunk header is correct.
func (h *chunkHeader) MarshalBinary() (data []byte, err error) {
	if h.ctype > cLRND {
		return nil, errors.New("invalid chunk type")
	}
	if err = h.props.verify(); err != nil {
		return nil, err
	}

	data = make([]byte, headerLen(h.ctype))

	switch h.ctype {
	case cEOS:
		return data, nil
	case cUD:
		data[0] = hUD
	case cU:
		data[0] = hU
	case cL:
		data[0] = hL
	case cLR:
		data[0] = hLR
	case cLRN:
		data[0] = hLRN
	case cLRND:
		data[0] = hLRND
	}

	putUint16BE(data[1:3], uint16(h.uncompressed))
	if h.ctype <= cU {
		return data, nil
	}
	data[0] |= byte(h.uncompressed>>16) &^ hLRND

	putUint16BE(data[3:5], h.compressed)
	if h.ctype <= cLR {
		return data, nil
	}

	data[5] = h.props.Code()
	return data, nil
}

// readChunkHeader reads the chunk header from the IO reader.
func readChunkHeader(r io.Reader) (h *chunkHeader, err error) {
	p := make([]byte, 1, 6)
	if _, err = io.ReadFull(r, p); err != nil {
		return
	}
	c, err := headerChunkType(p[0])
	if err != nil {
		return
	}
	p = p[:headerLen(c)]
	if _, err = io.ReadFull(r, p[1:]); err != nil {
		return
	}
	h = new(chunkHeader)
	if err = h.UnmarshalBinary(p); err != nil {
		return nil, err
	}
	return h, nil
}

// uint16BE converts a big-endian uint16 representation to a uint16
// value.
func uint16BE(p []byte) uint16 {
	return uint16(p[0])<<8 | uint16(p[1])
}

// putUint16BE puts the big-endian uint16 representation into the given
// slice.
func putUint16BE(p []byte, x uint16) {
	p[0] = byte(x >> 8)
	p[1] = byte(x)
}

// chunkState is used to manage the state of the chunks.
type chunkState byte

// start and stop define the initial and terminating state of the chunk
// state.
const (
	start chunkState = 'S'
	stop             = 'T'
)

// errors for the chunk state handling
var (
	errChunkType = errors.New("lzma: unexpected chunk type")
	errState     = errors.New("lzma: wrong chunk state")
)

// next transitions the state based on the chunk type input.
func (c *chunkState) next(ctype chunkType) error {
	switch *c {
	// start state
	case 'S':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD:
			*c = 'R'
		case cLRND:
			*c = 'L'
		default:
			return errChunkType
		}
	// normal LZMA mode
	case 'L':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD:
			*c = 'R'
		case cU:
			*c = 'U'
		case cL, cLR, cLRN, cLRND:
			break
		default:
			return errChunkType
		}
	// reset required
	case 'R':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD, cU:
			break
		case cLRN, cLRND:
			*c = 'L'
		default:
			return errChunkType
		}
	// uncompressed
	case 'U':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD:
			*c = 'R'
		case cU:
			break
		case cL, cLR, cLRN, cLRND:
			*c = 'L'
		default:
			return errChunkType
		}
	// terminal state
	case 'T':
		return errChunkType
	default:
		return errState
	}
	return nil
}

// defaultChunkType returns the default chunk type for each chunk state.
func (c chunkState) defaultChunkType() chunkType {
	switch c {
	case 'S':
		return cLRND
	case 'L', 'U':
		return cL
	case 'R':
		return cLRN
	default:
		// no error
		return cEOS
	}
}

// maxDictCap defines the maximum dictionary capacity supported by the
// LZMA2 dictionary capacity encoding.
const maxDictCap = 1<<32 - 1

// maxDictCapCode defines the maximum dictionary capacity code.
const maxDictCapCode = 40

// The function decodes the dictionary capacity byte, but doesn't check
// the given byte for the correct range.
func decodeDictCap(c byte) int64 {
	return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
}

// DecodeDictCap decodes the encoded dictionary capacity. The function
// returns an error if the code is out of range.
func DecodeDictCap(c byte) (n int64, err error) {
	if c >= maxDictCapCode {
		if c == maxDictCapCode {
			return maxDictCap, nil
		}
		return 0, errors.New("lzma: invalid dictionary size code")
	}
	return decodeDictCap(c), nil
}

// EncodeDictCap encodes a dictionary capacity. The function returns the
// code for the smallest capacity that is greater than or equal to n. If
// n exceeds the maximum supported dictionary capacity, the maximum value
// is returned.
func EncodeDictCap(n int64) byte {
	a, b := byte(0), byte(40)
	for a < b {
		c := a + (b-a)>>1
		m := decodeDictCap(c)
		if n <= m {
			if n == m {
				return c
			}
			b = c
		} else {
			a = c + 1
		}
	}
	return a
}
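A worked example of the capacity code decoded by decodeDictCap above: the low bit of the code selects a mantissa of 2 or 3 and the remaining bits the shift, so capacities step through 2^n and 2^n + 2^(n-1). The dictCap helper below is a standalone copy of the formula for illustration only:

    package main

    import "fmt"

    // dictCap mirrors decodeDictCap: mantissa 2 or 3 shifted by 11 + c/2.
    func dictCap(c byte) int64 {
        return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
    }

    func main() {
        for c := byte(0); c < 6; c++ {
            fmt.Printf("code %d -> %d\n", c, dictCap(c))
        }
        // code 0 -> 4096, code 1 -> 6144, code 2 -> 8192,
        // code 3 -> 12288, code 4 -> 16384, code 5 -> 24576
    }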
153
vendor/github.com/ulikunitz/xz/lzma/header2_test.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bytes"
	"fmt"
	"testing"
)

func TestChunkTypeString(t *testing.T) {
	tests := [...]struct {
		c chunkType
		s string
	}{
		{cEOS, "EOS"},
		{cUD, "UD"},
		{cU, "U"},
		{cL, "L"},
		{cLR, "LR"},
		{cLRN, "LRN"},
		{cLRND, "LRND"},
	}
	for _, c := range tests {
		s := fmt.Sprintf("%v", c.c)
		if s != c.s {
			t.Errorf("got %s; want %s", s, c.s)
		}
	}
}

func TestHeaderChunkType(t *testing.T) {
	tests := []struct {
		h byte
		c chunkType
	}{
		{h: 0, c: cEOS},
		{h: 1, c: cUD},
		{h: 2, c: cU},
		{h: 1<<7 | 0x1f, c: cL},
		{h: 1<<7 | 1<<5 | 0x1f, c: cLR},
		{h: 1<<7 | 1<<6 | 0x1f, c: cLRN},
		{h: 1<<7 | 1<<6 | 1<<5 | 0x1f, c: cLRND},
		{h: 1<<7 | 1<<6 | 1<<5, c: cLRND},
	}
	if _, err := headerChunkType(3); err == nil {
		t.Fatalf("headerChunkType(%d) got %v; want %v",
			3, err, errHeaderByte)
	}
	for _, tc := range tests {
		c, err := headerChunkType(tc.h)
		if err != nil {
			t.Fatalf("headerChunkType error %s", err)
		}
		if c != tc.c {
			t.Errorf("got %s; want %s", c, tc.c)
		}
	}
}

func TestHeaderLen(t *testing.T) {
	tests := []struct {
		c chunkType
		n int
	}{
		{cEOS, 1}, {cU, 3}, {cUD, 3}, {cL, 5}, {cLR, 5}, {cLRN, 6},
		{cLRND, 6},
	}
	for _, tc := range tests {
		n := headerLen(tc.c)
		if n != tc.n {
			t.Errorf("header length for %s %d; want %d",
				tc.c, n, tc.n)
		}
	}
}

func chunkHeaderSamples(t *testing.T) []chunkHeader {
	props := Properties{LC: 3, LP: 0, PB: 2}
	headers := make([]chunkHeader, 0, 12)
	for c := cEOS; c <= cLRND; c++ {
		var h chunkHeader
		h.ctype = c
		if c >= cUD {
			h.uncompressed = 0x0304
		}
		if c >= cL {
			h.compressed = 0x0201
		}
		if c >= cLRN {
			h.props = props
		}
		headers = append(headers, h)
	}
	return headers
}

func TestChunkHeaderMarshalling(t *testing.T) {
	for _, h := range chunkHeaderSamples(t) {
		data, err := h.MarshalBinary()
		if err != nil {
			t.Fatalf("MarshalBinary for %v error %s", h, err)
		}
		var g chunkHeader
		if err = g.UnmarshalBinary(data); err != nil {
			t.Fatalf("UnmarshalBinary error %s", err)
		}
		if g != h {
			t.Fatalf("got %v; want %v", g, h)
		}
	}
}

func TestReadChunkHeader(t *testing.T) {
	for _, h := range chunkHeaderSamples(t) {
		data, err := h.MarshalBinary()
		if err != nil {
			t.Fatalf("MarshalBinary for %v error %s", h, err)
		}
		r := bytes.NewReader(data)
		g, err := readChunkHeader(r)
		if err != nil {
			t.Fatalf("readChunkHeader for %v error %s", h, err)
		}
		if *g != h {
			t.Fatalf("got %v; want %v", g, h)
		}
	}
}

func TestReadEOS(t *testing.T) {
	var b [1]byte
	r := bytes.NewReader(b[:])
	h, err := readChunkHeader(r)
	if err != nil {
		t.Fatalf("readChunkHeader error %s", err)
	}
	if h.ctype != cEOS {
		t.Errorf("ctype got %s; want %s", h.ctype, cEOS)
	}
	if h.compressed != 0 {
		t.Errorf("compressed got %d; want %d", h.compressed, 0)
	}
	if h.uncompressed != 0 {
		t.Errorf("uncompressed got %d; want %d", h.uncompressed, 0)
	}
	wantProps := Properties{}
	if h.props != wantProps {
		t.Errorf("props got %v; want %v", h.props, wantProps)
	}
}
52
vendor/github.com/ulikunitz/xz/lzma/header_test.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import "testing"

func TestHeaderMarshalling(t *testing.T) {
	tests := []header{
		{properties: Properties{3, 0, 2}, dictCap: 8 * 1024 * 1024,
			size: -1},
		{properties: Properties{4, 3, 3}, dictCap: 4096,
			size: 10},
	}
	for _, h := range tests {
		data, err := h.marshalBinary()
		if err != nil {
			t.Fatalf("marshalBinary error %s", err)
		}
		var g header
		if err = g.unmarshalBinary(data); err != nil {
			t.Fatalf("unmarshalBinary error %s", err)
		}
		if h != g {
			t.Errorf("got header %#v; want %#v", g, h)
		}
	}
}

func TestValidHeader(t *testing.T) {
	tests := []header{
		{properties: Properties{3, 0, 2}, dictCap: 8 * 1024 * 1024,
			size: -1},
		{properties: Properties{4, 3, 3}, dictCap: 4096,
			size: 10},
	}
	for _, h := range tests {
		data, err := h.marshalBinary()
		if err != nil {
			t.Fatalf("marshalBinary error %s", err)
		}
		if !ValidHeader(data) {
			t.Errorf("ValidHeader returns false for header %v;"+
				" want true", h)
		}
	}
	const a = "1234567890123"
	if ValidHeader([]byte(a)) {
		t.Errorf("ValidHeader returns true for %s; want false", a)
	}
}
129
vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import "errors"

// maxPosBits defines the number of bits of the position value that are used
// to compute the posState value. The value is used to select the tree codec
// for length encoding and decoding.
const maxPosBits = 4

// minMatchLen and maxMatchLen give the minimum and maximum values for
// encoding and decoding length values. minMatchLen is also used as base
// for the encoded length values.
const (
	minMatchLen = 2
	maxMatchLen = minMatchLen + 16 + 256 - 1
)

// lengthCodec supports the encoding of the length value.
type lengthCodec struct {
	choice [2]prob
	low    [1 << maxPosBits]treeCodec
	mid    [1 << maxPosBits]treeCodec
	high   treeCodec
}

// deepcopy initializes the lc value as deep copy of the source value.
func (lc *lengthCodec) deepcopy(src *lengthCodec) {
	if lc == src {
		return
	}
	lc.choice = src.choice
	for i := range lc.low {
		lc.low[i].deepcopy(&src.low[i])
	}
	for i := range lc.mid {
		lc.mid[i].deepcopy(&src.mid[i])
	}
	lc.high.deepcopy(&src.high)
}

// init initializes a new length codec.
func (lc *lengthCodec) init() {
	for i := range lc.choice {
		lc.choice[i] = probInit
	}
	for i := range lc.low {
		lc.low[i] = makeTreeCodec(3)
	}
	for i := range lc.mid {
		lc.mid[i] = makeTreeCodec(3)
	}
	lc.high = makeTreeCodec(8)
}

// lBits gives the number of bits used for the encoding of the l value
// provided to the range encoder.
func lBits(l uint32) int {
	switch {
	case l < 8:
		return 4
	case l < 16:
		return 5
	default:
		return 10
	}
}

// Encode encodes the length offset. The length offset l can be computed by
// subtracting minMatchLen (2) from the actual length.
//
//	l = length - minMatchLen
//
func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
) (err error) {
	if l > maxMatchLen-minMatchLen {
		return errors.New("lengthCodec.Encode: l out of range")
	}
	if l < 8 {
		if err = lc.choice[0].Encode(e, 0); err != nil {
			return
		}
		return lc.low[posState].Encode(e, l)
	}
	if err = lc.choice[0].Encode(e, 1); err != nil {
		return
	}
	if l < 16 {
		if err = lc.choice[1].Encode(e, 0); err != nil {
			return
		}
		return lc.mid[posState].Encode(e, l-8)
	}
	if err = lc.choice[1].Encode(e, 1); err != nil {
		return
	}
	if err = lc.high.Encode(e, l-16); err != nil {
		return
	}
	return nil
}

// Decode reads the length offset. Add minMatchLen to the length offset l
// to compute the actual length.
func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
) (l uint32, err error) {
	var b uint32
	if b, err = lc.choice[0].Decode(d); err != nil {
		return
	}
	if b == 0 {
		l, err = lc.low[posState].Decode(d)
		return
	}
	if b, err = lc.choice[1].Decode(d); err != nil {
		return
	}
	if b == 0 {
		l, err = lc.mid[posState].Decode(d)
		l += 8
		return
	}
	l, err = lc.high.Decode(d)
	l += 16
	return
}
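The tiering in Encode and Decode above can be summarized as follows: length offsets 0-7 use a low 3-bit tree after a 0 choice bit, offsets 8-15 a mid 3-bit tree after choice bits 1 and 0, and offsets 16-271 an 8-bit high tree after choice bits 1 and 1. A hypothetical standalone helper mirroring lBits:

    package main

    import "fmt"

    // tier reports which tree encodes the length offset l and how many
    // range coder bits the encoding consumes (choice bits plus tree bits),
    // matching the lBits function above.
    func tier(l uint32) (name string, bits int) {
        switch {
        case l < 8:
            return "low", 1 + 3 // choice bit 0, 3-bit tree
        case l < 16:
            return "mid", 2 + 3 // choice bits 1,0, 3-bit tree
        default:
            return "high", 2 + 8 // choice bits 1,1, 8-bit tree
        }
    }

    func main() {
        for _, l := range []uint32{0, 7, 8, 15, 16, 271} {
            name, bits := tier(l)
            fmt.Printf("offset %3d: %-4s tree, %d bits\n", l, name, bits)
        }
    }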
132
vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

// literalCodec supports the encoding of literals. It provides 768 probability
// values per literal state. The upper 512 probabilities are used with the
// context of a match bit.
type literalCodec struct {
	probs []prob
}

// deepcopy initializes literal codec c as a deep copy of the source.
func (c *literalCodec) deepcopy(src *literalCodec) {
	if c == src {
		return
	}
	c.probs = make([]prob, len(src.probs))
	copy(c.probs, src.probs)
}

// init initializes the literal codec.
func (c *literalCodec) init(lc, lp int) {
	switch {
	case !(minLC <= lc && lc <= maxLC):
		panic("lc out of range")
	case !(minLP <= lp && lp <= maxLP):
		panic("lp out of range")
	}
	c.probs = make([]prob, 0x300<<uint(lc+lp))
	for i := range c.probs {
		c.probs[i] = probInit
	}
}

// Encode encodes the byte s using a range encoder as well as the current LZMA
// encoder state, a match byte and the literal state.
func (c *literalCodec) Encode(e *rangeEncoder, s byte,
	state uint32, match byte, litState uint32,
) (err error) {
	k := litState * 0x300
	probs := c.probs[k : k+0x300]
	symbol := uint32(1)
	r := uint32(s)
	if state >= 7 {
		m := uint32(match)
		for {
			matchBit := (m >> 7) & 1
			m <<= 1
			bit := (r >> 7) & 1
			r <<= 1
			i := ((1 + matchBit) << 8) | symbol
			if err = probs[i].Encode(e, bit); err != nil {
				return
			}
			symbol = (symbol << 1) | bit
			if matchBit != bit {
				break
			}
			if symbol >= 0x100 {
				break
			}
		}
	}
	for symbol < 0x100 {
		bit := (r >> 7) & 1
		r <<= 1
		if err = probs[symbol].Encode(e, bit); err != nil {
			return
		}
		symbol = (symbol << 1) | bit
	}
	return nil
}

// Decode decodes a literal byte using the range decoder as well as the LZMA
// state, a match byte, and the literal state.
func (c *literalCodec) Decode(d *rangeDecoder,
	state uint32, match byte, litState uint32,
) (s byte, err error) {
	k := litState * 0x300
	probs := c.probs[k : k+0x300]
	symbol := uint32(1)
	if state >= 7 {
		m := uint32(match)
		for {
			matchBit := (m >> 7) & 1
			m <<= 1
			i := ((1 + matchBit) << 8) | symbol
			bit, err := d.DecodeBit(&probs[i])
			if err != nil {
				return 0, err
			}
			symbol = (symbol << 1) | bit
			if matchBit != bit {
				break
			}
			if symbol >= 0x100 {
				break
			}
		}
	}
	for symbol < 0x100 {
		bit, err := d.DecodeBit(&probs[symbol])
		if err != nil {
			return 0, err
		}
		symbol = (symbol << 1) | bit
	}
	s = byte(symbol - 0x100)
	return s, nil
}

// minLC and maxLC define the range for LC values.
const (
	minLC = 0
	maxLC = 8
)

// minLP and maxLP define the range for LP values.
const (
	minLP = 0
	maxLP = 4
)

// minState and maxState define the range for the state values stored in
// the State values.
const (
	minState = 0
	maxState = 11
)
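The symbol accumulation in Decode above is the usual bit-tree trick: the symbol starts at 1, each decoded bit is shifted in, and once the value reaches 0x100 the leading 1 is dropped to recover the byte. A standalone illustration with fixed bits in place of a range decoder:

    package main

    import "fmt"

    func main() {
        // Rebuild the byte 0xb5 (1011 0101) the way literalCodec.Decode
        // does: start at 1, shift in one bit per step, stop at 0x100.
        bits := []uint32{1, 0, 1, 1, 0, 1, 0, 1}
        symbol := uint32(1)
        for _, b := range bits {
            symbol = symbol<<1 | b
        }
        fmt.Printf("%#02x\n", byte(symbol-0x100)) // 0xb5
    }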
52
vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import "errors"

// MatchAlgorithm identifies an algorithm to find matches in the
// dictionary.
type MatchAlgorithm byte

// Supported matcher algorithms.
const (
	HashTable4 MatchAlgorithm = iota
	BinaryTree
)

// maStrings are used by the String method.
var maStrings = map[MatchAlgorithm]string{
	HashTable4: "HashTable4",
	BinaryTree: "BinaryTree",
}

// String returns a string representation of the Matcher.
func (a MatchAlgorithm) String() string {
	if s, ok := maStrings[a]; ok {
		return s
	}
	return "unknown"
}

var errUnsupportedMatchAlgorithm = errors.New(
	"lzma: unsupported match algorithm value")

// verify checks whether the matcher value is supported.
func (a MatchAlgorithm) verify() error {
	if _, ok := maStrings[a]; !ok {
		return errUnsupportedMatchAlgorithm
	}
	return nil
}

func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
	switch a {
	case HashTable4:
		return newHashTable(dictCap, 4)
	case BinaryTree:
		return newBinTree(dictCap)
	}
	return nil, errUnsupportedMatchAlgorithm
}
80
vendor/github.com/ulikunitz/xz/lzma/operation.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
	"unicode"
)

// operation represents an operation on the dictionary during encoding or
// decoding.
type operation interface {
	Len() int
}

// match represents a repetition at the given distance and the given length.
type match struct {
	// supports all possible distance values, including the eos marker
	distance int64
	// length
	n int
}

// verify checks whether the match is valid. If that is not the case an
// error is returned.
func (m match) verify() error {
	if !(minDistance <= m.distance && m.distance <= maxDistance) {
		return errors.New("distance out of range")
	}
	if !(1 <= m.n && m.n <= maxMatchLen) {
		return errors.New("length out of range")
	}
	return nil
}

// l returns the l-value for the match, which is the difference of length
// n and 2.
func (m match) l() uint32 {
	return uint32(m.n - minMatchLen)
}

// dist returns the dist value for the match, which is one less than the
// distance stored in the match.
func (m match) dist() uint32 {
	return uint32(m.distance - minDistance)
}

// Len returns the number of bytes matched.
func (m match) Len() int {
	return m.n
}

// String returns a string representation for the repetition.
func (m match) String() string {
	return fmt.Sprintf("M{%d,%d}", m.distance, m.n)
}

// lit represents a single byte literal.
type lit struct {
	b byte
}

// Len returns 1 for the single byte literal.
func (l lit) Len() int {
	return 1
}

// String returns a string representation for the literal.
func (l lit) String() string {
	var c byte
	if unicode.IsPrint(rune(l.b)) {
		c = l.b
	} else {
		c = '.'
	}
	return fmt.Sprintf("L{%c/%02x}", c, l.b)
}
53
vendor/github.com/ulikunitz/xz/lzma/prob.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

// movebits defines the number of bits used for the updates of probability
// values.
const movebits = 5

// probbits defines the number of bits of a probability value.
const probbits = 11

// probInit defines 0.5 as initial value for prob values.
const probInit prob = 1 << (probbits - 1)

// Type prob represents probabilities. The type can also be used to encode and
// decode single bits.
type prob uint16

// dec decreases the probability. The decrease is proportional to the
// probability value.
func (p *prob) dec() {
	*p -= *p >> movebits
}

// inc increases the probability. The increase is proportional to the
// difference of 1 and the probability value.
func (p *prob) inc() {
	*p += ((1 << probbits) - *p) >> movebits
}

// bound computes the new bound for a given range using the probability value.
func (p prob) bound(r uint32) uint32 {
	return (r >> probbits) * uint32(p)
}

// Bits returns 1. One is the number of bits that can be encoded or decoded
// with a single prob value.
func (p prob) Bits() int {
	return 1
}

// Encode encodes the least-significant bit of v. Note that the p value will be
// changed.
func (p *prob) Encode(e *rangeEncoder, v uint32) error {
	return e.EncodeBit(v, p)
}

// Decode decodes a single bit. Note that the p value will change.
func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) {
	return d.DecodeBit(p)
}
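A short sketch of how the adaptive updates in dec and inc behave: with movebits 5 each step moves the probability 1/32 of the way toward 0 or toward 1<<probbits (2048), so a run of identical bits makes the prediction converge quickly. Standalone, with the constants copied for illustration:

    package main

    import "fmt"

    func main() {
        const movebits, probbits = 5, 11
        p := uint16(1 << (probbits - 1)) // probInit = 1024, i.e. 0.5
        for i := 0; i < 5; i++ {
            p += ((1 << probbits) - p) >> movebits // inc: move toward 2048
            fmt.Println(p)
        }
        // 1056 1087 1117 1146 1174 -- drifting toward certainty for bit 0
    }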
69
vendor/github.com/ulikunitz/xz/lzma/properties.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
)

// maximum and minimum values for the LZMA properties.
const (
	minPB = 0
	maxPB = 4
)

// maxPropertyCode is the possible maximum of a properties code byte.
const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1

// Properties contains the parameters LC, LP and PB. The parameter LC
// defines the number of literal context bits; parameter LP the number
// of literal position bits and PB the number of position bits.
type Properties struct {
	LC int
	LP int
	PB int
}

// String returns the properties in a string representation.
func (p *Properties) String() string {
	return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB)
}

// PropertiesForCode converts a properties code byte into a Properties value.
func PropertiesForCode(code byte) (p Properties, err error) {
	if code > maxPropertyCode {
		return p, errors.New("lzma: invalid properties code")
	}
	p.LC = int(code % 9)
	code /= 9
	p.LP = int(code % 5)
	code /= 5
	p.PB = int(code % 5)
	return p, err
}

// verify checks the properties for correctness.
func (p *Properties) verify() error {
	if p == nil {
		return errors.New("lzma: properties are nil")
	}
	if !(minLC <= p.LC && p.LC <= maxLC) {
		return errors.New("lzma: lc out of range")
	}
	if !(minLP <= p.LP && p.LP <= maxLP) {
		return errors.New("lzma: lp out of range")
	}
	if !(minPB <= p.PB && p.PB <= maxPB) {
		return errors.New("lzma: pb out of range")
	}
	return nil
}

// Code converts the properties to a byte. The function assumes that
// the properties components are all in range.
func (p Properties) Code() byte {
	return byte((p.PB*5+p.LP)*9 + p.LC)
}
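A worked round trip for the mixed-radix code computed by Code above (base 9 for LC, base 5 for LP and PB). The common default LC 3, LP 0, PB 2 yields code 0x5d:

    package main

    import "fmt"

    func main() {
        lc, lp, pb := 3, 0, 2
        code := byte((pb*5+lp)*9 + lc)
        fmt.Printf("code %#x\n", code) // code 0x5d

        // decode it again, as PropertiesForCode does
        c := int(code)
        fmt.Println("LC", c%9, "LP", c/9%5, "PB", c/45) // LC 3 LP 0 PB 2
    }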
248
vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"io"
)

// rangeEncoder implements range encoding of single bits. The low value can
// overflow, therefore we need uint64. The cache value is used to handle
// overflows.
type rangeEncoder struct {
	lbw      *LimitedByteWriter
	nrange   uint32
	low      uint64
	cacheLen int64
	cache    byte
}

// maxInt64 provides the maximal value of the int64 type.
const maxInt64 = 1<<63 - 1

// newRangeEncoder creates a new range encoder.
func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) {
	lbw, ok := bw.(*LimitedByteWriter)
	if !ok {
		lbw = &LimitedByteWriter{BW: bw, N: maxInt64}
	}
	return &rangeEncoder{
		lbw:      lbw,
		nrange:   0xffffffff,
		cacheLen: 1}, nil
}

// Available returns the number of bytes that still can be written. The
// method takes the bytes that Close will write into account.
func (e *rangeEncoder) Available() int64 {
	return e.lbw.N - (e.cacheLen + 4)
}

// writeByte writes a single byte to the underlying writer. An error is
// returned if the limit is reached. The written byte will be counted if
// the underlying writer doesn't return an error.
func (e *rangeEncoder) writeByte(c byte) error {
	if e.Available() < 1 {
		return ErrLimit
	}
	return e.lbw.WriteByte(c)
}

// DirectEncodeBit encodes the least-significant bit of b with probability 1/2.
func (e *rangeEncoder) DirectEncodeBit(b uint32) error {
	e.nrange >>= 1
	e.low += uint64(e.nrange) & (0 - (uint64(b) & 1))

	// normalize
	const top = 1 << 24
	if e.nrange >= top {
		return nil
	}
	e.nrange <<= 8
	return e.shiftLow()
}

// EncodeBit encodes the least significant bit of b. The p value will be
// updated by the function depending on the bit encoded.
func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error {
	bound := p.bound(e.nrange)
	if b&1 == 0 {
		e.nrange = bound
		p.inc()
	} else {
		e.low += uint64(bound)
		e.nrange -= bound
		p.dec()
	}

	// normalize
	const top = 1 << 24
	if e.nrange >= top {
		return nil
	}
	e.nrange <<= 8
	return e.shiftLow()
}

// Close writes a complete copy of the low value.
func (e *rangeEncoder) Close() error {
	for i := 0; i < 5; i++ {
		if err := e.shiftLow(); err != nil {
			return err
		}
	}
	return nil
}

// shiftLow shifts the low value by 8 bits. The shifted byte is written into
// the byte writer. The cache value is used to handle overflows.
func (e *rangeEncoder) shiftLow() error {
	if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 {
		tmp := e.cache
		for {
			err := e.writeByte(tmp + byte(e.low>>32))
			if err != nil {
				return err
			}
			tmp = 0xff
			e.cacheLen--
			if e.cacheLen <= 0 {
				if e.cacheLen < 0 {
					panic("negative cacheLen")
				}
				break
			}
		}
		e.cache = byte(uint32(e.low) >> 24)
	}
	e.cacheLen++
	e.low = uint64(uint32(e.low) << 8)
	return nil
}

// rangeDecoder decodes single bits of the range encoding stream.
type rangeDecoder struct {
	br     io.ByteReader
	nrange uint32
	code   uint32
}

// init initializes the range decoder by reading from the byte reader.
func (d *rangeDecoder) init() error {
	d.nrange = 0xffffffff
	d.code = 0

	b, err := d.br.ReadByte()
	if err != nil {
		return err
	}
	if b != 0 {
		return errors.New("newRangeDecoder: first byte not zero")
	}

	for i := 0; i < 4; i++ {
		if err = d.updateCode(); err != nil {
			return err
		}
	}

	if d.code >= d.nrange {
		return errors.New("newRangeDecoder: d.code >= d.nrange")
	}

	return nil
}

// newRangeDecoder initializes a range decoder. It reads five bytes from the
// reader and therefore may return an error.
func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) {
	d = &rangeDecoder{br: br, nrange: 0xffffffff}

	b, err := d.br.ReadByte()
	if err != nil {
		return nil, err
	}
	if b != 0 {
		return nil, errors.New("newRangeDecoder: first byte not zero")
	}

	for i := 0; i < 4; i++ {
		if err = d.updateCode(); err != nil {
			return nil, err
		}
	}

	if d.code >= d.nrange {
		return nil, errors.New("newRangeDecoder: d.code >= d.nrange")
	}

	return d, nil
}

// possiblyAtEnd checks whether the decoder may be at the end of the stream.
func (d *rangeDecoder) possiblyAtEnd() bool {
	return d.code == 0
}

// DirectDecodeBit decodes a bit with probability 1/2. The return value b will
// contain the bit at the least-significant position. All other bits will be
// zero.
func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) {
	d.nrange >>= 1
	d.code -= d.nrange
	t := 0 - (d.code >> 31)
	d.code += d.nrange & t
	b = (t + 1) & 1

	// d.code will stay less than d.nrange

	// normalize
	// assume d.code < d.nrange
	const top = 1 << 24
	if d.nrange >= top {
		return b, nil
	}
	d.nrange <<= 8
	// d.code < d.nrange will be maintained
	return b, d.updateCode()
}

// DecodeBit decodes a single bit. The bit will be returned at the
// least-significant position. All other bits will be zero. The probability
// value will be updated.
func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) {
	bound := p.bound(d.nrange)
	if d.code < bound {
		d.nrange = bound
		p.inc()
		b = 0
	} else {
		d.code -= bound
		d.nrange -= bound
		p.dec()
		b = 1
	}
	// normalize
	// assume d.code < d.nrange
	const top = 1 << 24
	if d.nrange >= top {
		return b, nil
	}
	d.nrange <<= 8
	// d.code < d.nrange will be maintained
	return b, d.updateCode()
}

// updateCode reads a new byte into the code.
func (d *rangeDecoder) updateCode() error {
	b, err := d.br.ReadByte()
	if err != nil {
		return err
	}
	d.code = (d.code << 8) | uint32(b)
	return nil
}
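The heart of the bit coder above is the bound computation in prob.bound: the current range is split in proportion to the probability, and encoder and decoder then select one of the two sub-ranges. A numeric sketch with the initial range and probability values:

    package main

    import "fmt"

    func main() {
        var nrange uint32 = 0xffffffff // initial range
        var p uint32 = 1 << 10         // probInit = 1024 of 2048, i.e. 0.5

        bound := (nrange >> 11) * p // same formula as prob.bound
        fmt.Printf("bound %#x\n", bound)
        // bound 0x7ffffc00 -- roughly half the range, as expected for p = 0.5
    }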
100
vendor/github.com/ulikunitz/xz/lzma/reader.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package lzma supports the decoding and encoding of LZMA streams.
// Reader and Writer support the classic LZMA format. Reader2 and
// Writer2 support the decoding and encoding of LZMA2 streams.
//
// The package is written completely in Go and doesn't rely on any external
// library.
package lzma

import (
	"errors"
	"io"
)

// ReaderConfig stores the parameters for the reader of the classic LZMA
// format.
type ReaderConfig struct {
	DictCap int
}

// fill converts the zero values of the configuration to the default values.
func (c *ReaderConfig) fill() {
	if c.DictCap == 0 {
		c.DictCap = 8 * 1024 * 1024
	}
}

// Verify checks the reader configuration for errors. Zero values will
// be replaced by default values.
func (c *ReaderConfig) Verify() error {
	c.fill()
	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
		return errors.New("lzma: dictionary capacity is out of range")
	}
	return nil
}

// Reader provides a reader for LZMA files or streams.
type Reader struct {
	lzma io.Reader
	h    header
	d    *decoder
}

// NewReader creates a new reader for an LZMA stream using the classic
// format. NewReader reads and checks the header of the LZMA stream.
func NewReader(lzma io.Reader) (r *Reader, err error) {
	return ReaderConfig{}.NewReader(lzma)
}

// NewReader creates a new reader for an LZMA stream in the classic
// format. The function reads and verifies the header of the LZMA
// stream.
func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	data := make([]byte, HeaderLen)
	if _, err := io.ReadFull(lzma, data); err != nil {
		if err == io.EOF {
			return nil, errors.New("lzma: unexpected EOF")
		}
		return nil, err
	}
	r = &Reader{lzma: lzma}
	if err = r.h.unmarshalBinary(data); err != nil {
		return nil, err
	}
	if r.h.dictCap < MinDictCap {
		return nil, errors.New("lzma: dictionary capacity too small")
	}
	dictCap := r.h.dictCap
	if c.DictCap > dictCap {
		dictCap = c.DictCap
	}

	state := newState(r.h.properties)
	dict, err := newDecoderDict(dictCap)
	if err != nil {
		return nil, err
	}
	r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size)
	if err != nil {
		return nil, err
	}
	return r, nil
}

// EOSMarker indicates that an EOS marker has been encountered.
func (r *Reader) EOSMarker() bool {
	return r.d.eosMarker
}

// Read returns uncompressed data.
func (r *Reader) Read(p []byte) (n int, err error) {
	return r.d.Read(p)
}
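A hedged usage sketch for the classic-format reader defined above; the input file name is a placeholder and error handling is kept minimal:

    package main

    import (
        "io"
        "log"
        "os"

        "github.com/ulikunitz/xz/lzma"
    )

    func main() {
        f, err := os.Open("a.lzma") // placeholder input file
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        r, err := lzma.NewReader(f)
        if err != nil {
            log.Fatal(err)
        }
        // copy the decompressed stream to stdout
        if _, err = io.Copy(os.Stdout, r); err != nil {
            log.Fatal(err)
        }
    }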
232
vendor/github.com/ulikunitz/xz/lzma/reader2.go
generated
vendored
Normal file
@ -0,0 +1,232 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lzma
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/ulikunitz/xz/internal/xlog"
|
||||
)
|
||||
|
||||
// Reader2Config stores the parameters for the LZMA2 reader.
|
||||
// format.
|
||||
type Reader2Config struct {
|
||||
DictCap int
|
||||
}
|
||||
|
||||
// fill converts the zero values of the configuration to the default values.
|
||||
func (c *Reader2Config) fill() {
|
||||
if c.DictCap == 0 {
|
||||
c.DictCap = 8 * 1024 * 1024
|
||||
}
|
||||
}
|
||||
|
||||
// Verify checks the reader configuration for errors. Zero configuration values
|
||||
// will be replaced by default values.
|
||||
func (c *Reader2Config) Verify() error {
|
||||
c.fill()
|
||||
if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
|
||||
return errors.New("lzma: dictionary capacity is out of range")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reader2 supports the reading of LZMA2 chunk sequences. Note that the
|
||||
// first chunk should have a dictionary reset and the first compressed
|
||||
// chunk a properties reset. The chunk sequence may not be terminated by
|
||||
// an end-of-stream chunk.
|
||||
type Reader2 struct {
|
||||
r io.Reader
|
||||
err error
|
||||
|
||||
dict *decoderDict
|
||||
ur *uncompressedReader
|
||||
decoder *decoder
|
||||
chunkReader io.Reader
|
||||
|
||||
cstate chunkState
|
||||
ctype chunkType
|
||||
}
|
||||
|
||||
// NewReader2 creates a reader for an LZMA2 chunk sequence.
|
||||
func NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
|
||||
return Reader2Config{}.NewReader2(lzma2)
|
||||
}
|
||||
|
||||
// NewReader2 creates an LZMA2 reader using the given configuration.
|
||||
func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
|
||||
if err = c.Verify(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r = &Reader2{r: lzma2, cstate: start}
|
||||
r.dict, err = newDecoderDict(c.DictCap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = r.startChunk(); err != nil {
|
||||
r.err = err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// uncompressed tests whether the chunk type specifies an uncompressed
|
||||
// chunk.
|
||||
func uncompressed(ctype chunkType) bool {
|
||||
return ctype == cU || ctype == cUD
|
||||
}
|
||||
|
||||
// startChunk parses a new chunk.
|
||||
func (r *Reader2) startChunk() error {
|
||||
r.chunkReader = nil
|
||||
header, err := readChunkHeader(r.r)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
xlog.Debugf("chunk header %v", header)
|
||||
if err = r.cstate.next(header.ctype); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.cstate == stop {
|
||||
return io.EOF
|
||||
}
|
||||
if header.ctype == cUD || header.ctype == cLRND {
|
||||
r.dict.Reset()
|
||||
}
|
||||
size := int64(header.uncompressed) + 1
|
||||
if uncompressed(header.ctype) {
|
||||
if r.ur != nil {
|
||||
r.ur.Reopen(r.r, size)
|
||||
} else {
|
||||
r.ur = newUncompressedReader(r.r, r.dict, size)
|
||||
}
|
||||
r.chunkReader = r.ur
|
||||
return nil
|
||||
}
|
||||
br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1))
|
||||
if r.decoder == nil {
|
||||
state := newState(header.props)
|
||||
r.decoder, err = newDecoder(br, state, r.dict, size)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.chunkReader = r.decoder
|
||||
return nil
|
||||
}
|
||||
switch header.ctype {
|
||||
case cLR:
|
||||
r.decoder.State.Reset()
|
||||
case cLRN, cLRND:
|
||||
r.decoder.State = newState(header.props)
|
||||
}
|
||||
err = r.decoder.Reopen(br, size)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.chunkReader = r.decoder
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read reads data from the LZMA2 chunk sequence.
|
||||
func (r *Reader2) Read(p []byte) (n int, err error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
for n < len(p) {
|
||||
var k int
|
||||
k, err = r.chunkReader.Read(p[n:])
|
||||
n += k
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = r.startChunk()
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
r.err = err
|
||||
return n, err
|
||||
}
|
||||
if k == 0 {
|
||||
r.err = errors.New("lzma: Reader2 doesn't get data")
|
||||
return n, r.err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// EOS returns whether the LZMA2 stream has been terminated by an
|
||||
// end-of-stream chunk.
|
||||
func (r *Reader2) EOS() bool {
|
||||
return r.cstate == stop
|
||||
}
|
||||
|
||||
// uncompressedReader is used to read uncompressed chunks.
|
||||
type uncompressedReader struct {
|
||||
lr io.LimitedReader
|
||||
Dict *decoderDict
|
||||
eof bool
|
||||
err error
|
||||
}
|
||||
|
||||
// newUncompressedReader initializes a new uncompressedReader.
|
||||
func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader {
|
||||
ur := &uncompressedReader{
|
||||
lr: io.LimitedReader{R: r, N: size},
|
||||
Dict: dict,
|
||||
}
|
||||
return ur
|
||||
}
|
||||
|
||||
// Reopen reinitializes an uncompressed reader.
|
||||
func (ur *uncompressedReader) Reopen(r io.Reader, size int64) {
|
||||
ur.err = nil
|
||||
ur.eof = false
|
||||
ur.lr = io.LimitedReader{R: r, N: size}
|
||||
}
|
||||
|
||||
// fill reads uncompressed data into the dictionary.
|
||||
func (ur *uncompressedReader) fill() error {
|
||||
if !ur.eof {
|
||||
n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available()))
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
ur.eof = true
|
||||
if n > 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if ur.lr.N != 0 {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
// Read reads uncompressed data from the limited reader.
|
||||
func (ur *uncompressedReader) Read(p []byte) (n int, err error) {
|
||||
if ur.err != nil {
|
||||
return 0, ur.err
|
||||
}
|
||||
for {
|
||||
var k int
|
||||
k, err = ur.Dict.Read(p[n:])
|
||||
n += k
|
||||
if n >= len(p) {
|
||||
return n, nil
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = ur.fill()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
ur.err = err
|
||||
return n, err
|
||||
}
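
// A minimal usage sketch for the Reader2 API above (the file name and
// dictionary capacity are illustrative assumptions, not part of this
// package): decode a raw LZMA2 chunk sequence from a file.
//
//	f, err := os.Open("chunks.lzma2") // assumed input file
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	r, err := lzma.Reader2Config{DictCap: 1 << 20}.NewReader2(bufio.NewReader(f))
//	if err != nil {
//		log.Fatal(err)
//	}
//	if _, err = io.Copy(os.Stdout, r); err != nil {
//		log.Fatal(err)
//	}
//	// r.EOS() reports whether an end-of-stream chunk terminated the data.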
312
vendor/github.com/ulikunitz/xz/lzma/reader_test.go
generated
vendored
Normal file
@ -0,0 +1,312 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bufio"
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"testing"
	"testing/iotest"
)

func TestNewReader(t *testing.T) {
	f, err := os.Open("examples/a.lzma")
	if err != nil {
		t.Fatalf("open examples/a.lzma: %s", err)
	}
	defer f.Close()
	_, err = NewReader(bufio.NewReader(f))
	if err != nil {
		t.Fatalf("NewReader: %s", err)
	}
}

const (
	dirname  = "examples"
	origname = "a.txt"
)

func readOrigFile(t *testing.T) []byte {
	orig, err := ioutil.ReadFile(filepath.Join(dirname, origname))
	if err != nil {
		t.Fatalf("ReadFile: %s", err)
	}
	return orig
}

func testDecodeFile(t *testing.T, filename string, orig []byte) {
	pathname := filepath.Join(dirname, filename)
	f, err := os.Open(pathname)
	if err != nil {
		t.Fatalf("Open(%q): %s", pathname, err)
	}
	defer func() {
		if err = f.Close(); err != nil {
			t.Fatalf("f.Close() error %s", err)
		}
	}()
	t.Logf("file %s opened", filename)
	l, err := NewReader(bufio.NewReader(f))
	if err != nil {
		t.Fatalf("NewReader: %s", err)
	}
	decoded, err := ioutil.ReadAll(l)
	if err != nil {
		t.Fatalf("ReadAll: %s", err)
	}
	t.Logf("%s", decoded)
	if len(orig) != len(decoded) {
		t.Fatalf("length decoded is %d; want %d",
			len(decoded), len(orig))
	}
	if !bytes.Equal(orig, decoded) {
		t.Fatalf("decoded file differs from original")
	}
}

func TestReaderSimple(t *testing.T) {
	// DebugOn(os.Stderr)
	// defer DebugOff()

	testDecodeFile(t, "a.lzma", readOrigFile(t))
}

func TestReaderAll(t *testing.T) {
	dirname := "examples"
	dir, err := os.Open(dirname)
	if err != nil {
		t.Fatalf("Open: %s", err)
	}
	defer func() {
		if err := dir.Close(); err != nil {
			t.Fatalf("dir.Close() error %s", err)
		}
	}()
	all, err := dir.Readdirnames(0)
	if err != nil {
		t.Fatalf("Readdirnames: %s", err)
	}
	// filter all files matching the pattern "a*.lzma"
	files := make([]string, 0, len(all))
	for _, fn := range all {
		match, err := filepath.Match("a*.lzma", fn)
		if err != nil {
			t.Fatalf("Match: %s", err)
		}
		if match {
			files = append(files, fn)
		}
	}
	t.Log("files:", files)
	orig := readOrigFile(t)
	// actually test the files
	for _, fn := range files {
		testDecodeFile(t, fn, orig)
	}
}

func Example_reader() {
	f, err := os.Open("fox.lzma")
	if err != nil {
		log.Fatal(err)
	}
	// no need for defer; Fatal calls os.Exit(1), which doesn't execute deferred functions
	r, err := NewReader(bufio.NewReader(f))
	if err != nil {
		log.Fatal(err)
	}
	_, err = io.Copy(os.Stdout, r)
	if err != nil {
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
	// Output:
	// The quick brown fox jumps over the lazy dog.
}

type wrapTest struct {
	name string
	wrap func(io.Reader) io.Reader
}

func (w *wrapTest) testFile(t *testing.T, filename string, orig []byte) {
	pathname := filepath.Join(dirname, filename)
	f, err := os.Open(pathname)
	if err != nil {
		t.Fatalf("Open(\"%s\"): %s", pathname, err)
	}
	defer func() {
		if err := f.Close(); err != nil {
			log.Fatal(err)
		}
	}()
	t.Logf("%s file %s opened", w.name, filename)
	l, err := NewReader(w.wrap(f))
	if err != nil {
		t.Fatalf("%s NewReader: %s", w.name, err)
	}
	decoded, err := ioutil.ReadAll(l)
	if err != nil {
		t.Fatalf("%s ReadAll: %s", w.name, err)
	}
	t.Logf("%s", decoded)
	if len(orig) != len(decoded) {
		t.Fatalf("%s length decoded is %d; want %d",
			w.name, len(decoded), len(orig))
	}
	if !bytes.Equal(orig, decoded) {
		t.Fatalf("%s decoded file differs from original", w.name)
	}
}

func TestReaderWrap(t *testing.T) {
	tests := [...]wrapTest{
		{"DataErrReader", iotest.DataErrReader},
		{"HalfReader", iotest.HalfReader},
		{"OneByteReader", iotest.OneByteReader},
		// TimeOutReader would require a buffer
	}
	orig := readOrigFile(t)
	for _, tst := range tests {
		tst.testFile(t, "a.lzma", orig)
	}
}

func TestReaderBadFiles(t *testing.T) {
	dirname := "examples"
	dir, err := os.Open(dirname)
	if err != nil {
		t.Fatalf("Open: %s", err)
	}
	defer func() {
		if err := dir.Close(); err != nil {
			t.Fatalf("dir.Close() error %s", err)
		}
	}()
	all, err := dir.Readdirnames(0)
	if err != nil {
		t.Fatalf("Readdirnames: %s", err)
	}
	// filter all files matching the pattern "bad*.lzma"
	files := make([]string, 0, len(all))
	for _, fn := range all {
		match, err := filepath.Match("bad*.lzma", fn)
		if err != nil {
			t.Fatalf("Match: %s", err)
		}
		if match {
			files = append(files, fn)
		}
	}
	t.Log("files:", files)
	for _, filename := range files {
		pathname := filepath.Join(dirname, filename)
		f, err := os.Open(pathname)
		if err != nil {
			t.Fatalf("Open(\"%s\"): %s", pathname, err)
		}
		defer func(f *os.File) {
			if err := f.Close(); err != nil {
				t.Fatalf("f.Close() error %s", err)
			}
		}(f)
		t.Logf("file %s opened", filename)
		l, err := NewReader(f)
		if err != nil {
			t.Fatalf("NewReader: %s", err)
		}
		decoded, err := ioutil.ReadAll(l)
		if err == nil {
			t.Errorf("ReadAll for %s: no error", filename)
			t.Logf("%s", decoded)
			continue
		}
		t.Logf("%s: error %s", filename, err)
	}
}

type repReader byte

func (r repReader) Read(p []byte) (n int, err error) {
	for i := range p {
		p[i] = byte(r)
	}
	return len(p), nil
}

func newRepReader(c byte, n int64) *io.LimitedReader {
	return &io.LimitedReader{R: repReader(c), N: n}
}

func newCodeReader(r io.Reader) *io.PipeReader {
	pr, pw := io.Pipe()
	go func() {
		bw := bufio.NewWriter(pw)
		lw, err := NewWriter(bw)
		if err != nil {
			log.Fatalf("NewWriter error %s", err)
		}
		if _, err = io.Copy(lw, r); err != nil {
			log.Fatalf("io.Copy error %s", err)
		}
		if err = lw.Close(); err != nil {
			log.Fatalf("lw.Close error %s", err)
		}
		if err = bw.Flush(); err != nil {
			log.Fatalf("bw.Flush() error %s", err)
		}
		if err = pw.CloseWithError(io.EOF); err != nil {
			log.Fatalf("pw.CloseWithError(io.EOF) error %s", err)
		}
	}()
	return pr
}

func TestReaderErrAgain(t *testing.T) {
	lengths := []int64{0, 128, 1024, 4095, 4096, 4097, 8191, 8192, 8193}
	buf := make([]byte, 128)
	const c = 'A'
	for _, n := range lengths {
		t.Logf("n: %d", n)
		pr := newCodeReader(newRepReader(c, n))
		r, err := NewReader(pr)
		if err != nil {
			t.Fatalf("NewReader(pr) error %s", err)
		}
		k := int64(0)
		for {
			m, err := r.Read(buf)
			k += int64(m)
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Errorf("r.Read(buf) error %s", err)
				break
			}
			if m > len(buf) {
				t.Fatalf("r.Read(buf) %d; want <= %d", m,
					len(buf))
			}
			for i, b := range buf[:m] {
				if b != c {
					t.Fatalf("buf[%d]=%c; want %c", i, b,
						c)
				}
			}
		}
		if k != n {
			t.Errorf("Read %d bytes; want %d", k, n)
		}
	}
}
151
vendor/github.com/ulikunitz/xz/lzma/state.go
generated
vendored
Normal file
@ -0,0 +1,151 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

// states defines the overall state count
const states = 12

// state maintains the full state of the encoding or decoding
// process.
type state struct {
	rep         [4]uint32
	isMatch     [states << maxPosBits]prob
	isRepG0Long [states << maxPosBits]prob
	isRep       [states]prob
	isRepG0     [states]prob
	isRepG1     [states]prob
	isRepG2     [states]prob
	litCodec    literalCodec
	lenCodec    lengthCodec
	repLenCodec lengthCodec
	distCodec   distCodec
	state       uint32
	posBitMask  uint32
	Properties  Properties
}

// initProbSlice initializes a slice of probabilities.
func initProbSlice(p []prob) {
	for i := range p {
		p[i] = probInit
	}
}

// Reset sets all state information to the original values.
func (s *state) Reset() {
	p := s.Properties
	*s = state{
		Properties: p,
		// dict: s.dict,
		posBitMask: (uint32(1) << uint(p.PB)) - 1,
	}
	initProbSlice(s.isMatch[:])
	initProbSlice(s.isRep[:])
	initProbSlice(s.isRepG0[:])
	initProbSlice(s.isRepG1[:])
	initProbSlice(s.isRepG2[:])
	initProbSlice(s.isRepG0Long[:])
	s.litCodec.init(p.LC, p.LP)
	s.lenCodec.init()
	s.repLenCodec.init()
	s.distCodec.init()
}

// initState initializes the state.
func initState(s *state, p Properties) {
	*s = state{Properties: p}
	s.Reset()
}

// newState creates a new state from the given Properties.
func newState(p Properties) *state {
	s := &state{Properties: p}
	s.Reset()
	return s
}

// deepcopy initializes s as a deep copy of the source.
func (s *state) deepcopy(src *state) {
	if s == src {
		return
	}
	s.rep = src.rep
	s.isMatch = src.isMatch
	s.isRepG0Long = src.isRepG0Long
	s.isRep = src.isRep
	s.isRepG0 = src.isRepG0
	s.isRepG1 = src.isRepG1
	s.isRepG2 = src.isRepG2
	s.litCodec.deepcopy(&src.litCodec)
	s.lenCodec.deepcopy(&src.lenCodec)
	s.repLenCodec.deepcopy(&src.repLenCodec)
	s.distCodec.deepcopy(&src.distCodec)
	s.state = src.state
	s.posBitMask = src.posBitMask
	s.Properties = src.Properties
}

// cloneState creates a new clone of the given state.
func cloneState(src *state) *state {
	s := new(state)
	s.deepcopy(src)
	return s
}

// updateStateLiteral updates the state for a literal.
func (s *state) updateStateLiteral() {
	switch {
	case s.state < 4:
		s.state = 0
		return
	case s.state < 10:
		s.state -= 3
		return
	}
	s.state -= 6
}

// updateStateMatch updates the state for a match.
func (s *state) updateStateMatch() {
	if s.state < 7 {
		s.state = 7
	} else {
		s.state = 10
	}
}

// updateStateRep updates the state for a repetition.
func (s *state) updateStateRep() {
	if s.state < 7 {
		s.state = 8
	} else {
		s.state = 11
	}
}

// updateStateShortRep updates the state for a short repetition.
func (s *state) updateStateShortRep() {
	if s.state < 7 {
		s.state = 9
	} else {
		s.state = 11
	}
}

// states computes the states of the operation codec.
func (s *state) states(dictHead int64) (state1, state2, posState uint32) {
	state1 = s.state
	posState = uint32(dictHead) & s.posBitMask
	state2 = (s.state << maxPosBits) | posState
	return
}

// litState computes the literal state.
func (s *state) litState(prev byte, dictHead int64) uint32 {
	lp, lc := uint(s.Properties.LP), uint(s.Properties.LC)
	litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) |
		(uint32(prev) >> (8 - lc))
	return litState
}
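
// Worked example for litState (a sketch; the values are chosen only for
// illustration): with the default properties LC=3, LP=0 the position part
// of the formula vanishes, and the literal state reduces to the top LC
// bits of the previous byte.
//
//	s := newState(Properties{LC: 3, LP: 0, PB: 2})
//	// prev = 'A' = 0x41 = 0b01000001, so prev >> (8-3) = 0b010 = 2
//	_ = s.litState('A', 17) // == 2 for any dictHead when LP == 0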
133
vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

// treeCodec encodes or decodes values with a fixed bit size. It uses a
// tree of probability values. The root of the tree is the most-significant bit.
type treeCodec struct {
	probTree
}

// makeTreeCodec makes a tree codec. The bits value must be inside the range
// [1,32].
func makeTreeCodec(bits int) treeCodec {
	return treeCodec{makeProbTree(bits)}
}

// deepcopy initializes tc as a deep copy of the source.
func (tc *treeCodec) deepcopy(src *treeCodec) {
	tc.probTree.deepcopy(&src.probTree)
}

// Encode uses the range encoder to encode a fixed-bit-size value.
func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
	m := uint32(1)
	for i := int(tc.bits) - 1; i >= 0; i-- {
		b := (v >> uint(i)) & 1
		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
			return err
		}
		m = (m << 1) | b
	}
	return nil
}

// Decode uses the range decoder to decode a fixed-bit-size value. Errors may
// be caused by the range decoder.
func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
	m := uint32(1)
	for j := 0; j < int(tc.bits); j++ {
		b, err := d.DecodeBit(&tc.probs[m])
		if err != nil {
			return 0, err
		}
		m = (m << 1) | b
	}
	return m - (1 << uint(tc.bits)), nil
}

// treeReverseCodec is another tree codec, where the least-significant bit is
// the start of the probability tree.
type treeReverseCodec struct {
	probTree
}

// deepcopy initializes the treeReverseCodec as a deep copy of the
// source.
func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
	tc.probTree.deepcopy(&src.probTree)
}

// makeTreeReverseCodec creates a treeReverseCodec value. The bits argument must
// be in the range [1,32].
func makeTreeReverseCodec(bits int) treeReverseCodec {
	return treeReverseCodec{makeProbTree(bits)}
}

// Encode uses the range encoder to encode a fixed-bit-size value. The range
// encoder may cause errors.
func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
	m := uint32(1)
	for i := uint(0); i < uint(tc.bits); i++ {
		b := (v >> i) & 1
		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
			return err
		}
		m = (m << 1) | b
	}
	return nil
}

// Decode uses the range decoder to decode a fixed-bit-size value. Errors
// returned by the range decoder will be returned.
func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
	m := uint32(1)
	for j := uint(0); j < uint(tc.bits); j++ {
		b, err := d.DecodeBit(&tc.probs[m])
		if err != nil {
			return 0, err
		}
		m = (m << 1) | b
		v |= b << j
	}
	return v, nil
}

// probTree stores enough probability values to be used by the treeEncode and
// treeDecode methods of the range coder types.
type probTree struct {
	probs []prob
	bits  byte
}

// deepcopy initializes the probTree value as a deep copy of the source.
func (t *probTree) deepcopy(src *probTree) {
	if t == src {
		return
	}
	t.probs = make([]prob, len(src.probs))
	copy(t.probs, src.probs)
	t.bits = src.bits
}

// makeProbTree initializes a probTree structure.
func makeProbTree(bits int) probTree {
	if !(1 <= bits && bits <= 32) {
		panic("bits outside of range [1,32]")
	}
	t := probTree{
		bits:  byte(bits),
		probs: make([]prob, 1<<uint(bits)),
	}
	for i := range t.probs {
		t.probs[i] = probInit
	}
	return t
}

// Bits provides the number of bits for the values to de- or encode.
func (t *probTree) Bits() int {
	return int(t.bits)
}
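
// Bit-order illustration (a sketch; the value is illustrative): for bits=3
// and v=6 (0b110), treeCodec.Encode walks the value most-significant bit
// first (1, 1, 0), while treeReverseCodec.Encode walks it least-significant
// bit first (0, 1, 1). Both traverse the same kind of probability tree;
// note the two Encode methods also take their arguments in different order.
//
//	tc := makeTreeCodec(3)        // allocates a probTree with 1<<3 probs
//	rc := makeTreeReverseCodec(3) // same size, reversed bit traversal
//	// tc.Encode(e, 6) emits bits 1,1,0; rc.Encode(6, e) emits 0,1,1.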
209
vendor/github.com/ulikunitz/xz/lzma/writer.go
generated
vendored
Normal file
@ -0,0 +1,209 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bufio"
	"errors"
	"io"
)

// MinDictCap and MaxDictCap provide the range of supported dictionary
// capacities.
const (
	MinDictCap = 1 << 12
	MaxDictCap = 1<<32 - 1
)

// WriterConfig defines the configuration parameters for a writer.
type WriterConfig struct {
	// Properties for the encoding. If it is nil the value
	// {LC: 3, LP: 0, PB: 2} will be chosen.
	Properties *Properties
	// The capacity of the dictionary. If DictCap is zero, the value
	// 8 MiB will be chosen.
	DictCap int
	// Size of the lookahead buffer; value 0 indicates default size
	// 4096
	BufSize int
	// Match algorithm
	Matcher MatchAlgorithm
	// SizeInHeader indicates that the header will contain an
	// explicit size.
	SizeInHeader bool
	// Size of the data to be encoded. A positive value implies
	// that an explicit size will be set in the header.
	Size int64
	// EOSMarker requests whether the EOSMarker needs to be written.
	// If no explicit size has been given the EOSMarker will be
	// set automatically.
	EOSMarker bool
}

// fill converts zero-value fields to their explicit default values.
func (c *WriterConfig) fill() {
	if c.Properties == nil {
		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
	}
	if c.DictCap == 0 {
		c.DictCap = 8 * 1024 * 1024
	}
	if c.BufSize == 0 {
		c.BufSize = 4096
	}
	if c.Size > 0 {
		c.SizeInHeader = true
	}
	if !c.SizeInHeader {
		c.EOSMarker = true
	}
}

// Verify checks WriterConfig for errors. Verify will replace zero
// values with default values.
func (c *WriterConfig) Verify() error {
	if c == nil {
		return errors.New("lzma: WriterConfig is nil")
	}
	c.fill()
	var err error
	if c.Properties == nil {
		return errors.New("lzma: WriterConfig has no Properties set")
	}
	if err = c.Properties.verify(); err != nil {
		return err
	}
	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
		return errors.New("lzma: dictionary capacity is out of range")
	}
	if !(maxMatchLen <= c.BufSize) {
		return errors.New("lzma: lookahead buffer size too small")
	}
	if c.SizeInHeader {
		if c.Size < 0 {
			return errors.New("lzma: negative size not supported")
		}
	} else if !c.EOSMarker {
		return errors.New("lzma: EOS marker is required")
	}
	if err = c.Matcher.verify(); err != nil {
		return err
	}

	return nil
}

// header returns the header structure for this configuration.
func (c *WriterConfig) header() header {
	h := header{
		properties: *c.Properties,
		dictCap:    c.DictCap,
		size:       -1,
	}
	if c.SizeInHeader {
		h.size = c.Size
	}
	return h
}

// Writer writes an LZMA stream in the classic format.
type Writer struct {
	h   header
	bw  io.ByteWriter
	buf *bufio.Writer
	e   *encoder
}

// NewWriter creates a new LZMA writer for the classic format. The
// method will write the header to the underlying stream.
func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	w = &Writer{h: c.header()}

	var ok bool
	w.bw, ok = lzma.(io.ByteWriter)
	if !ok {
		w.buf = bufio.NewWriter(lzma)
		w.bw = w.buf
	}
	state := newState(w.h.properties)
	m, err := c.Matcher.new(w.h.dictCap)
	if err != nil {
		return nil, err
	}
	dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
	if err != nil {
		return nil, err
	}
	var flags encoderFlags
	if c.EOSMarker {
		flags = eosMarker
	}
	if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
		return nil, err
	}

	if err = w.writeHeader(); err != nil {
		return nil, err
	}
	return w, nil
}

// NewWriter creates a new LZMA writer using the classic format. The
// function writes the header to the underlying stream.
func NewWriter(lzma io.Writer) (w *Writer, err error) {
	return WriterConfig{}.NewWriter(lzma)
}

// writeHeader writes the LZMA header into the stream.
func (w *Writer) writeHeader() error {
	data, err := w.h.marshalBinary()
	if err != nil {
		return err
	}
	_, err = w.bw.(io.Writer).Write(data)
	return err
}

// Write puts data into the Writer.
func (w *Writer) Write(p []byte) (n int, err error) {
	if w.h.size >= 0 {
		m := w.h.size
		m -= w.e.Compressed() + int64(w.e.dict.Buffered())
		if m < 0 {
			m = 0
		}
		if m < int64(len(p)) {
			p = p[:m]
			err = ErrNoSpace
		}
	}
	var werr error
	if n, werr = w.e.Write(p); werr != nil {
		err = werr
	}
	return n, err
}

// Close closes the writer stream. It ensures that all data from the
// buffer will be compressed and the LZMA stream will be finished.
func (w *Writer) Close() error {
	if w.h.size >= 0 {
		n := w.e.Compressed() + int64(w.e.dict.Buffered())
		if n != w.h.size {
			return errSize
		}
	}
	err := w.e.Close()
	if w.buf != nil {
		ferr := w.buf.Flush()
		if err == nil {
			err = ferr
		}
	}
	return err
}
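
// Usage sketch for a size-prefixed classic stream (the buffer and payload
// are illustrative): with Size set, fill enables SizeInHeader, no EOS
// marker is written, and Close fails with errSize unless exactly Size
// bytes have been written.
//
//	var buf bytes.Buffer
//	w, err := lzma.WriterConfig{Size: 5}.NewWriter(&buf)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if _, err = w.Write([]byte("hello")); err != nil { // exactly 5 bytes
//		log.Fatal(err)
//	}
//	if err = w.Close(); err != nil {
//		log.Fatal(err)
//	}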
305
vendor/github.com/ulikunitz/xz/lzma/writer2.go
generated
vendored
Normal file
@ -0,0 +1,305 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bytes"
	"errors"
	"io"
)

// Writer2Config is used to create a Writer2 using parameters.
type Writer2Config struct {
	// The properties for the encoding. If it is nil the value
	// {LC: 3, LP: 0, PB: 2} will be chosen.
	Properties *Properties
	// The capacity of the dictionary. If DictCap is zero, the value
	// 8 MiB will be chosen.
	DictCap int
	// Size of the lookahead buffer; value 0 indicates default size
	// 4096
	BufSize int
	// Match algorithm
	Matcher MatchAlgorithm
}

// fill replaces zero values with default values.
func (c *Writer2Config) fill() {
	if c.Properties == nil {
		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
	}
	if c.DictCap == 0 {
		c.DictCap = 8 * 1024 * 1024
	}
	if c.BufSize == 0 {
		c.BufSize = 4096
	}
}

// Verify checks the Writer2Config for correctness. Zero values will be
// replaced by default values.
func (c *Writer2Config) Verify() error {
	if c == nil {
		return errors.New("lzma: Writer2Config is nil")
	}
	c.fill()
	var err error
	if c.Properties == nil {
		return errors.New("lzma: Writer2Config has no Properties set")
	}
	if err = c.Properties.verify(); err != nil {
		return err
	}
	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
		return errors.New("lzma: dictionary capacity is out of range")
	}
	if !(maxMatchLen <= c.BufSize) {
		return errors.New("lzma: lookahead buffer size too small")
	}
	if c.Properties.LC+c.Properties.LP > 4 {
		return errors.New("lzma: sum of lc and lp exceeds 4")
	}
	if err = c.Matcher.verify(); err != nil {
		return err
	}
	return nil
}

// Writer2 supports the creation of an LZMA2 stream. Note that
// written data is buffered, so call Flush or Close to write data to the
// underlying writer. The Close method writes the end-of-stream marker
// to the stream. So you may be able to concatenate the output of two
// writers as long as the output of the first writer has only been flushed
// but not closed.
//
// Any change to the fields Properties or DictCap must be done before the
// first call to Write, Flush or Close.
type Writer2 struct {
	w io.Writer

	start   *state
	encoder *encoder

	cstate chunkState
	ctype  chunkType

	buf bytes.Buffer
	lbw LimitedByteWriter
}

// NewWriter2 creates an LZMA2 chunk sequence writer with the default
// parameters and options.
func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
	return Writer2Config{}.NewWriter2(lzma2)
}

// NewWriter2 creates a new LZMA2 writer using the given configuration.
func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	w = &Writer2{
		w:      lzma2,
		start:  newState(*c.Properties),
		cstate: start,
		ctype:  start.defaultChunkType(),
	}
	w.buf.Grow(maxCompressed)
	w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed}
	m, err := c.Matcher.new(c.DictCap)
	if err != nil {
		return nil, err
	}
	d, err := newEncoderDict(c.DictCap, c.BufSize, m)
	if err != nil {
		return nil, err
	}
	w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0)
	if err != nil {
		return nil, err
	}
	return w, nil
}

// written returns the number of bytes written to the current chunk.
func (w *Writer2) written() int {
	if w.encoder == nil {
		return 0
	}
	return int(w.encoder.Compressed()) + w.encoder.dict.Buffered()
}

// errClosed indicates that the writer is closed.
var errClosed = errors.New("lzma: writer closed")

// Write writes data to the LZMA2 stream. Note that written data will be
// buffered. Use Flush or Close to ensure that data is written to the
// underlying writer.
func (w *Writer2) Write(p []byte) (n int, err error) {
	if w.cstate == stop {
		return 0, errClosed
	}
	for n < len(p) {
		m := maxUncompressed - w.written()
		if m <= 0 {
			panic("lzma: maxUncompressed reached")
		}
		var q []byte
		if n+m < len(p) {
			q = p[n : n+m]
		} else {
			q = p[n:]
		}
		k, err := w.encoder.Write(q)
		n += k
		if err != nil && err != ErrLimit {
			return n, err
		}
		if err == ErrLimit || k == m {
			if err = w.flushChunk(); err != nil {
				return n, err
			}
		}
	}
	return n, nil
}

// writeUncompressedChunk writes an uncompressed chunk to the LZMA2
// stream.
func (w *Writer2) writeUncompressedChunk() error {
	u := w.encoder.Compressed()
	if u <= 0 {
		return errors.New("lzma: can't write empty uncompressed chunk")
	}
	if u > maxUncompressed {
		panic("overrun of uncompressed data limit")
	}
	switch w.ctype {
	case cLRND:
		w.ctype = cUD
	default:
		w.ctype = cU
	}
	w.encoder.state = w.start

	header := chunkHeader{
		ctype:        w.ctype,
		uncompressed: uint32(u - 1),
	}
	hdata, err := header.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.w.Write(hdata); err != nil {
		return err
	}
	_, err = w.encoder.dict.CopyN(w.w, int(u))
	return err
}

// writeCompressedChunk writes a compressed chunk to the underlying
// writer.
func (w *Writer2) writeCompressedChunk() error {
	if w.ctype == cU || w.ctype == cUD {
		panic("chunk type uncompressed")
	}

	u := w.encoder.Compressed()
	if u <= 0 {
		return errors.New("writeCompressedChunk: empty chunk")
	}
	if u > maxUncompressed {
		panic("overrun of uncompressed data limit")
	}
	c := w.buf.Len()
	if c <= 0 {
		panic("no compressed data")
	}
	if c > maxCompressed {
		panic("overrun of compressed data limit")
	}
	header := chunkHeader{
		ctype:        w.ctype,
		uncompressed: uint32(u - 1),
		compressed:   uint16(c - 1),
		props:        w.encoder.state.Properties,
	}
	hdata, err := header.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.w.Write(hdata); err != nil {
		return err
	}
	_, err = io.Copy(w.w, &w.buf)
	return err
}

// writeChunk writes a single chunk to the underlying writer.
func (w *Writer2) writeChunk() error {
	u := int(uncompressedHeaderLen + w.encoder.Compressed())
	c := headerLen(w.ctype) + w.buf.Len()
	if u < c {
		return w.writeUncompressedChunk()
	}
	return w.writeCompressedChunk()
}

// flushChunk terminates the current chunk. The encoder will be reset
// to support the next chunk.
func (w *Writer2) flushChunk() error {
	if w.written() == 0 {
		return nil
	}
	var err error
	if err = w.encoder.Close(); err != nil {
		return err
	}
	if err = w.writeChunk(); err != nil {
		return err
	}
	w.buf.Reset()
	w.lbw.N = maxCompressed
	if err = w.encoder.Reopen(&w.lbw); err != nil {
		return err
	}
	if err = w.cstate.next(w.ctype); err != nil {
		return err
	}
	w.ctype = w.cstate.defaultChunkType()
	w.start = cloneState(w.encoder.state)
	return nil
}

// Flush writes all buffered data out to the underlying stream. This
// could result in multiple chunks being created.
func (w *Writer2) Flush() error {
	if w.cstate == stop {
		return errClosed
	}
	for w.written() > 0 {
		if err := w.flushChunk(); err != nil {
			return err
		}
	}
	return nil
}

// Close terminates the LZMA2 stream with an EOS chunk.
func (w *Writer2) Close() error {
	if w.cstate == stop {
		return errClosed
	}
	if err := w.Flush(); err != nil {
		return err
	}
	// write zero byte EOS chunk
	_, err := w.w.Write([]byte{0})
	if err != nil {
		return err
	}
	w.cstate = stop
	return nil
}
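
// Sketch of the concatenation property described on Writer2 (buffers and
// payloads are illustrative; error handling elided for brevity): flushing,
// but not closing, the first writer leaves the sequence without an EOS
// chunk, so per the doc comment above the output of a second writer can
// follow it in the same stream.
//
//	var buf bytes.Buffer
//	w1, _ := lzma.NewWriter2(&buf)
//	w1.Write([]byte("part one "))
//	w1.Flush() // chunks written, no EOS chunk
//	w2, _ := lzma.NewWriter2(&buf)
//	w2.Write([]byte("part two"))
//	w2.Close() // terminates the combined sequence with an EOS chunk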
109
vendor/github.com/ulikunitz/xz/lzma/writer2_test.go
generated
vendored
Normal file
@ -0,0 +1,109 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bytes"
	"io"
	"math/rand"
	"strings"
	"testing"

	"github.com/ulikunitz/xz/internal/randtxt"
)

func TestWriter2(t *testing.T) {
	var buf bytes.Buffer
	w, err := Writer2Config{DictCap: 4096}.NewWriter2(&buf)
	if err != nil {
		t.Fatalf("NewWriter error %s", err)
	}
	n, err := w.Write([]byte{'a'})
	if err != nil {
		t.Fatalf("w.Write([]byte{'a'}) error %s", err)
	}
	if n != 1 {
		t.Fatalf("w.Write([]byte{'a'}) returned %d; want %d", n, 1)
	}
	if err = w.Flush(); err != nil {
		t.Fatalf("w.Flush() error %s", err)
	}
	// check that double Flush doesn't write another chunk
	if err = w.Flush(); err != nil {
		t.Fatalf("w.Flush() error %s", err)
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close() error %s", err)
	}
	p := buf.Bytes()
	want := []byte{1, 0, 0, 'a', 0}
	if !bytes.Equal(p, want) {
		t.Fatalf("bytes written %#v; want %#v", p, want)
	}
}

func TestCycle1(t *testing.T) {
	var buf bytes.Buffer
	w, err := Writer2Config{DictCap: 4096}.NewWriter2(&buf)
	if err != nil {
		t.Fatalf("NewWriter error %s", err)
	}
	n, err := w.Write([]byte{'a'})
	if err != nil {
		t.Fatalf("w.Write([]byte{'a'}) error %s", err)
	}
	if n != 1 {
		t.Fatalf("w.Write([]byte{'a'}) returned %d; want %d", n, 1)
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close() error %s", err)
	}
	r, err := Reader2Config{DictCap: 4096}.NewReader2(&buf)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	p := make([]byte, 3)
	n, err = r.Read(p)
	t.Logf("n %d error %v", n, err)
}

func TestCycle2(t *testing.T) {
	buf := new(bytes.Buffer)
	w, err := Writer2Config{DictCap: 4096}.NewWriter2(buf)
	if err != nil {
		t.Fatalf("NewWriter error %s", err)
	}
	// const txtlen = 1024
	const txtlen = 2100000
	io.CopyN(buf, randtxt.NewReader(rand.NewSource(42)), txtlen)
	txt := buf.String()
	buf.Reset()
	n, err := io.Copy(w, strings.NewReader(txt))
	if err != nil {
		t.Fatalf("Compressing copy error %s", err)
	}
	if n != txtlen {
		t.Fatalf("Compressing data length %d; want %d", n, txtlen)
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close error %s", err)
	}
	t.Logf("buf.Len() %d", buf.Len())
	r, err := Reader2Config{DictCap: 4096}.NewReader2(buf)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	out := new(bytes.Buffer)
	n, err = io.Copy(out, r)
	if err != nil {
		t.Fatalf("Decompressing copy error %s after %d bytes", err, n)
	}
	if n != txtlen {
		t.Fatalf("Decompression data length %d; want %d", n, txtlen)
	}
	if txt != out.String() {
		t.Fatal("decompressed data differs from original")
	}
}
249
vendor/github.com/ulikunitz/xz/lzma/writer_test.go
generated
vendored
Normal file
@ -0,0 +1,249 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"bufio"
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"testing"

	"github.com/ulikunitz/xz/internal/randtxt"
)

func TestWriterCycle(t *testing.T) {
	orig := readOrigFile(t)
	buf := new(bytes.Buffer)
	w, err := NewWriter(buf)
	if err != nil {
		t.Fatalf("NewWriter: error %s", err)
	}
	n, err := w.Write(orig)
	if err != nil {
		t.Fatalf("w.Write error %s", err)
	}
	if n != len(orig) {
		t.Fatalf("w.Write returned %d; want %d", n, len(orig))
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close error %s", err)
	}
	t.Logf("buf.Len() %d len(orig) %d", buf.Len(), len(orig))
	if buf.Len() > len(orig) {
		t.Errorf("buf.Len()=%d bigger than len(orig)=%d", buf.Len(),
			len(orig))
	}
	lr, err := NewReader(buf)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	decoded, err := ioutil.ReadAll(lr)
	if err != nil {
		t.Fatalf("ReadAll(lr) error %s", err)
	}
	t.Logf("%s", decoded)
	if len(orig) != len(decoded) {
		t.Fatalf("length decoded is %d; want %d", len(decoded),
			len(orig))
	}
	if !bytes.Equal(orig, decoded) {
		t.Fatalf("decoded file differs from original")
	}
}

func TestWriterLongData(t *testing.T) {
	const (
		seed = 49
		size = 82237
	)
	r := io.LimitReader(randtxt.NewReader(rand.NewSource(seed)), size)
	txt, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatalf("ReadAll error %s", err)
	}
	if len(txt) != size {
		t.Fatalf("ReadAll read %d bytes; want %d", len(txt), size)
	}
	buf := &bytes.Buffer{}
	w, err := WriterConfig{DictCap: 0x4000}.NewWriter(buf)
	if err != nil {
		t.Fatalf("WriterConfig.NewWriter error %s", err)
	}
	n, err := w.Write(txt)
	if err != nil {
		t.Fatalf("w.Write error %s", err)
	}
	if n != len(txt) {
		t.Fatalf("w.Write wrote %d bytes; want %d", n, size)
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close error %s", err)
	}
	t.Logf("compressed length %d", buf.Len())
	lr, err := NewReader(buf)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	txtRead, err := ioutil.ReadAll(lr)
	if err != nil {
		t.Fatalf("ReadAll(lr) error %s", err)
	}
	if len(txtRead) != size {
		t.Fatalf("ReadAll(lr) returned %d bytes; want %d",
			len(txtRead), size)
	}
	if !bytes.Equal(txtRead, txt) {
		t.Fatal("ReadAll(lr) returned txt differs from original")
	}
}

func TestWriter_Size(t *testing.T) {
	buf := new(bytes.Buffer)
	w, err := WriterConfig{Size: 10, EOSMarker: true}.NewWriter(buf)
	if err != nil {
		t.Fatalf("WriterConfig.NewWriter error %s", err)
	}
	q := []byte{'a'}
	for i := 0; i < 9; i++ {
		n, err := w.Write(q)
		if err != nil {
			t.Fatalf("w.Write error %s", err)
		}
		if n != 1 {
			t.Fatalf("w.Write returned %d; want %d", n, 1)
		}
		q[0]++
	}
	if err := w.Close(); err != errSize {
		t.Fatalf("expected errSize, but got %v", err)
	}
	n, err := w.Write(q)
	if err != nil {
		t.Fatalf("w.Write error %s", err)
	}
	if n != 1 {
		t.Fatalf("w.Write returned %d; want %d", n, 1)
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close error %s", err)
	}
	t.Logf("compressed size %d", buf.Len())
	r, err := NewReader(buf)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	b, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatalf("ReadAll error %s", err)
	}
	s := string(b)
	want := "abcdefghij"
	if s != want {
		t.Fatalf("read %q, want %q", s, want)
	}
}

// The example uses the buffered reader and writer from package bufio.
func Example_writer() {
	pr, pw := io.Pipe()
	go func() {
		bw := bufio.NewWriter(pw)
		w, err := NewWriter(bw)
		if err != nil {
			log.Fatal(err)
		}
		input := []byte("The quick brown fox jumps over the lazy dog.")
		if _, err = w.Write(input); err != nil {
			log.Fatal(err)
		}
		if err = w.Close(); err != nil {
			log.Fatal(err)
		}
		// the reader waits for the data
		if err = bw.Flush(); err != nil {
			log.Fatal(err)
		}
	}()
	r, err := NewReader(pr)
	if err != nil {
		log.Fatal(err)
	}
	_, err = io.Copy(os.Stdout, r)
	if err != nil {
		log.Fatal(err)
	}
	// Output:
	// The quick brown fox jumps over the lazy dog.
}

func BenchmarkReader(b *testing.B) {
	const (
		seed = 49
		size = 50000
	)
	r := io.LimitReader(randtxt.NewReader(rand.NewSource(seed)), size)
	txt, err := ioutil.ReadAll(r)
	if err != nil {
		b.Fatalf("ReadAll error %s", err)
	}
	buf := &bytes.Buffer{}
	w, err := WriterConfig{DictCap: 0x4000}.NewWriter(buf)
	if err != nil {
		b.Fatalf("WriterConfig{}.NewWriter error %s", err)
	}
	if _, err = w.Write(txt); err != nil {
		b.Fatalf("w.Write error %s", err)
	}
	if err = w.Close(); err != nil {
		b.Fatalf("w.Close error %s", err)
	}
	data, err := ioutil.ReadAll(buf)
	if err != nil {
		b.Fatalf("ReadAll error %s", err)
	}
	b.SetBytes(int64(len(txt)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		lr, err := NewReader(bytes.NewReader(data))
		if err != nil {
			b.Fatalf("NewReader error %s", err)
		}
		if _, err = ioutil.ReadAll(lr); err != nil {
			b.Fatalf("ReadAll(lr) error %s", err)
		}
	}
}

func BenchmarkWriter(b *testing.B) {
	const (
		seed = 49
		size = 50000
	)
	r := io.LimitReader(randtxt.NewReader(rand.NewSource(seed)), size)
	txt, err := ioutil.ReadAll(r)
	if err != nil {
		b.Fatalf("ReadAll error %s", err)
	}
	buf := &bytes.Buffer{}
	b.SetBytes(int64(len(txt)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf.Reset()
		w, err := WriterConfig{DictCap: 0x4000}.NewWriter(buf)
		if err != nil {
			b.Fatalf("NewWriter error %s", err)
		}
		if _, err = w.Write(txt); err != nil {
			b.Fatalf("w.Write error %s", err)
		}
		if err = w.Close(); err != nil {
			b.Fatalf("w.Close error %s", err)
		}
	}
}
117
vendor/github.com/ulikunitz/xz/lzmafilter.go
generated
vendored
Normal file
@ -0,0 +1,117 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
	"errors"
	"fmt"
	"io"

	"github.com/ulikunitz/xz/lzma"
)

// LZMA filter constants.
const (
	lzmaFilterID  = 0x21
	lzmaFilterLen = 3
)

// lzmaFilter declares the LZMA2 filter information stored in an xz
// block header.
type lzmaFilter struct {
	dictCap int64
}

// String returns a representation of the LZMA filter.
func (f lzmaFilter) String() string {
	return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
}

// id returns the ID for the LZMA2 filter.
func (f lzmaFilter) id() uint64 { return lzmaFilterID }

// MarshalBinary converts the lzmaFilter into its encoded representation.
func (f lzmaFilter) MarshalBinary() (data []byte, err error) {
	c := lzma.EncodeDictCap(f.dictCap)
	return []byte{lzmaFilterID, 1, c}, nil
}

// UnmarshalBinary unmarshals the given data representation of the LZMA2
// filter.
func (f *lzmaFilter) UnmarshalBinary(data []byte) error {
	if len(data) != lzmaFilterLen {
		return errors.New("xz: data for LZMA2 filter has wrong length")
	}
	if data[0] != lzmaFilterID {
		return errors.New("xz: wrong LZMA2 filter id")
	}
	if data[1] != 1 {
		return errors.New("xz: wrong LZMA2 filter size")
	}
	dc, err := lzma.DecodeDictCap(data[2])
	if err != nil {
		return errors.New("xz: wrong LZMA2 dictionary size property")
	}

	f.dictCap = dc
	return nil
}

// reader creates a new reader for the LZMA2 filter.
func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader,
	err error) {

	config := new(lzma.Reader2Config)
	if c != nil {
		config.DictCap = c.DictCap
	}
	dc := int(f.dictCap)
	if dc < 1 {
		return nil, errors.New("xz: LZMA2 filter parameter " +
			"dictionary capacity overflow")
	}
	if dc > config.DictCap {
		config.DictCap = dc
	}

	fr, err = config.NewReader2(r)
	if err != nil {
		return nil, err
	}
	return fr, nil
}

// writeCloser creates an io.WriteCloser for the LZMA2 filter.
func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig,
) (fw io.WriteCloser, err error) {
	config := new(lzma.Writer2Config)
	if c != nil {
		*config = lzma.Writer2Config{
			Properties: c.Properties,
			DictCap:    c.DictCap,
			BufSize:    c.BufSize,
			Matcher:    c.Matcher,
		}
	}

	dc := int(f.dictCap)
	if dc < 1 {
		return nil, errors.New("xz: LZMA2 filter parameter " +
			"dictionary capacity overflow")
	}
	if dc > config.DictCap {
		config.DictCap = dc
	}

	fw, err = config.NewWriter2(w)
	if err != nil {
		return nil, err
	}
	return fw, nil
}

// last returns true, because an LZMA2 filter must be the last filter in
// the filter list.
func (f lzmaFilter) last() bool { return true }
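
// Encoding sketch: the marshalled filter is always three bytes, namely the
// filter ID 0x21, the property length 1, and the encoded dictionary
// capacity. The capacity value below is illustrative; capacities that are
// not exactly representable by the one-byte encoding may be adjusted.
//
//	f := lzmaFilter{dictCap: 8 * 1024 * 1024}
//	data, _ := f.MarshalBinary() // []byte{0x21, 0x01, <dictcap byte>}
//	var g lzmaFilter
//	_ = g.UnmarshalBinary(data) // recovers the dictionary capacity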
5
vendor/github.com/ulikunitz/xz/make-docs
generated
vendored
Normal file
@ -0,0 +1,5 @@
#!/bin/sh

set -x
pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md
pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md
373
vendor/github.com/ulikunitz/xz/reader.go
generated
vendored
Normal file
@ -0,0 +1,373 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package xz supports the compression and decompression of xz files. It
// supports version 1.0.4 of the specification without the non-LZMA2
// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt
package xz

import (
	"bytes"
	"errors"
	"fmt"
	"hash"
	"io"

	"github.com/ulikunitz/xz/internal/xlog"
	"github.com/ulikunitz/xz/lzma"
)

// ReaderConfig defines the parameters for the xz reader. The
// SingleStream parameter requests the reader to assume that the
// underlying stream contains only a single stream.
type ReaderConfig struct {
	DictCap      int
	SingleStream bool
}

// fill replaces all zero values with their default values.
func (c *ReaderConfig) fill() {
	if c.DictCap == 0 {
		c.DictCap = 8 * 1024 * 1024
	}
}

// Verify checks the reader parameters for validity. Zero values will be
// replaced by default values.
func (c *ReaderConfig) Verify() error {
	if c == nil {
		return errors.New("xz: reader parameters are nil")
	}
	lc := lzma.Reader2Config{DictCap: c.DictCap}
	if err := lc.Verify(); err != nil {
		return err
	}
	return nil
}

// Reader supports the reading of one or multiple xz streams.
type Reader struct {
	ReaderConfig

	xz io.Reader
	sr *streamReader
}

// streamReader decodes a single xz stream
type streamReader struct {
	ReaderConfig

	xz      io.Reader
	br      *blockReader
	newHash func() hash.Hash
	h       header
	index   []record
}

// NewReader creates a new xz reader using the default parameters.
// The function reads and checks the header of the first XZ stream. The
// reader will process multiple streams including padding.
func NewReader(xz io.Reader) (r *Reader, err error) {
	return ReaderConfig{}.NewReader(xz)
}

// NewReader creates an xz stream reader. The created reader will be
// able to process multiple streams and padding unless SingleStream
// has been set in the reader configuration c.
func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	r = &Reader{
		ReaderConfig: c,
		xz:           xz,
	}
	if r.sr, err = c.newStreamReader(xz); err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, err
	}
	return r, nil
}

var errUnexpectedData = errors.New("xz: unexpected data after stream")

// Read reads uncompressed data from the stream.
func (r *Reader) Read(p []byte) (n int, err error) {
	for n < len(p) {
		if r.sr == nil {
			if r.SingleStream {
				data := make([]byte, 1)
				_, err = io.ReadFull(r.xz, data)
				if err != io.EOF {
					return n, errUnexpectedData
				}
				return n, io.EOF
			}
			for {
				r.sr, err = r.ReaderConfig.newStreamReader(r.xz)
				if err != errPadding {
					break
				}
			}
			if err != nil {
				return n, err
			}
		}
		k, err := r.sr.Read(p[n:])
		n += k
		if err != nil {
			if err == io.EOF {
				r.sr = nil
				continue
			}
			return n, err
		}
	}
	return n, nil
}
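
// Usage sketch for the xz Reader (the file name is an illustrative
// assumption): decompress an xz file that may contain several concatenated
// streams with padding; set SingleStream to reject trailing data after the
// first stream instead.
//
//	f, err := os.Open("data.xz") // assumed input file
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	r, err := xz.ReaderConfig{}.NewReader(f) // or {SingleStream: true}
//	if err != nil {
//		log.Fatal(err)
//	}
//	if _, err = io.Copy(os.Stdout, r); err != nil {
//		log.Fatal(err)
//	}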
|
||||
|
||||
var errPadding = errors.New("xz: padding (4 zero bytes) encountered")
|
||||
|
||||
// newStreamReader creates a new xz stream reader using the given configuration
|
||||
// parameters. NewReader reads and checks the header of the xz stream.
|
||||
func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) {
|
||||
if err = c.Verify(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data := make([]byte, HeaderLen)
|
||||
if _, err := io.ReadFull(xz, data[:4]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) {
|
||||
return nil, errPadding
|
||||
}
|
||||
if _, err = io.ReadFull(xz, data[4:]); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
r = &streamReader{
|
||||
ReaderConfig: c,
|
||||
xz: xz,
|
||||
index: make([]record, 0, 4),
|
||||
}
|
||||
if err = r.h.UnmarshalBinary(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
xlog.Debugf("xz header %s", r.h)
|
||||
if r.newHash, err = newHashFunc(r.h.flags); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// errIndex indicates an error with the xz file index.
|
||||
var errIndex = errors.New("xz: error in xz file index")
|
||||
|
||||
// readTail reads the index body and the xz footer.
|
||||
func (r *streamReader) readTail() error {
|
||||
index, n, err := readIndexBody(r.xz)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
if len(index) != len(r.index) {
|
||||
return fmt.Errorf("xz: index length is %d; want %d",
|
||||
len(index), len(r.index))
|
||||
}
|
||||
for i, rec := range r.index {
|
||||
if rec != index[i] {
|
||||
return fmt.Errorf("xz: record %d is %v; want %v",
|
||||
i, rec, index[i])
|
||||
}
|
||||
}
|
||||
|
||||
p := make([]byte, footerLen)
|
||||
if _, err = io.ReadFull(r.xz, p); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
var f footer
|
||||
if err = f.UnmarshalBinary(p); err != nil {
|
||||
return err
|
||||
}
|
||||
xlog.Debugf("xz footer %s", f)
|
||||
if f.flags != r.h.flags {
|
||||
return errors.New("xz: footer flags incorrect")
|
||||
}
|
||||
if f.indexSize != int64(n)+1 {
|
||||
return errors.New("xz: index size in footer wrong")
|
||||
}
|
||||
return nil
|
||||
}
|

// Read reads actual data from the xz stream.
func (r *streamReader) Read(p []byte) (n int, err error) {
	for n < len(p) {
		if r.br == nil {
			bh, hlen, err := readBlockHeader(r.xz)
			if err != nil {
				if err == errIndexIndicator {
					if err = r.readTail(); err != nil {
						return n, err
					}
					return n, io.EOF
				}
				return n, err
			}
			xlog.Debugf("block %v", *bh)
			r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh,
				hlen, r.newHash())
			if err != nil {
				return n, err
			}
		}
		k, err := r.br.Read(p[n:])
		n += k
		if err != nil {
			if err == io.EOF {
				r.index = append(r.index, r.br.record())
				r.br = nil
			} else {
				return n, err
			}
		}
	}
	return n, nil
}

// countingReader is a reader that counts the bytes read.
type countingReader struct {
	r io.Reader
	n int64
}

// Read reads data from the wrapped reader and adds the number of bytes
// read to the n field.
func (lr *countingReader) Read(p []byte) (n int, err error) {
	n, err = lr.r.Read(p)
	lr.n += int64(n)
	return n, err
}
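countingReader is internal, but its behavior is simple to demonstrate: it forwards Read calls and accumulates the byte count in n. A self-contained sketch of the same idea:

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    // countingReader mirrors the internal type above: it wraps a reader
    // and counts the bytes that pass through it.
    type countingReader struct {
        r io.Reader
        n int64
    }

    func (lr *countingReader) Read(p []byte) (n int, err error) {
        n, err = lr.r.Read(p)
        lr.n += int64(n)
        return n, err
    }

    func main() {
        cr := &countingReader{r: strings.NewReader("hello, xz")}
        ioutil.ReadAll(cr) // drain the reader
        fmt.Println(cr.n)  // 9
    }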

// blockReader supports the reading of a block.
type blockReader struct {
	lxz       countingReader
	header    *blockHeader
	headerLen int
	n         int64
	hash      hash.Hash
	r         io.Reader
	err       error
}

// newBlockReader creates a new block reader.
func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader,
	hlen int, hash hash.Hash) (br *blockReader, err error) {

	br = &blockReader{
		lxz:       countingReader{r: xz},
		header:    h,
		headerLen: hlen,
		hash:      hash,
	}

	fr, err := c.newFilterReader(&br.lxz, h.filters)
	if err != nil {
		return nil, err
	}
	br.r = io.TeeReader(fr, br.hash)

	return br, nil
}

// uncompressedSize returns the uncompressed size of the block.
func (br *blockReader) uncompressedSize() int64 {
	return br.n
}

// compressedSize returns the compressed size of the block.
func (br *blockReader) compressedSize() int64 {
	return br.lxz.n
}

// unpaddedSize computes the unpadded size for the block.
func (br *blockReader) unpaddedSize() int64 {
	n := int64(br.headerLen)
	n += br.compressedSize()
	n += int64(br.hash.Size())
	return n
}

// record returns the index record for the current block.
func (br *blockReader) record() record {
	return record{br.unpaddedSize(), br.uncompressedSize()}
}
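These two sizes are exactly what each index record pairs up: the unpadded size covers the block header, the compressed payload and the checksum, while deliberately excluding the block padding. A worked example, assuming a hypothetical block with a 12-byte header, 100 compressed bytes and a CRC64 (8-byte) checksum:

    package main

    import "fmt"

    func main() {
        // Hypothetical block: 12-byte header, 100 compressed bytes,
        // 8-byte CRC64 checksum. Padding is excluded by definition.
        headerLen, compressedSize, hashSize := int64(12), int64(100), int64(8)
        unpaddedSize := headerLen + compressedSize + hashSize
        fmt.Println(unpaddedSize) // 120
    }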

// errBlockSize indicates that the size of the block in the block header
// is wrong.
var errBlockSize = errors.New("xz: wrong uncompressed size for block")

// Read reads data from the block.
func (br *blockReader) Read(p []byte) (n int, err error) {
	n, err = br.r.Read(p)
	br.n += int64(n)

	u := br.header.uncompressedSize
	if u >= 0 && br.uncompressedSize() > u {
		return n, errBlockSize
	}
	c := br.header.compressedSize
	if c >= 0 && br.compressedSize() > c {
		return n, errors.New("xz: wrong compressed size for block")
	}
	if err != io.EOF {
		return n, err
	}
	if br.uncompressedSize() < u || br.compressedSize() < c {
		return n, io.ErrUnexpectedEOF
	}

	s := br.hash.Size()
	k := padLen(br.lxz.n)
	q := make([]byte, k+s, k+2*s)
	if _, err = io.ReadFull(br.lxz.r, q); err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return n, err
	}
	if !allZeros(q[:k]) {
		return n, errors.New("xz: non-zero block padding")
	}
	checkSum := q[k:]
	computedSum := br.hash.Sum(checkSum[s:])
	if !bytes.Equal(checkSum, computedSum) {
		return n, errors.New("xz: checksum error for block")
	}
	return n, io.EOF
}
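padLen and allZeros are defined elsewhere in the package. From their use here, padLen must return the number of zero bytes that pad the compressed data up to a four-byte boundary, as the xz format requires; the following is a sketch consistent with that use, not necessarily the package's actual implementation:

    // padLen returns the number of zero bytes needed to pad n up to the
    // next multiple of four (0 if n is already aligned).
    func padLen(n int64) int {
        k := int(n % 4)
        if k > 0 {
            k = 4 - k
        }
        return k
    }

    // allZeros reports whether every byte in p is zero.
    func allZeros(p []byte) bool {
        for _, b := range p {
            if b != 0 {
                return false
            }
        }
        return true
    }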

func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader,
	err error) {

	if err = verifyFilters(f); err != nil {
		return nil, err
	}

	fr = r
	for i := len(f) - 1; i >= 0; i-- {
		fr, err = f[i].reader(fr, c)
		if err != nil {
			return nil, err
		}
	}
	return fr, nil
}
81
vendor/github.com/ulikunitz/xz/reader_test.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"testing"
)

func TestReaderSimple(t *testing.T) {
	const file = "fox.xz"
	xz, err := os.Open(file)
	if err != nil {
		t.Fatalf("os.Open(%q) error %s", file, err)
	}
	r, err := NewReader(xz)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	var buf bytes.Buffer
	if _, err = io.Copy(&buf, r); err != nil {
		t.Fatalf("io.Copy error %s", err)
	}
}

func TestReaderSingleStream(t *testing.T) {
	data, err := ioutil.ReadFile("fox.xz")
	if err != nil {
		t.Fatalf("ReadFile error %s", err)
	}
	xz := bytes.NewReader(data)
	rc := ReaderConfig{SingleStream: true}
	r, err := rc.NewReader(xz)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	var buf bytes.Buffer
	if _, err = io.Copy(&buf, r); err != nil {
		t.Fatalf("io.Copy error %s", err)
	}
	buf.Reset()
	data = append(data, 0)
	xz = bytes.NewReader(data)
	r, err = rc.NewReader(xz)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	if _, err = io.Copy(&buf, r); err != errUnexpectedData {
		t.Fatalf("io.Copy returned %v; want %v", err, errUnexpectedData)
	}
}

func TestReaderMultipleStreams(t *testing.T) {
	data, err := ioutil.ReadFile("fox.xz")
	if err != nil {
		t.Fatalf("ReadFile error %s", err)
	}
	m := make([]byte, 0, 4*len(data)+4*4)
	m = append(m, data...)
	m = append(m, data...)
	m = append(m, 0, 0, 0, 0)
	m = append(m, data...)
	m = append(m, 0, 0, 0, 0)
	m = append(m, 0, 0, 0, 0)
	m = append(m, data...)
	m = append(m, 0, 0, 0, 0)
	xz := bytes.NewReader(m)
	r, err := NewReader(xz)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	var buf bytes.Buffer
	if _, err = io.Copy(&buf, r); err != nil {
		t.Fatalf("io.Copy error %s", err)
	}
}
386
vendor/github.com/ulikunitz/xz/writer.go
generated
vendored
Normal file
@ -0,0 +1,386 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
	"errors"
	"hash"
	"io"

	"github.com/ulikunitz/xz/lzma"
)

// WriterConfig describes the parameters for an xz writer.
type WriterConfig struct {
	Properties *lzma.Properties
	DictCap    int
	BufSize    int
	BlockSize  int64
	// checksum method: CRC32, CRC64 or SHA256
	CheckSum byte
	// match algorithm
	Matcher lzma.MatchAlgorithm
}

// fill replaces zero values with default values.
func (c *WriterConfig) fill() {
	if c.Properties == nil {
		c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2}
	}
	if c.DictCap == 0 {
		c.DictCap = 8 * 1024 * 1024
	}
	if c.BufSize == 0 {
		c.BufSize = 4096
	}
	if c.BlockSize == 0 {
		c.BlockSize = maxInt64
	}
	if c.CheckSum == 0 {
		c.CheckSum = CRC64
	}
}

// Verify checks the configuration for errors. Zero values will be
// replaced by default values.
func (c *WriterConfig) Verify() error {
	if c == nil {
		return errors.New("xz: writer configuration is nil")
	}
	c.fill()
	lc := lzma.Writer2Config{
		Properties: c.Properties,
		DictCap:    c.DictCap,
		BufSize:    c.BufSize,
		Matcher:    c.Matcher,
	}
	if err := lc.Verify(); err != nil {
		return err
	}
	if c.BlockSize <= 0 {
		return errors.New("xz: block size out of range")
	}
	if err := verifyFlags(c.CheckSum); err != nil {
		return err
	}
	return nil
}

// filters creates the filter list for the given parameters.
func (c *WriterConfig) filters() []filter {
	return []filter{&lzmaFilter{int64(c.DictCap)}}
}
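In practice the zero value of WriterConfig works out of the box, since fill substitutes the defaults above; individual fields can be overridden before calling NewWriter. A short sketch (SHA256 is assumed to be the package-level checksum constant the CheckSum comment refers to):

    package main

    import (
        "bytes"
        "io"
        "log"

        "github.com/ulikunitz/xz"
    )

    func main() {
        var buf bytes.Buffer
        // Override two defaults: SHA-256 checksums and 1 MiB blocks.
        cfg := xz.WriterConfig{
            CheckSum:  xz.SHA256,
            BlockSize: 1 << 20,
        }
        w, err := cfg.NewWriter(&buf)
        if err != nil {
            log.Fatal(err)
        }
        if _, err := io.WriteString(w, "some data"); err != nil {
            log.Fatal(err)
        }
        if err := w.Close(); err != nil {
            log.Fatal(err)
        }
    }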

// maxInt64 defines the maximum 64-bit signed integer.
const maxInt64 = 1<<63 - 1

// verifyFilters checks the filter list for the length and the right
// sequence of filters.
func verifyFilters(f []filter) error {
	if len(f) == 0 {
		return errors.New("xz: no filters")
	}
	if len(f) > 4 {
		return errors.New("xz: more than four filters")
	}
	for _, g := range f[:len(f)-1] {
		if g.last() {
			return errors.New("xz: last filter is not last")
		}
	}
	if !f[len(f)-1].last() {
		return errors.New("xz: wrong last filter")
	}
	return nil
}

// newFilterWriteCloser converts a filter list into a WriteCloser that
// can be used by a blockWriter.
func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) {
	if err = verifyFilters(f); err != nil {
		return nil, err
	}
	fw = nopWriteCloser(w)
	for i := len(f) - 1; i >= 0; i-- {
		fw, err = f[i].writeCloser(fw, c)
		if err != nil {
			return nil, err
		}
	}
	return fw, nil
}

// nopWCloser implements a WriteCloser with a Close method not doing
// anything.
type nopWCloser struct {
	io.Writer
}

// Close returns nil and doesn't do anything else.
func (c nopWCloser) Close() error {
	return nil
}

// nopWriteCloser converts the Writer into a WriteCloser with a Close
// function that does nothing besides returning nil.
func nopWriteCloser(w io.Writer) io.WriteCloser {
	return nopWCloser{w}
}

// Writer compresses data written to it. It is an io.WriteCloser.
type Writer struct {
	WriterConfig

	xz      io.Writer
	bw      *blockWriter
	newHash func() hash.Hash
	h       header
	index   []record
	closed  bool
}

// newBlockWriter creates a new block writer and writes the header out.
func (w *Writer) newBlockWriter() error {
	var err error
	w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash())
	if err != nil {
		return err
	}
	if err = w.bw.writeHeader(w.xz); err != nil {
		return err
	}
	return nil
}

// closeBlockWriter closes a block writer and records the sizes in the
// index.
func (w *Writer) closeBlockWriter() error {
	var err error
	if err = w.bw.Close(); err != nil {
		return err
	}
	w.index = append(w.index, w.bw.record())
	return nil
}

// NewWriter creates a new xz writer using default parameters.
func NewWriter(xz io.Writer) (w *Writer, err error) {
	return WriterConfig{}.NewWriter(xz)
}

// NewWriter creates a new Writer using the given configuration parameters.
func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	w = &Writer{
		WriterConfig: c,
		xz:           xz,
		h:            header{c.CheckSum},
		index:        make([]record, 0, 4),
	}
	if w.newHash, err = newHashFunc(c.CheckSum); err != nil {
		return nil, err
	}
	data, err := w.h.MarshalBinary()
	if err != nil {
		return nil, err
	}
	if _, err = xz.Write(data); err != nil {
		return nil, err
	}
	if err = w.newBlockWriter(); err != nil {
		return nil, err
	}
	return w, nil
}

// Write compresses the uncompressed data provided.
func (w *Writer) Write(p []byte) (n int, err error) {
	if w.closed {
		return 0, errClosed
	}
	for {
		k, err := w.bw.Write(p[n:])
		n += k
		if err != errNoSpace {
			return n, err
		}
		if err = w.closeBlockWriter(); err != nil {
			return n, err
		}
		if err = w.newBlockWriter(); err != nil {
			return n, err
		}
	}
}
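This loop is what makes BlockSize invisible to callers: once the current block is full, the blockWriter reports errNoSpace (defined further down), Write closes the block, opens a fresh one, and keeps going with the remaining bytes. A sketch showing a payload larger than BlockSize passing through a single Write call:

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/ulikunitz/xz"
    )

    func main() {
        var buf bytes.Buffer
        // Tiny blocks force Write to split the payload internally.
        w, err := xz.WriterConfig{BlockSize: 16}.NewWriter(&buf)
        if err != nil {
            log.Fatal(err)
        }
        data := bytes.Repeat([]byte("0123456789abcdef"), 4) // 64 bytes > BlockSize
        n, err := w.Write(data)
        if err != nil {
            log.Fatal(err)
        }
        if err := w.Close(); err != nil {
            log.Fatal(err)
        }
        fmt.Println(n == len(data)) // true: all bytes accepted across blocks
    }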

// Close closes the writer and adds the footer to the Writer. Close
// doesn't close the underlying writer.
func (w *Writer) Close() error {
	if w.closed {
		return errClosed
	}
	w.closed = true
	var err error
	if err = w.closeBlockWriter(); err != nil {
		return err
	}

	f := footer{flags: w.h.flags}
	if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
		return err
	}
	data, err := f.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.xz.Write(data); err != nil {
		return err
	}
	return nil
}

// countingWriter is a writer that counts all data written to it.
type countingWriter struct {
	w io.Writer
	n int64
}

// Write writes data to the countingWriter.
func (cw *countingWriter) Write(p []byte) (n int, err error) {
	n, err = cw.w.Write(p)
	cw.n += int64(n)
	if err == nil && cw.n < 0 {
		return n, errors.New("xz: counter overflow")
	}
	return
}

// blockWriter writes a single block.
type blockWriter struct {
	cxz countingWriter
	// mw combines io.WriteCloser w and the hash.
	mw        io.Writer
	w         io.WriteCloser
	n         int64
	blockSize int64
	closed    bool
	headerLen int

	filters []filter
	hash    hash.Hash
}

// newBlockWriter creates a new block writer.
func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
	bw = &blockWriter{
		cxz:       countingWriter{w: xz},
		blockSize: c.BlockSize,
		filters:   c.filters(),
		hash:      hash,
	}
	bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
	if err != nil {
		return nil, err
	}
	bw.mw = io.MultiWriter(bw.w, bw.hash)
	return bw, nil
}

// writeHeader writes the header. If the function is called after Close
// the compressedSize and uncompressedSize fields will be filled.
func (bw *blockWriter) writeHeader(w io.Writer) error {
	h := blockHeader{
		compressedSize:   -1,
		uncompressedSize: -1,
		filters:          bw.filters,
	}
	if bw.closed {
		h.compressedSize = bw.compressedSize()
		h.uncompressedSize = bw.uncompressedSize()
	}
	data, err := h.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.Write(data); err != nil {
		return err
	}
	bw.headerLen = len(data)
	return nil
}

// compressedSize returns the amount of data written to the underlying
// stream.
func (bw *blockWriter) compressedSize() int64 {
	return bw.cxz.n
}

// uncompressedSize returns the number of bytes written to the
// blockWriter.
func (bw *blockWriter) uncompressedSize() int64 {
	return bw.n
}

// unpaddedSize returns the sum of the header length, the compressed
// size of the block and the hash size.
func (bw *blockWriter) unpaddedSize() int64 {
	if bw.headerLen <= 0 {
		panic("xz: block header not written")
	}
	n := int64(bw.headerLen)
	n += bw.compressedSize()
	n += int64(bw.hash.Size())
	return n
}

// record returns the record for the current stream. Call Close before
// calling this method.
func (bw *blockWriter) record() record {
	return record{bw.unpaddedSize(), bw.uncompressedSize()}
}

var errClosed = errors.New("xz: writer already closed")

var errNoSpace = errors.New("xz: no space")

// Write writes uncompressed data to the block writer.
func (bw *blockWriter) Write(p []byte) (n int, err error) {
	if bw.closed {
		return 0, errClosed
	}

	t := bw.blockSize - bw.n
	if int64(len(p)) > t {
		err = errNoSpace
		p = p[:t]
	}

	var werr error
	n, werr = bw.mw.Write(p)
	bw.n += int64(n)
	if werr != nil {
		return n, werr
	}
	return n, err
}

// Close closes the writer.
func (bw *blockWriter) Close() error {
	if bw.closed {
		return errClosed
	}
	bw.closed = true
	if err := bw.w.Close(); err != nil {
		return err
	}
	s := bw.hash.Size()
	k := padLen(bw.cxz.n)
	p := make([]byte, k+s)
	bw.hash.Sum(p[k:k])
	if _, err := bw.cxz.w.Write(p); err != nil {
		return err
	}
	return nil
}
138
vendor/github.com/ulikunitz/xz/writer_test.go
generated
vendored
Normal file
@ -0,0 +1,138 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
	"bytes"
	"io"
	"log"
	"math/rand"
	"os"
	"testing"

	"github.com/ulikunitz/xz/internal/randtxt"
)

func TestWriter(t *testing.T) {
	const text = "The quick brown fox jumps over the lazy dog."
	var buf bytes.Buffer
	w, err := NewWriter(&buf)
	if err != nil {
		t.Fatalf("NewWriter error %s", err)
	}
	n, err := io.WriteString(w, text)
	if err != nil {
		t.Fatalf("WriteString error %s", err)
	}
	if n != len(text) {
		t.Fatalf("WriteString wrote %d bytes; want %d", n, len(text))
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close error %s", err)
	}
	var out bytes.Buffer
	r, err := NewReader(&buf)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	if _, err = io.Copy(&out, r); err != nil {
		t.Fatalf("io.Copy error %s", err)
	}
	s := out.String()
	if s != text {
		t.Fatalf("reader decompressed to %q; want %q", s, text)
	}
}

func TestIssue12(t *testing.T) {
	var buf bytes.Buffer
	w, err := NewWriter(&buf)
	if err != nil {
		t.Fatalf("NewWriter error %s", err)
	}
	if err = w.Close(); err != nil {
		t.Fatalf("w.Close error %s", err)
	}
	r, err := NewReader(&buf)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	var out bytes.Buffer
	if _, err = io.Copy(&out, r); err != nil {
		t.Fatalf("io.Copy error %s", err)
	}
	s := out.String()
	if s != "" {
		t.Fatalf("reader decompressed to %q; want %q", s, "")
	}
}

func Example() {
	const text = "The quick brown fox jumps over the lazy dog."
	var buf bytes.Buffer

	// compress text
	w, err := NewWriter(&buf)
	if err != nil {
		log.Fatalf("NewWriter error %s", err)
	}
	if _, err := io.WriteString(w, text); err != nil {
		log.Fatalf("WriteString error %s", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("w.Close error %s", err)
	}

	// decompress buffer and write result to stdout
	r, err := NewReader(&buf)
	if err != nil {
		log.Fatalf("NewReader error %s", err)
	}
	if _, err = io.Copy(os.Stdout, r); err != nil {
		log.Fatalf("io.Copy error %s", err)
	}

	// Output:
	// The quick brown fox jumps over the lazy dog.
}

func TestWriter2(t *testing.T) {
	const txtlen = 1023
	var buf bytes.Buffer
	io.CopyN(&buf, randtxt.NewReader(rand.NewSource(41)), txtlen)
	txt := buf.String()

	buf.Reset()
	w, err := NewWriter(&buf)
	if err != nil {
		t.Fatalf("NewWriter error %s", err)
	}
	n, err := io.WriteString(w, txt)
	if err != nil {
		t.Fatalf("WriteString error %s", err)
	}
	if n != len(txt) {
		t.Fatalf("WriteString wrote %d bytes; want %d", n, len(txt))
	}
	if err = w.Close(); err != nil {
		t.Fatalf("Close error %s", err)
	}
	t.Logf("buf.Len() %d", buf.Len())
	r, err := NewReader(&buf)
	if err != nil {
		t.Fatalf("NewReader error %s", err)
	}
	var out bytes.Buffer
	k, err := io.Copy(&out, r)
	if err != nil {
		t.Fatalf("Decompressing copy error %s after %d bytes", err, n)
	}
	if k != txtlen {
		t.Fatalf("Decompression data length %d; want %d", k, txtlen)
	}
	if txt != out.String() {
		t.Fatal("decompressed data differs from original")
	}
}
4
vendor/gopkg.in/check.v1/.gitignore
generated
vendored
Normal file
@ -0,0 +1,4 @@
_*
*.swp
*.[568]
[568].out
3
vendor/gopkg.in/check.v1/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,3 @@
language: go

go_import_path: gopkg.in/check.v1
25
vendor/gopkg.in/check.v1/LICENSE
generated
vendored
Normal file
@ -0,0 +1,25 @@
Gocheck - A rich testing framework for Go

Copyright (c) 2010-2013 Gustavo Niemeyer <gustavo@niemeyer.net>

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20
vendor/gopkg.in/check.v1/README.md
generated
vendored
Normal file
@ -0,0 +1,20 @@
Instructions
============

Install the package with:

    go get gopkg.in/check.v1

Import it with:

    import "gopkg.in/check.v1"

and use _check_ as the package name inside the code.

For more details, visit the project page:

* http://labix.org/gocheck

and the API documentation:

* https://gopkg.in/check.v1
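A minimal test file following these instructions could look as below. TestingT is the standard entry point that bridges check suites into go test; it is taken from the upstream gocheck documentation rather than from this diff:

    package mypkg_test

    import (
        "testing"

        . "gopkg.in/check.v1"
    )

    // Hook the check.v1 suites into the standard "go test" runner.
    func Test(t *testing.T) { TestingT(t) }

    type MySuite struct{}

    var _ = Suite(&MySuite{})

    func (s *MySuite) TestHelloWorld(c *C) {
        c.Assert(42, Equals, 42)
    }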
2
vendor/gopkg.in/check.v1/TODO
generated
vendored
Normal file
@ -0,0 +1,2 @@
- Assert(slice, Contains, item)
- Parallel test support
187
vendor/gopkg.in/check.v1/benchmark.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package check

import (
	"fmt"
	"runtime"
	"time"
)

var memStats runtime.MemStats

// timer manages benchmark timing and specifies the number of
// iterations to run.
type timer struct {
	start     time.Time // Time test or benchmark started
	duration  time.Duration
	N         int
	bytes     int64
	timerOn   bool
	benchTime time.Duration
	// The initial states of memStats.Mallocs and memStats.TotalAlloc.
	startAllocs uint64
	startBytes  uint64
	// The net total of this test after being run.
	netAllocs uint64
	netBytes  uint64
}

// StartTimer starts timing a test. This function is called automatically
// before a benchmark starts, but it can also be used to resume timing after
// a call to StopTimer.
func (c *C) StartTimer() {
	if !c.timerOn {
		c.start = time.Now()
		c.timerOn = true

		runtime.ReadMemStats(&memStats)
		c.startAllocs = memStats.Mallocs
		c.startBytes = memStats.TotalAlloc
	}
}

// StopTimer stops timing a test. This can be used to pause the timer
// while performing complex initialization that you don't
// want to measure.
func (c *C) StopTimer() {
	if c.timerOn {
		c.duration += time.Now().Sub(c.start)
		c.timerOn = false
		runtime.ReadMemStats(&memStats)
		c.netAllocs += memStats.Mallocs - c.startAllocs
		c.netBytes += memStats.TotalAlloc - c.startBytes
	}
}

// ResetTimer sets the elapsed benchmark time to zero.
// It does not affect whether the timer is running.
func (c *C) ResetTimer() {
	if c.timerOn {
		c.start = time.Now()
		runtime.ReadMemStats(&memStats)
		c.startAllocs = memStats.Mallocs
		c.startBytes = memStats.TotalAlloc
	}
	c.duration = 0
	c.netAllocs = 0
	c.netBytes = 0
}

// SetBytes records the number of bytes that the benchmark processes
// on each iteration. If this is called in a benchmark it will also
// report MB/s.
func (c *C) SetBytes(n int64) {
	c.bytes = n
}
|
||||
func (c *C) nsPerOp() int64 {
|
||||
if c.N <= 0 {
|
||||
return 0
|
||||
}
|
||||
return c.duration.Nanoseconds() / int64(c.N)
|
||||
}
|
||||
|
||||
func (c *C) mbPerSec() float64 {
|
||||
if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
|
||||
return 0
|
||||
}
|
||||
return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
|
||||
}
|
||||
|
||||
func (c *C) timerString() string {
|
||||
if c.N <= 0 {
|
||||
return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
|
||||
}
|
||||
mbs := c.mbPerSec()
|
||||
mb := ""
|
||||
if mbs != 0 {
|
||||
mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
|
||||
}
|
||||
nsop := c.nsPerOp()
|
||||
ns := fmt.Sprintf("%10d ns/op", nsop)
|
||||
if c.N > 0 && nsop < 100 {
|
||||
// The format specifiers here make sure that
|
||||
// the ones digits line up for all three possible formats.
|
||||
if nsop < 10 {
|
||||
ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
|
||||
} else {
|
||||
ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
|
||||
}
|
||||
}
|
||||
memStats := ""
|
||||
if c.benchMem {
|
||||
allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N))
|
||||
allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N))
|
||||
memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs)
|
||||
}
|
||||
return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats)
|
||||
}
|
||||
|
||||
func min(x, y int) int {
|
||||
if x > y {
|
||||
return y
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func max(x, y int) int {
|
||||
if x < y {
|
||||
return y
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// roundDown10 rounds a number down to the nearest power of 10.
|
||||
func roundDown10(n int) int {
|
||||
var tens = 0
|
||||
// tens = floor(log_10(n))
|
||||
for n > 10 {
|
||||
n = n / 10
|
||||
tens++
|
||||
}
|
||||
// result = 10^tens
|
||||
result := 1
|
||||
for i := 0; i < tens; i++ {
|
||||
result *= 10
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
|
||||
func roundUp(n int) int {
|
||||
base := roundDown10(n)
|
||||
if n < (2 * base) {
|
||||
return 2 * base
|
||||
}
|
||||
if n < (5 * base) {
|
||||
return 5 * base
|
||||
}
|
||||
return 10 * base
|
||||
}
|
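To make the rounding concrete: roundDown10 keeps only the leading power of ten, and roundUp then snaps the iteration count onto the 1-2-5 series the benchmark runner uses. A standalone demo with copies of the two functions above:

    package main

    import "fmt"

    // Copies of roundDown10 and roundUp above, for a standalone demo.
    func roundDown10(n int) int {
        tens := 0
        for n > 10 {
            n /= 10
            tens++
        }
        result := 1
        for i := 0; i < tens; i++ {
            result *= 10
        }
        return result
    }

    func roundUp(n int) int {
        base := roundDown10(n)
        if n < 2*base {
            return 2 * base
        }
        if n < 5*base {
            return 5 * base
        }
        return 10 * base
    }

    func main() {
        fmt.Println(roundDown10(137)) // 100
        fmt.Println(roundUp(137))     // 200
        fmt.Println(roundUp(300))     // 500
        fmt.Println(roundUp(500))     // 1000 (the comparisons are strict)
    }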
91
vendor/gopkg.in/check.v1/benchmark_test.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
// These tests verify the test running logic.

package check_test

import (
	"time"
	. "gopkg.in/check.v1"
)

var benchmarkS = Suite(&BenchmarkS{})

type BenchmarkS struct{}

func (s *BenchmarkS) TestCountSuite(c *C) {
	suitesRun += 1
}

func (s *BenchmarkS) TestBasicTestTiming(c *C) {
	helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond}
	output := String{}
	runConf := RunConf{Output: &output, Verbose: true}
	Run(&helper, &runConf)

	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.0[0-9]+s\n" +
		"PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.0[0-9]+s\n"
	c.Assert(output.value, Matches, expected)
}

func (s *BenchmarkS) TestStreamTestTiming(c *C) {
	helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond}
	output := String{}
	runConf := RunConf{Output: &output, Stream: true}
	Run(&helper, &runConf)

	expected := "(?s).*\nPASS: check_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t[0-9]+\\.[0-9]+s\n.*"
	c.Assert(output.value, Matches, expected)
}

func (s *BenchmarkS) TestBenchmark(c *C) {
	helper := FixtureHelper{sleep: 100000}
	output := String{}
	runConf := RunConf{
		Output:        &output,
		Benchmark:     true,
		BenchmarkTime: 10000000,
		Filter:        "Benchmark1",
	}
	Run(&helper, &runConf)
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "Benchmark1")
	c.Check(helper.calls[3], Equals, "TearDownTest")
	c.Check(helper.calls[4], Equals, "SetUpTest")
	c.Check(helper.calls[5], Equals, "Benchmark1")
	c.Check(helper.calls[6], Equals, "TearDownTest")
	// ... and more.

	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t\\s+[0-9]+\t\\s+[0-9]+ ns/op\n"
	c.Assert(output.value, Matches, expected)
}

func (s *BenchmarkS) TestBenchmarkBytes(c *C) {
	helper := FixtureHelper{sleep: 100000}
	output := String{}
	runConf := RunConf{
		Output:        &output,
		Benchmark:     true,
		BenchmarkTime: 10000000,
		Filter:        "Benchmark2",
	}
	Run(&helper, &runConf)

	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t\\s+[0-9]+\t\\s+[0-9]+ ns/op\t\\s+ *[1-9]\\.[0-9]{2} MB/s\n"
	c.Assert(output.value, Matches, expected)
}

func (s *BenchmarkS) TestBenchmarkMem(c *C) {
	helper := FixtureHelper{sleep: 100000}
	output := String{}
	runConf := RunConf{
		Output:        &output,
		Benchmark:     true,
		BenchmarkMem:  true,
		BenchmarkTime: 10000000,
		Filter:        "Benchmark3",
	}
	Run(&helper, &runConf)

	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark3\t\\s+ [0-9]+\t\\s+ *[0-9]+ ns/op\t\\s+ [0-9]+ B/op\t\\s+ [1-9]+ allocs/op\n"
	c.Assert(output.value, Matches, expected)
}
82
vendor/gopkg.in/check.v1/bootstrap_test.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
// These initial tests are for bootstrapping. They verify that we can
// basically use the testing infrastructure itself to check if the test
// system is working.
//
// These tests will break down the test runner badly in case of
// errors because if they simply fail, we can't be sure the developer
// will ever see anything (because failing means the failing system
// somehow isn't working! :-)
//
// Do not assume *any* internal functionality works as expected besides
// what's actually tested here.

package check_test

import (
	"fmt"
	"gopkg.in/check.v1"
	"strings"
)

type BootstrapS struct{}

var boostrapS = check.Suite(&BootstrapS{})

func (s *BootstrapS) TestCountSuite(c *check.C) {
	suitesRun += 1
}

func (s *BootstrapS) TestFailedAndFail(c *check.C) {
	if c.Failed() {
		critical("c.Failed() must be false first!")
	}
	c.Fail()
	if !c.Failed() {
		critical("c.Fail() didn't put the test in a failed state!")
	}
	c.Succeed()
}

func (s *BootstrapS) TestFailedAndSucceed(c *check.C) {
	c.Fail()
	c.Succeed()
	if c.Failed() {
		critical("c.Succeed() didn't put the test back in a non-failed state")
	}
}

func (s *BootstrapS) TestLogAndGetTestLog(c *check.C) {
	c.Log("Hello there!")
	log := c.GetTestLog()
	if log != "Hello there!\n" {
		critical(fmt.Sprintf("Log() or GetTestLog() is not working! Got: %#v", log))
	}
}

func (s *BootstrapS) TestLogfAndGetTestLog(c *check.C) {
	c.Logf("Hello %v", "there!")
	log := c.GetTestLog()
	if log != "Hello there!\n" {
		critical(fmt.Sprintf("Logf() or GetTestLog() is not working! Got: %#v", log))
	}
}

func (s *BootstrapS) TestRunShowsErrors(c *check.C) {
	output := String{}
	check.Run(&FailHelper{}, &check.RunConf{Output: &output})
	if strings.Index(output.value, "Expected failure!") == -1 {
		critical(fmt.Sprintf("RunWithWriter() output did not contain the "+
			"expected failure! Got: %#v",
			output.value))
	}
}

func (s *BootstrapS) TestRunDoesntShowSuccesses(c *check.C) {
	output := String{}
	check.Run(&SuccessHelper{}, &check.RunConf{Output: &output})
	if strings.Index(output.value, "Expected success!") != -1 {
		critical(fmt.Sprintf("RunWithWriter() output contained a successful "+
			"test! Got: %#v",
			output.value))
	}
}
873
vendor/gopkg.in/check.v1/check.go
generated
vendored
Normal file
@ -0,0 +1,873 @@
// Package check is a rich testing extension for Go's testing package.
//
// For details about the project, see:
//
//     http://labix.org/gocheck
//
package check

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// -----------------------------------------------------------------------
// Internal type which deals with suite method calling.

const (
	fixtureKd = iota
	testKd
)

type funcKind int

const (
	succeededSt = iota
	failedSt
	skippedSt
	panickedSt
	fixturePanickedSt
	missedSt
)

type funcStatus uint32

// A method value can't reach its own Method structure.
type methodType struct {
	reflect.Value
	Info reflect.Method
}

func newMethod(receiver reflect.Value, i int) *methodType {
	return &methodType{receiver.Method(i), receiver.Type().Method(i)}
}

func (method *methodType) PC() uintptr {
	return method.Info.Func.Pointer()
}

func (method *methodType) suiteName() string {
	t := method.Info.Type.In(0)
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	return t.Name()
}

func (method *methodType) String() string {
	return method.suiteName() + "." + method.Info.Name
}

func (method *methodType) matches(re *regexp.Regexp) bool {
	return (re.MatchString(method.Info.Name) ||
		re.MatchString(method.suiteName()) ||
		re.MatchString(method.String()))
}
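matches backs the RunConf.Filter option defined further down: the compiled filter regexp selects a method if it matches the method name, the suite name, or the combined "Suite.Method" string. A sketch with a hypothetical suite (any of "TestReader", "MySuite", or "^MySuite\.TestReader$" as the Filter value would select the method below):

    package mypkg_test

    import (
        "testing"

        "gopkg.in/check.v1"
    )

    // MySuite and TestReader are hypothetical stand-ins.
    type MySuite struct{}

    func (s *MySuite) TestReader(c *check.C) {}

    // TestFiltered runs only the methods selected by Filter.
    func TestFiltered(t *testing.T) {
        check.Run(&MySuite{}, &check.RunConf{Filter: "^MySuite\\.TestReader$"})
    }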

type C struct {
	method    *methodType
	kind      funcKind
	testName  string
	_status   funcStatus
	logb      *logger
	logw      io.Writer
	done      chan *C
	reason    string
	mustFail  bool
	tempDir   *tempDir
	benchMem  bool
	startTime time.Time
	timer
}

func (c *C) status() funcStatus {
	return funcStatus(atomic.LoadUint32((*uint32)(&c._status)))
}

func (c *C) setStatus(s funcStatus) {
	atomic.StoreUint32((*uint32)(&c._status), uint32(s))
}

func (c *C) stopNow() {
	runtime.Goexit()
}

// logger is a concurrency-safe bytes.Buffer
type logger struct {
	sync.Mutex
	writer bytes.Buffer
}

func (l *logger) Write(buf []byte) (int, error) {
	l.Lock()
	defer l.Unlock()
	return l.writer.Write(buf)
}

func (l *logger) WriteTo(w io.Writer) (int64, error) {
	l.Lock()
	defer l.Unlock()
	return l.writer.WriteTo(w)
}

func (l *logger) String() string {
	l.Lock()
	defer l.Unlock()
	return l.writer.String()
}

// -----------------------------------------------------------------------
// Handling of temporary files and directories.

type tempDir struct {
	sync.Mutex
	path    string
	counter int
}

func (td *tempDir) newPath() string {
	td.Lock()
	defer td.Unlock()
	if td.path == "" {
		var err error
		for i := 0; i != 100; i++ {
			path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int())
			if err = os.Mkdir(path, 0700); err == nil {
				td.path = path
				break
			}
		}
		if td.path == "" {
			panic("Couldn't create temporary directory: " + err.Error())
		}
	}
	result := filepath.Join(td.path, strconv.Itoa(td.counter))
	td.counter++
	return result
}

func (td *tempDir) removeAll() {
	td.Lock()
	defer td.Unlock()
	if td.path != "" {
		err := os.RemoveAll(td.path)
		if err != nil {
			fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error())
		}
	}
}

// MkDir creates a new temporary directory which is automatically
// removed after the suite finishes running.
func (c *C) MkDir() string {
	path := c.tempDir.newPath()
	if err := os.Mkdir(path, 0700); err != nil {
		panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error()))
	}
	return path
}
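In a test, MkDir hands out a fresh directory per call and the runner deletes everything after the suite finishes, so no cleanup code is needed. A sketch with a hypothetical suite:

    package mypkg_test

    import (
        "io/ioutil"
        "path/filepath"

        "gopkg.in/check.v1"
    )

    type TempS struct{}

    var _ = check.Suite(&TempS{})

    // Each MkDir call returns a fresh directory; the runner removes it
    // once the whole suite has finished.
    func (s *TempS) TestWritesFiles(c *check.C) {
        dir := c.MkDir()
        path := filepath.Join(dir, "out.txt")
        err := ioutil.WriteFile(path, []byte("data"), 0644)
        c.Assert(err, check.IsNil)
    }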

// -----------------------------------------------------------------------
// Low-level logging functions.

func (c *C) log(args ...interface{}) {
	c.writeLog([]byte(fmt.Sprint(args...) + "\n"))
}

func (c *C) logf(format string, args ...interface{}) {
	c.writeLog([]byte(fmt.Sprintf(format+"\n", args...)))
}

func (c *C) logNewLine() {
	c.writeLog([]byte{'\n'})
}

func (c *C) writeLog(buf []byte) {
	c.logb.Write(buf)
	if c.logw != nil {
		c.logw.Write(buf)
	}
}

func hasStringOrError(x interface{}) (ok bool) {
	_, ok = x.(fmt.Stringer)
	if ok {
		return
	}
	_, ok = x.(error)
	return
}

func (c *C) logValue(label string, value interface{}) {
	if label == "" {
		if hasStringOrError(value) {
			c.logf("... %#v (%q)", value, value)
		} else {
			c.logf("... %#v", value)
		}
	} else if value == nil {
		c.logf("... %s = nil", label)
	} else {
		if hasStringOrError(value) {
			fv := fmt.Sprintf("%#v", value)
			qv := fmt.Sprintf("%q", value)
			if fv != qv {
				c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv)
				return
			}
		}
		if s, ok := value.(string); ok && isMultiLine(s) {
			c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value))
			c.logMultiLine(s)
		} else {
			c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value)
		}
	}
}

func (c *C) logMultiLine(s string) {
	b := make([]byte, 0, len(s)*2)
	i := 0
	n := len(s)
	for i < n {
		j := i + 1
		for j < n && s[j-1] != '\n' {
			j++
		}
		b = append(b, "... "...)
		b = strconv.AppendQuote(b, s[i:j])
		if j < n {
			b = append(b, " +"...)
		}
		b = append(b, '\n')
		i = j
	}
	c.writeLog(b)
}

func isMultiLine(s string) bool {
	for i := 0; i+1 < len(s); i++ {
		if s[i] == '\n' {
			return true
		}
	}
	return false
}

func (c *C) logString(issue string) {
	c.log("... ", issue)
}

func (c *C) logCaller(skip int) {
	// This is a bit heavier than it ought to be.
	skip++ // Our own frame.
	pc, callerFile, callerLine, ok := runtime.Caller(skip)
	if !ok {
		return
	}
	var testFile string
	var testLine int
	testFunc := runtime.FuncForPC(c.method.PC())
	if runtime.FuncForPC(pc) != testFunc {
		for {
			skip++
			if pc, file, line, ok := runtime.Caller(skip); ok {
				// Note that the test line may be different on
				// distinct calls for the same test. Showing
				// the "internal" line is helpful when debugging.
				if runtime.FuncForPC(pc) == testFunc {
					testFile, testLine = file, line
					break
				}
			} else {
				break
			}
		}
	}
	if testFile != "" && (testFile != callerFile || testLine != callerLine) {
		c.logCode(testFile, testLine)
	}
	c.logCode(callerFile, callerLine)
}

func (c *C) logCode(path string, line int) {
	c.logf("%s:%d:", nicePath(path), line)
	code, err := printLine(path, line)
	if code == "" {
		code = "..." // XXX Open the file and take the raw line.
		if err != nil {
			code += err.Error()
		}
	}
	c.log(indent(code, " "))
}

var valueGo = filepath.Join("reflect", "value.go")
var asmGo = filepath.Join("runtime", "asm_")

func (c *C) logPanic(skip int, value interface{}) {
	skip++ // Our own frame.
	initialSkip := skip
	for ; ; skip++ {
		if pc, file, line, ok := runtime.Caller(skip); ok {
			if skip == initialSkip {
				c.logf("... Panic: %s (PC=0x%X)\n", value, pc)
			}
			name := niceFuncName(pc)
			path := nicePath(file)
			if strings.Contains(path, "/gopkg.in/check.v") {
				continue
			}
			if name == "Value.call" && strings.HasSuffix(path, valueGo) {
				continue
			}
			if (name == "call16" || name == "call32") && strings.Contains(path, asmGo) {
				continue
			}
			c.logf("%s:%d\n in %s", nicePath(file), line, name)
		} else {
			break
		}
	}
}

func (c *C) logSoftPanic(issue string) {
	c.log("... Panic: ", issue)
}

func (c *C) logArgPanic(method *methodType, expectedType string) {
	c.logf("... Panic: %s argument should be %s",
		niceFuncName(method.PC()), expectedType)
}

// -----------------------------------------------------------------------
// Some simple formatting helpers.

var initWD, initWDErr = os.Getwd()

func init() {
	if initWDErr == nil {
		initWD = strings.Replace(initWD, "\\", "/", -1) + "/"
	}
}

func nicePath(path string) string {
	if initWDErr == nil {
		if strings.HasPrefix(path, initWD) {
			return path[len(initWD):]
		}
	}
	return path
}

func niceFuncPath(pc uintptr) string {
	function := runtime.FuncForPC(pc)
	if function != nil {
		filename, line := function.FileLine(pc)
		return fmt.Sprintf("%s:%d", nicePath(filename), line)
	}
	return "<unknown path>"
}

func niceFuncName(pc uintptr) string {
	function := runtime.FuncForPC(pc)
	if function != nil {
		name := path.Base(function.Name())
		if i := strings.Index(name, "."); i > 0 {
			name = name[i+1:]
		}
		if strings.HasPrefix(name, "(*") {
			if i := strings.Index(name, ")"); i > 0 {
				name = name[2:i] + name[i+1:]
			}
		}
		if i := strings.LastIndex(name, ".*"); i != -1 {
			name = name[:i] + "." + name[i+2:]
		}
		if i := strings.LastIndex(name, "·"); i != -1 {
			name = name[:i] + "." + name[i+2:]
		}
		return name
	}
	return "<unknown function>"
}

// -----------------------------------------------------------------------
// Result tracker to aggregate call results.

type Result struct {
	Succeeded        int
	Failed           int
	Skipped          int
	Panicked         int
	FixturePanicked  int
	ExpectedFailures int
	Missed           int    // Not even tried to run, related to a panic in the fixture.
	RunError         error  // Houston, we've got a problem.
	WorkDir          string // If KeepWorkDir is true
}

type resultTracker struct {
	result          Result
	_lastWasProblem bool
	_waiting        int
	_missed         int
	_expectChan     chan *C
	_doneChan       chan *C
	_stopChan       chan bool
}

func newResultTracker() *resultTracker {
	return &resultTracker{_expectChan: make(chan *C), // Synchronous
		_doneChan: make(chan *C, 32), // Asynchronous
		_stopChan: make(chan bool)}   // Synchronous
}

func (tracker *resultTracker) start() {
	go tracker._loopRoutine()
}

func (tracker *resultTracker) waitAndStop() {
	<-tracker._stopChan
}

func (tracker *resultTracker) expectCall(c *C) {
	tracker._expectChan <- c
}

func (tracker *resultTracker) callDone(c *C) {
	tracker._doneChan <- c
}

func (tracker *resultTracker) _loopRoutine() {
	for {
		var c *C
		if tracker._waiting > 0 {
			// Calls still running. Can't stop.
			select {
			// XXX Reindent this (not now to make diff clear)
			case <-tracker._expectChan:
				tracker._waiting++
			case c = <-tracker._doneChan:
				tracker._waiting--
				switch c.status() {
				case succeededSt:
					if c.kind == testKd {
						if c.mustFail {
							tracker.result.ExpectedFailures++
						} else {
							tracker.result.Succeeded++
						}
					}
				case failedSt:
					tracker.result.Failed++
				case panickedSt:
					if c.kind == fixtureKd {
						tracker.result.FixturePanicked++
					} else {
						tracker.result.Panicked++
					}
				case fixturePanickedSt:
					// Track it as missed, since the panic
					// was on the fixture, not on the test.
					tracker.result.Missed++
				case missedSt:
					tracker.result.Missed++
				case skippedSt:
					if c.kind == testKd {
						tracker.result.Skipped++
					}
				}
			}
		} else {
			// No calls. Can stop, but no done calls here.
			select {
			case tracker._stopChan <- true:
				return
			case <-tracker._expectChan:
				tracker._waiting++
			case <-tracker._doneChan:
				panic("Tracker got an unexpected done call.")
			}
		}
	}
}

// -----------------------------------------------------------------------
// The underlying suite runner.

type suiteRunner struct {
	suite                     interface{}
	setUpSuite, tearDownSuite *methodType
	setUpTest, tearDownTest   *methodType
	tests                     []*methodType
	tracker                   *resultTracker
	tempDir                   *tempDir
	keepDir                   bool
	output                    *outputWriter
	reportedProblemLast       bool
	benchTime                 time.Duration
	benchMem                  bool
}

type RunConf struct {
	Output        io.Writer
	Stream        bool
	Verbose       bool
	Filter        string
	Benchmark     bool
	BenchmarkTime time.Duration // Defaults to 1 second
	BenchmarkMem  bool
	KeepWorkDir   bool
}

// Create a new suiteRunner able to run all methods in the given suite.
func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
	var conf RunConf
	if runConf != nil {
		conf = *runConf
	}
	if conf.Output == nil {
		conf.Output = os.Stdout
	}
	if conf.Benchmark {
		conf.Verbose = true
	}

	suiteType := reflect.TypeOf(suite)
	suiteNumMethods := suiteType.NumMethod()
	suiteValue := reflect.ValueOf(suite)

	runner := &suiteRunner{
		suite:     suite,
		output:    newOutputWriter(conf.Output, conf.Stream, conf.Verbose),
		tracker:   newResultTracker(),
		benchTime: conf.BenchmarkTime,
		benchMem:  conf.BenchmarkMem,
		tempDir:   &tempDir{},
		keepDir:   conf.KeepWorkDir,
		tests:     make([]*methodType, 0, suiteNumMethods),
	}
	if runner.benchTime == 0 {
		runner.benchTime = 1 * time.Second
	}

	var filterRegexp *regexp.Regexp
	if conf.Filter != "" {
		regexp, err := regexp.Compile(conf.Filter)
		if err != nil {
			msg := "Bad filter expression: " + err.Error()
			runner.tracker.result.RunError = errors.New(msg)
			return runner
		}
		filterRegexp = regexp
	}

	for i := 0; i != suiteNumMethods; i++ {
		method := newMethod(suiteValue, i)
		switch method.Info.Name {
		case "SetUpSuite":
			runner.setUpSuite = method
		case "TearDownSuite":
			runner.tearDownSuite = method
		case "SetUpTest":
			runner.setUpTest = method
		case "TearDownTest":
			runner.tearDownTest = method
		default:
			prefix := "Test"
			if conf.Benchmark {
				prefix = "Benchmark"
			}
			if !strings.HasPrefix(method.Info.Name, prefix) {
				continue
			}
			if filterRegexp == nil || method.matches(filterRegexp) {
				runner.tests = append(runner.tests, method)
			}
		}
	}
	return runner
}
||||
|
||||
// Run all methods in the given suite.
|
||||
func (runner *suiteRunner) run() *Result {
|
||||
if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {
|
||||
runner.tracker.start()
|
||||
if runner.checkFixtureArgs() {
|
||||
c := runner.runFixture(runner.setUpSuite, "", nil)
|
||||
if c == nil || c.status() == succeededSt {
|
||||
for i := 0; i != len(runner.tests); i++ {
|
||||
c := runner.runTest(runner.tests[i])
|
||||
if c.status() == fixturePanickedSt {
|
||||
runner.skipTests(missedSt, runner.tests[i+1:])
|
||||
break
|
||||
}
|
||||
}
|
||||
} else if c != nil && c.status() == skippedSt {
|
||||
runner.skipTests(skippedSt, runner.tests)
|
||||
} else {
|
||||
runner.skipTests(missedSt, runner.tests)
|
||||
}
|
||||
runner.runFixture(runner.tearDownSuite, "", nil)
|
||||
} else {
|
||||
runner.skipTests(missedSt, runner.tests)
|
||||
}
|
||||
runner.tracker.waitAndStop()
|
||||
if runner.keepDir {
|
||||
runner.tracker.result.WorkDir = runner.tempDir.path
|
||||
} else {
|
||||
runner.tempDir.removeAll()
|
||||
}
|
||||
}
|
||||
return &runner.tracker.result
|
||||
}
|
||||
|
||||
// Create a call object with the given suite method, and fork a
|
||||
// goroutine with the provided dispatcher for running it.
|
||||
func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
|
||||
var logw io.Writer
|
||||
if runner.output.Stream {
|
||||
logw = runner.output
|
||||
}
|
||||
if logb == nil {
|
||||
logb = new(logger)
|
||||
}
|
||||
c := &C{
|
||||
method: method,
|
||||
kind: kind,
|
||||
testName: testName,
|
||||
logb: logb,
|
||||
logw: logw,
|
||||
tempDir: runner.tempDir,
|
||||
done: make(chan *C, 1),
|
||||
timer: timer{benchTime: runner.benchTime},
|
||||
startTime: time.Now(),
|
||||
benchMem: runner.benchMem,
|
||||
}
|
||||
runner.tracker.expectCall(c)
|
||||
go (func() {
|
||||
runner.reportCallStarted(c)
|
||||
defer runner.callDone(c)
|
||||
dispatcher(c)
|
||||
})()
|
||||
return c
|
||||
}
|
||||
|
||||
// Same as forkCall(), but wait for call to finish before returning.
|
||||
func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
|
||||
c := runner.forkCall(method, kind, testName, logb, dispatcher)
|
||||
<-c.done
|
||||
return c
|
||||
}

// Handle a finished call. If there were any panics, update the call status
// accordingly. Then, mark the call as done and report to the tracker.
func (runner *suiteRunner) callDone(c *C) {
	value := recover()
	if value != nil {
		switch v := value.(type) {
		case *fixturePanic:
			if v.status == skippedSt {
				c.setStatus(skippedSt)
			} else {
				c.logSoftPanic("Fixture has panicked (see related PANIC)")
				c.setStatus(fixturePanickedSt)
			}
		default:
			c.logPanic(1, value)
			c.setStatus(panickedSt)
		}
	}
	if c.mustFail {
		switch c.status() {
		case failedSt:
			c.setStatus(succeededSt)
		case succeededSt:
			c.setStatus(failedSt)
			c.logString("Error: Test succeeded, but was expected to fail")
			c.logString("Reason: " + c.reason)
		}
	}

	runner.reportCallDone(c)
	c.done <- c
}
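
// The mustFail inversion above is what backs C.ExpectFailure. A
// hypothetical use (checkRounding is an invented helper):
//
//     func (s *MySuite) TestKnownBug(c *C) {
//         c.ExpectFailure("bug #NNN: rounding is wrong")
//         checkRounding(c) // currently fails; the test reports as passed
//     }
//
// If the checks unexpectedly succeed, the succeededSt case above flips
// the call back to failed and logs the recorded reason.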

// Runs a fixture call synchronously. The fixture will still be run in a
// goroutine like all suite methods, but this method will not return
// until the fixture goroutine is done, because fixtures must be run in
// a defined order.
func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C {
	if method != nil {
		c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) {
			c.ResetTimer()
			c.StartTimer()
			defer c.StopTimer()
			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
		})
		return c
	}
	return nil
}

// Run the fixture method with runFixture(), but panic with a fixturePanic{}
// in case the fixture method panics. This makes it easier to track the
// fixture panic together with other call panics within forkTest().
func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C {
	if skipped != nil && *skipped {
		return nil
	}
	c := runner.runFixture(method, testName, logb)
	if c != nil && c.status() != succeededSt {
		if skipped != nil {
			*skipped = c.status() == skippedSt
		}
		panic(&fixturePanic{c.status(), method})
	}
	return c
}

type fixturePanic struct {
	status funcStatus
	method *methodType
}

// Run the suite test method, together with the test-specific fixture,
// asynchronously.
func (runner *suiteRunner) forkTest(method *methodType) *C {
	testName := method.String()
	return runner.forkCall(method, testKd, testName, nil, func(c *C) {
		var skipped bool
		defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped)
		defer c.StopTimer()
		benchN := 1
		for {
			runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped)
			mt := c.method.Type()
			if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) {
				// Rather than a plain panic, provide a more helpful message when
				// the argument type is incorrect.
				c.setStatus(panickedSt)
				c.logArgPanic(c.method, "*check.C")
				return
			}
			if strings.HasPrefix(c.method.Info.Name, "Test") {
				c.ResetTimer()
				c.StartTimer()
				c.method.Call([]reflect.Value{reflect.ValueOf(c)})
				return
			}
			if !strings.HasPrefix(c.method.Info.Name, "Benchmark") {
				panic("unexpected method prefix: " + c.method.Info.Name)
			}

			runtime.GC()
			c.N = benchN
			c.ResetTimer()
			c.StartTimer()
			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
			c.StopTimer()
			if c.status() != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 {
				return
			}
			perOpN := int(1e9)
			if c.nsPerOp() != 0 {
				perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp())
			}

			// Logic taken from the stock testing package:
			// - Run more iterations than we think we'll need for a second (1.5x).
			// - Don't grow too fast in case we had timing errors previously.
			// - Be sure to run at least one more than last time.
			benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1)
			benchN = roundUp(benchN)

			skipped = true // Don't run the deferred one if this panics.
			runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil)
			skipped = false
		}
	})
}
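
// A worked example of the growth rule above, assuming benchTime is the
// default of one second: if the first round with benchN=1 takes 1ms,
// nsPerOp is 1e6, so perOpN is about 1000; min(1500, 100*1) caps the jump
// at 100, max with benchN+1 keeps it growing, and roundUp() settles on a
// round figure, so the next round runs 100 iterations. Rounds repeat
// until the measured duration reaches benchTime or benchN hits 1e9.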

// Same as forkTest(), but wait for the test to finish before returning.
func (runner *suiteRunner) runTest(method *methodType) *C {
	c := runner.forkTest(method)
	<-c.done
	return c
}

// Helper to mark tests as skipped or missed. A bit heavy for what
// it does, but it enables homogeneous handling of tracking, including
// nice verbose output.
func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) {
	for _, method := range methods {
		runner.runFunc(method, testKd, "", nil, func(c *C) {
			c.setStatus(status)
		})
	}
}

// Verify if the fixture arguments are *check.C. In case of errors,
// log the error as a panic in the fixture method call, and return false.
func (runner *suiteRunner) checkFixtureArgs() bool {
	succeeded := true
	argType := reflect.TypeOf(&C{})
	for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} {
		if method != nil {
			mt := method.Type()
			if mt.NumIn() != 1 || mt.In(0) != argType {
				succeeded = false
				runner.runFunc(method, fixtureKd, "", nil, func(c *C) {
					c.logArgPanic(method, "*check.C")
					c.setStatus(panickedSt)
				})
			}
		}
	}
	return succeeded
}

func (runner *suiteRunner) reportCallStarted(c *C) {
	runner.output.WriteCallStarted("START", c)
}

func (runner *suiteRunner) reportCallDone(c *C) {
	runner.tracker.callDone(c)
	switch c.status() {
	case succeededSt:
		if c.mustFail {
			runner.output.WriteCallSuccess("FAIL EXPECTED", c)
		} else {
			runner.output.WriteCallSuccess("PASS", c)
		}
	case skippedSt:
		runner.output.WriteCallSuccess("SKIP", c)
	case failedSt:
		runner.output.WriteCallProblem("FAIL", c)
	case panickedSt:
		runner.output.WriteCallProblem("PANIC", c)
	case fixturePanickedSt:
		// That's a testKd call reporting that its fixture
		// has panicked. The fixture call which caused the
		// panic itself was tracked above. We'll report to
		// aid debugging.
		runner.output.WriteCallProblem("PANIC", c)
	case missedSt:
		runner.output.WriteCallSuccess("MISS", c)
	}
}
207
vendor/gopkg.in/check.v1/check_test.go
generated
vendored
Normal file
@ -0,0 +1,207 @@
// This file contains just a few generic helpers which are used by the
// other test files.

package check_test

import (
	"flag"
	"fmt"
	"os"
	"regexp"
	"runtime"
	"testing"
	"time"

	"gopkg.in/check.v1"
)

// We count the number of suites run to get at least a vague hint that the
// test suite is behaving as it should. Otherwise a bug introduced at the
// very core of the system could go unperceived.
const suitesRunExpected = 8

var suitesRun int = 0

func Test(t *testing.T) {
	check.TestingT(t)
	if suitesRun != suitesRunExpected && flag.Lookup("check.f").Value.String() == "" {
		critical(fmt.Sprintf("Expected %d suites to run rather than %d",
			suitesRunExpected, suitesRun))
	}
}

// -----------------------------------------------------------------------
// Helper functions.

// Break down badly. This is used in test cases which can't yet assume
// that the fundamental bits are working.
func critical(error string) {
	fmt.Fprintln(os.Stderr, "CRITICAL: "+error)
	os.Exit(1)
}

// Return the file line where it's called.
func getMyLine() int {
	if _, _, line, ok := runtime.Caller(1); ok {
		return line
	}
	return -1
}

// -----------------------------------------------------------------------
// Helper type implementing a basic io.Writer for testing output.

// Type implementing the io.Writer interface for analyzing output.
type String struct {
	value string
}

// The only function required by the io.Writer interface. Will append
// written data to the String.value string.
func (s *String) Write(p []byte) (n int, err error) {
	s.value += string(p)
	return len(p), nil
}
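
// The fixture and foundation tests below use String to capture the
// runner's output, roughly as follows:
//
//     output := String{}
//     Run(&FixtureHelper{}, &RunConf{Output: &output})
//     // output.value now holds everything the runner wrote.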

// Trivial wrapper to test errors happening in a different file
// than the test itself.
func checkEqualWrapper(c *check.C, obtained, expected interface{}) (result bool, line int) {
	return c.Check(obtained, check.Equals, expected), getMyLine()
}

// -----------------------------------------------------------------------
// Helper suite for testing basic fail behavior.

type FailHelper struct {
	testLine int
}

func (s *FailHelper) TestLogAndFail(c *check.C) {
	s.testLine = getMyLine() - 1
	c.Log("Expected failure!")
	c.Fail()
}

// -----------------------------------------------------------------------
// Helper suite for testing basic success behavior.

type SuccessHelper struct{}

func (s *SuccessHelper) TestLogAndSucceed(c *check.C) {
	c.Log("Expected success!")
}

// -----------------------------------------------------------------------
// Helper suite for testing ordering and behavior of fixture.

type FixtureHelper struct {
	calls   []string
	panicOn string
	skip    bool
	skipOnN int
	sleepOn string
	sleep   time.Duration
	bytes   int64
}

func (s *FixtureHelper) trace(name string, c *check.C) {
	s.calls = append(s.calls, name)
	if name == s.panicOn {
		panic(name)
	}
	if s.sleep > 0 && s.sleepOn == name {
		time.Sleep(s.sleep)
	}
	if s.skip && s.skipOnN == len(s.calls)-1 {
		c.Skip("skipOnN == n")
	}
}

func (s *FixtureHelper) SetUpSuite(c *check.C) {
	s.trace("SetUpSuite", c)
}

func (s *FixtureHelper) TearDownSuite(c *check.C) {
	s.trace("TearDownSuite", c)
}

func (s *FixtureHelper) SetUpTest(c *check.C) {
	s.trace("SetUpTest", c)
}

func (s *FixtureHelper) TearDownTest(c *check.C) {
	s.trace("TearDownTest", c)
}

func (s *FixtureHelper) Test1(c *check.C) {
	s.trace("Test1", c)
}

func (s *FixtureHelper) Test2(c *check.C) {
	s.trace("Test2", c)
}

func (s *FixtureHelper) Benchmark1(c *check.C) {
	s.trace("Benchmark1", c)
	for i := 0; i < c.N; i++ {
		time.Sleep(s.sleep)
	}
}

func (s *FixtureHelper) Benchmark2(c *check.C) {
	s.trace("Benchmark2", c)
	c.SetBytes(1024)
	for i := 0; i < c.N; i++ {
		time.Sleep(s.sleep)
	}
}

func (s *FixtureHelper) Benchmark3(c *check.C) {
	var x []int64
	s.trace("Benchmark3", c)
	for i := 0; i < c.N; i++ {
		time.Sleep(s.sleep)
		x = make([]int64, 5)
		_ = x
	}
}

// -----------------------------------------------------------------------
// Helper which checks the state of the test and ensures that it matches
// the given expectations. Depends on c.Errorf() working, so shouldn't
// be used to test this one function.

type expectedState struct {
	name   string
	result interface{}
	failed bool
	log    string
}

// Verify the state of the test. Note that since this also verifies if
// the test is supposed to be in a failed state, no other checks should
// be done in addition to what is being tested.
func checkState(c *check.C, result interface{}, expected *expectedState) {
	failed := c.Failed()
	c.Succeed()
	log := c.GetTestLog()
	matched, matchError := regexp.MatchString("^"+expected.log+"$", log)
	if matchError != nil {
		c.Errorf("Error in matching expression used in testing %s",
			expected.name)
	} else if !matched {
		c.Errorf("%s logged:\n----------\n%s----------\n\nExpected:\n----------\n%s\n----------",
			expected.name, log, expected.log)
	}
	if result != expected.result {
		c.Errorf("%s returned %#v rather than %#v",
			expected.name, result, expected.result)
	}
	if failed != expected.failed {
		if failed {
			c.Errorf("%s has failed when it shouldn't", expected.name)
		} else {
			c.Errorf("%s has not failed when it should", expected.name)
		}
	}
}
458
vendor/gopkg.in/check.v1/checkers.go
generated
vendored
Normal file
@ -0,0 +1,458 @@
package check

import (
	"fmt"
	"reflect"
	"regexp"
)

// -----------------------------------------------------------------------
// CommentInterface and Commentf helper, to attach extra information to checks.

type comment struct {
	format string
	args   []interface{}
}

// Commentf returns an informational value to use with Assert or Check calls.
// If the checker test fails, the provided arguments will be passed to
// fmt.Sprintf, and will be presented next to the logged failure.
//
// For example:
//
//     c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
//
// Note that if the comment is constant, a better option is to
// simply use a normal comment right above or next to the line, as
// it will also get printed with any errors:
//
//     c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
//
func Commentf(format string, args ...interface{}) CommentInterface {
	return &comment{format, args}
}

// CommentInterface must be implemented by types that attach extra
// information to failed checks. See the Commentf function for details.
type CommentInterface interface {
	CheckCommentString() string
}

func (c *comment) CheckCommentString() string {
	return fmt.Sprintf(c.format, c.args...)
}

// -----------------------------------------------------------------------
// The Checker interface.

// The Checker interface must be provided by checkers used with
// the Assert and Check verification methods.
type Checker interface {
	Info() *CheckerInfo
	Check(params []interface{}, names []string) (result bool, error string)
}

// See the Checker interface.
type CheckerInfo struct {
	Name   string
	Params []string
}

func (info *CheckerInfo) Info() *CheckerInfo {
	return info
}
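
// Any type satisfying the two-method Checker interface above can be used
// with Assert and Check. As an illustrative sketch (not part of this
// package), a checker verifying that an int is positive:
//
//     type isPositiveChecker struct{ *CheckerInfo }
//
//     var IsPositive Checker = &isPositiveChecker{
//         &CheckerInfo{Name: "IsPositive", Params: []string{"obtained"}},
//     }
//
//     func (checker *isPositiveChecker) Check(params []interface{}, names []string) (bool, string) {
//         n, ok := params[0].(int)
//         if !ok {
//             return false, "obtained value must be an int"
//         }
//         return n > 0, ""
//     }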

// -----------------------------------------------------------------------
// Not checker logic inverter.

// The Not checker inverts the logic of the provided checker. The
// resulting checker will succeed where the original one failed, and
// vice-versa.
//
// For example:
//
//     c.Assert(a, Not(Equals), b)
//
func Not(checker Checker) Checker {
	return &notChecker{checker}
}

type notChecker struct {
	sub Checker
}

func (checker *notChecker) Info() *CheckerInfo {
	info := *checker.sub.Info()
	info.Name = "Not(" + info.Name + ")"
	return &info
}

func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
	result, error = checker.sub.Check(params, names)
	result = !result
	return
}

// -----------------------------------------------------------------------
// IsNil checker.

type isNilChecker struct {
	*CheckerInfo
}

// The IsNil checker tests whether the obtained value is nil.
//
// For example:
//
//     c.Assert(err, IsNil)
//
var IsNil Checker = &isNilChecker{
	&CheckerInfo{Name: "IsNil", Params: []string{"value"}},
}

func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
	return isNil(params[0]), ""
}

func isNil(obtained interface{}) (result bool) {
	if obtained == nil {
		result = true
	} else {
		switch v := reflect.ValueOf(obtained); v.Kind() {
		case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
			return v.IsNil()
		}
	}
	return
}
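
// The reflect-based fallback above makes IsNil catch typed nils stored in
// interfaces, which a plain == nil comparison would miss. For instance:
//
//     var p *int               // nil pointer
//     var v interface{} = p    // non-nil interface holding a nil pointer
//     v == nil                 // false
//     isNil(v)                 // true, via reflect.Value.IsNil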

// -----------------------------------------------------------------------
// NotNil checker. Alias for Not(IsNil), since it's so common.

type notNilChecker struct {
	*CheckerInfo
}

// The NotNil checker verifies that the obtained value is not nil.
//
// For example:
//
//     c.Assert(iface, NotNil)
//
// This is an alias for Not(IsNil), made available since it's a
// fairly common check.
//
var NotNil Checker = &notNilChecker{
	&CheckerInfo{Name: "NotNil", Params: []string{"value"}},
}

func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
	return !isNil(params[0]), ""
}

// -----------------------------------------------------------------------
// Equals checker.

type equalsChecker struct {
	*CheckerInfo
}

// The Equals checker verifies that the obtained value is equal to
// the expected value, according to usual Go semantics for ==.
//
// For example:
//
//     c.Assert(value, Equals, 42)
//
var Equals Checker = &equalsChecker{
	&CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
}

func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) {
	defer func() {
		if v := recover(); v != nil {
			result = false
			error = fmt.Sprint(v)
		}
	}()
	return params[0] == params[1], ""
}
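
// The deferred recover above matters because == panics at runtime on
// uncomparable operands; comparing two []byte values, for example, turns
// into the check error "runtime error: comparing uncomparable type
// []uint8" instead of crashing the run (see TestEquals below).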

// -----------------------------------------------------------------------
// DeepEquals checker.

type deepEqualsChecker struct {
	*CheckerInfo
}

// The DeepEquals checker verifies that the obtained value is deep-equal to
// the expected value. The check will work correctly even when facing
// slices, interfaces, and values of different types (which always fail
// the test).
//
// For example:
//
//     c.Assert(value, DeepEquals, 42)
//     c.Assert(array, DeepEquals, []string{"hi", "there"})
//
var DeepEquals Checker = &deepEqualsChecker{
	&CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}},
}

func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
	return reflect.DeepEqual(params[0], params[1]), ""
}

// -----------------------------------------------------------------------
// HasLen checker.

type hasLenChecker struct {
	*CheckerInfo
}

// The HasLen checker verifies that the obtained value has the
// provided length. In many cases this is superior to using Equals
// in conjunction with the len function because in case the check
// fails the value itself will be printed, instead of its length,
// providing more details for figuring out the problem.
//
// For example:
//
//     c.Assert(list, HasLen, 5)
//
var HasLen Checker = &hasLenChecker{
	&CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
}

func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
	n, ok := params[1].(int)
	if !ok {
		return false, "n must be an int"
	}
	value := reflect.ValueOf(params[0])
	switch value.Kind() {
	case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
	default:
		return false, "obtained value type has no length"
	}
	return value.Len() == n, ""
}

// -----------------------------------------------------------------------
// ErrorMatches checker.

type errorMatchesChecker struct {
	*CheckerInfo
}

// The ErrorMatches checker verifies that the error value
// is non-nil and matches the regular expression provided.
//
// For example:
//
//     c.Assert(err, ErrorMatches, "perm.*denied")
//
var ErrorMatches Checker = errorMatchesChecker{
	&CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
}

func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
	if params[0] == nil {
		return false, "Error value is nil"
	}
	err, ok := params[0].(error)
	if !ok {
		return false, "Value is not an error"
	}
	params[0] = err.Error()
	names[0] = "error"
	return matches(params[0], params[1])
}

// -----------------------------------------------------------------------
// Matches checker.

type matchesChecker struct {
	*CheckerInfo
}

// The Matches checker verifies that the string provided as the obtained
// value (or the string resulting from obtained.String()) matches the
// regular expression provided.
//
// For example:
//
//     c.Assert(err, Matches, "perm.*denied")
//
var Matches Checker = &matchesChecker{
	&CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
}

func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
	return matches(params[0], params[1])
}

func matches(value, regex interface{}) (result bool, error string) {
	reStr, ok := regex.(string)
	if !ok {
		return false, "Regex must be a string"
	}
	valueStr, valueIsStr := value.(string)
	if !valueIsStr {
		if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
			valueStr, valueIsStr = valueWithStr.String(), true
		}
	}
	if valueIsStr {
		matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
		if err != nil {
			return false, "Can't compile regex: " + err.Error()
		}
		return matches, ""
	}
	return false, "Obtained value is not a string and has no .String()"
}
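
// Since matches() anchors the expression as "^"+regex+"$", a pattern must
// cover the whole string: "a.c" matches "abc", but "ab" does not, because
// partial matches are rejected (see TestMatches below).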

// -----------------------------------------------------------------------
// Panics checker.

type panicsChecker struct {
	*CheckerInfo
}

// The Panics checker verifies that calling the provided zero-argument
// function will cause a panic which is deep-equal to the provided value.
//
// For example:
//
//     c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}).
//
//
var Panics Checker = &panicsChecker{
	&CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}},
}

func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) {
	f := reflect.ValueOf(params[0])
	if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
		return false, "Function must take zero arguments"
	}
	defer func() {
		// If the function has not panicked, then don't do the check.
		if error != "" {
			return
		}
		params[0] = recover()
		names[0] = "panic"
		result = reflect.DeepEqual(params[0], params[1])
	}()
	f.Call(nil)
	return false, "Function has not panicked"
}
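
// The deferred closure above leans on the named return values: when
// f.Call panics, "error" is still empty at defer time, so the recovered
// value is compared against the expectation; when f returns normally, the
// "Function has not panicked" error is already set and the check is
// skipped.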

type panicMatchesChecker struct {
	*CheckerInfo
}

// The PanicMatches checker verifies that calling the provided zero-argument
// function will cause a panic with an error value matching
// the regular expression provided.
//
// For example:
//
//     c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`).
//
//
var PanicMatches Checker = &panicMatchesChecker{
	&CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}},
}

func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) {
	f := reflect.ValueOf(params[0])
	if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
		return false, "Function must take zero arguments"
	}
	defer func() {
		// If the function has not panicked, then don't do the check.
		if errmsg != "" {
			return
		}
		obtained := recover()
		names[0] = "panic"
		if e, ok := obtained.(error); ok {
			params[0] = e.Error()
		} else if _, ok := obtained.(string); ok {
			params[0] = obtained
		} else {
			errmsg = "Panic value is not a string or an error"
			return
		}
		result, errmsg = matches(params[0], params[1])
	}()
	f.Call(nil)
	return false, "Function has not panicked"
}

// -----------------------------------------------------------------------
// FitsTypeOf checker.

type fitsTypeChecker struct {
	*CheckerInfo
}

// The FitsTypeOf checker verifies that the obtained value is
// assignable to a variable with the same type as the provided
// sample value.
//
// For example:
//
//     c.Assert(value, FitsTypeOf, int64(0))
//     c.Assert(value, FitsTypeOf, os.Error(nil))
//
var FitsTypeOf Checker = &fitsTypeChecker{
	&CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}},
}

func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) {
	obtained := reflect.ValueOf(params[0])
	sample := reflect.ValueOf(params[1])
	if !obtained.IsValid() {
		return false, ""
	}
	if !sample.IsValid() {
		return false, "Invalid sample value"
	}
	return obtained.Type().AssignableTo(sample.Type()), ""
}

// -----------------------------------------------------------------------
// Implements checker.

type implementsChecker struct {
	*CheckerInfo
}

// The Implements checker verifies that the obtained value
// implements the interface specified via a pointer to an interface
// variable.
//
// For example:
//
//     var e os.Error
//     c.Assert(err, Implements, &e)
//
var Implements Checker = &implementsChecker{
	&CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}},
}

func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) {
	obtained := reflect.ValueOf(params[0])
	ifaceptr := reflect.ValueOf(params[1])
	if !obtained.IsValid() {
		return false, ""
	}
	if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface {
		return false, "ifaceptr should be a pointer to an interface variable"
	}
	return obtained.Type().Implements(ifaceptr.Elem().Type()), ""
}
272
vendor/gopkg.in/check.v1/checkers_test.go
generated
vendored
Normal file
@ -0,0 +1,272 @@
package check_test

import (
	"errors"
	"gopkg.in/check.v1"
	"reflect"
	"runtime"
)

type CheckersS struct{}

var _ = check.Suite(&CheckersS{})

func testInfo(c *check.C, checker check.Checker, name string, paramNames []string) {
	info := checker.Info()
	if info.Name != name {
		c.Fatalf("Got name %s, expected %s", info.Name, name)
	}
	if !reflect.DeepEqual(info.Params, paramNames) {
		c.Fatalf("Got param names %#v, expected %#v", info.Params, paramNames)
	}
}

func testCheck(c *check.C, checker check.Checker, result bool, error string, params ...interface{}) ([]interface{}, []string) {
	info := checker.Info()
	if len(params) != len(info.Params) {
		c.Fatalf("unexpected param count in test; expected %d got %d", len(info.Params), len(params))
	}
	names := append([]string{}, info.Params...)
	result_, error_ := checker.Check(params, names)
	if result_ != result || error_ != error {
		c.Fatalf("%s.Check(%#v) returned (%#v, %#v) rather than (%#v, %#v)",
			info.Name, params, result_, error_, result, error)
	}
	return params, names
}

func (s *CheckersS) TestComment(c *check.C) {
	bug := check.Commentf("a %d bc", 42)
	comment := bug.CheckCommentString()
	if comment != "a 42 bc" {
		c.Fatalf("Commentf returned %#v", comment)
	}
}

func (s *CheckersS) TestIsNil(c *check.C) {
	testInfo(c, check.IsNil, "IsNil", []string{"value"})

	testCheck(c, check.IsNil, true, "", nil)
	testCheck(c, check.IsNil, false, "", "a")

	testCheck(c, check.IsNil, true, "", (chan int)(nil))
	testCheck(c, check.IsNil, false, "", make(chan int))
	testCheck(c, check.IsNil, true, "", (error)(nil))
	testCheck(c, check.IsNil, false, "", errors.New(""))
	testCheck(c, check.IsNil, true, "", ([]int)(nil))
	testCheck(c, check.IsNil, false, "", make([]int, 1))
	testCheck(c, check.IsNil, false, "", int(0))
}

func (s *CheckersS) TestNotNil(c *check.C) {
	testInfo(c, check.NotNil, "NotNil", []string{"value"})

	testCheck(c, check.NotNil, false, "", nil)
	testCheck(c, check.NotNil, true, "", "a")

	testCheck(c, check.NotNil, false, "", (chan int)(nil))
	testCheck(c, check.NotNil, true, "", make(chan int))
	testCheck(c, check.NotNil, false, "", (error)(nil))
	testCheck(c, check.NotNil, true, "", errors.New(""))
	testCheck(c, check.NotNil, false, "", ([]int)(nil))
	testCheck(c, check.NotNil, true, "", make([]int, 1))
}

func (s *CheckersS) TestNot(c *check.C) {
	testInfo(c, check.Not(check.IsNil), "Not(IsNil)", []string{"value"})

	testCheck(c, check.Not(check.IsNil), false, "", nil)
	testCheck(c, check.Not(check.IsNil), true, "", "a")
}

type simpleStruct struct {
	i int
}

func (s *CheckersS) TestEquals(c *check.C) {
	testInfo(c, check.Equals, "Equals", []string{"obtained", "expected"})

	// The simplest.
	testCheck(c, check.Equals, true, "", 42, 42)
	testCheck(c, check.Equals, false, "", 42, 43)

	// Different native types.
	testCheck(c, check.Equals, false, "", int32(42), int64(42))

	// With nil.
	testCheck(c, check.Equals, false, "", 42, nil)

	// Slices
	testCheck(c, check.Equals, false, "runtime error: comparing uncomparable type []uint8", []byte{1, 2}, []byte{1, 2})

	// Struct values
	testCheck(c, check.Equals, true, "", simpleStruct{1}, simpleStruct{1})
	testCheck(c, check.Equals, false, "", simpleStruct{1}, simpleStruct{2})

	// Struct pointers
	testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{1})
	testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{2})
}

func (s *CheckersS) TestDeepEquals(c *check.C) {
	testInfo(c, check.DeepEquals, "DeepEquals", []string{"obtained", "expected"})

	// The simplest.
	testCheck(c, check.DeepEquals, true, "", 42, 42)
	testCheck(c, check.DeepEquals, false, "", 42, 43)

	// Different native types.
	testCheck(c, check.DeepEquals, false, "", int32(42), int64(42))

	// With nil.
	testCheck(c, check.DeepEquals, false, "", 42, nil)

	// Slices
	testCheck(c, check.DeepEquals, true, "", []byte{1, 2}, []byte{1, 2})
	testCheck(c, check.DeepEquals, false, "", []byte{1, 2}, []byte{1, 3})

	// Struct values
	testCheck(c, check.DeepEquals, true, "", simpleStruct{1}, simpleStruct{1})
	testCheck(c, check.DeepEquals, false, "", simpleStruct{1}, simpleStruct{2})

	// Struct pointers
	testCheck(c, check.DeepEquals, true, "", &simpleStruct{1}, &simpleStruct{1})
	testCheck(c, check.DeepEquals, false, "", &simpleStruct{1}, &simpleStruct{2})
}

func (s *CheckersS) TestHasLen(c *check.C) {
	testInfo(c, check.HasLen, "HasLen", []string{"obtained", "n"})

	testCheck(c, check.HasLen, true, "", "abcd", 4)
	testCheck(c, check.HasLen, true, "", []int{1, 2}, 2)
	testCheck(c, check.HasLen, false, "", []int{1, 2}, 3)

	testCheck(c, check.HasLen, false, "n must be an int", []int{1, 2}, "2")
	testCheck(c, check.HasLen, false, "obtained value type has no length", nil, 2)
}

func (s *CheckersS) TestErrorMatches(c *check.C) {
	testInfo(c, check.ErrorMatches, "ErrorMatches", []string{"value", "regex"})

	testCheck(c, check.ErrorMatches, false, "Error value is nil", nil, "some error")
	testCheck(c, check.ErrorMatches, false, "Value is not an error", 1, "some error")
	testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "some error")
	testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "so.*or")

	// Verify params mutation
	params, names := testCheck(c, check.ErrorMatches, false, "", errors.New("some error"), "other error")
	c.Assert(params[0], check.Equals, "some error")
	c.Assert(names[0], check.Equals, "error")
}

func (s *CheckersS) TestMatches(c *check.C) {
	testInfo(c, check.Matches, "Matches", []string{"value", "regex"})

	// Simple matching
	testCheck(c, check.Matches, true, "", "abc", "abc")
	testCheck(c, check.Matches, true, "", "abc", "a.c")

	// Must match fully
	testCheck(c, check.Matches, false, "", "abc", "ab")
	testCheck(c, check.Matches, false, "", "abc", "bc")

	// String()-enabled values accepted
	testCheck(c, check.Matches, true, "", reflect.ValueOf("abc"), "a.c")
	testCheck(c, check.Matches, false, "", reflect.ValueOf("abc"), "a.d")

	// Some error conditions.
	testCheck(c, check.Matches, false, "Obtained value is not a string and has no .String()", 1, "a.c")
	testCheck(c, check.Matches, false, "Can't compile regex: error parsing regexp: missing closing ]: `[c$`", "abc", "a[c")
}

func (s *CheckersS) TestPanics(c *check.C) {
	testInfo(c, check.Panics, "Panics", []string{"function", "expected"})

	// Some errors.
	testCheck(c, check.Panics, false, "Function has not panicked", func() bool { return false }, "BOOM")
	testCheck(c, check.Panics, false, "Function must take zero arguments", 1, "BOOM")

	// Plain strings.
	testCheck(c, check.Panics, true, "", func() { panic("BOOM") }, "BOOM")
	testCheck(c, check.Panics, false, "", func() { panic("KABOOM") }, "BOOM")
	testCheck(c, check.Panics, true, "", func() bool { panic("BOOM") }, "BOOM")

	// Error values.
	testCheck(c, check.Panics, true, "", func() { panic(errors.New("BOOM")) }, errors.New("BOOM"))
	testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM"))

	type deep struct{ i int }
	// Deep value
	testCheck(c, check.Panics, true, "", func() { panic(&deep{99}) }, &deep{99})

	// Verify params/names mutation
	params, names := testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM"))
	c.Assert(params[0], check.ErrorMatches, "KABOOM")
	c.Assert(names[0], check.Equals, "panic")

	// Verify a nil panic
	testCheck(c, check.Panics, true, "", func() { panic(nil) }, nil)
	testCheck(c, check.Panics, false, "", func() { panic(nil) }, "NOPE")
}

func (s *CheckersS) TestPanicMatches(c *check.C) {
	testInfo(c, check.PanicMatches, "PanicMatches", []string{"function", "expected"})

	// Error matching.
	testCheck(c, check.PanicMatches, true, "", func() { panic(errors.New("BOOM")) }, "BO.M")
	testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BO.M")

	// Some errors.
	testCheck(c, check.PanicMatches, false, "Function has not panicked", func() bool { return false }, "BOOM")
	testCheck(c, check.PanicMatches, false, "Function must take zero arguments", 1, "BOOM")

	// Plain strings.
	testCheck(c, check.PanicMatches, true, "", func() { panic("BOOM") }, "BO.M")
	testCheck(c, check.PanicMatches, false, "", func() { panic("KABOOM") }, "BOOM")
	testCheck(c, check.PanicMatches, true, "", func() bool { panic("BOOM") }, "BO.M")

	// Verify params/names mutation
	params, names := testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BOOM")
	c.Assert(params[0], check.Equals, "KABOOM")
	c.Assert(names[0], check.Equals, "panic")

	// Verify a nil panic
	testCheck(c, check.PanicMatches, false, "Panic value is not a string or an error", func() { panic(nil) }, "")
}

func (s *CheckersS) TestFitsTypeOf(c *check.C) {
	testInfo(c, check.FitsTypeOf, "FitsTypeOf", []string{"obtained", "sample"})

	// Basic types
	testCheck(c, check.FitsTypeOf, true, "", 1, 0)
	testCheck(c, check.FitsTypeOf, false, "", 1, int64(0))

	// Aliases
	testCheck(c, check.FitsTypeOf, false, "", 1, errors.New(""))
	testCheck(c, check.FitsTypeOf, false, "", "error", errors.New(""))
	testCheck(c, check.FitsTypeOf, true, "", errors.New("error"), errors.New(""))

	// Structures
	testCheck(c, check.FitsTypeOf, false, "", 1, simpleStruct{})
	testCheck(c, check.FitsTypeOf, false, "", simpleStruct{42}, &simpleStruct{})
	testCheck(c, check.FitsTypeOf, true, "", simpleStruct{42}, simpleStruct{})
	testCheck(c, check.FitsTypeOf, true, "", &simpleStruct{42}, &simpleStruct{})

	// Some bad values
	testCheck(c, check.FitsTypeOf, false, "Invalid sample value", 1, interface{}(nil))
	testCheck(c, check.FitsTypeOf, false, "", interface{}(nil), 0)
}

func (s *CheckersS) TestImplements(c *check.C) {
	testInfo(c, check.Implements, "Implements", []string{"obtained", "ifaceptr"})

	var e error
	var re runtime.Error
	testCheck(c, check.Implements, true, "", errors.New(""), &e)
	testCheck(c, check.Implements, false, "", errors.New(""), &re)

	// Some bad values
	testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, errors.New(""))
	testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, interface{}(nil))
	testCheck(c, check.Implements, false, "", interface{}(nil), &e)
}
19
vendor/gopkg.in/check.v1/export_test.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
package check

import "io"

func PrintLine(filename string, line int) (string, error) {
	return printLine(filename, line)
}

func Indent(s, with string) string {
	return indent(s, with)
}

func NewOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
	return newOutputWriter(writer, stream, verbose)
}

func (c *C) FakeSkip(reason string) {
	c.reason = reason
}
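
// This file follows the standard Go export_test.go pattern: it belongs to
// package check but is only compiled during tests, so it can re-export
// the unexported printLine, indent, and newOutputWriter helpers to the
// external check_test package.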
484
vendor/gopkg.in/check.v1/fixture_test.go
generated
vendored
Normal file
@ -0,0 +1,484 @@
// Tests for the behavior of the test fixture system.

package check_test

import (
	. "gopkg.in/check.v1"
)

// -----------------------------------------------------------------------
// Fixture test suite.

type FixtureS struct{}

var fixtureS = Suite(&FixtureS{})

func (s *FixtureS) TestCountSuite(c *C) {
	suitesRun += 1
}

// -----------------------------------------------------------------------
// Basic fixture ordering verification.

func (s *FixtureS) TestOrder(c *C) {
	helper := FixtureHelper{}
	Run(&helper, nil)
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "Test1")
	c.Check(helper.calls[3], Equals, "TearDownTest")
	c.Check(helper.calls[4], Equals, "SetUpTest")
	c.Check(helper.calls[5], Equals, "Test2")
	c.Check(helper.calls[6], Equals, "TearDownTest")
	c.Check(helper.calls[7], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 8)
}

// -----------------------------------------------------------------------
// Check the behavior when panics occur within tests and fixtures.

func (s *FixtureS) TestPanicOnTest(c *C) {
	helper := FixtureHelper{panicOn: "Test1"}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "Test1")
	c.Check(helper.calls[3], Equals, "TearDownTest")
	c.Check(helper.calls[4], Equals, "SetUpTest")
	c.Check(helper.calls[5], Equals, "Test2")
	c.Check(helper.calls[6], Equals, "TearDownTest")
	c.Check(helper.calls[7], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 8)

	expected := "^\n-+\n" +
		"PANIC: check_test\\.go:[0-9]+: FixtureHelper.Test1\n\n" +
		"\\.\\.\\. Panic: Test1 \\(PC=[xA-F0-9]+\\)\n\n" +
		".+:[0-9]+\n" +
		"  in (go)?panic\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.trace\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.Test1\n" +
		"(.|\n)*$"

	c.Check(output.value, Matches, expected)
}

func (s *FixtureS) TestPanicOnSetUpTest(c *C) {
	helper := FixtureHelper{panicOn: "SetUpTest"}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "TearDownTest")
	c.Check(helper.calls[3], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 4)

	expected := "^\n-+\n" +
		"PANIC: check_test\\.go:[0-9]+: " +
		"FixtureHelper\\.SetUpTest\n\n" +
		"\\.\\.\\. Panic: SetUpTest \\(PC=[xA-F0-9]+\\)\n\n" +
		".+:[0-9]+\n" +
		"  in (go)?panic\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.trace\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.SetUpTest\n" +
		"(.|\n)*" +
		"\n-+\n" +
		"PANIC: check_test\\.go:[0-9]+: " +
		"FixtureHelper\\.Test1\n\n" +
		"\\.\\.\\. Panic: Fixture has panicked " +
		"\\(see related PANIC\\)\n$"

	c.Check(output.value, Matches, expected)
}

func (s *FixtureS) TestPanicOnTearDownTest(c *C) {
	helper := FixtureHelper{panicOn: "TearDownTest"}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "Test1")
	c.Check(helper.calls[3], Equals, "TearDownTest")
	c.Check(helper.calls[4], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 5)

	expected := "^\n-+\n" +
		"PANIC: check_test\\.go:[0-9]+: " +
		"FixtureHelper.TearDownTest\n\n" +
		"\\.\\.\\. Panic: TearDownTest \\(PC=[xA-F0-9]+\\)\n\n" +
		".+:[0-9]+\n" +
		"  in (go)?panic\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.trace\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.TearDownTest\n" +
		"(.|\n)*" +
		"\n-+\n" +
		"PANIC: check_test\\.go:[0-9]+: " +
		"FixtureHelper\\.Test1\n\n" +
		"\\.\\.\\. Panic: Fixture has panicked " +
		"\\(see related PANIC\\)\n$"

	c.Check(output.value, Matches, expected)
}

func (s *FixtureS) TestPanicOnSetUpSuite(c *C) {
	helper := FixtureHelper{panicOn: "SetUpSuite"}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 2)

	expected := "^\n-+\n" +
		"PANIC: check_test\\.go:[0-9]+: " +
		"FixtureHelper.SetUpSuite\n\n" +
		"\\.\\.\\. Panic: SetUpSuite \\(PC=[xA-F0-9]+\\)\n\n" +
		".+:[0-9]+\n" +
		"  in (go)?panic\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.trace\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.SetUpSuite\n" +
		"(.|\n)*$"

	c.Check(output.value, Matches, expected)
}

func (s *FixtureS) TestPanicOnTearDownSuite(c *C) {
	helper := FixtureHelper{panicOn: "TearDownSuite"}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "Test1")
	c.Check(helper.calls[3], Equals, "TearDownTest")
	c.Check(helper.calls[4], Equals, "SetUpTest")
	c.Check(helper.calls[5], Equals, "Test2")
	c.Check(helper.calls[6], Equals, "TearDownTest")
	c.Check(helper.calls[7], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 8)

	expected := "^\n-+\n" +
		"PANIC: check_test\\.go:[0-9]+: " +
		"FixtureHelper.TearDownSuite\n\n" +
		"\\.\\.\\. Panic: TearDownSuite \\(PC=[xA-F0-9]+\\)\n\n" +
		".+:[0-9]+\n" +
		"  in (go)?panic\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.trace\n" +
		".*check_test.go:[0-9]+\n" +
		"  in FixtureHelper.TearDownSuite\n" +
		"(.|\n)*$"

	c.Check(output.value, Matches, expected)
}

// -----------------------------------------------------------------------
// A wrong argument on a test or fixture will produce a nice error.

func (s *FixtureS) TestPanicOnWrongTestArg(c *C) {
	helper := WrongTestArgHelper{}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "TearDownTest")
	c.Check(helper.calls[3], Equals, "SetUpTest")
	c.Check(helper.calls[4], Equals, "Test2")
	c.Check(helper.calls[5], Equals, "TearDownTest")
	c.Check(helper.calls[6], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 7)

	expected := "^\n-+\n" +
		"PANIC: fixture_test\\.go:[0-9]+: " +
		"WrongTestArgHelper\\.Test1\n\n" +
		"\\.\\.\\. Panic: WrongTestArgHelper\\.Test1 argument " +
		"should be \\*check\\.C\n"

	c.Check(output.value, Matches, expected)
}

func (s *FixtureS) TestPanicOnWrongSetUpTestArg(c *C) {
	helper := WrongSetUpTestArgHelper{}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(len(helper.calls), Equals, 0)

	expected :=
		"^\n-+\n" +
			"PANIC: fixture_test\\.go:[0-9]+: " +
			"WrongSetUpTestArgHelper\\.SetUpTest\n\n" +
			"\\.\\.\\. Panic: WrongSetUpTestArgHelper\\.SetUpTest argument " +
			"should be \\*check\\.C\n"

	c.Check(output.value, Matches, expected)
}

func (s *FixtureS) TestPanicOnWrongSetUpSuiteArg(c *C) {
	helper := WrongSetUpSuiteArgHelper{}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(len(helper.calls), Equals, 0)

	expected :=
		"^\n-+\n" +
			"PANIC: fixture_test\\.go:[0-9]+: " +
			"WrongSetUpSuiteArgHelper\\.SetUpSuite\n\n" +
			"\\.\\.\\. Panic: WrongSetUpSuiteArgHelper\\.SetUpSuite argument " +
			"should be \\*check\\.C\n"

	c.Check(output.value, Matches, expected)
}

// -----------------------------------------------------------------------
// Nice errors also when tests or fixture have wrong arg count.

func (s *FixtureS) TestPanicOnWrongTestArgCount(c *C) {
	helper := WrongTestArgCountHelper{}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "TearDownTest")
	c.Check(helper.calls[3], Equals, "SetUpTest")
	c.Check(helper.calls[4], Equals, "Test2")
	c.Check(helper.calls[5], Equals, "TearDownTest")
	c.Check(helper.calls[6], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 7)

	expected := "^\n-+\n" +
		"PANIC: fixture_test\\.go:[0-9]+: " +
		"WrongTestArgCountHelper\\.Test1\n\n" +
		"\\.\\.\\. Panic: WrongTestArgCountHelper\\.Test1 argument " +
		"should be \\*check\\.C\n"

	c.Check(output.value, Matches, expected)
}

func (s *FixtureS) TestPanicOnWrongSetUpTestArgCount(c *C) {
	helper := WrongSetUpTestArgCountHelper{}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(len(helper.calls), Equals, 0)

	expected :=
		"^\n-+\n" +
			"PANIC: fixture_test\\.go:[0-9]+: " +
			"WrongSetUpTestArgCountHelper\\.SetUpTest\n\n" +
			"\\.\\.\\. Panic: WrongSetUpTestArgCountHelper\\.SetUpTest argument " +
			"should be \\*check\\.C\n"

	c.Check(output.value, Matches, expected)
}

func (s *FixtureS) TestPanicOnWrongSetUpSuiteArgCount(c *C) {
	helper := WrongSetUpSuiteArgCountHelper{}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(len(helper.calls), Equals, 0)

	expected :=
		"^\n-+\n" +
			"PANIC: fixture_test\\.go:[0-9]+: " +
			"WrongSetUpSuiteArgCountHelper\\.SetUpSuite\n\n" +
			"\\.\\.\\. Panic: WrongSetUpSuiteArgCountHelper" +
			"\\.SetUpSuite argument should be \\*check\\.C\n"

	c.Check(output.value, Matches, expected)
}

// -----------------------------------------------------------------------
// Helper test suites with wrong function arguments.

type WrongTestArgHelper struct {
	FixtureHelper
}

func (s *WrongTestArgHelper) Test1(t int) {
}

type WrongSetUpTestArgHelper struct {
	FixtureHelper
}

func (s *WrongSetUpTestArgHelper) SetUpTest(t int) {
}

type WrongSetUpSuiteArgHelper struct {
	FixtureHelper
}

func (s *WrongSetUpSuiteArgHelper) SetUpSuite(t int) {
}

type WrongTestArgCountHelper struct {
	FixtureHelper
}

func (s *WrongTestArgCountHelper) Test1(c *C, i int) {
}

type WrongSetUpTestArgCountHelper struct {
	FixtureHelper
}

func (s *WrongSetUpTestArgCountHelper) SetUpTest(c *C, i int) {
}

type WrongSetUpSuiteArgCountHelper struct {
	FixtureHelper
}

func (s *WrongSetUpSuiteArgCountHelper) SetUpSuite(c *C, i int) {
}

// -----------------------------------------------------------------------
// Ensure fixture doesn't run without tests.

type NoTestsHelper struct {
	hasRun bool
}

func (s *NoTestsHelper) SetUpSuite(c *C) {
	s.hasRun = true
}

func (s *NoTestsHelper) TearDownSuite(c *C) {
	s.hasRun = true
}

func (s *FixtureS) TestFixtureDoesntRunWithoutTests(c *C) {
	helper := NoTestsHelper{}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Check(helper.hasRun, Equals, false)
}

// -----------------------------------------------------------------------
// Verify that checks and assertions work correctly inside the fixture.

type FixtureCheckHelper struct {
	fail      string
	completed bool
}

func (s *FixtureCheckHelper) SetUpSuite(c *C) {
	switch s.fail {
	case "SetUpSuiteAssert":
		c.Assert(false, Equals, true)
	case "SetUpSuiteCheck":
		c.Check(false, Equals, true)
	}
	s.completed = true
}

func (s *FixtureCheckHelper) SetUpTest(c *C) {
	switch s.fail {
	case "SetUpTestAssert":
		c.Assert(false, Equals, true)
	case "SetUpTestCheck":
		c.Check(false, Equals, true)
	}
	s.completed = true
}

func (s *FixtureCheckHelper) Test(c *C) {
	// Do nothing.
}

func (s *FixtureS) TestSetUpSuiteCheck(c *C) {
	helper := FixtureCheckHelper{fail: "SetUpSuiteCheck"}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Assert(output.value, Matches,
		"\n---+\n"+
			"FAIL: fixture_test\\.go:[0-9]+: "+
			"FixtureCheckHelper\\.SetUpSuite\n\n"+
			"fixture_test\\.go:[0-9]+:\n"+
			"    c\\.Check\\(false, Equals, true\\)\n"+
			"\\.+ obtained bool = false\n"+
			"\\.+ expected bool = true\n\n")
	c.Assert(helper.completed, Equals, true)
}

func (s *FixtureS) TestSetUpSuiteAssert(c *C) {
	helper := FixtureCheckHelper{fail: "SetUpSuiteAssert"}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Assert(output.value, Matches,
		"\n---+\n"+
			"FAIL: fixture_test\\.go:[0-9]+: "+
			"FixtureCheckHelper\\.SetUpSuite\n\n"+
			"fixture_test\\.go:[0-9]+:\n"+
			"    c\\.Assert\\(false, Equals, true\\)\n"+
			"\\.+ obtained bool = false\n"+
			"\\.+ expected bool = true\n\n")
	c.Assert(helper.completed, Equals, false)
}

// -----------------------------------------------------------------------
// Verify that logging within SetUpTest() persists within the test log itself.

type FixtureLogHelper struct {
	c *C
}

func (s *FixtureLogHelper) SetUpTest(c *C) {
	s.c = c
	c.Log("1")
}

func (s *FixtureLogHelper) Test(c *C) {
	c.Log("2")
	s.c.Log("3")
	c.Log("4")
	c.Fail()
}

func (s *FixtureLogHelper) TearDownTest(c *C) {
	s.c.Log("5")
}

func (s *FixtureS) TestFixtureLogging(c *C) {
	helper := FixtureLogHelper{}
	output := String{}
	Run(&helper, &RunConf{Output: &output})
	c.Assert(output.value, Matches,
		"\n---+\n"+
			"FAIL: fixture_test\\.go:[0-9]+: "+
			"FixtureLogHelper\\.Test\n\n"+
			"1\n2\n3\n4\n5\n")
}

// -----------------------------------------------------------------------
// Skip() within fixture methods.

func (s *FixtureS) TestSkipSuite(c *C) {
	helper := FixtureHelper{skip: true, skipOnN: 0}
	output := String{}
	result := Run(&helper, &RunConf{Output: &output})
	c.Assert(output.value, Equals, "")
	c.Assert(helper.calls[0], Equals, "SetUpSuite")
	c.Assert(helper.calls[1], Equals, "TearDownSuite")
	c.Assert(len(helper.calls), Equals, 2)
	c.Assert(result.Skipped, Equals, 2)
}

func (s *FixtureS) TestSkipTest(c *C) {
	helper := FixtureHelper{skip: true, skipOnN: 1}
	output := String{}
	result := Run(&helper, &RunConf{Output: &output})
	c.Assert(helper.calls[0], Equals, "SetUpSuite")
	c.Assert(helper.calls[1], Equals, "SetUpTest")
	c.Assert(helper.calls[2], Equals, "SetUpTest")
	c.Assert(helper.calls[3], Equals, "Test2")
	c.Assert(helper.calls[4], Equals, "TearDownTest")
	c.Assert(helper.calls[5], Equals, "TearDownSuite")
	c.Assert(len(helper.calls), Equals, 6)
	c.Assert(result.Skipped, Equals, 1)
}
|
335
vendor/gopkg.in/check.v1/foundation_test.go
generated
vendored
Normal file
@ -0,0 +1,335 @@
|
||||
// These tests check that the foundations of gocheck are working properly.
|
||||
// They assume that fundamental failure reporting already works, though,
|
||||
// since that was tested in bootstrap_test.go. Even then, some care may
|
||||
// still have to be taken when using external functions, since they should
|
||||
// of course not rely on functionality tested here.
|
||||
|
||||
package check_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"gopkg.in/check.v1"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Foundation test suite.
|
||||
|
||||
type FoundationS struct{}
|
||||
|
||||
var foundationS = check.Suite(&FoundationS{})
|
||||
|
||||
func (s *FoundationS) TestCountSuite(c *check.C) {
|
||||
suitesRun++
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestErrorf(c *check.C) {
|
||||
// Do not use checkState() here. It depends on Errorf() working.
|
||||
expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+
|
||||
" c.Errorf(\"Error %%v!\", \"message\")\n"+
|
||||
"... Error: Error message!\n\n",
|
||||
getMyLine()+1)
|
||||
c.Errorf("Error %v!", "message")
|
||||
failed := c.Failed()
|
||||
c.Succeed()
|
||||
if log := c.GetTestLog(); log != expectedLog {
|
||||
c.Logf("Errorf() logged %#v rather than %#v", log, expectedLog)
|
||||
c.Fail()
|
||||
}
|
||||
if !failed {
|
||||
c.Logf("Errorf() didn't put the test in a failed state")
|
||||
c.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestError(c *check.C) {
|
||||
expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+
|
||||
" c\\.Error\\(\"Error \", \"message!\"\\)\n"+
|
||||
"\\.\\.\\. Error: Error message!\n\n",
|
||||
getMyLine()+1)
|
||||
c.Error("Error ", "message!")
|
||||
checkState(c, nil,
|
||||
&expectedState{
|
||||
name: "Error(`Error `, `message!`)",
|
||||
failed: true,
|
||||
log: expectedLog,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestFailNow(c *check.C) {
|
||||
defer (func() {
|
||||
if !c.Failed() {
|
||||
c.Error("FailNow() didn't fail the test")
|
||||
} else {
|
||||
c.Succeed()
|
||||
if c.GetTestLog() != "" {
|
||||
c.Error("Something got logged:\n" + c.GetTestLog())
|
||||
}
|
||||
}
|
||||
})()
|
||||
|
||||
c.FailNow()
|
||||
c.Log("FailNow() didn't stop the test")
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestSucceedNow(c *check.C) {
|
||||
defer (func() {
|
||||
if c.Failed() {
|
||||
c.Error("SucceedNow() didn't succeed the test")
|
||||
}
|
||||
if c.GetTestLog() != "" {
|
||||
c.Error("Something got logged:\n" + c.GetTestLog())
|
||||
}
|
||||
})()
|
||||
|
||||
c.Fail()
|
||||
c.SucceedNow()
|
||||
c.Log("SucceedNow() didn't stop the test")
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestFailureHeader(c *check.C) {
|
||||
output := String{}
|
||||
failHelper := FailHelper{}
|
||||
check.Run(&failHelper, &check.RunConf{Output: &output})
|
||||
header := fmt.Sprintf(""+
|
||||
"\n-----------------------------------"+
|
||||
"-----------------------------------\n"+
|
||||
"FAIL: check_test.go:%d: FailHelper.TestLogAndFail\n",
|
||||
failHelper.testLine)
|
||||
if !strings.Contains(output.value, header) {
|
||||
c.Errorf(""+
|
||||
"Failure didn't print a proper header.\n"+
|
||||
"... Got:\n%s... Expected something with:\n%s",
|
||||
output.value, header)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestFatal(c *check.C) {
|
||||
var line int
|
||||
defer (func() {
|
||||
if !c.Failed() {
|
||||
c.Error("Fatal() didn't fail the test")
|
||||
} else {
|
||||
c.Succeed()
|
||||
expected := fmt.Sprintf("foundation_test.go:%d:\n"+
|
||||
" c.Fatal(\"Die \", \"now!\")\n"+
|
||||
"... Error: Die now!\n\n",
|
||||
line)
|
||||
if c.GetTestLog() != expected {
|
||||
c.Error("Incorrect log:", c.GetTestLog())
|
||||
}
|
||||
}
|
||||
})()
|
||||
|
||||
line = getMyLine() + 1
|
||||
c.Fatal("Die ", "now!")
|
||||
c.Log("Fatal() didn't stop the test")
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestFatalf(c *check.C) {
|
||||
var line int
|
||||
defer (func() {
|
||||
if !c.Failed() {
|
||||
c.Error("Fatalf() didn't fail the test")
|
||||
} else {
|
||||
c.Succeed()
|
||||
expected := fmt.Sprintf("foundation_test.go:%d:\n"+
|
||||
" c.Fatalf(\"Die %%s!\", \"now\")\n"+
|
||||
"... Error: Die now!\n\n",
|
||||
line)
|
||||
if c.GetTestLog() != expected {
|
||||
c.Error("Incorrect log:", c.GetTestLog())
|
||||
}
|
||||
}
|
||||
})()
|
||||
|
||||
line = getMyLine() + 1
|
||||
c.Fatalf("Die %s!", "now")
|
||||
c.Log("Fatalf() didn't stop the test")
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestCallerLoggingInsideTest(c *check.C) {
|
||||
log := fmt.Sprintf(""+
|
||||
"foundation_test.go:%d:\n"+
|
||||
" result := c.Check\\(10, check.Equals, 20\\)\n"+
|
||||
"\\.\\.\\. obtained int = 10\n"+
|
||||
"\\.\\.\\. expected int = 20\n\n",
|
||||
getMyLine()+1)
|
||||
result := c.Check(10, check.Equals, 20)
|
||||
checkState(c, result,
|
||||
&expectedState{
|
||||
name: "Check(10, Equals, 20)",
|
||||
result: false,
|
||||
failed: true,
|
||||
log: log,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestCallerLoggingInDifferentFile(c *check.C) {
|
||||
result, line := checkEqualWrapper(c, 10, 20)
|
||||
testLine := getMyLine() - 1
|
||||
log := fmt.Sprintf(""+
|
||||
"foundation_test.go:%d:\n"+
|
||||
" result, line := checkEqualWrapper\\(c, 10, 20\\)\n"+
|
||||
"check_test.go:%d:\n"+
|
||||
" return c.Check\\(obtained, check.Equals, expected\\), getMyLine\\(\\)\n"+
|
||||
"\\.\\.\\. obtained int = 10\n"+
|
||||
"\\.\\.\\. expected int = 20\n\n",
|
||||
testLine, line)
|
||||
checkState(c, result,
|
||||
&expectedState{
|
||||
name: "Check(10, Equals, 20)",
|
||||
result: false,
|
||||
failed: true,
|
||||
log: log,
|
||||
})
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// ExpectFailure() inverts the logic of failure.
|
||||
|
||||
type ExpectFailureSucceedHelper struct{}
|
||||
|
||||
func (s *ExpectFailureSucceedHelper) TestSucceed(c *check.C) {
|
||||
c.ExpectFailure("It booms!")
|
||||
c.Error("Boom!")
|
||||
}
|
||||
|
||||
type ExpectFailureFailHelper struct{}
|
||||
|
||||
func (s *ExpectFailureFailHelper) TestFail(c *check.C) {
|
||||
c.ExpectFailure("Bug #XYZ")
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestExpectFailureFail(c *check.C) {
|
||||
helper := ExpectFailureFailHelper{}
|
||||
output := String{}
|
||||
result := check.Run(&helper, &check.RunConf{Output: &output})
|
||||
|
||||
expected := "" +
|
||||
"^\n-+\n" +
|
||||
"FAIL: foundation_test\\.go:[0-9]+:" +
|
||||
" ExpectFailureFailHelper\\.TestFail\n\n" +
|
||||
"\\.\\.\\. Error: Test succeeded, but was expected to fail\n" +
|
||||
"\\.\\.\\. Reason: Bug #XYZ\n$"
|
||||
|
||||
matched, err := regexp.MatchString(expected, output.value)
|
||||
if err != nil {
|
||||
c.Error("Bad expression: ", expected)
|
||||
} else if !matched {
|
||||
c.Error("ExpectFailure() didn't log properly:\n", output.value)
|
||||
}
|
||||
|
||||
c.Assert(result.ExpectedFailures, check.Equals, 0)
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestExpectFailureSucceed(c *check.C) {
|
||||
helper := ExpectFailureSucceedHelper{}
|
||||
output := String{}
|
||||
result := check.Run(&helper, &check.RunConf{Output: &output})
|
||||
|
||||
c.Assert(output.value, check.Equals, "")
|
||||
c.Assert(result.ExpectedFailures, check.Equals, 1)
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestExpectFailureSucceedVerbose(c *check.C) {
|
||||
helper := ExpectFailureSucceedHelper{}
|
||||
output := String{}
|
||||
result := check.Run(&helper, &check.RunConf{Output: &output, Verbose: true})
|
||||
|
||||
expected := "" +
|
||||
"FAIL EXPECTED: foundation_test\\.go:[0-9]+:" +
|
||||
" ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n"
|
||||
|
||||
matched, err := regexp.MatchString(expected, output.value)
|
||||
if err != nil {
|
||||
c.Error("Bad expression: ", expected)
|
||||
} else if !matched {
|
||||
c.Error("ExpectFailure() didn't log properly:\n", output.value)
|
||||
}
|
||||
|
||||
c.Assert(result.ExpectedFailures, check.Equals, 1)
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Skip() allows stopping a test without positive/negative results.
|
||||
|
||||
type SkipTestHelper struct{}
|
||||
|
||||
func (s *SkipTestHelper) TestFail(c *check.C) {
|
||||
c.Skip("Wrong platform or whatever")
|
||||
c.Error("Boom!")
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestSkip(c *check.C) {
|
||||
helper := SkipTestHelper{}
|
||||
output := String{}
|
||||
check.Run(&helper, &check.RunConf{Output: &output})
|
||||
|
||||
if output.value != "" {
|
||||
c.Error("Skip() logged something:\n", output.value)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *FoundationS) TestSkipVerbose(c *check.C) {
|
||||
helper := SkipTestHelper{}
|
||||
output := String{}
|
||||
check.Run(&helper, &check.RunConf{Output: &output, Verbose: true})
|
||||
|
||||
expected := "SKIP: foundation_test\\.go:[0-9]+: SkipTestHelper\\.TestFail" +
|
||||
" \\(Wrong platform or whatever\\)"
|
||||
matched, err := regexp.MatchString(expected, output.value)
|
||||
if err != nil {
|
||||
c.Error("Bad expression: ", expected)
|
||||
} else if !matched {
|
||||
c.Error("Skip() didn't log properly:\n", output.value)
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Check minimum *log.Logger interface provided by *check.C.
|
||||
|
||||
type minLogger interface {
|
||||
Output(calldepth int, s string) error
|
||||
}
|
||||
|
||||
func (s *BootstrapS) TestMinLogger(c *check.C) {
|
||||
var logger minLogger
|
||||
logger = log.New(os.Stderr, "", 0)
|
||||
logger = c
|
||||
logger.Output(0, "Hello there")
|
||||
expected := `\[LOG\] [0-9]+:[0-9][0-9]\.[0-9][0-9][0-9] +Hello there\n`
|
||||
output := c.GetTestLog()
|
||||
c.Assert(output, check.Matches, expected)
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Ensure that suites with embedded types are working fine, including the
|
||||
// workaround for issue 906.
|
||||
|
||||
type EmbeddedInternalS struct {
|
||||
called bool
|
||||
}
|
||||
|
||||
type EmbeddedS struct {
|
||||
EmbeddedInternalS
|
||||
}
|
||||
|
||||
var embeddedS = check.Suite(&EmbeddedS{})
|
||||
|
||||
func (s *EmbeddedS) TestCountSuite(c *check.C) {
|
||||
suitesRun++
|
||||
}
|
||||
|
||||
func (s *EmbeddedInternalS) TestMethod(c *check.C) {
|
||||
c.Error("TestMethod() of the embedded type was called!?")
|
||||
}
|
||||
|
||||
func (s *EmbeddedS) TestMethod(c *check.C) {
|
||||
// http://code.google.com/p/go/issues/detail?id=906
|
||||
c.Check(s.called, check.Equals, false) // Go issue 906 is affecting the runner?
|
||||
s.called = true
|
||||
}
|
231
vendor/gopkg.in/check.v1/helpers.go
generated
vendored
Normal file
@ -0,0 +1,231 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestName returns the current test name in the form "SuiteName.TestName".
|
||||
func (c *C) TestName() string {
|
||||
return c.testName
|
||||
}
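// Usage sketch: TestName is handy for per-test resource naming. The
// tmpFile field and the filepath usage below are illustrative, not part
// of this package.
//
//	func (s *MySuite) SetUpTest(c *C) {
//		// Yields e.g. ".../MySuite.TestSomething.out" under a fresh temp dir.
//		s.tmpFile = filepath.Join(c.MkDir(), c.TestName()+".out")
//	}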
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Basic succeeding/failing logic.
|
||||
|
||||
// Failed returns whether the currently running test has already failed.
|
||||
func (c *C) Failed() bool {
|
||||
return c.status() == failedSt
|
||||
}
|
||||
|
||||
// Fail marks the currently running test as failed.
|
||||
//
|
||||
// Something ought to have been previously logged so the developer can tell
|
||||
// what went wrong. The higher level helper functions will fail the test
|
||||
// and do the logging properly.
|
||||
func (c *C) Fail() {
|
||||
c.setStatus(failedSt)
|
||||
}
|
||||
|
||||
// FailNow marks the currently running test as failed and stops running it.
|
||||
// Something ought to have been previously logged so the developer can tell
|
||||
// what went wrong. The higher level helper functions will fail the test
|
||||
// and do the logging properly.
|
||||
func (c *C) FailNow() {
|
||||
c.Fail()
|
||||
c.stopNow()
|
||||
}
|
||||
|
||||
// Succeed marks the currently running test as succeeded, undoing any
|
||||
// previous failures.
|
||||
func (c *C) Succeed() {
|
||||
c.setStatus(succeededSt)
|
||||
}
|
||||
|
||||
// SucceedNow marks the currently running test as succeeded, undoing any
|
||||
// previous failures, and stops running the test.
|
||||
func (c *C) SucceedNow() {
|
||||
c.Succeed()
|
||||
c.stopNow()
|
||||
}
|
||||
|
||||
// ExpectFailure informs that the running test is knowingly broken for
|
||||
// the provided reason. If the test does not fail, an error will be reported
|
||||
// to raise attention to this fact. This method is useful to temporarily
|
||||
// disable tests which cover well known problems until a better time to
|
||||
// fix the problem is found, without forgetting about the fact that a
|
||||
// failure still exists.
|
||||
func (c *C) ExpectFailure(reason string) {
|
||||
if reason == "" {
|
||||
panic("Missing reason why the test is expected to fail")
|
||||
}
|
||||
c.mustFail = true
|
||||
c.reason = reason
|
||||
}
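// Usage sketch (the frobnicate helper and the bug number are illustrative):
//
//	func (s *MySuite) TestKnownBug(c *C) {
//		c.ExpectFailure("bug #123: frobnicate returns the wrong sign")
//		c.Assert(frobnicate(-1), Equals, 1) // fails today; recorded as expected
//	}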
|
||||
|
||||
// Skip skips the running test for the provided reason. If run from within
|
||||
// SetUpTest, the individual test being set up will be skipped, and if run
|
||||
// from within SetUpSuite, the whole suite is skipped.
|
||||
func (c *C) Skip(reason string) {
|
||||
if reason == "" {
|
||||
panic("Missing reason why the test is being skipped")
|
||||
}
|
||||
c.reason = reason
|
||||
c.setStatus(skippedSt)
|
||||
c.stopNow()
|
||||
}
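// Usage sketch: skipping a whole suite when a precondition is missing.
// The environment variable name is illustrative.
//
//	func (s *MySuite) SetUpSuite(c *C) {
//		if os.Getenv("MYSUITE_ENDPOINT") == "" {
//			// Skipping in SetUpSuite skips every test in the suite.
//			c.Skip("MYSUITE_ENDPOINT not set")
//		}
//	}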
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Basic logging.
|
||||
|
||||
// GetTestLog returns the current test error output.
|
||||
func (c *C) GetTestLog() string {
|
||||
return c.logb.String()
|
||||
}
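// Usage sketch: the accumulated log can be inspected from within a test.
//
//	c.Log("connected to backend")
//	c.Assert(c.GetTestLog(), Matches, "(?s).*connected to backend.*")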
|
||||
|
||||
// Log logs some information into the test error output.
|
||||
// The provided arguments are assembled together into a string with fmt.Sprint.
|
||||
func (c *C) Log(args ...interface{}) {
|
||||
c.log(args...)
|
||||
}
|
||||
|
||||
// Logf logs some information into the test error output.
|
||||
// The provided arguments are assembled together into a string with fmt.Sprintf.
|
||||
func (c *C) Logf(format string, args ...interface{}) {
|
||||
c.logf(format, args...)
|
||||
}
|
||||
|
||||
// Output enables *C to be used as a logger in functions that require only
|
||||
// the minimum interface of *log.Logger.
|
||||
func (c *C) Output(calldepth int, s string) error {
|
||||
d := time.Since(c.startTime)
|
||||
msec := d / time.Millisecond
|
||||
sec := d / time.Second
|
||||
min := d / time.Minute
|
||||
|
||||
c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s)
|
||||
return nil
|
||||
}
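// Usage sketch: because *C provides Output, it satisfies the minimal
// logger interface that some code accepts in place of a *log.Logger.
// The audit function is illustrative.
//
//	type minLogger interface {
//		Output(calldepth int, s string) error
//	}
//
//	func audit(l minLogger) { l.Output(1, "audit event") }
//
//	func (s *MySuite) TestAudit(c *C) {
//		audit(c) // the entry lands in the test log with a timestamp
//	}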
|
||||
|
||||
// Error logs an error into the test error output and marks the test as failed.
|
||||
// The provided arguments are assembled together into a string with fmt.Sprint.
|
||||
func (c *C) Error(args ...interface{}) {
|
||||
c.logCaller(1)
|
||||
c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
|
||||
c.logNewLine()
|
||||
c.Fail()
|
||||
}
|
||||
|
||||
// Errorf logs an error into the test error output and marks the test as failed.
|
||||
// The provided arguments are assembled together into a string with fmt.Sprintf.
|
||||
func (c *C) Errorf(format string, args ...interface{}) {
|
||||
c.logCaller(1)
|
||||
c.logString(fmt.Sprintf("Error: "+format, args...))
|
||||
c.logNewLine()
|
||||
c.Fail()
|
||||
}
|
||||
|
||||
// Fatal logs an error into the test error output, marks the test as failed, and
|
||||
// stops the test execution. The provided arguments are assembled together into
|
||||
// a string with fmt.Sprint.
|
||||
func (c *C) Fatal(args ...interface{}) {
|
||||
c.logCaller(1)
|
||||
c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
|
||||
c.logNewLine()
|
||||
c.FailNow()
|
||||
}
|
||||
|
||||
// Fatalf logs an error into the test error output, marks the test as failed, and
|
||||
// stops the test execution. The provided arguments are assembled together into
|
||||
// a string with fmt.Sprintf.
|
||||
func (c *C) Fatalf(format string, args ...interface{}) {
|
||||
c.logCaller(1)
|
||||
c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
|
||||
c.logNewLine()
|
||||
c.FailNow()
|
||||
}
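// Usage sketch (openFixture is an illustrative helper):
//
//	f, err := openFixture("testdata/sample.bin")
//	if err != nil {
//		c.Fatalf("cannot open fixture: %v", err) // aborts the test here
//	}
//	defer f.Close()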
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Generic checks and assertions based on checkers.
|
||||
|
||||
// Check verifies if the first value matches the expected value according
|
||||
// to the provided checker. If they do not match, an error is logged, the
|
||||
// test is marked as failed, and the test execution continues.
|
||||
//
|
||||
// Some checkers may not need the expected argument (e.g. IsNil).
|
||||
//
|
||||
// Extra arguments provided to the function are logged next to the reported
|
||||
// problem when the matching fails.
|
||||
func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool {
|
||||
return c.internalCheck("Check", obtained, checker, args...)
|
||||
}
|
||||
|
||||
// Assert ensures that the first value matches the expected value according
|
||||
// to the provided checker. If they do not match, an error is logged, the
|
||||
// test is marked as failed, and the test execution stops.
|
||||
//
|
||||
// Some checkers may not need the expected argument (e.g. IsNil).
|
||||
//
|
||||
// Extra arguments provided to the function are logged next to the reported
|
||||
// problem when the matching fails.
|
||||
func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) {
|
||||
if !c.internalCheck("Assert", obtained, checker, args...) {
|
||||
c.stopNow()
|
||||
}
|
||||
}
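// Usage sketch contrasting the two: Check records a failure and keeps
// going, while Assert aborts the test at the failing line. The items
// value is illustrative; Commentf attaches extra context to a failure.
//
//	func (s *MySuite) TestItems(c *C) {
//		c.Check(len(items), Equals, 3) // logs and continues on mismatch
//		c.Assert(items, NotNil)        // stops the test if items is nil
//		c.Assert(items[0], Equals, "a", Commentf("items: %v", items))
//	}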
|
||||
|
||||
func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool {
|
||||
if checker == nil {
|
||||
c.logCaller(2)
|
||||
c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName))
|
||||
c.logString("Oops.. you've provided a nil checker!")
|
||||
c.logNewLine()
|
||||
c.Fail()
|
||||
return false
|
||||
}
|
||||
|
||||
// If the last argument is a comment (CommentInterface), extract it.
|
||||
var comment CommentInterface
|
||||
if len(args) > 0 {
|
||||
if c, ok := args[len(args)-1].(CommentInterface); ok {
|
||||
comment = c
|
||||
args = args[:len(args)-1]
|
||||
}
|
||||
}
|
||||
|
||||
params := append([]interface{}{obtained}, args...)
|
||||
info := checker.Info()
|
||||
|
||||
if len(params) != len(info.Params) {
|
||||
names := append([]string{info.Params[0], info.Name}, info.Params[1:]...)
|
||||
c.logCaller(2)
|
||||
c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", ")))
|
||||
c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1))
|
||||
c.logNewLine()
|
||||
c.Fail()
|
||||
return false
|
||||
}
|
||||
|
||||
// Copy the parameter names, since the checker's Check may mutate them.
|
||||
names := append([]string{}, info.Params...)
|
||||
|
||||
// Do the actual check.
|
||||
result, error := checker.Check(params, names)
|
||||
if !result || error != "" {
|
||||
c.logCaller(2)
|
||||
for i := 0; i != len(params); i++ {
|
||||
c.logValue(names[i], params[i])
|
||||
}
|
||||
if comment != nil {
|
||||
c.logString(comment.CheckCommentString())
|
||||
}
|
||||
if error != "" {
|
||||
c.logString(error)
|
||||
}
|
||||
c.logNewLine()
|
||||
c.Fail()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
519
vendor/gopkg.in/check.v1/helpers_test.go
generated
vendored
Normal file
@ -0,0 +1,519 @@
|
||||
// These tests verify the inner workings of the helper methods associated
|
||||
// with check.C.
|
||||
|
||||
package check_test
|
||||
|
||||
import (
|
||||
"gopkg.in/check.v1"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var helpersS = check.Suite(&HelpersS{})
|
||||
|
||||
type HelpersS struct{}
|
||||
|
||||
func (s *HelpersS) TestCountSuite(c *check.C) {
|
||||
suitesRun++
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Fake checker and bug info to verify the behavior of Assert() and Check().
|
||||
|
||||
type MyChecker struct {
|
||||
info *check.CheckerInfo
|
||||
params []interface{}
|
||||
names []string
|
||||
result bool
|
||||
error string
|
||||
}
|
||||
|
||||
func (checker *MyChecker) Info() *check.CheckerInfo {
|
||||
if checker.info == nil {
|
||||
return &check.CheckerInfo{Name: "MyChecker", Params: []string{"myobtained", "myexpected"}}
|
||||
}
|
||||
return checker.info
|
||||
}
|
||||
|
||||
func (checker *MyChecker) Check(params []interface{}, names []string) (bool, string) {
|
||||
rparams := checker.params
|
||||
rnames := checker.names
|
||||
checker.params = append([]interface{}{}, params...)
|
||||
checker.names = append([]string{}, names...)
|
||||
if rparams != nil {
|
||||
copy(params, rparams)
|
||||
}
|
||||
if rnames != nil {
|
||||
copy(names, rnames)
|
||||
}
|
||||
return checker.result, checker.error
|
||||
}
|
||||
|
||||
type myCommentType string
|
||||
|
||||
func (c myCommentType) CheckCommentString() string {
|
||||
return string(c)
|
||||
}
|
||||
|
||||
func myComment(s string) myCommentType {
|
||||
return myCommentType(s)
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Ensure a real checker actually works fine.
|
||||
|
||||
func (s *HelpersS) TestCheckerInterface(c *check.C) {
|
||||
testHelperSuccess(c, "Check(1, Equals, 1)", true, func() interface{} {
|
||||
return c.Check(1, check.Equals, 1)
|
||||
})
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Tests for Check(), mostly the same as for Assert() following these.
|
||||
|
||||
func (s *HelpersS) TestCheckSucceedWithExpected(c *check.C) {
|
||||
checker := &MyChecker{result: true}
|
||||
testHelperSuccess(c, "Check(1, checker, 2)", true, func() interface{} {
|
||||
return c.Check(1, checker, 2)
|
||||
})
|
||||
if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) {
|
||||
c.Fatalf("Bad params for check: %#v", checker.params)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckSucceedWithoutExpected(c *check.C) {
|
||||
checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
|
||||
testHelperSuccess(c, "Check(1, checker)", true, func() interface{} {
|
||||
return c.Check(1, checker)
|
||||
})
|
||||
if !reflect.DeepEqual(checker.params, []interface{}{1}) {
|
||||
c.Fatalf("Bad params for check: %#v", checker.params)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckFailWithExpected(c *check.C) {
|
||||
checker := &MyChecker{result: false}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(1, checker, 2\\)\n" +
|
||||
"\\.+ myobtained int = 1\n" +
|
||||
"\\.+ myexpected int = 2\n\n"
|
||||
testHelperFailure(c, "Check(1, checker, 2)", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check(1, checker, 2)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckFailWithExpectedAndComment(c *check.C) {
|
||||
checker := &MyChecker{result: false}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" +
|
||||
"\\.+ myobtained int = 1\n" +
|
||||
"\\.+ myexpected int = 2\n" +
|
||||
"\\.+ Hello world!\n\n"
|
||||
testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check(1, checker, 2, myComment("Hello world!"))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckFailWithExpectedAndStaticComment(c *check.C) {
|
||||
checker := &MyChecker{result: false}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" // Nice leading comment\\.\n" +
|
||||
" return c\\.Check\\(1, checker, 2\\) // Hello there\n" +
|
||||
"\\.+ myobtained int = 1\n" +
|
||||
"\\.+ myexpected int = 2\n\n"
|
||||
testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log,
|
||||
func() interface{} {
|
||||
// Nice leading comment.
|
||||
return c.Check(1, checker, 2) // Hello there
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckFailWithoutExpected(c *check.C) {
|
||||
checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(1, checker\\)\n" +
|
||||
"\\.+ myvalue int = 1\n\n"
|
||||
testHelperFailure(c, "Check(1, checker)", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check(1, checker)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckFailWithoutExpectedAndMessage(c *check.C) {
|
||||
checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" +
|
||||
"\\.+ myvalue int = 1\n" +
|
||||
"\\.+ Hello world!\n\n"
|
||||
testHelperFailure(c, "Check(1, checker, msg)", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check(1, checker, myComment("Hello world!"))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckWithMissingExpected(c *check.C) {
|
||||
checker := &MyChecker{result: true}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(1, checker\\)\n" +
|
||||
"\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" +
|
||||
"\\.+ Wrong number of parameters for MyChecker: " +
|
||||
"want 3, got 2\n\n"
|
||||
testHelperFailure(c, "Check(1, checker, !?)", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check(1, checker)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckWithTooManyExpected(c *check.C) {
|
||||
checker := &MyChecker{result: true}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(1, checker, 2, 3\\)\n" +
|
||||
"\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" +
|
||||
"\\.+ Wrong number of parameters for MyChecker: " +
|
||||
"want 3, got 4\n\n"
|
||||
testHelperFailure(c, "Check(1, checker, 2, 3)", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check(1, checker, 2, 3)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckWithError(c *check.C) {
|
||||
checker := &MyChecker{result: false, error: "Some not so cool data provided!"}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(1, checker, 2\\)\n" +
|
||||
"\\.+ myobtained int = 1\n" +
|
||||
"\\.+ myexpected int = 2\n" +
|
||||
"\\.+ Some not so cool data provided!\n\n"
|
||||
testHelperFailure(c, "Check(1, checker, 2)", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check(1, checker, 2)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckWithNilChecker(c *check.C) {
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(1, nil\\)\n" +
|
||||
"\\.+ Check\\(obtained, nil!\\?, \\.\\.\\.\\):\n" +
|
||||
"\\.+ Oops\\.\\. you've provided a nil checker!\n\n"
|
||||
testHelperFailure(c, "Check(obtained, nil)", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check(1, nil)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestCheckWithParamsAndNamesMutation(c *check.C) {
|
||||
checker := &MyChecker{result: false, params: []interface{}{3, 4}, names: []string{"newobtained", "newexpected"}}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(1, checker, 2\\)\n" +
|
||||
"\\.+ newobtained int = 3\n" +
|
||||
"\\.+ newexpected int = 4\n\n"
|
||||
testHelperFailure(c, "Check(1, checker, 2) with mutation", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check(1, checker, 2)
|
||||
})
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Tests for Assert(), mostly the same as for Check() above.
|
||||
|
||||
func (s *HelpersS) TestAssertSucceedWithExpected(c *check.C) {
|
||||
checker := &MyChecker{result: true}
|
||||
testHelperSuccess(c, "Assert(1, checker, 2)", nil, func() interface{} {
|
||||
c.Assert(1, checker, 2)
|
||||
return nil
|
||||
})
|
||||
if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) {
|
||||
c.Fatalf("Bad params for check: %#v", checker.params)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestAssertSucceedWithoutExpected(c *check.C) {
|
||||
checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
|
||||
testHelperSuccess(c, "Assert(1, checker)", nil, func() interface{} {
|
||||
c.Assert(1, checker)
|
||||
return nil
|
||||
})
|
||||
if !reflect.DeepEqual(checker.params, []interface{}{1}) {
|
||||
c.Fatalf("Bad params for check: %#v", checker.params)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestAssertFailWithExpected(c *check.C) {
|
||||
checker := &MyChecker{result: false}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" c\\.Assert\\(1, checker, 2\\)\n" +
|
||||
"\\.+ myobtained int = 1\n" +
|
||||
"\\.+ myexpected int = 2\n\n"
|
||||
testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log,
|
||||
func() interface{} {
|
||||
c.Assert(1, checker, 2)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestAssertFailWithExpectedAndMessage(c *check.C) {
|
||||
checker := &MyChecker{result: false}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" c\\.Assert\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" +
|
||||
"\\.+ myobtained int = 1\n" +
|
||||
"\\.+ myexpected int = 2\n" +
|
||||
"\\.+ Hello world!\n\n"
|
||||
testHelperFailure(c, "Assert(1, checker, 2, msg)", nil, true, log,
|
||||
func() interface{} {
|
||||
c.Assert(1, checker, 2, myComment("Hello world!"))
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestAssertFailWithoutExpected(c *check.C) {
|
||||
checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" c\\.Assert\\(1, checker\\)\n" +
|
||||
"\\.+ myvalue int = 1\n\n"
|
||||
testHelperFailure(c, "Assert(1, checker)", nil, true, log,
|
||||
func() interface{} {
|
||||
c.Assert(1, checker)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestAssertFailWithoutExpectedAndMessage(c *check.C) {
|
||||
checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" c\\.Assert\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" +
|
||||
"\\.+ myvalue int = 1\n" +
|
||||
"\\.+ Hello world!\n\n"
|
||||
testHelperFailure(c, "Assert(1, checker, msg)", nil, true, log,
|
||||
func() interface{} {
|
||||
c.Assert(1, checker, myComment("Hello world!"))
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestAssertWithMissingExpected(c *check.C) {
|
||||
checker := &MyChecker{result: true}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" c\\.Assert\\(1, checker\\)\n" +
|
||||
"\\.+ Assert\\(myobtained, MyChecker, myexpected\\):\n" +
|
||||
"\\.+ Wrong number of parameters for MyChecker: " +
|
||||
"want 3, got 2\n\n"
|
||||
testHelperFailure(c, "Assert(1, checker, !?)", nil, true, log,
|
||||
func() interface{} {
|
||||
c.Assert(1, checker)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestAssertWithError(c *check.C) {
|
||||
checker := &MyChecker{result: false, error: "Some not so cool data provided!"}
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" c\\.Assert\\(1, checker, 2\\)\n" +
|
||||
"\\.+ myobtained int = 1\n" +
|
||||
"\\.+ myexpected int = 2\n" +
|
||||
"\\.+ Some not so cool data provided!\n\n"
|
||||
testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log,
|
||||
func() interface{} {
|
||||
c.Assert(1, checker, 2)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestAssertWithNilChecker(c *check.C) {
|
||||
log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
|
||||
" c\\.Assert\\(1, nil\\)\n" +
|
||||
"\\.+ Assert\\(obtained, nil!\\?, \\.\\.\\.\\):\n" +
|
||||
"\\.+ Oops\\.\\. you've provided a nil checker!\n\n"
|
||||
testHelperFailure(c, "Assert(obtained, nil)", nil, true, log,
|
||||
func() interface{} {
|
||||
c.Assert(1, nil)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Ensure that values logged work properly in some interesting cases.
|
||||
|
||||
func (s *HelpersS) TestValueLoggingWithArrays(c *check.C) {
|
||||
checker := &MyChecker{result: false}
|
||||
log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(\\[\\]byte{1, 2}, checker, \\[\\]byte{1, 3}\\)\n" +
|
||||
"\\.+ myobtained \\[\\]uint8 = \\[\\]byte{0x1, 0x2}\n" +
|
||||
"\\.+ myexpected \\[\\]uint8 = \\[\\]byte{0x1, 0x3}\n\n"
|
||||
testHelperFailure(c, "Check([]byte{1}, chk, []byte{3})", false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check([]byte{1, 2}, checker, []byte{1, 3})
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestValueLoggingWithMultiLine(c *check.C) {
|
||||
checker := &MyChecker{result: false}
|
||||
log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(\"a\\\\nb\\\\n\", checker, \"a\\\\nb\\\\nc\"\\)\n" +
|
||||
"\\.+ myobtained string = \"\" \\+\n" +
|
||||
"\\.+ \"a\\\\n\" \\+\n" +
|
||||
"\\.+ \"b\\\\n\"\n" +
|
||||
"\\.+ myexpected string = \"\" \\+\n" +
|
||||
"\\.+ \"a\\\\n\" \\+\n" +
|
||||
"\\.+ \"b\\\\n\" \\+\n" +
|
||||
"\\.+ \"c\"\n\n"
|
||||
testHelperFailure(c, `Check("a\nb\n", chk, "a\nb\nc")`, false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check("a\nb\n", checker, "a\nb\nc")
|
||||
})
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestValueLoggingWithMultiLineException(c *check.C) {
|
||||
// If the newline is at the end of the string, don't log as multi-line.
|
||||
checker := &MyChecker{result: false}
|
||||
log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
|
||||
" return c\\.Check\\(\"a b\\\\n\", checker, \"a\\\\nb\"\\)\n" +
|
||||
"\\.+ myobtained string = \"a b\\\\n\"\n" +
|
||||
"\\.+ myexpected string = \"\" \\+\n" +
|
||||
"\\.+ \"a\\\\n\" \\+\n" +
|
||||
"\\.+ \"b\"\n\n"
|
||||
testHelperFailure(c, `Check("a b\n", chk, "a\nb")`, false, false, log,
|
||||
func() interface{} {
|
||||
return c.Check("a b\n", checker, "a\nb")
|
||||
})
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// MkDir() tests.
|
||||
|
||||
type MkDirHelper struct {
|
||||
path1 string
|
||||
path2 string
|
||||
isDir1 bool
|
||||
isDir2 bool
|
||||
isDir3 bool
|
||||
isDir4 bool
|
||||
}
|
||||
|
||||
func (s *MkDirHelper) SetUpSuite(c *check.C) {
|
||||
s.path1 = c.MkDir()
|
||||
s.isDir1 = isDir(s.path1)
|
||||
}
|
||||
|
||||
func (s *MkDirHelper) Test(c *check.C) {
|
||||
s.path2 = c.MkDir()
|
||||
s.isDir2 = isDir(s.path2)
|
||||
}
|
||||
|
||||
func (s *MkDirHelper) TearDownSuite(c *check.C) {
|
||||
s.isDir3 = isDir(s.path1)
|
||||
s.isDir4 = isDir(s.path2)
|
||||
}
|
||||
|
||||
func (s *HelpersS) TestMkDir(c *check.C) {
|
||||
helper := MkDirHelper{}
|
||||
output := String{}
|
||||
check.Run(&helper, &check.RunConf{Output: &output})
|
||||
c.Assert(output.value, check.Equals, "")
|
||||
c.Check(helper.isDir1, check.Equals, true)
|
||||
c.Check(helper.isDir2, check.Equals, true)
|
||||
c.Check(helper.isDir3, check.Equals, true)
|
||||
c.Check(helper.isDir4, check.Equals, true)
|
||||
c.Check(helper.path1, check.Not(check.Equals),
|
||||
helper.path2)
|
||||
c.Check(isDir(helper.path1), check.Equals, false)
|
||||
c.Check(isDir(helper.path2), check.Equals, false)
|
||||
}
|
||||
|
||||
func isDir(path string) bool {
|
||||
if stat, err := os.Stat(path); err == nil {
|
||||
return stat.IsDir()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Concurrent logging should not corrupt the underlying buffer.
|
||||
// Use go test -race to detect the race in this test.
|
||||
func (s *HelpersS) TestConcurrentLogging(c *check.C) {
|
||||
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))
|
||||
var start, stop sync.WaitGroup
|
||||
start.Add(1)
|
||||
for i, n := 0, runtime.NumCPU()*2; i < n; i++ {
|
||||
stop.Add(1)
|
||||
go func(i int) {
|
||||
start.Wait()
|
||||
for j := 0; j < 30; j++ {
|
||||
c.Logf("Worker %d: line %d", i, j)
|
||||
}
|
||||
stop.Done()
|
||||
}(i)
|
||||
}
|
||||
start.Done()
|
||||
stop.Wait()
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test the TestName function
|
||||
|
||||
type TestNameHelper struct {
|
||||
name1 string
|
||||
name2 string
|
||||
name3 string
|
||||
name4 string
|
||||
name5 string
|
||||
}
|
||||
|
||||
func (s *TestNameHelper) SetUpSuite(c *check.C) { s.name1 = c.TestName() }
|
||||
func (s *TestNameHelper) SetUpTest(c *check.C) { s.name2 = c.TestName() }
|
||||
func (s *TestNameHelper) Test(c *check.C) { s.name3 = c.TestName() }
|
||||
func (s *TestNameHelper) TearDownTest(c *check.C) { s.name4 = c.TestName() }
|
||||
func (s *TestNameHelper) TearDownSuite(c *check.C) { s.name5 = c.TestName() }
|
||||
|
||||
func (s *HelpersS) TestTestName(c *check.C) {
|
||||
helper := TestNameHelper{}
|
||||
output := String{}
|
||||
check.Run(&helper, &check.RunConf{Output: &output})
|
||||
c.Check(helper.name1, check.Equals, "")
|
||||
c.Check(helper.name2, check.Equals, "TestNameHelper.Test")
|
||||
c.Check(helper.name3, check.Equals, "TestNameHelper.Test")
|
||||
c.Check(helper.name4, check.Equals, "TestNameHelper.Test")
|
||||
c.Check(helper.name5, check.Equals, "")
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// A couple of helper functions to test helper functions. :-)
|
||||
|
||||
func testHelperSuccess(c *check.C, name string, expectedResult interface{}, closure func() interface{}) {
|
||||
var result interface{}
|
||||
defer (func() {
|
||||
if err := recover(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkState(c, result,
|
||||
&expectedState{
|
||||
name: name,
|
||||
result: expectedResult,
|
||||
failed: false,
|
||||
log: "",
|
||||
})
|
||||
})()
|
||||
result = closure()
|
||||
}
|
||||
|
||||
func testHelperFailure(c *check.C, name string, expectedResult interface{}, shouldStop bool, log string, closure func() interface{}) {
|
||||
var result interface{}
|
||||
defer (func() {
|
||||
if err := recover(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkState(c, result,
|
||||
&expectedState{
|
||||
name: name,
|
||||
result: expectedResult,
|
||||
failed: true,
|
||||
log: log,
|
||||
})
|
||||
})()
|
||||
result = closure()
|
||||
if shouldStop {
|
||||
c.Logf("%s didn't stop when it should", name)
|
||||
}
|
||||
}
|
168
vendor/gopkg.in/check.v1/printer.go
generated
vendored
Normal file
@ -0,0 +1,168 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/printer"
|
||||
"go/token"
|
||||
"os"
|
||||
)
|
||||
|
||||
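// indent prefixes every non-empty line of s with the given string; blank
// lines keep no prefix. A sketch of its behavior:
//
//	indent("a\nb", "> ") // "> a\n> b"
//	indent("\n", "> ")   // "\n" (blank line untouched)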
func indent(s, with string) (r string) {
|
||||
eol := true
|
||||
for i := 0; i != len(s); i++ {
|
||||
c := s[i]
|
||||
switch {
|
||||
case eol && c == '\n' || c == '\r':
|
||||
case c == '\n' || c == '\r':
|
||||
eol = true
|
||||
case eol:
|
||||
eol = false
|
||||
s = s[:i] + with + s[i:]
|
||||
i += len(with)
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func printLine(filename string, line int) (string, error) {
|
||||
fset := token.NewFileSet()
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}
|
||||
lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config}
|
||||
ast.Walk(lp, fnode)
|
||||
result := lp.output.Bytes()
|
||||
// Comments leave \n at the end.
|
||||
n := len(result)
|
||||
for n > 0 && result[n-1] == '\n' {
|
||||
n--
|
||||
}
|
||||
return string(result[:n]), nil
|
||||
}
|
||||
|
||||
type linePrinter struct {
|
||||
config *printer.Config
|
||||
fset *token.FileSet
|
||||
fnode *ast.File
|
||||
line int
|
||||
output bytes.Buffer
|
||||
stmt ast.Stmt
|
||||
}
|
||||
|
||||
func (lp *linePrinter) emit() bool {
|
||||
if lp.stmt != nil {
|
||||
lp.trim(lp.stmt)
|
||||
lp.printWithComments(lp.stmt)
|
||||
lp.stmt = nil
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (lp *linePrinter) printWithComments(n ast.Node) {
|
||||
nfirst := lp.fset.Position(n.Pos()).Line
|
||||
nlast := lp.fset.Position(n.End()).Line
|
||||
for _, g := range lp.fnode.Comments {
|
||||
cfirst := lp.fset.Position(g.Pos()).Line
|
||||
clast := lp.fset.Position(g.End()).Line
|
||||
if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column {
|
||||
for _, c := range g.List {
|
||||
lp.output.WriteString(c.Text)
|
||||
lp.output.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash {
|
||||
// The printer will not include the comment if it starts past
|
||||
// the node itself. Trick it into printing by overlapping the
|
||||
// slash with the end of the statement.
|
||||
g.List[0].Slash = n.End() - 1
|
||||
}
|
||||
}
|
||||
node := &printer.CommentedNode{n, lp.fnode.Comments}
|
||||
lp.config.Fprint(&lp.output, lp.fset, node)
|
||||
}
|
||||
|
||||
func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) {
|
||||
if n == nil {
|
||||
if lp.output.Len() == 0 {
|
||||
lp.emit()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
first := lp.fset.Position(n.Pos()).Line
|
||||
last := lp.fset.Position(n.End()).Line
|
||||
if first <= lp.line && last >= lp.line {
|
||||
// Print the innermost statement containing the line.
|
||||
if stmt, ok := n.(ast.Stmt); ok {
|
||||
if _, ok := n.(*ast.BlockStmt); !ok {
|
||||
lp.stmt = stmt
|
||||
}
|
||||
}
|
||||
if first == lp.line && lp.emit() {
|
||||
return nil
|
||||
}
|
||||
return lp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lp *linePrinter) trim(n ast.Node) bool {
|
||||
stmt, ok := n.(ast.Stmt)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
line := lp.fset.Position(n.Pos()).Line
|
||||
if line != lp.line {
|
||||
return false
|
||||
}
|
||||
switch stmt := stmt.(type) {
|
||||
case *ast.IfStmt:
|
||||
stmt.Body = lp.trimBlock(stmt.Body)
|
||||
case *ast.SwitchStmt:
|
||||
stmt.Body = lp.trimBlock(stmt.Body)
|
||||
case *ast.TypeSwitchStmt:
|
||||
stmt.Body = lp.trimBlock(stmt.Body)
|
||||
case *ast.CaseClause:
|
||||
stmt.Body = lp.trimList(stmt.Body)
|
||||
case *ast.CommClause:
|
||||
stmt.Body = lp.trimList(stmt.Body)
|
||||
case *ast.BlockStmt:
|
||||
stmt.List = lp.trimList(stmt.List)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt {
|
||||
if !lp.trim(stmt) {
|
||||
return lp.emptyBlock(stmt)
|
||||
}
|
||||
stmt.Rbrace = stmt.Lbrace
|
||||
return stmt
|
||||
}
|
||||
|
||||
func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt {
|
||||
for i := 0; i != len(stmts); i++ {
|
||||
if !lp.trim(stmts[i]) {
|
||||
stmts[i] = lp.emptyStmt(stmts[i])
|
||||
break
|
||||
}
|
||||
}
|
||||
return stmts
|
||||
}
|
||||
|
||||
func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt {
|
||||
return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}}
|
||||
}
|
||||
|
||||
func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt {
|
||||
p := n.Pos()
|
||||
return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p}
|
||||
}
|
104
vendor/gopkg.in/check.v1/printer_test.go
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
||||
package check_test
|
||||
|
||||
import (
|
||||
. "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
var _ = Suite(&PrinterS{})
|
||||
|
||||
type PrinterS struct{}
|
||||
|
||||
func (s *PrinterS) TestCountSuite(c *C) {
|
||||
suitesRun += 1
|
||||
}
|
||||
|
||||
var printTestFuncLine int
|
||||
|
||||
func init() {
|
||||
printTestFuncLine = getMyLine() + 3
|
||||
}
|
||||
|
||||
func printTestFunc() {
|
||||
println(1) // Comment1
|
||||
if 2 == 2 { // Comment2
|
||||
println(3) // Comment3
|
||||
}
|
||||
switch 5 {
|
||||
case 6: println(6) // Comment6
|
||||
println(7)
|
||||
}
|
||||
switch interface{}(9).(type) {// Comment9
|
||||
case int: println(10)
|
||||
println(11)
|
||||
}
|
||||
select {
|
||||
case <-(chan bool)(nil): println(14)
|
||||
println(15)
|
||||
default: println(16)
|
||||
println(17)
|
||||
}
|
||||
println(19,
|
||||
20)
|
||||
_ = func() { println(21)
|
||||
println(22)
|
||||
}
|
||||
println(24, func() {
|
||||
println(25)
|
||||
})
|
||||
// Leading comment
|
||||
// with multiple lines.
|
||||
println(29) // Comment29
|
||||
}
|
||||
|
||||
var printLineTests = []struct {
|
||||
line int
|
||||
output string
|
||||
}{
|
||||
{1, "println(1) // Comment1"},
|
||||
{2, "if 2 == 2 { // Comment2\n ...\n}"},
|
||||
{3, "println(3) // Comment3"},
|
||||
{5, "switch 5 {\n...\n}"},
|
||||
{6, "case 6:\n println(6) // Comment6\n ..."},
|
||||
{7, "println(7)"},
|
||||
{9, "switch interface{}(9).(type) { // Comment9\n...\n}"},
|
||||
{10, "case int:\n println(10)\n ..."},
|
||||
{14, "case <-(chan bool)(nil):\n println(14)\n ..."},
|
||||
{15, "println(15)"},
|
||||
{16, "default:\n println(16)\n ..."},
|
||||
{17, "println(17)"},
|
||||
{19, "println(19,\n 20)"},
|
||||
{20, "println(19,\n 20)"},
|
||||
{21, "_ = func() {\n println(21)\n println(22)\n}"},
|
||||
{22, "println(22)"},
|
||||
{24, "println(24, func() {\n println(25)\n})"},
|
||||
{25, "println(25)"},
|
||||
{26, "println(24, func() {\n println(25)\n})"},
|
||||
{29, "// Leading comment\n// with multiple lines.\nprintln(29) // Comment29"},
|
||||
}
|
||||
|
||||
func (s *PrinterS) TestPrintLine(c *C) {
|
||||
for _, test := range printLineTests {
|
||||
output, err := PrintLine("printer_test.go", printTestFuncLine+test.line)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(output, Equals, test.output)
|
||||
}
|
||||
}
|
||||
|
||||
var indentTests = []struct {
|
||||
in, out string
|
||||
}{
|
||||
{"", ""},
|
||||
{"\n", "\n"},
|
||||
{"a", ">>>a"},
|
||||
{"a\n", ">>>a\n"},
|
||||
{"a\nb", ">>>a\n>>>b"},
|
||||
{" ", ">>> "},
|
||||
}
|
||||
|
||||
func (s *PrinterS) TestIndent(c *C) {
|
||||
for _, test := range indentTests {
|
||||
out := Indent(test.in, ">>>")
|
||||
c.Assert(out, Equals, test.out)
|
||||
}
|
||||
|
||||
}
|
88
vendor/gopkg.in/check.v1/reporter.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Output writer manages atomic output writing according to settings.
|
||||
|
||||
type outputWriter struct {
|
||||
m sync.Mutex
|
||||
writer io.Writer
|
||||
wroteCallProblemLast bool
|
||||
Stream bool
|
||||
Verbose bool
|
||||
}
|
||||
|
||||
func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
|
||||
return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
|
||||
}
|
||||
|
||||
func (ow *outputWriter) Write(content []byte) (n int, err error) {
|
||||
ow.m.Lock()
|
||||
n, err = ow.writer.Write(content)
|
||||
ow.m.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (ow *outputWriter) WriteCallStarted(label string, c *C) {
|
||||
if ow.Stream {
|
||||
header := renderCallHeader(label, c, "", "\n")
|
||||
ow.m.Lock()
|
||||
ow.writer.Write([]byte(header))
|
||||
ow.m.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (ow *outputWriter) WriteCallProblem(label string, c *C) {
|
||||
var prefix string
|
||||
if !ow.Stream {
|
||||
prefix = "\n-----------------------------------" +
|
||||
"-----------------------------------\n"
|
||||
}
|
||||
header := renderCallHeader(label, c, prefix, "\n\n")
|
||||
ow.m.Lock()
|
||||
ow.wroteCallProblemLast = true
|
||||
ow.writer.Write([]byte(header))
|
||||
if !ow.Stream {
|
||||
c.logb.WriteTo(ow.writer)
|
||||
}
|
||||
ow.m.Unlock()
|
||||
}
|
||||
|
||||
func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
|
||||
if ow.Stream || (ow.Verbose && c.kind == testKd) {
|
||||
// TODO Use a buffer here.
|
||||
var suffix string
|
||||
if c.reason != "" {
|
||||
suffix = " (" + c.reason + ")"
|
||||
}
|
||||
if c.status() == succeededSt {
|
||||
suffix += "\t" + c.timerString()
|
||||
}
|
||||
suffix += "\n"
|
||||
if ow.Stream {
|
||||
suffix += "\n"
|
||||
}
|
||||
header := renderCallHeader(label, c, "", suffix)
|
||||
ow.m.Lock()
|
||||
// Resist the temptation to pass the separator line as the prefix above:
// wroteCallProblemLast must be read under the mutex.
|
||||
if !ow.Stream && ow.wroteCallProblemLast {
|
||||
header = "\n-----------------------------------" +
|
||||
"-----------------------------------\n" +
|
||||
header
|
||||
}
|
||||
ow.wroteCallProblemLast = false
|
||||
ow.writer.Write([]byte(header))
|
||||
ow.m.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func renderCallHeader(label string, c *C, prefix, suffix string) string {
|
||||
pc := c.method.PC()
|
||||
return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
|
||||
niceFuncName(pc), suffix)
|
||||
}
|
159
vendor/gopkg.in/check.v1/reporter_test.go
generated
vendored
Normal file
@ -0,0 +1,159 @@
|
||||
package check_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
. "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
var _ = Suite(&reporterS{})
|
||||
|
||||
type reporterS struct {
|
||||
testFile string
|
||||
}
|
||||
|
||||
func (s *reporterS) SetUpSuite(c *C) {
|
||||
_, fileName, _, ok := runtime.Caller(0)
|
||||
c.Assert(ok, Equals, true)
|
||||
s.testFile = filepath.Base(fileName)
|
||||
}
|
||||
|
||||
func (s *reporterS) TestWrite(c *C) {
|
||||
testString := "test string"
|
||||
output := String{}
|
||||
|
||||
dummyStream := true
|
||||
dummyVerbose := true
|
||||
o := NewOutputWriter(&output, dummyStream, dummyVerbose)
|
||||
|
||||
o.Write([]byte(testString))
|
||||
c.Assert(output.value, Equals, testString)
|
||||
}
|
||||
|
||||
func (s *reporterS) TestWriteCallStartedWithStreamFlag(c *C) {
|
||||
testLabel := "test started label"
|
||||
stream := true
|
||||
	output := String{}

	dummyVerbose := true
	o := NewOutputWriter(&output, stream, dummyVerbose)

	o.WriteCallStarted(testLabel, c)
	expected := fmt.Sprintf("%s: %s:\\d+: %s\n", testLabel, s.testFile, c.TestName())
	c.Assert(output.value, Matches, expected)
}

func (s *reporterS) TestWriteCallStartedWithoutStreamFlag(c *C) {
	stream := false
	output := String{}

	dummyLabel := "dummy"
	dummyVerbose := true
	o := NewOutputWriter(&output, stream, dummyVerbose)

	o.WriteCallStarted(dummyLabel, c)
	c.Assert(output.value, Equals, "")
}

func (s *reporterS) TestWriteCallProblemWithStreamFlag(c *C) {
	testLabel := "test problem label"
	stream := true
	output := String{}

	dummyVerbose := true
	o := NewOutputWriter(&output, stream, dummyVerbose)

	o.WriteCallProblem(testLabel, c)
	expected := fmt.Sprintf("%s: %s:\\d+: %s\n\n", testLabel, s.testFile, c.TestName())
	c.Assert(output.value, Matches, expected)
}

func (s *reporterS) TestWriteCallProblemWithoutStreamFlag(c *C) {
	testLabel := "test problem label"
	stream := false
	output := String{}

	dummyVerbose := true
	o := NewOutputWriter(&output, stream, dummyVerbose)

	o.WriteCallProblem(testLabel, c)
	expected := fmt.Sprintf(""+
		"\n"+
		"----------------------------------------------------------------------\n"+
		"%s: %s:\\d+: %s\n\n", testLabel, s.testFile, c.TestName())
	c.Assert(output.value, Matches, expected)
}

func (s *reporterS) TestWriteCallProblemWithoutStreamFlagWithLog(c *C) {
	testLabel := "test problem label"
	testLog := "test log"
	stream := false
	output := String{}

	dummyVerbose := true
	o := NewOutputWriter(&output, stream, dummyVerbose)

	c.Log(testLog)
	o.WriteCallProblem(testLabel, c)
	expected := fmt.Sprintf(""+
		"\n"+
		"----------------------------------------------------------------------\n"+
		"%s: %s:\\d+: %s\n\n%s\n", testLabel, s.testFile, c.TestName(), testLog)
	c.Assert(output.value, Matches, expected)
}

func (s *reporterS) TestWriteCallSuccessWithStreamFlag(c *C) {
	testLabel := "test success label"
	stream := true
	output := String{}

	dummyVerbose := true
	o := NewOutputWriter(&output, stream, dummyVerbose)

	o.WriteCallSuccess(testLabel, c)
	expected := fmt.Sprintf("%s: %s:\\d+: %s\t\\d\\.\\d+s\n\n", testLabel, s.testFile, c.TestName())
	c.Assert(output.value, Matches, expected)
}

func (s *reporterS) TestWriteCallSuccessWithStreamFlagAndReason(c *C) {
	testLabel := "test success label"
	testReason := "test skip reason"
	stream := true
	output := String{}

	dummyVerbose := true
	o := NewOutputWriter(&output, stream, dummyVerbose)
	c.FakeSkip(testReason)

	o.WriteCallSuccess(testLabel, c)
	expected := fmt.Sprintf("%s: %s:\\d+: %s \\(%s\\)\t\\d\\.\\d+s\n\n",
		testLabel, s.testFile, c.TestName(), testReason)
	c.Assert(output.value, Matches, expected)
}

func (s *reporterS) TestWriteCallSuccessWithoutStreamFlagWithVerboseFlag(c *C) {
	testLabel := "test success label"
	stream := false
	verbose := true
	output := String{}

	o := NewOutputWriter(&output, stream, verbose)

	o.WriteCallSuccess(testLabel, c)
	expected := fmt.Sprintf("%s: %s:\\d+: %s\t\\d\\.\\d+s\n", testLabel, s.testFile, c.TestName())
	c.Assert(output.value, Matches, expected)
}

func (s *reporterS) TestWriteCallSuccessWithoutStreamFlagWithoutVerboseFlag(c *C) {
	testLabel := "test success label"
	stream := false
	verbose := false
	output := String{}

	o := NewOutputWriter(&output, stream, verbose)

	o.WriteCallSuccess(testLabel, c)
	c.Assert(output.value, Equals, "")
}
175
vendor/gopkg.in/check.v1/run.go
generated
vendored
Normal file
@ -0,0 +1,175 @@
package check

import (
	"bufio"
	"flag"
	"fmt"
	"os"
	"testing"
	"time"
)

// -----------------------------------------------------------------------
// Test suite registry.

var allSuites []interface{}

// Suite registers the given value as a test suite to be run. Any methods
// starting with the Test prefix in the given value will be considered as
// a test method.
func Suite(suite interface{}) interface{} {
	allSuites = append(allSuites, suite)
	return suite
}
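
// Usage sketch (editorial addition for illustration; MySuite is a
// hypothetical name): suites are conventionally registered at package
// scope via the blank identifier, after which every method whose name
// starts with "Test" becomes a test method:
//
//	type MySuite struct{}
//	var _ = Suite(&MySuite{})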

// -----------------------------------------------------------------------
// Public running interface.

var (
	oldFilterFlag  = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run")
	oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode")
	oldStreamFlag  = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)")
	oldBenchFlag   = flag.Bool("gocheck.b", false, "Run benchmarks")
	oldBenchTime   = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark")
	oldListFlag    = flag.Bool("gocheck.list", false, "List the names of all tests that will be run")
	oldWorkFlag    = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory")

	newFilterFlag  = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run")
	newVerboseFlag = flag.Bool("check.v", false, "Verbose mode")
	newStreamFlag  = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)")
	newBenchFlag   = flag.Bool("check.b", false, "Run benchmarks")
	newBenchTime   = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark")
	newBenchMem    = flag.Bool("check.bmem", false, "Report memory benchmarks")
	newListFlag    = flag.Bool("check.list", false, "List the names of all tests that will be run")
	newWorkFlag    = flag.Bool("check.work", false, "Display and do not remove the test working directory")
)

// TestingT runs all test suites registered with the Suite function,
// printing results to stdout, and reporting any failures back to
// the "testing" package.
func TestingT(testingT *testing.T) {
	benchTime := *newBenchTime
	if benchTime == 1*time.Second {
		benchTime = *oldBenchTime
	}
	conf := &RunConf{
		Filter:        *oldFilterFlag + *newFilterFlag,
		Verbose:       *oldVerboseFlag || *newVerboseFlag,
		Stream:        *oldStreamFlag || *newStreamFlag,
		Benchmark:     *oldBenchFlag || *newBenchFlag,
		BenchmarkTime: benchTime,
		BenchmarkMem:  *newBenchMem,
		KeepWorkDir:   *oldWorkFlag || *newWorkFlag,
	}
	if *oldListFlag || *newListFlag {
		w := bufio.NewWriter(os.Stdout)
		for _, name := range ListAll(conf) {
			fmt.Fprintln(w, name)
		}
		w.Flush()
		return
	}
	result := RunAll(conf)
	println(result.String())
	if !result.Passed() {
		testingT.Fail()
	}
}
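
// Usage sketch (editorial addition for illustration): the conventional
// bridge from the standard "go test" runner into this package is a single
// hook in a *_test.go file; flags such as -check.f and -check.v are then
// picked up from the test binary's command line:
//
//	func Test(t *testing.T) { TestingT(t) }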

// RunAll runs all test suites registered with the Suite function, using the
// provided run configuration.
func RunAll(runConf *RunConf) *Result {
	result := Result{}
	for _, suite := range allSuites {
		result.Add(Run(suite, runConf))
	}
	return &result
}

// Run runs the provided test suite using the provided run configuration.
func Run(suite interface{}, runConf *RunConf) *Result {
	runner := newSuiteRunner(suite, runConf)
	return runner.run()
}

// ListAll returns the names of all the test functions registered with the
// Suite function that will be run with the provided run configuration.
func ListAll(runConf *RunConf) []string {
	var names []string
	for _, suite := range allSuites {
		names = append(names, List(suite, runConf)...)
	}
	return names
}

// List returns the names of the test functions in the given
// suite that will be run with the provided run configuration.
func List(suite interface{}, runConf *RunConf) []string {
	var names []string
	runner := newSuiteRunner(suite, runConf)
	for _, t := range runner.tests {
		names = append(names, t.String())
	}
	return names
}
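
// Illustrative sketch (editorial addition; MySuite is hypothetical): List
// and ListAll support a dry run that reports the selected test names
// without executing them, with Filter following the same regexp semantics
// as the -check.f flag:
//
//	for _, name := range List(&MySuite{}, &RunConf{Filter: "Conn"}) {
//		fmt.Println(name)
//	}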

// -----------------------------------------------------------------------
// Result methods.

func (r *Result) Add(other *Result) {
	r.Succeeded += other.Succeeded
	r.Skipped += other.Skipped
	r.Failed += other.Failed
	r.Panicked += other.Panicked
	r.FixturePanicked += other.FixturePanicked
	r.ExpectedFailures += other.ExpectedFailures
	r.Missed += other.Missed
	if r.WorkDir != "" && other.WorkDir != "" {
		r.WorkDir += ":" + other.WorkDir
	} else if other.WorkDir != "" {
		r.WorkDir = other.WorkDir
	}
}

func (r *Result) Passed() bool {
	return (r.Failed == 0 && r.Panicked == 0 &&
		r.FixturePanicked == 0 && r.Missed == 0 &&
		r.RunError == nil)
}

func (r *Result) String() string {
	if r.RunError != nil {
		return "ERROR: " + r.RunError.Error()
	}

	var value string
	if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 &&
		r.Missed == 0 {
		value = "OK: "
	} else {
		value = "OOPS: "
	}
	value += fmt.Sprintf("%d passed", r.Succeeded)
	if r.Skipped != 0 {
		value += fmt.Sprintf(", %d skipped", r.Skipped)
	}
	if r.ExpectedFailures != 0 {
		value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures)
	}
	if r.Failed != 0 {
		value += fmt.Sprintf(", %d FAILED", r.Failed)
	}
	if r.Panicked != 0 {
		value += fmt.Sprintf(", %d PANICKED", r.Panicked)
	}
	if r.FixturePanicked != 0 {
		value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked)
	}
	if r.Missed != 0 {
		value += fmt.Sprintf(", %d MISSED", r.Missed)
	}
	if r.WorkDir != "" {
		value += "\nWORK=" + r.WorkDir
	}
	return value
}
419
vendor/gopkg.in/check.v1/run_test.go
generated
vendored
Normal file
@ -0,0 +1,419 @@
// These tests verify the test running logic.

package check_test

import (
	"errors"
	. "gopkg.in/check.v1"
	"os"
	"sync"
)

var runnerS = Suite(&RunS{})

type RunS struct{}

func (s *RunS) TestCountSuite(c *C) {
	suitesRun += 1
}

// -----------------------------------------------------------------------
// Tests ensuring result counting works properly.

func (s *RunS) TestSuccess(c *C) {
	output := String{}
	result := Run(&SuccessHelper{}, &RunConf{Output: &output})
	c.Check(result.Succeeded, Equals, 1)
	c.Check(result.Failed, Equals, 0)
	c.Check(result.Skipped, Equals, 0)
	c.Check(result.Panicked, Equals, 0)
	c.Check(result.FixturePanicked, Equals, 0)
	c.Check(result.Missed, Equals, 0)
	c.Check(result.RunError, IsNil)
}

func (s *RunS) TestFailure(c *C) {
	output := String{}
	result := Run(&FailHelper{}, &RunConf{Output: &output})
	c.Check(result.Succeeded, Equals, 0)
	c.Check(result.Failed, Equals, 1)
	c.Check(result.Skipped, Equals, 0)
	c.Check(result.Panicked, Equals, 0)
	c.Check(result.FixturePanicked, Equals, 0)
	c.Check(result.Missed, Equals, 0)
	c.Check(result.RunError, IsNil)
}

func (s *RunS) TestFixture(c *C) {
	output := String{}
	result := Run(&FixtureHelper{}, &RunConf{Output: &output})
	c.Check(result.Succeeded, Equals, 2)
	c.Check(result.Failed, Equals, 0)
	c.Check(result.Skipped, Equals, 0)
	c.Check(result.Panicked, Equals, 0)
	c.Check(result.FixturePanicked, Equals, 0)
	c.Check(result.Missed, Equals, 0)
	c.Check(result.RunError, IsNil)
}

func (s *RunS) TestPanicOnTest(c *C) {
	output := String{}
	helper := &FixtureHelper{panicOn: "Test1"}
	result := Run(helper, &RunConf{Output: &output})
	c.Check(result.Succeeded, Equals, 1)
	c.Check(result.Failed, Equals, 0)
	c.Check(result.Skipped, Equals, 0)
	c.Check(result.Panicked, Equals, 1)
	c.Check(result.FixturePanicked, Equals, 0)
	c.Check(result.Missed, Equals, 0)
	c.Check(result.RunError, IsNil)
}

func (s *RunS) TestPanicOnSetUpTest(c *C) {
	output := String{}
	helper := &FixtureHelper{panicOn: "SetUpTest"}
	result := Run(helper, &RunConf{Output: &output})
	c.Check(result.Succeeded, Equals, 0)
	c.Check(result.Failed, Equals, 0)
	c.Check(result.Skipped, Equals, 0)
	c.Check(result.Panicked, Equals, 0)
	c.Check(result.FixturePanicked, Equals, 1)
	c.Check(result.Missed, Equals, 2)
	c.Check(result.RunError, IsNil)
}

func (s *RunS) TestPanicOnSetUpSuite(c *C) {
	output := String{}
	helper := &FixtureHelper{panicOn: "SetUpSuite"}
	result := Run(helper, &RunConf{Output: &output})
	c.Check(result.Succeeded, Equals, 0)
	c.Check(result.Failed, Equals, 0)
	c.Check(result.Skipped, Equals, 0)
	c.Check(result.Panicked, Equals, 0)
	c.Check(result.FixturePanicked, Equals, 1)
	c.Check(result.Missed, Equals, 2)
	c.Check(result.RunError, IsNil)
}

// -----------------------------------------------------------------------
// Check result aggregation.

func (s *RunS) TestAdd(c *C) {
	result := &Result{
		Succeeded:        1,
		Skipped:          2,
		Failed:           3,
		Panicked:         4,
		FixturePanicked:  5,
		Missed:           6,
		ExpectedFailures: 7,
	}
	result.Add(&Result{
		Succeeded:        10,
		Skipped:          20,
		Failed:           30,
		Panicked:         40,
		FixturePanicked:  50,
		Missed:           60,
		ExpectedFailures: 70,
	})
	c.Check(result.Succeeded, Equals, 11)
	c.Check(result.Skipped, Equals, 22)
	c.Check(result.Failed, Equals, 33)
	c.Check(result.Panicked, Equals, 44)
	c.Check(result.FixturePanicked, Equals, 55)
	c.Check(result.Missed, Equals, 66)
	c.Check(result.ExpectedFailures, Equals, 77)
	c.Check(result.RunError, IsNil)
}

// -----------------------------------------------------------------------
// Check the Passed() method.

func (s *RunS) TestPassed(c *C) {
	c.Assert((&Result{}).Passed(), Equals, true)
	c.Assert((&Result{Succeeded: 1}).Passed(), Equals, true)
	c.Assert((&Result{Skipped: 1}).Passed(), Equals, true)
	c.Assert((&Result{Failed: 1}).Passed(), Equals, false)
	c.Assert((&Result{Panicked: 1}).Passed(), Equals, false)
	c.Assert((&Result{FixturePanicked: 1}).Passed(), Equals, false)
	c.Assert((&Result{Missed: 1}).Passed(), Equals, false)
	c.Assert((&Result{RunError: errors.New("!")}).Passed(), Equals, false)
}

// -----------------------------------------------------------------------
// Check that result printing is working correctly.

func (s *RunS) TestPrintSuccess(c *C) {
	result := &Result{Succeeded: 5}
	c.Check(result.String(), Equals, "OK: 5 passed")
}

func (s *RunS) TestPrintFailure(c *C) {
	result := &Result{Failed: 5}
	c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FAILED")
}

func (s *RunS) TestPrintSkipped(c *C) {
	result := &Result{Skipped: 5}
	c.Check(result.String(), Equals, "OK: 0 passed, 5 skipped")
}

func (s *RunS) TestPrintExpectedFailures(c *C) {
	result := &Result{ExpectedFailures: 5}
	c.Check(result.String(), Equals, "OK: 0 passed, 5 expected failures")
}

func (s *RunS) TestPrintPanicked(c *C) {
	result := &Result{Panicked: 5}
	c.Check(result.String(), Equals, "OOPS: 0 passed, 5 PANICKED")
}

func (s *RunS) TestPrintFixturePanicked(c *C) {
	result := &Result{FixturePanicked: 5}
	c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FIXTURE-PANICKED")
}

func (s *RunS) TestPrintMissed(c *C) {
	result := &Result{Missed: 5}
	c.Check(result.String(), Equals, "OOPS: 0 passed, 5 MISSED")
}

func (s *RunS) TestPrintAll(c *C) {
	result := &Result{Succeeded: 1, Skipped: 2, ExpectedFailures: 3,
		Panicked: 4, FixturePanicked: 5, Missed: 6}
	c.Check(result.String(), Equals,
		"OOPS: 1 passed, 2 skipped, 3 expected failures, 4 PANICKED, "+
			"5 FIXTURE-PANICKED, 6 MISSED")
}

func (s *RunS) TestPrintRunError(c *C) {
	result := &Result{Succeeded: 1, Failed: 1,
		RunError: errors.New("Kaboom!")}
	c.Check(result.String(), Equals, "ERROR: Kaboom!")
}

// -----------------------------------------------------------------------
// Verify that the method pattern flag works correctly.

func (s *RunS) TestFilterTestName(c *C) {
	helper := FixtureHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Filter: "Test[91]"}
	Run(&helper, &runConf)
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "Test1")
	c.Check(helper.calls[3], Equals, "TearDownTest")
	c.Check(helper.calls[4], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 5)
}

func (s *RunS) TestFilterTestNameWithAll(c *C) {
	helper := FixtureHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Filter: ".*"}
	Run(&helper, &runConf)
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "Test1")
	c.Check(helper.calls[3], Equals, "TearDownTest")
	c.Check(helper.calls[4], Equals, "SetUpTest")
	c.Check(helper.calls[5], Equals, "Test2")
	c.Check(helper.calls[6], Equals, "TearDownTest")
	c.Check(helper.calls[7], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 8)
}

func (s *RunS) TestFilterSuiteName(c *C) {
	helper := FixtureHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Filter: "FixtureHelper"}
	Run(&helper, &runConf)
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "Test1")
	c.Check(helper.calls[3], Equals, "TearDownTest")
	c.Check(helper.calls[4], Equals, "SetUpTest")
	c.Check(helper.calls[5], Equals, "Test2")
	c.Check(helper.calls[6], Equals, "TearDownTest")
	c.Check(helper.calls[7], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 8)
}

func (s *RunS) TestFilterSuiteNameAndTestName(c *C) {
	helper := FixtureHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Filter: "FixtureHelper\\.Test2"}
	Run(&helper, &runConf)
	c.Check(helper.calls[0], Equals, "SetUpSuite")
	c.Check(helper.calls[1], Equals, "SetUpTest")
	c.Check(helper.calls[2], Equals, "Test2")
	c.Check(helper.calls[3], Equals, "TearDownTest")
	c.Check(helper.calls[4], Equals, "TearDownSuite")
	c.Check(len(helper.calls), Equals, 5)
}

func (s *RunS) TestFilterAllOut(c *C) {
	helper := FixtureHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Filter: "NotFound"}
	Run(&helper, &runConf)
	c.Check(len(helper.calls), Equals, 0)
}

func (s *RunS) TestRequirePartialMatch(c *C) {
	helper := FixtureHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Filter: "est"}
	Run(&helper, &runConf)
	c.Check(len(helper.calls), Equals, 8)
}

func (s *RunS) TestFilterError(c *C) {
	helper := FixtureHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Filter: "]["}
	result := Run(&helper, &runConf)
	c.Check(result.String(), Equals,
		"ERROR: Bad filter expression: error parsing regexp: missing closing ]: `[`")
	c.Check(len(helper.calls), Equals, 0)
}

// -----------------------------------------------------------------------
// Verify that List works correctly.

func (s *RunS) TestListFiltered(c *C) {
	names := List(&FixtureHelper{}, &RunConf{Filter: "1"})
	c.Assert(names, DeepEquals, []string{
		"FixtureHelper.Test1",
	})
}

func (s *RunS) TestList(c *C) {
	names := List(&FixtureHelper{}, &RunConf{})
	c.Assert(names, DeepEquals, []string{
		"FixtureHelper.Test1",
		"FixtureHelper.Test2",
	})
}

// -----------------------------------------------------------------------
// Verify that verbose mode prints tests which pass as well.

func (s *RunS) TestVerboseMode(c *C) {
	helper := FixtureHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Verbose: true}
	Run(&helper, &runConf)

	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" +
		"PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"

	c.Assert(output.value, Matches, expected)
}

func (s *RunS) TestVerboseModeWithFailBeforePass(c *C) {
	helper := FixtureHelper{panicOn: "Test1"}
	output := String{}
	runConf := RunConf{Output: &output, Verbose: true}
	Run(&helper, &runConf)

	expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line.
		"PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"

	c.Assert(output.value, Matches, expected)
}

// -----------------------------------------------------------------------
// Verify the stream output mode. In this mode there's no output caching.

type StreamHelper struct {
	l2 sync.Mutex
	l3 sync.Mutex
}

func (s *StreamHelper) SetUpSuite(c *C) {
	c.Log("0")
}

func (s *StreamHelper) Test1(c *C) {
	c.Log("1")
	s.l2.Lock()
	s.l3.Lock()
	go func() {
		s.l2.Lock() // Wait for "2".
		c.Log("3")
		s.l3.Unlock()
	}()
}

func (s *StreamHelper) Test2(c *C) {
	c.Log("2")
	s.l2.Unlock()
	s.l3.Lock() // Wait for "3".
	c.Fail()
	c.Log("4")
}

func (s *RunS) TestStreamMode(c *C) {
	helper := &StreamHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Stream: true}
	Run(helper, &runConf)

	expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" +
		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" +
		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" +
		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" +
		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" +
		"FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n"

	c.Assert(output.value, Matches, expected)
}

type StreamMissHelper struct{}

func (s *StreamMissHelper) SetUpSuite(c *C) {
	c.Log("0")
	c.Fail()
}

func (s *StreamMissHelper) Test1(c *C) {
	c.Log("1")
}

func (s *RunS) TestStreamModeWithMiss(c *C) {
	helper := &StreamMissHelper{}
	output := String{}
	runConf := RunConf{Output: &output, Stream: true}
	Run(helper, &runConf)

	expected := "START: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n0\n" +
		"FAIL: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n\n" +
		"START: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n" +
		"MISS: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n\n"

	c.Assert(output.value, Matches, expected)
}

// -----------------------------------------------------------------------
// Verify that the keep work dir request indeed does so.

type WorkDirSuite struct{}

func (s *WorkDirSuite) Test(c *C) {
	c.MkDir()
}

func (s *RunS) TestKeepWorkDir(c *C) {
	output := String{}
	runConf := RunConf{Output: &output, Verbose: true, KeepWorkDir: true}
	result := Run(&WorkDirSuite{}, &runConf)

	c.Assert(result.String(), Matches, ".*\nWORK="+result.WorkDir)

	stat, err := os.Stat(result.WorkDir)
	c.Assert(err, IsNil)
	c.Assert(stat.IsDir(), Equals, true)
}
12
vendor/gopkg.in/yaml.v2/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,12 @@
language: go

go:
    - 1.4
    - 1.5
    - 1.6
    - 1.7
    - 1.8
    - 1.9
    - tip

go_import_path: gopkg.in/yaml.v2
201
vendor/gopkg.in/yaml.v2/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
31
vendor/gopkg.in/yaml.v2/LICENSE.libyaml
generated
vendored
Normal file
@ -0,0 +1,31 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:

    apic.go
    emitterc.go
    parserc.go
    readerc.go
    scannerc.go
    writerc.go
    yamlh.go
    yamlprivateh.go

Copyright (c) 2006 Kirill Simonov

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
13
vendor/gopkg.in/yaml.v2/NOTICE
generated
vendored
Normal file
@ -0,0 +1,13 @@
Copyright 2011-2016 Canonical Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
133
vendor/gopkg.in/yaml.v2/README.md
generated
vendored
Normal file
@ -0,0 +1,133 @@
# YAML support for the Go language

Introduction
------------

The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.

Compatibility
-------------

The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.

Installation and usage
----------------------

The import path for the package is *gopkg.in/yaml.v2*.

To install it, run:

    go get gopkg.in/yaml.v2

API documentation
-----------------

If opened in a browser, the import path itself leads to the API documentation:

  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)

API stability
-------------

The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).


License
-------

The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.


Example
-------

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

var data = `
a: Easy!
b:
  c: 2
  d: [3, 4]
`

// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
	A string
	B struct {
		RenamedC int   `yaml:"c"`
		D        []int `yaml:",flow"`
	}
}

func main() {
	t := T{}

	err := yaml.Unmarshal([]byte(data), &t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t:\n%v\n\n", t)

	d, err := yaml.Marshal(&t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t dump:\n%s\n\n", string(d))

	m := make(map[interface{}]interface{})

	err = yaml.Unmarshal([]byte(data), &m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m:\n%v\n\n", m)

	d, err = yaml.Marshal(&m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```

This example will generate the following output:

```
--- t:
{Easy! {2 [3 4]}}

--- t dump:
a: Easy!
b:
  c: 2
  d: [3, 4]


--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]

--- m dump:
a: Easy!
b:
  c: 2
  d:
  - 3
  - 4
```
739
vendor/gopkg.in/yaml.v2/apic.go
generated
vendored
Normal file
@ -0,0 +1,739 @@
package yaml

import (
	"io"
)

func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))

	// Check if we can move the queue at the beginning of the buffer.
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}

// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
	*parser = yaml_parser_t{
		raw_buffer: make([]byte, 0, input_raw_buffer_size),
		buffer:     make([]byte, 0, input_buffer_size),
	}
	return true
}

// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
	*parser = yaml_parser_t{}
}

// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	if parser.input_pos == len(parser.input) {
		return 0, io.EOF
	}
	n = copy(buffer, parser.input[parser.input_pos:])
	parser.input_pos += n
	return n, nil
}

// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_reader.Read(buffer)
}

// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_string_read_handler
	parser.input = input
	parser.input_pos = 0
}

// Set a file input.
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_reader_read_handler
	parser.input_reader = r
}
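
// Package-internal usage sketch (editorial addition, not part of the
// original file): a parser is initialized once and handed exactly one
// input source; the checks above panic if a second source is set:
//
//	var parser yaml_parser_t
//	yaml_parser_initialize(&parser)
//	yaml_parser_set_input_string(&parser, []byte("a: 1"))
//	defer yaml_parser_delete(&parser)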

// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
	if parser.encoding != yaml_ANY_ENCODING {
		panic("must set the encoding only once")
	}
	parser.encoding = encoding
}

// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
	*emitter = yaml_emitter_t{
		buffer:     make([]byte, output_buffer_size),
		raw_buffer: make([]byte, 0, output_raw_buffer_size),
		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
		events:     make([]yaml_event_t, 0, initial_queue_size),
	}
}

// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
	*emitter = yaml_emitter_t{}
}

// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
	return nil
}

// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_writer.Write(buffer)
	return err
}

// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
	if emitter.write_handler != nil {
		panic("must set the output target only once")
	}
	emitter.write_handler = yaml_string_write_handler
	emitter.output_buffer = output_buffer
}

// Set a file output.
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
	if emitter.write_handler != nil {
		panic("must set the output target only once")
	}
	emitter.write_handler = yaml_writer_write_handler
	emitter.output_writer = w
}
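
// Mirror-image sketch for the emitter (editorial addition, not part of the
// original file): output goes either to a caller-owned []byte or to an
// io.Writer, never both, mirroring the single-input rule on the parser side:
//
//	var out []byte
//	var emitter yaml_emitter_t
//	yaml_emitter_initialize(&emitter)
//	yaml_emitter_set_output_string(&emitter, &out)
//	defer yaml_emitter_delete(&emitter)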

// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
	if emitter.encoding != yaml_ANY_ENCODING {
		panic("must set the output encoding only once")
	}
	emitter.encoding = encoding
}

// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
	emitter.canonical = canonical
}

// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
	if indent < 2 || indent > 9 {
		indent = 2
	}
	emitter.best_indent = indent
}

// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
	if width < 0 {
		width = -1
	}
	emitter.best_width = width
}

// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
	emitter.unicode = unicode
}

// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
	emitter.line_break = line_break
}

///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//

// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
	*event = yaml_event_t{
		typ:      yaml_STREAM_START_EVENT,
		encoding: encoding,
	}
}

// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		typ: yaml_STREAM_END_EVENT,
	}
}

// Create DOCUMENT-START.
func yaml_document_start_event_initialize(
	event *yaml_event_t,
	version_directive *yaml_version_directive_t,
	tag_directives []yaml_tag_directive_t,
	implicit bool,
) {
	*event = yaml_event_t{
		typ:               yaml_DOCUMENT_START_EVENT,
		version_directive: version_directive,
		tag_directives:    tag_directives,
		implicit:          implicit,
	}
}

// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
	*event = yaml_event_t{
		typ:      yaml_DOCUMENT_END_EVENT,
		implicit: implicit,
	}
}

///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
// mark yaml_mark_t = { 0, 0, 0 }
// anchor_copy *yaml_char_t = NULL
//
// assert(event) // Non-NULL event object is expected.
// assert(anchor) // Non-NULL anchor is expected.
//
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
// anchor_copy = yaml_strdup(anchor)
// if (!anchor_copy)
// return 0
//
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
// return 1
//}

// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
	*event = yaml_event_t{
		typ:             yaml_SCALAR_EVENT,
		anchor:          anchor,
		tag:             tag,
		value:           value,
		implicit:        plain_implicit,
		quoted_implicit: quoted_implicit,
		style:           yaml_style_t(style),
	}
	return true
}

// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
	*event = yaml_event_t{
		typ:      yaml_SEQUENCE_START_EVENT,
		anchor:   anchor,
		tag:      tag,
		implicit: implicit,
		style:    yaml_style_t(style),
	}
	return true
}

// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
	*event = yaml_event_t{
		typ: yaml_SEQUENCE_END_EVENT,
	}
	return true
}

// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
	*event = yaml_event_t{
		typ:      yaml_MAPPING_START_EVENT,
		anchor:   anchor,
		tag:      tag,
		implicit: implicit,
		style:    yaml_style_t(style),
	}
}

// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		typ: yaml_MAPPING_END_EVENT,
	}
}

// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
	*event = yaml_event_t{}
}

///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, items)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a mapping node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_mapping(document *yaml_document_t,
|
||||
// tag *yaml_char_t, style yaml_mapping_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// struct {
|
||||
// start *yaml_node_pair_t
|
||||
// end *yaml_node_pair_t
|
||||
// top *yaml_node_pair_t
|
||||
// } pairs = { NULL, NULL, NULL }
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, pairs)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append an item to a sequence node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_sequence_item(document *yaml_document_t,
|
||||
// sequence int, item int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(sequence > 0
|
||||
// && document.nodes.start + sequence <= document.nodes.top)
|
||||
// // Valid sequence id is required.
|
||||
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
|
||||
// // A sequence node is required.
|
||||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
|
||||
// // Valid item id is required.
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append a pair of a key and a value to a mapping node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_mapping_pair(document *yaml_document_t,
|
||||
// mapping int, key int, value int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// pair yaml_node_pair_t
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(mapping > 0
|
||||
// && document.nodes.start + mapping <= document.nodes.top)
|
||||
// // Valid mapping id is required.
|
||||
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
|
||||
// // A mapping node is required.
|
||||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
|
||||
// // Valid key id is required.
|
||||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
|
||||
// // Valid value id is required.
|
||||
//
|
||||
// pair.key = key
|
||||
// pair.value = value
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
//
|
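The commented-out block above preserves libyaml's C document-construction API (yaml_document_add_scalar, yaml_document_add_sequence, yaml_document_add_mapping, and the append helpers) purely as reference; the Go port never exposes it. A minimal sketch, not part of the vendored code and assuming only the public gopkg.in/yaml.v2 API, of how the same scalar/sequence/mapping content is produced through the package-level Marshal call instead:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        // Marshal builds and emits the document tree internally, so the
        // commented-out document API above needs no public Go counterpart.
        doc := map[string]interface{}{
            "scalar":   "value",                // cf. yaml_document_add_scalar
            "sequence": []int{1, 2, 3},         // cf. yaml_document_add_sequence
            "mapping":  map[string]int{"k": 1}, // cf. yaml_document_add_mapping
        }
        out, err := yaml.Marshal(doc)
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }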
775
vendor/gopkg.in/yaml.v2/decode.go
generated
vendored
Normal file
@ -0,0 +1,775 @@
package yaml

import (
	"encoding"
	"encoding/base64"
	"fmt"
	"io"
	"math"
	"reflect"
	"strconv"
	"time"
)

const (
	documentNode = 1 << iota
	mappingNode
	sequenceNode
	scalarNode
	aliasNode
)

type node struct {
	kind         int
	line, column int
	tag          string
	// For an alias node, alias holds the resolved alias.
	alias    *node
	value    string
	implicit bool
	children []*node
	anchors  map[string]*node
}

// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.

type parser struct {
	parser   yaml_parser_t
	event    yaml_event_t
	doc      *node
	doneInit bool
}

func newParser(b []byte) *parser {
	p := parser{}
	if !yaml_parser_initialize(&p.parser) {
		panic("failed to initialize YAML parser")
	}
	if len(b) == 0 {
		b = []byte{'\n'}
	}
	yaml_parser_set_input_string(&p.parser, b)
	return &p
}

func newParserFromReader(r io.Reader) *parser {
	p := parser{}
	if !yaml_parser_initialize(&p.parser) {
		panic("failed to initialize YAML parser")
	}
	yaml_parser_set_input_reader(&p.parser, r)
	return &p
}

func (p *parser) init() {
	if p.doneInit {
		return
	}
	p.expect(yaml_STREAM_START_EVENT)
	p.doneInit = true
}

func (p *parser) destroy() {
	if p.event.typ != yaml_NO_EVENT {
		yaml_event_delete(&p.event)
	}
	yaml_parser_delete(&p.parser)
}

// expect consumes an event from the event stream and
// checks that it's of the expected type.
func (p *parser) expect(e yaml_event_type_t) {
	if p.event.typ == yaml_NO_EVENT {
		if !yaml_parser_parse(&p.parser, &p.event) {
			p.fail()
		}
	}
	if p.event.typ == yaml_STREAM_END_EVENT {
		failf("attempted to go past the end of stream; corrupted value?")
	}
	if p.event.typ != e {
		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
		p.fail()
	}
	yaml_event_delete(&p.event)
	p.event.typ = yaml_NO_EVENT
}

// peek peeks at the next event in the event stream,
// puts the results into p.event and returns the event type.
func (p *parser) peek() yaml_event_type_t {
	if p.event.typ != yaml_NO_EVENT {
		return p.event.typ
	}
	if !yaml_parser_parse(&p.parser, &p.event) {
		p.fail()
	}
	return p.event.typ
}

func (p *parser) fail() {
	var where string
	var line int
	if p.parser.problem_mark.line != 0 {
		line = p.parser.problem_mark.line
		// Scanner errors don't iterate line before returning error
		if p.parser.error == yaml_SCANNER_ERROR {
			line++
		}
	} else if p.parser.context_mark.line != 0 {
		line = p.parser.context_mark.line
	}
	if line != 0 {
		where = "line " + strconv.Itoa(line) + ": "
	}
	var msg string
	if len(p.parser.problem) > 0 {
		msg = p.parser.problem
	} else {
		msg = "unknown problem parsing YAML content"
	}
	failf("%s%s", where, msg)
}

func (p *parser) anchor(n *node, anchor []byte) {
	if anchor != nil {
		p.doc.anchors[string(anchor)] = n
	}
}

func (p *parser) parse() *node {
	p.init()
	switch p.peek() {
	case yaml_SCALAR_EVENT:
		return p.scalar()
	case yaml_ALIAS_EVENT:
		return p.alias()
	case yaml_MAPPING_START_EVENT:
		return p.mapping()
	case yaml_SEQUENCE_START_EVENT:
		return p.sequence()
	case yaml_DOCUMENT_START_EVENT:
		return p.document()
	case yaml_STREAM_END_EVENT:
		// Happens when attempting to decode an empty buffer.
		return nil
	default:
		panic("attempted to parse unknown event: " + p.event.typ.String())
	}
}

func (p *parser) node(kind int) *node {
	return &node{
		kind:   kind,
		line:   p.event.start_mark.line,
		column: p.event.start_mark.column,
	}
}

func (p *parser) document() *node {
	n := p.node(documentNode)
	n.anchors = make(map[string]*node)
	p.doc = n
	p.expect(yaml_DOCUMENT_START_EVENT)
	n.children = append(n.children, p.parse())
	p.expect(yaml_DOCUMENT_END_EVENT)
	return n
}

func (p *parser) alias() *node {
	n := p.node(aliasNode)
	n.value = string(p.event.anchor)
	n.alias = p.doc.anchors[n.value]
	if n.alias == nil {
		failf("unknown anchor '%s' referenced", n.value)
	}
	p.expect(yaml_ALIAS_EVENT)
	return n
}

func (p *parser) scalar() *node {
	n := p.node(scalarNode)
	n.value = string(p.event.value)
	n.tag = string(p.event.tag)
	n.implicit = p.event.implicit
	p.anchor(n, p.event.anchor)
	p.expect(yaml_SCALAR_EVENT)
	return n
}

func (p *parser) sequence() *node {
	n := p.node(sequenceNode)
	p.anchor(n, p.event.anchor)
	p.expect(yaml_SEQUENCE_START_EVENT)
	for p.peek() != yaml_SEQUENCE_END_EVENT {
		n.children = append(n.children, p.parse())
	}
	p.expect(yaml_SEQUENCE_END_EVENT)
	return n
}

func (p *parser) mapping() *node {
	n := p.node(mappingNode)
	p.anchor(n, p.event.anchor)
	p.expect(yaml_MAPPING_START_EVENT)
	for p.peek() != yaml_MAPPING_END_EVENT {
		n.children = append(n.children, p.parse(), p.parse())
	}
	p.expect(yaml_MAPPING_END_EVENT)
	return n
}

// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.

type decoder struct {
	doc     *node
	aliases map[*node]bool
	mapType reflect.Type
	terrors []string
	strict  bool
}

var (
	mapItemType    = reflect.TypeOf(MapItem{})
	durationType   = reflect.TypeOf(time.Duration(0))
	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
	ifaceType      = defaultMapType.Elem()
	timeType       = reflect.TypeOf(time.Time{})
	ptrTimeType    = reflect.TypeOf(&time.Time{})
)

func newDecoder(strict bool) *decoder {
	d := &decoder{mapType: defaultMapType, strict: strict}
	d.aliases = make(map[*node]bool)
	return d
}

func (d *decoder) terror(n *node, tag string, out reflect.Value) {
	if n.tag != "" {
		tag = n.tag
	}
	value := n.value
	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
		if len(value) > 10 {
			value = " `" + value[:7] + "...`"
		} else {
			value = " `" + value + "`"
		}
	}
	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}

func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
	terrlen := len(d.terrors)
	err := u.UnmarshalYAML(func(v interface{}) (err error) {
		defer handleErr(&err)
		d.unmarshal(n, reflect.ValueOf(v))
		if len(d.terrors) > terrlen {
			issues := d.terrors[terrlen:]
			d.terrors = d.terrors[:terrlen]
			return &TypeError{issues}
		}
		return nil
	})
	if e, ok := err.(*TypeError); ok {
		d.terrors = append(d.terrors, e.Errors...)
		return false
	}
	if err != nil {
		fail(err)
	}
	return true
}

// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
		return out, false, false
	}
	again := true
	for again {
		again = false
		if out.Kind() == reflect.Ptr {
			if out.IsNil() {
				out.Set(reflect.New(out.Type().Elem()))
			}
			out = out.Elem()
			again = true
		}
		if out.CanAddr() {
			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
				good = d.callUnmarshaler(n, u)
				return out, true, good
			}
		}
	}
	return out, false, false
}

func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
	switch n.kind {
	case documentNode:
		return d.document(n, out)
	case aliasNode:
		return d.alias(n, out)
	}
	out, unmarshaled, good := d.prepare(n, out)
	if unmarshaled {
		return good
	}
	switch n.kind {
	case scalarNode:
		good = d.scalar(n, out)
	case mappingNode:
		good = d.mapping(n, out)
	case sequenceNode:
		good = d.sequence(n, out)
	default:
		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
	}
	return good
}

func (d *decoder) document(n *node, out reflect.Value) (good bool) {
	if len(n.children) == 1 {
		d.doc = n
		d.unmarshal(n.children[0], out)
		return true
	}
	return false
}

func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
	if d.aliases[n] {
		// TODO this could actually be allowed in some circumstances.
		failf("anchor '%s' value contains itself", n.value)
	}
	d.aliases[n] = true
	good = d.unmarshal(n.alias, out)
	delete(d.aliases, n)
	return good
}

var zeroValue reflect.Value

func resetMap(out reflect.Value) {
	for _, k := range out.MapKeys() {
		out.SetMapIndex(k, zeroValue)
	}
}

func (d *decoder) scalar(n *node, out reflect.Value) bool {
	var tag string
	var resolved interface{}
	if n.tag == "" && !n.implicit {
		tag = yaml_STR_TAG
		resolved = n.value
	} else {
		tag, resolved = resolve(n.tag, n.value)
		if tag == yaml_BINARY_TAG {
			data, err := base64.StdEncoding.DecodeString(resolved.(string))
			if err != nil {
				failf("!!binary value contains invalid base64 data")
			}
			resolved = string(data)
		}
	}
	if resolved == nil {
		if out.Kind() == reflect.Map && !out.CanAddr() {
			resetMap(out)
		} else {
			out.Set(reflect.Zero(out.Type()))
		}
		return true
	}
	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
		// We've resolved to exactly the type we want, so use that.
		out.Set(resolvedv)
		return true
	}
	// Perhaps we can use the value as a TextUnmarshaler to
	// set its value.
	if out.CanAddr() {
		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
		if ok {
			var text []byte
			if tag == yaml_BINARY_TAG {
				text = []byte(resolved.(string))
			} else {
				// We let any value be unmarshaled into TextUnmarshaler.
				// That might be more lax than we'd like, but the
				// TextUnmarshaler itself should bowl out any dubious values.
				text = []byte(n.value)
			}
			err := u.UnmarshalText(text)
			if err != nil {
				fail(err)
			}
			return true
		}
	}
	switch out.Kind() {
	case reflect.String:
		if tag == yaml_BINARY_TAG {
			out.SetString(resolved.(string))
			return true
		}
		if resolved != nil {
			out.SetString(n.value)
			return true
		}
	case reflect.Interface:
		if resolved == nil {
			out.Set(reflect.Zero(out.Type()))
		} else if tag == yaml_TIMESTAMP_TAG {
			// It looks like a timestamp but for backward compatibility
			// reasons we set it as a string, so that code that unmarshals
			// timestamp-like values into interface{} will continue to
			// see a string and not a time.Time.
			// TODO(v3) Drop this.
			out.Set(reflect.ValueOf(n.value))
		} else {
			out.Set(reflect.ValueOf(resolved))
		}
		return true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch resolved := resolved.(type) {
		case int:
			if !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				return true
			}
		case int64:
			if !out.OverflowInt(resolved) {
				out.SetInt(resolved)
				return true
			}
		case uint64:
			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				return true
			}
		case float64:
			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				return true
			}
		case string:
			if out.Type() == durationType {
				d, err := time.ParseDuration(resolved)
				if err == nil {
					out.SetInt(int64(d))
					return true
				}
			}
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		switch resolved := resolved.(type) {
		case int:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		case int64:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		case uint64:
			if !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		case float64:
			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		}
	case reflect.Bool:
		switch resolved := resolved.(type) {
		case bool:
			out.SetBool(resolved)
			return true
		}
	case reflect.Float32, reflect.Float64:
		switch resolved := resolved.(type) {
		case int:
			out.SetFloat(float64(resolved))
			return true
		case int64:
			out.SetFloat(float64(resolved))
			return true
		case uint64:
			out.SetFloat(float64(resolved))
			return true
		case float64:
			out.SetFloat(resolved)
			return true
		}
	case reflect.Struct:
		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
			out.Set(resolvedv)
			return true
		}
	case reflect.Ptr:
		if out.Type().Elem() == reflect.TypeOf(resolved) {
			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
			elem := reflect.New(out.Type().Elem())
			elem.Elem().Set(reflect.ValueOf(resolved))
			out.Set(elem)
			return true
		}
	}
	d.terror(n, tag, out)
	return false
}

func settableValueOf(i interface{}) reflect.Value {
	v := reflect.ValueOf(i)
	sv := reflect.New(v.Type()).Elem()
	sv.Set(v)
	return sv
}

func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
	l := len(n.children)

	var iface reflect.Value
	switch out.Kind() {
	case reflect.Slice:
		out.Set(reflect.MakeSlice(out.Type(), l, l))
	case reflect.Array:
		if l != out.Len() {
			failf("invalid array: want %d elements but got %d", out.Len(), l)
		}
	case reflect.Interface:
		// No type hints. Will have to use a generic sequence.
		iface = out
		out = settableValueOf(make([]interface{}, l))
	default:
		d.terror(n, yaml_SEQ_TAG, out)
		return false
	}
	et := out.Type().Elem()

	j := 0
	for i := 0; i < l; i++ {
		e := reflect.New(et).Elem()
		if ok := d.unmarshal(n.children[i], e); ok {
			out.Index(j).Set(e)
			j++
		}
	}
	if out.Kind() != reflect.Array {
		out.Set(out.Slice(0, j))
	}
	if iface.IsValid() {
		iface.Set(out)
	}
	return true
}

func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
	switch out.Kind() {
	case reflect.Struct:
		return d.mappingStruct(n, out)
	case reflect.Slice:
		return d.mappingSlice(n, out)
	case reflect.Map:
		// okay
	case reflect.Interface:
		if d.mapType.Kind() == reflect.Map {
			iface := out
			out = reflect.MakeMap(d.mapType)
			iface.Set(out)
		} else {
			slicev := reflect.New(d.mapType).Elem()
			if !d.mappingSlice(n, slicev) {
				return false
			}
			out.Set(slicev)
			return true
		}
	default:
		d.terror(n, yaml_MAP_TAG, out)
		return false
	}
	outt := out.Type()
	kt := outt.Key()
	et := outt.Elem()

	mapType := d.mapType
	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
		d.mapType = outt
	}

	if out.IsNil() {
		out.Set(reflect.MakeMap(outt))
	}
	l := len(n.children)
	for i := 0; i < l; i += 2 {
		if isMerge(n.children[i]) {
			d.merge(n.children[i+1], out)
			continue
		}
		k := reflect.New(kt).Elem()
		if d.unmarshal(n.children[i], k) {
			kkind := k.Kind()
			if kkind == reflect.Interface {
				kkind = k.Elem().Kind()
			}
			if kkind == reflect.Map || kkind == reflect.Slice {
				failf("invalid map key: %#v", k.Interface())
			}
			e := reflect.New(et).Elem()
			if d.unmarshal(n.children[i+1], e) {
				d.setMapIndex(n.children[i+1], out, k, e)
			}
		}
	}
	d.mapType = mapType
	return true
}

func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
	if d.strict && out.MapIndex(k) != zeroValue {
		d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
		return
	}
	out.SetMapIndex(k, v)
}

func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
	outt := out.Type()
	if outt.Elem() != mapItemType {
		d.terror(n, yaml_MAP_TAG, out)
		return false
	}

	mapType := d.mapType
	d.mapType = outt

	var slice []MapItem
	var l = len(n.children)
	for i := 0; i < l; i += 2 {
		if isMerge(n.children[i]) {
			d.merge(n.children[i+1], out)
			continue
		}
		item := MapItem{}
		k := reflect.ValueOf(&item.Key).Elem()
		if d.unmarshal(n.children[i], k) {
			v := reflect.ValueOf(&item.Value).Elem()
			if d.unmarshal(n.children[i+1], v) {
				slice = append(slice, item)
			}
		}
	}
	out.Set(reflect.ValueOf(slice))
	d.mapType = mapType
	return true
}

func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
	sinfo, err := getStructInfo(out.Type())
	if err != nil {
		panic(err)
	}
	name := settableValueOf("")
	l := len(n.children)

	var inlineMap reflect.Value
	var elemType reflect.Type
	if sinfo.InlineMap != -1 {
		inlineMap = out.Field(sinfo.InlineMap)
		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
		elemType = inlineMap.Type().Elem()
	}

	var doneFields []bool
	if d.strict {
		doneFields = make([]bool, len(sinfo.FieldsList))
	}
	for i := 0; i < l; i += 2 {
		ni := n.children[i]
		if isMerge(ni) {
			d.merge(n.children[i+1], out)
			continue
		}
		if !d.unmarshal(ni, name) {
			continue
		}
		if info, ok := sinfo.FieldsMap[name.String()]; ok {
			if d.strict {
				if doneFields[info.Id] {
					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
					continue
				}
				doneFields[info.Id] = true
			}
			var field reflect.Value
			if info.Inline == nil {
				field = out.Field(info.Num)
			} else {
				field = out.FieldByIndex(info.Inline)
			}
			d.unmarshal(n.children[i+1], field)
		} else if sinfo.InlineMap != -1 {
			if inlineMap.IsNil() {
				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
			}
			value := reflect.New(elemType).Elem()
			d.unmarshal(n.children[i+1], value)
			d.setMapIndex(n.children[i+1], inlineMap, name, value)
		} else if d.strict {
			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
		}
	}
	return true
}

func failWantMap() {
	failf("map merge requires map or sequence of maps as the value")
}

func (d *decoder) merge(n *node, out reflect.Value) {
	switch n.kind {
	case mappingNode:
		d.unmarshal(n, out)
	case aliasNode:
		an, ok := d.doc.anchors[n.value]
		if ok && an.kind != mappingNode {
			failWantMap()
		}
		d.unmarshal(n, out)
	case sequenceNode:
		// Step backwards as earlier nodes take precedence.
		for i := len(n.children) - 1; i >= 0; i-- {
			ni := n.children[i]
			if ni.kind == aliasNode {
				an, ok := d.doc.anchors[ni.value]
				if ok && an.kind != mappingNode {
					failWantMap()
				}
			} else if ni.kind != mappingNode {
				failWantMap()
			}
			d.unmarshal(ni, out)
		}
	default:
		failWantMap()
	}
}

func isMerge(n *node) bool {
	return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
}
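A minimal usage sketch (not part of the vendored tree) that drives the decoder above through the package's public entry points: UnmarshalStrict runs the same code path with strict set, so the duplicate-key and unknown-field checks in mapping and mappingStruct surface as a *yaml.TypeError, and the `<<` merge key is resolved by the merge method:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        data := []byte("defaults: &defaults\n" +
            "  adapter: postgres\n" +
            "development:\n" +
            "  <<: *defaults\n" +
            "  database: dev_db\n")
        var cfg map[string]map[string]string
        // UnmarshalStrict decodes with strict=true, so duplicate map keys
        // and unknown struct fields are reported instead of ignored.
        if err := yaml.UnmarshalStrict(data, &cfg); err != nil {
            panic(err)
        }
        fmt.Println(cfg["development"]["adapter"]) // "postgres", filled in by the merge key
    }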
1326
vendor/gopkg.in/yaml.v2/decode_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
1685
vendor/gopkg.in/yaml.v2/emitterc.go
generated
vendored
Normal file
File diff suppressed because it is too large
362
vendor/gopkg.in/yaml.v2/encode.go
generated
vendored
Normal file
@ -0,0 +1,362 @@
package yaml

import (
	"encoding"
	"fmt"
	"io"
	"reflect"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"
)

type encoder struct {
	emitter yaml_emitter_t
	event   yaml_event_t
	out     []byte
	flow    bool
	// doneInit holds whether the initial stream_start_event has been
	// emitted.
	doneInit bool
}

func newEncoder() *encoder {
	e := &encoder{}
	yaml_emitter_initialize(&e.emitter)
	yaml_emitter_set_output_string(&e.emitter, &e.out)
	yaml_emitter_set_unicode(&e.emitter, true)
	return e
}

func newEncoderWithWriter(w io.Writer) *encoder {
	e := &encoder{}
	yaml_emitter_initialize(&e.emitter)
	yaml_emitter_set_output_writer(&e.emitter, w)
	yaml_emitter_set_unicode(&e.emitter, true)
	return e
}

func (e *encoder) init() {
	if e.doneInit {
		return
	}
	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
	e.emit()
	e.doneInit = true
}

func (e *encoder) finish() {
	e.emitter.open_ended = false
	yaml_stream_end_event_initialize(&e.event)
	e.emit()
}

func (e *encoder) destroy() {
	yaml_emitter_delete(&e.emitter)
}

func (e *encoder) emit() {
	// This will internally delete the e.event value.
	e.must(yaml_emitter_emit(&e.emitter, &e.event))
}

func (e *encoder) must(ok bool) {
	if !ok {
		msg := e.emitter.problem
		if msg == "" {
			msg = "unknown problem generating YAML content"
		}
		failf("%s", msg)
	}
}

func (e *encoder) marshalDoc(tag string, in reflect.Value) {
	e.init()
	yaml_document_start_event_initialize(&e.event, nil, nil, true)
	e.emit()
	e.marshal(tag, in)
	yaml_document_end_event_initialize(&e.event, true)
	e.emit()
}

func (e *encoder) marshal(tag string, in reflect.Value) {
	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
		e.nilv()
		return
	}
	iface := in.Interface()
	switch m := iface.(type) {
	case time.Time, *time.Time:
		// Although time.Time implements TextMarshaler,
		// we don't want to treat it as a string for YAML
		// purposes because YAML has special support for
		// timestamps.
	case Marshaler:
		v, err := m.MarshalYAML()
		if err != nil {
			fail(err)
		}
		if v == nil {
			e.nilv()
			return
		}
		in = reflect.ValueOf(v)
	case encoding.TextMarshaler:
		text, err := m.MarshalText()
		if err != nil {
			fail(err)
		}
		in = reflect.ValueOf(string(text))
	case nil:
		e.nilv()
		return
	}
	switch in.Kind() {
	case reflect.Interface:
		e.marshal(tag, in.Elem())
	case reflect.Map:
		e.mapv(tag, in)
	case reflect.Ptr:
		if in.Type() == ptrTimeType {
			e.timev(tag, in.Elem())
		} else {
			e.marshal(tag, in.Elem())
		}
	case reflect.Struct:
		if in.Type() == timeType {
			e.timev(tag, in)
		} else {
			e.structv(tag, in)
		}
	case reflect.Slice, reflect.Array:
		if in.Type().Elem() == mapItemType {
			e.itemsv(tag, in)
		} else {
			e.slicev(tag, in)
		}
	case reflect.String:
		e.stringv(tag, in)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		if in.Type() == durationType {
			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
		} else {
			e.intv(tag, in)
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		e.uintv(tag, in)
	case reflect.Float32, reflect.Float64:
		e.floatv(tag, in)
	case reflect.Bool:
		e.boolv(tag, in)
	default:
		panic("cannot marshal type: " + in.Type().String())
	}
}

func (e *encoder) mapv(tag string, in reflect.Value) {
	e.mappingv(tag, func() {
		keys := keyList(in.MapKeys())
		sort.Sort(keys)
		for _, k := range keys {
			e.marshal("", k)
			e.marshal("", in.MapIndex(k))
		}
	})
}

func (e *encoder) itemsv(tag string, in reflect.Value) {
	e.mappingv(tag, func() {
		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
		for _, item := range slice {
			e.marshal("", reflect.ValueOf(item.Key))
			e.marshal("", reflect.ValueOf(item.Value))
		}
	})
}

func (e *encoder) structv(tag string, in reflect.Value) {
	sinfo, err := getStructInfo(in.Type())
	if err != nil {
		panic(err)
	}
	e.mappingv(tag, func() {
		for _, info := range sinfo.FieldsList {
			var value reflect.Value
			if info.Inline == nil {
				value = in.Field(info.Num)
			} else {
				value = in.FieldByIndex(info.Inline)
			}
			if info.OmitEmpty && isZero(value) {
				continue
			}
			e.marshal("", reflect.ValueOf(info.Key))
			e.flow = info.Flow
			e.marshal("", value)
		}
		if sinfo.InlineMap >= 0 {
			m := in.Field(sinfo.InlineMap)
			if m.Len() > 0 {
				e.flow = false
				keys := keyList(m.MapKeys())
				sort.Sort(keys)
				for _, k := range keys {
					if _, found := sinfo.FieldsMap[k.String()]; found {
						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
					}
					e.marshal("", k)
					e.flow = false
					e.marshal("", m.MapIndex(k))
				}
			}
		}
	})
}

func (e *encoder) mappingv(tag string, f func()) {
	implicit := tag == ""
	style := yaml_BLOCK_MAPPING_STYLE
	if e.flow {
		e.flow = false
		style = yaml_FLOW_MAPPING_STYLE
	}
	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
	e.emit()
	f()
	yaml_mapping_end_event_initialize(&e.event)
	e.emit()
}

func (e *encoder) slicev(tag string, in reflect.Value) {
	implicit := tag == ""
	style := yaml_BLOCK_SEQUENCE_STYLE
	if e.flow {
		e.flow = false
		style = yaml_FLOW_SEQUENCE_STYLE
	}
	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
	e.emit()
	n := in.Len()
	for i := 0; i < n; i++ {
		e.marshal("", in.Index(i))
	}
	e.must(yaml_sequence_end_event_initialize(&e.event))
	e.emit()
}

// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
	// Fast path.
	if s == "" {
		return false
	}
	c := s[0]
	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
		return false
	}
	// Do the full match.
	return base60float.MatchString(s)
}

// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)

func (e *encoder) stringv(tag string, in reflect.Value) {
	var style yaml_scalar_style_t
	s := in.String()
	canUsePlain := true
	switch {
	case !utf8.ValidString(s):
		if tag == yaml_BINARY_TAG {
			failf("explicitly tagged !!binary data must be base64-encoded")
		}
		if tag != "" {
			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
		}
		// It can't be encoded directly as YAML so use a binary tag
		// and encode it as base64.
		tag = yaml_BINARY_TAG
		s = encodeBase64(s)
	case tag == "":
		// Check to see if it would resolve to a specific
		// tag when encoded unquoted. If it doesn't,
		// there's no need to quote it.
		rtag, _ := resolve("", s)
		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
	}
	// Note: it's possible for user code to emit invalid YAML
	// if they explicitly specify a tag and a string containing
	// text that's incompatible with that tag.
	switch {
	case strings.Contains(s, "\n"):
		style = yaml_LITERAL_SCALAR_STYLE
	case canUsePlain:
		style = yaml_PLAIN_SCALAR_STYLE
	default:
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	e.emitScalar(s, "", tag, style)
}

func (e *encoder) boolv(tag string, in reflect.Value) {
	var s string
	if in.Bool() {
		s = "true"
	} else {
		s = "false"
	}
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) intv(tag string, in reflect.Value) {
	s := strconv.FormatInt(in.Int(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) uintv(tag string, in reflect.Value) {
	s := strconv.FormatUint(in.Uint(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) timev(tag string, in reflect.Value) {
	t := in.Interface().(time.Time)
	s := t.Format(time.RFC3339Nano)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) floatv(tag string, in reflect.Value) {
	// Issue #352: When formatting, use the precision of the underlying value
	precision := 64
	if in.Kind() == reflect.Float32 {
		precision = 32
	}

	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
	switch s {
	case "+Inf":
		s = ".inf"
	case "-Inf":
		s = "-.inf"
	case "NaN":
		s = ".nan"
	}
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) nilv() {
	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
	implicit := tag == ""
	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
	e.emit()
}
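And a matching sketch (again not part of the vendored tree) driving this encoder through the public Marshal API: the `flow` struct tag takes the e.flow branch in mappingv/slicev, `omitempty` triggers the isZero skip in structv, and a time.Time field goes through the timev path shown above:

    package main

    import (
        "fmt"
        "time"

        "gopkg.in/yaml.v2"
    )

    type Item struct {
        Name    string    `yaml:"name"`
        Tags    []string  `yaml:"tags,flow"`      // emitted in flow style: [a, b]
        Note    string    `yaml:"note,omitempty"` // skipped while zero (isZero)
        Created time.Time `yaml:"created"`        // timev: RFC3339Nano, plain scalar
    }

    func main() {
        out, err := yaml.Marshal(Item{
            Name:    "example",
            Tags:    []string{"a", "b"},
            Created: time.Date(2018, 1, 2, 15, 4, 5, 0, time.UTC),
        })
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }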
Some files were not shown because too many files have changed in this diff