lvm: setupLV wait device + go mod

Mikaël Cluseau
2019-02-05 09:36:11 +11:00
parent 10e72f18ae
commit c62ddaf2e0
134 changed files with 336 additions and 38985 deletions
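Only the bulk removal that goes with the switch to Go modules is visible in the hunks below; the deleted files appear to be test sources of the vendored github.com/ulikunitz/xz package (see the import paths and copyright headers). The setupLV "wait device" change named in the title is not shown here, so the following is only a hedged sketch of what such a wait usually looks like: after a logical volume is created, its /dev node may appear asynchronously via udev, so the caller polls for it. Every name below (the lvm package, waitForDevice, the path layout) is an illustrative assumption, not code from this commit.

package lvm

import (
	"fmt"
	"os"
	"time"
)

// waitForDevice polls until the device node at path exists or the
// timeout expires. Illustrative helper, not the code from this commit.
func waitForDevice(path string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		_, err := os.Stat(path)
		if err == nil {
			return nil // the node exists, the LV can be used
		}
		if !os.IsNotExist(err) {
			return err // unexpected stat failure
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("device %s did not appear within %s", path, timeout)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

A setupLV routine would then call something like waitForDevice("/dev/"+vg+"/"+lv, 10*time.Second) right after lvcreate returns. For the "go mod" half of the title, the removed vendor tree would be replaced by a requirement in go.mod, roughly (module path and version are placeholders):

module example.com/placeholder

require github.com/ulikunitz/xz v0.5.5 // placeholder version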

View File

@@ -1,33 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xz
import (
"bytes"
"testing"
)
func TestUvarint(t *testing.T) {
tests := []uint64{0, 0x80, 0x100, 0xffffffff, 0x100000000, 1<<64 - 1}
p := make([]byte, 10)
for _, u := range tests {
p = p[:10]
n := putUvarint(p, u)
if n < 1 {
t.Fatalf("putUvarint returned %d", n)
}
r := bytes.NewReader(p[:n])
x, m, err := readUvarint(r)
if err != nil {
t.Fatalf("readUvarint returned %s", err)
}
if m != n {
t.Fatalf("readUvarint read %d bytes; want %d", m, n)
}
if x != u {
t.Fatalf("readUvarint returned 0x%x; want 0x%x", x, u)
}
}
}

View File

@@ -1,142 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xz
import (
"bytes"
"testing"
)
func TestHeader(t *testing.T) {
h := header{flags: CRC32}
data, err := h.MarshalBinary()
if err != nil {
t.Fatalf("MarshalBinary error %s", err)
}
var g header
if err = g.UnmarshalBinary(data); err != nil {
t.Fatalf("UnmarshalBinary error %s", err)
}
if g != h {
t.Fatalf("unmarshalled %#v; want %#v", g, h)
}
}
func TestFooter(t *testing.T) {
f := footer{indexSize: 64, flags: CRC32}
data, err := f.MarshalBinary()
if err != nil {
t.Fatalf("MarshalBinary error %s", err)
}
var g footer
if err = g.UnmarshalBinary(data); err != nil {
t.Fatalf("UnmarshalBinary error %s", err)
}
if g != f {
t.Fatalf("unmarshalled %#v; want %#v", g, f)
}
}
func TestRecord(t *testing.T) {
r := record{1234567, 10000}
p, err := r.MarshalBinary()
if err != nil {
t.Fatalf("MarshalBinary error %s", err)
}
n := len(p)
buf := bytes.NewReader(p)
g, m, err := readRecord(buf)
if err != nil {
t.Fatalf("readFrom error %s", err)
}
if m != n {
t.Fatalf("read %d bytes; wrote %d", m, n)
}
if g.unpaddedSize != r.unpaddedSize {
t.Fatalf("got unpaddedSize %d; want %d", g.unpaddedSize,
r.unpaddedSize)
}
if g.uncompressedSize != r.uncompressedSize {
t.Fatalf("got uncompressedSize %d; want %d", g.uncompressedSize,
r.uncompressedSize)
}
}
func TestIndex(t *testing.T) {
records := []record{{1234, 1}, {2345, 2}}
var buf bytes.Buffer
n, err := writeIndex(&buf, records)
if err != nil {
t.Fatalf("writeIndex error %s", err)
}
if n != int64(buf.Len()) {
t.Fatalf("writeIndex returned %d; want %d", n, buf.Len())
}
// indicator
c, err := buf.ReadByte()
if err != nil {
t.Fatalf("buf.ReadByte error %s", err)
}
if c != 0 {
t.Fatalf("indicator %d; want %d", c, 0)
}
g, m, err := readIndexBody(&buf)
if err != nil {
for i, r := range g {
t.Logf("records[%d] %v", i, r)
}
t.Fatalf("readIndexBody error %s", err)
}
if m != n-1 {
t.Fatalf("readIndexBody returned %d; want %d", m, n-1)
}
for i, rec := range records {
if g[i] != rec {
t.Errorf("records[%d] is %v; want %v", i, g[i], rec)
}
}
}
func TestBlockHeader(t *testing.T) {
h := blockHeader{
compressedSize: 1234,
uncompressedSize: -1,
filters: []filter{&lzmaFilter{4096}},
}
data, err := h.MarshalBinary()
if err != nil {
t.Fatalf("MarshalBinary error %s", err)
}
r := bytes.NewReader(data)
g, n, err := readBlockHeader(r)
if err != nil {
t.Fatalf("readBlockHeader error %s", err)
}
if n != len(data) {
t.Fatalf("readBlockHeader returns %d bytes; want %d", n,
len(data))
}
if g.compressedSize != h.compressedSize {
t.Errorf("got compressedSize %d; want %d",
g.compressedSize, h.compressedSize)
}
if g.uncompressedSize != h.uncompressedSize {
t.Errorf("got uncompressedSize %d; want %d",
g.uncompressedSize, h.uncompressedSize)
}
if len(g.filters) != len(h.filters) {
t.Errorf("got len(filters) %d; want %d",
len(g.filters), len(h.filters))
}
glf := g.filters[0].(*lzmaFilter)
hlf := h.filters[0].(*lzmaFilter)
if glf.dictCap != hlf.dictCap {
t.Errorf("got dictCap %d; want %d", glf.dictCap, hlf.dictCap)
}
}

View File

@@ -1,30 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hash
import "testing"
func TestCyclicPolySimple(t *testing.T) {
p := []byte("abcde")
r := NewCyclicPoly(4)
h2 := Hashes(r, p)
for i, h := range h2 {
w := Hashes(r, p[i:i+4])[0]
t.Logf("%d h=%#016x w=%#016x", i, h, w)
if h != w {
t.Errorf("rolling hash %d: %#016x; want %#016x",
i, h, w)
}
}
}
func BenchmarkCyclicPoly(b *testing.B) {
p := makeBenchmarkBytes(4096)
r := NewCyclicPoly(4)
b.ResetTimer()
for i := 0; i < b.N; i++ {
Hashes(r, p)
}
}

View File

@@ -1,42 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hash
import (
"math/rand"
"testing"
)
func TestRabinKarpSimple(t *testing.T) {
p := []byte("abcde")
r := NewRabinKarp(4)
h2 := Hashes(r, p)
for i, h := range h2 {
w := Hashes(r, p[i:i+4])[0]
t.Logf("%d h=%#016x w=%#016x", i, h, w)
if h != w {
t.Errorf("rolling hash %d: %#016x; want %#016x",
i, h, w)
}
}
}
func makeBenchmarkBytes(n int) []byte {
rnd := rand.New(rand.NewSource(42))
p := make([]byte, n)
for i := range p {
p[i] = byte(rnd.Uint32())
}
return p
}
func BenchmarkRabinKarp(b *testing.B) {
p := makeBenchmarkBytes(4096)
r := NewRabinKarp(4)
b.ResetTimer()
for i := 0; i < b.N; i++ {
Hashes(r, p)
}
}

File diff suppressed because it is too large

View File

@@ -1,82 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package randtxt
import (
"bufio"
"io"
"unicode"
)
// GroupReader groups the incoming text into groups of five characters;
// the number of groups per line can be controlled.
type GroupReader struct {
R io.ByteReader
GroupsPerLine int
off int64
eof bool
}
// NewGroupReader creates a new group reader.
func NewGroupReader(r io.Reader) *GroupReader {
return &GroupReader{R: bufio.NewReader(r)}
}
// Read formats the data provided by the internal reader in groups of 5
// characters. If GroupsPerLine hasn't been initialized, 8 groups per
// line will be produced.
func (r *GroupReader) Read(p []byte) (n int, err error) {
if r.eof {
return 0, io.EOF
}
groupsPerLine := r.GroupsPerLine
if groupsPerLine < 1 {
groupsPerLine = 8
}
lineLen := int64(groupsPerLine * 6)
var c byte
for i := range p {
switch {
case r.off%lineLen == lineLen-1:
if i+1 == len(p) && len(p) > 1 {
return i, nil
}
c = '\n'
case r.off%6 == 5:
if i+1 == len(p) && len(p) > 1 {
return i, nil
}
c = ' '
default:
c, err = r.R.ReadByte()
if err == io.EOF {
r.eof = true
if i > 0 {
switch p[i-1] {
case ' ':
p[i-1] = '\n'
fallthrough
case '\n':
return i, io.EOF
}
}
p[i] = '\n'
return i + 1, io.EOF
}
if err != nil {
return i, err
}
switch {
case c == ' ':
c = '_'
case !unicode.IsPrint(rune(c)):
c = '-'
}
}
p[i] = c
r.off++
}
return len(p), nil
}
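
As context for the reader type removed just above: GroupReader wraps an io.Reader and reflows its bytes into space-separated five-character groups, with GroupsPerLine (default 8) controlling how many groups go on each line. Below is a minimal, purely illustrative example test that could have sat next to it; since randtxt is an internal package of github.com/ulikunitz/xz, such code only compiles inside that module, and the function here is an assumption, not part of the deleted file.

package randtxt

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// ExampleGroupReader reflows a short string into two five-character
// groups per line. Hypothetical example, not part of the deleted file.
func ExampleGroupReader() {
	gr := NewGroupReader(strings.NewReader("THEQUICKBROWNFOXJUMPSOVERTHELAZYDOG"))
	gr.GroupsPerLine = 2
	p, err := ioutil.ReadAll(gr)
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Printf("%s", p)
}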

View File

@@ -1,185 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package randtxt supports the generation of random text using a
// trigram model for the English language.
package randtxt
import (
"math"
"math/rand"
"sort"
)
// ngram stores an entry from the language model.
type ngram struct {
s string
lgP float64
lgQ float64
}
// ngrams represents a slice of ngram values and is used to represent a
// language model.
type ngrams []ngram
func (s ngrams) Len() int { return len(s) }
func (s ngrams) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ngrams) Less(i, j int) bool { return s[i].s < s[j].s }
// Sort sorts the language model by its ngrams.
func (s ngrams) Sort() { sort.Sort(s) }
// Search looks for an ngram and returns its index, or the index where
// it would be inserted.
func (s ngrams) Search(g string) int {
return sort.Search(len(s), func(k int) bool { return s[k].s >= g })
}
// prob represents a string, usually an ngram, and a probability value.
type prob struct {
s string
p float64
}
// probs is a slice of prob values that can be sorted and searched.
type probs []prob
func (s probs) Len() int { return len(s) }
func (s probs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s probs) Less(i, j int) bool { return s[i].s < s[j].s }
// SortByNgram sorts the probs slice by ngram, field s.
func (s probs) SortByNgram() { sort.Sort(s) }
// SortByProb sorts the probs slice by probability, field p.
func (s probs) SortByProb() { sort.Sort(byProb{s}) }
// SearchNgram searches for an ngram or the position where it would be
// inserted.
func (s probs) SearchNgram(g string) int {
return sort.Search(len(s), func(k int) bool { return s[k].s >= g })
}
// SearchProb searches the probs slice for a specific probability, or
// the position where it would be inserted.
func (s probs) SearchProb(p float64) int {
return sort.Search(len(s), func(k int) bool { return s[k].p >= p })
}
// byProb is used to sort probs slice by probability, field p.
type byProb struct {
probs
}
func (s byProb) Less(i, j int) bool {
return s.probs[i].p < s.probs[j].p
}
// cdf can be used to set up a cumulative distribution function
// represented by a probs slice. We should have returned an actual
// function.
func cdf(n int, p func(i int) prob) probs {
prs := make(probs, n)
sum := 0.0
for i := range prs {
pr := p(i)
sum += pr.p
prs[i] = pr
}
q := 1.0 / sum
x := 0.0
for i, pr := range prs {
x += pr.p * q
if x > 1.0 {
x = 1.0
}
prs[i].p = x
}
if !sort.IsSorted(byProb{prs}) {
panic("cdf not sorted")
}
return prs
}
// pCDFOfLM converts a language model into a cumulative distribution
// function represented by probs.
func pCDFOfLM(lm ngrams) probs {
return cdf(len(lm), func(i int) prob {
return prob{lm[i].s, math.Exp2(lm[i].lgP)}
})
}
// cCDF converts an ngrams slice into a cumulative distribution function
// using the conditional probability lgQ.
func cCDF(s ngrams) probs {
return cdf(len(s), func(i int) prob {
return prob{s[i].s, math.Exp2(s[i].lgQ)}
})
}
// comap contains a map of conditional distribution functions for the
// last character.
type comap map[string]probs
// comapOfLM converts a language model into a map of conditional
// distribution functions.
func comapOfLM(lm ngrams) comap {
if !sort.IsSorted(lm) {
panic("lm is not sorted")
}
m := make(comap, 26*26)
for i := 0; i < len(lm); {
j := i
g := lm[i].s
g2 := g[:2]
z := g2 + "Z"
i = lm.Search(z)
if i >= len(lm) || lm[i].s != z {
panic("unexpected search result")
}
i++
m[g2] = cCDF(lm[j:i])
}
return m
}
// trigram returns the trigram with prefix g2 using a probability value
// in the range [0.0,1.0).
func (c comap) trigram(g2 string, p float64) string {
prs := c[g2]
i := prs.SearchProb(p)
return prs[i].s
}
var (
// CDF for normal probabilities
pcdf = pCDFOfLM(englm3)
// map of two letter conditionals
cmap = comapOfLM(englm3)
)
// Reader generates a stream of text of uppercase letters with trigrams
// distributed according to a language model of the English language.
type Reader struct {
rnd *rand.Rand
g3 string
}
// NewReader creates a new reader. The argument src must create a uniformly
// distributed stream of random values.
func NewReader(src rand.Source) *Reader {
rnd := rand.New(src)
i := pcdf.SearchProb(rnd.Float64())
return &Reader{rnd, pcdf[i].s}
}
// Read reads random text. The Read function will always return len(p)
// bytes and will never return an error.
func (r *Reader) Read(p []byte) (n int, err error) {
for i := range p {
r.g3 = cmap.trigram(r.g3[1:], r.rnd.Float64())
p[i] = r.g3[2]
}
return len(p), nil
}

View File

@@ -1,37 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package randtxt
import (
"bufio"
"io"
"math/rand"
"testing"
)
func TestReader(t *testing.T) {
lr := io.LimitReader(NewReader(rand.NewSource(13)), 195)
pretty := NewGroupReader(lr)
scanner := bufio.NewScanner(pretty)
for scanner.Scan() {
t.Log(scanner.Text())
}
if err := scanner.Err(); err != nil {
t.Fatalf("scanner error %s", err)
}
}
func TestComap(t *testing.T) {
prs := cmap["TH"]
for _, p := range prs[3:6] {
t.Logf("%v", p)
}
p := 0.2
x := cmap.trigram("TH", p)
if x != "THE" {
t.Fatalf("cmap.trigram(%q, %.1f) returned %q; want %q",
"TH", p, x, "THE")
}
}

View File

@@ -1,107 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"io"
"math/rand"
"strings"
"testing"
"github.com/ulikunitz/xz/internal/randtxt"
)
func TestBinTree_Find(t *testing.T) {
bt, err := newBinTree(30)
if err != nil {
t.Fatal(err)
}
const s = "Klopp feiert mit Liverpool seinen hoechsten SiegSieg"
n, err := io.WriteString(bt, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
if n != len(s) {
t.Fatalf("WriteString returned %d; want %d", n, len(s))
}
/* dump info writes the complete tree
if err = bt.dump(os.Stdout); err != nil {
t.Fatalf("bt.dump error %s", err)
}
*/
tests := []string{"Sieg", "Sieb", "Simu"}
for _, c := range tests {
x := xval([]byte(c))
a, b := bt.search(bt.root, x)
t.Logf("%q: a, b == %d, %d", c, a, b)
}
}
func TestBinTree_PredSucc(t *testing.T) {
bt, err := newBinTree(30)
if err != nil {
t.Fatal(err)
}
const s = "Klopp feiert mit Liverpool seinen hoechsten Sieg."
n, err := io.WriteString(bt, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
if n != len(s) {
t.Fatalf("WriteString returned %d; want %d", n, len(s))
}
for v := bt.min(bt.root); v != null; v = bt.succ(v) {
t.Log(dumpX(bt.node[v].x))
}
t.Log("")
for v := bt.max(bt.root); v != null; v = bt.pred(v) {
t.Log(dumpX(bt.node[v].x))
}
}
func TestBinTree_Cycle(t *testing.T) {
buf := new(bytes.Buffer)
w, err := Writer2Config{
DictCap: 4096,
Matcher: BinaryTree,
}.NewWriter2(buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
// const txtlen = 1024
const txtlen = 10000
io.CopyN(buf, randtxt.NewReader(rand.NewSource(42)), txtlen)
txt := buf.String()
buf.Reset()
n, err := io.Copy(w, strings.NewReader(txt))
if err != nil {
t.Fatalf("Compressing copy error %s", err)
}
if n != txtlen {
t.Fatalf("Compressing data length %d; want %d", n, txtlen)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("buf.Len() %d", buf.Len())
r, err := Reader2Config{DictCap: 4096}.NewReader2(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
out := new(bytes.Buffer)
n, err = io.Copy(out, r)
if err != nil {
t.Fatalf("Decompressing copy error %s after %d bytes", err, n)
}
if n != txtlen {
t.Fatalf("Decompression data length %d; want %d", n, txtlen)
}
if txt != out.String() {
t.Fatal("decompressed data differs from original")
}
}

View File

@@ -1,230 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"io"
"testing"
)
func TestBuffer_Write(t *testing.T) {
buf := newBuffer(10)
b := []byte("1234567890")
for i := range b {
n, err := buf.Write(b[i : i+1])
if err != nil {
t.Fatalf("buf.Write(b[%d:%d]) error %s", i, i+1, err)
}
if n != 1 {
t.Fatalf("buf.Write(b[%d:%d]) returned %d; want %d",
i, i+1, n, 1)
}
}
const c = 8
n, err := buf.Discard(c)
if err != nil {
t.Fatalf("Discard error %s", err)
}
if n != c {
t.Fatalf("Discard returned %d; want %d", n, c)
}
n, err = buf.Write(b)
if err == nil {
t.Fatalf("Write length exceed returned no error; n %d", n)
}
if n != c {
t.Fatalf("Write length exceeding returned %d; want %d", n, c)
}
n, err = buf.Discard(4)
if err != nil {
t.Fatalf("Discard error %s", err)
}
if n != 4 {
t.Fatalf("Discard returned %d; want %d", n, 4)
}
n, err = buf.Write(b[:3])
if err != nil {
t.Fatalf("buf.Write(b[:3]) error %s; n %d", err, n)
}
if n != 3 {
t.Fatalf("buf.Write(b[:3]) returned %d; want %d", n, 3)
}
}
func TestBuffer_Buffered_Available(t *testing.T) {
buf := newBuffer(19)
b := []byte("0123456789")
var err error
if _, err = buf.Write(b); err != nil {
t.Fatalf("buf.Write(b) error %s", err)
}
if n := buf.Buffered(); n != 10 {
t.Fatalf("buf.Buffered() returns %d; want %d", n, 10)
}
if _, err = buf.Discard(8); err != nil {
t.Fatalf("buf.Discard(8) error %s", err)
}
if _, err = buf.Write(b[:7]); err != nil {
t.Fatalf("buf.Write(b[:7]) error %s", err)
}
if n := buf.Buffered(); n != 9 {
t.Fatalf("buf.Buffered() returns %d; want %d", n, 9)
}
}
func TestBuffer_Read(t *testing.T) {
buf := newBuffer(10)
b := []byte("0123456789")
var err error
if _, err = buf.Write(b); err != nil {
t.Fatalf("buf.Write(b) error %s", err)
}
p := make([]byte, 8)
n, err := buf.Read(p)
if err != nil {
t.Fatalf("buf.Read(p) error %s", err)
}
if n != len(p) {
t.Fatalf("buf.Read(p) returned %d; want %d", n, len(p))
}
if !bytes.Equal(p, b[:8]) {
t.Fatalf("buf.Read(p) put %s into p; want %s", p, b[:8])
}
if _, err = buf.Write(b[:7]); err != nil {
t.Fatalf("buf.Write(b[:7]) error %s", err)
}
q := make([]byte, 7)
n, err = buf.Read(q)
if err != nil {
t.Fatalf("buf.Read(q) error %s", err)
}
if n != len(q) {
t.Fatalf("buf.Read(q) returns %d; want %d", n, len(q))
}
c := []byte("8901234")
if !bytes.Equal(q, c) {
t.Fatalf("buf.Read(q) put %s into q; want %s", q, c)
}
if _, err := buf.Write(b[7:]); err != nil {
t.Fatalf("buf.Write(b[7:]) error %s", err)
}
if _, err := buf.Write(b[:2]); err != nil {
t.Fatalf("buf.Write(b[:2]) error %s", err)
}
t.Logf("buf.rear %d buf.front %d", buf.rear, buf.front)
r := make([]byte, 2)
n, err = buf.Read(r)
if err != nil {
t.Fatalf("buf.Read(r) error %s", err)
}
if n != len(r) {
t.Fatalf("buf.Read(r) returns %d; want %d", n, len(r))
}
d := []byte("56")
if !bytes.Equal(r, d) {
t.Fatalf("buf.Read(r) put %s into r; want %s", r, d)
}
}
func TestBuffer_Discard(t *testing.T) {
buf := newBuffer(10)
b := []byte("0123456789")
var err error
if _, err = buf.Write(b); err != nil {
t.Fatalf("buf.Write(b) error %s", err)
}
n, err := buf.Discard(11)
if err == nil {
t.Fatalf("buf.Discard(11) didn't return error")
}
if n != 10 {
t.Fatalf("buf.Discard(11) returned %d; want %d", n, 10)
}
if _, err := buf.Write(b); err != nil {
t.Fatalf("buf.Write(b) #2 error %s", err)
}
n, err = buf.Discard(10)
if err != nil {
t.Fatalf("buf.Discard(10) error %s", err)
}
if n != 10 {
t.Fatalf("buf.Discard(11) returned %d; want %d", n, 10)
}
if _, err := buf.Write(b[:4]); err != nil {
t.Fatalf("buf.Write(b[:4]) error %s", err)
}
n, err = buf.Discard(1)
if err != nil {
t.Fatalf("buf.Discard(1) error %s", err)
}
if n != 1 {
t.Fatalf("buf.Discard(1) returned %d; want %d", n, 1)
}
}
func TestBuffer_Discard_error(t *testing.T) {
buf := newBuffer(10)
n, err := buf.Discard(-1)
if err == nil {
t.Fatal("buf.Discard(-1) didn't return an error")
}
if n != 0 {
t.Fatalf("buf.Discard(-1) returned %d; want %d", n, 0)
}
}
func TestPrefixLen(t *testing.T) {
tests := []struct {
a, b []byte
k int
}{
{[]byte("abcde"), []byte("abc"), 3},
{[]byte("abc"), []byte("uvw"), 0},
{[]byte(""), []byte("uvw"), 0},
{[]byte("abcde"), []byte("abcuvw"), 3},
}
for _, c := range tests {
k := prefixLen(c.a, c.b)
if k != c.k {
t.Errorf("prefixLen(%q,%q) returned %d; want %d",
c.a, c.b, k, c.k)
}
k = prefixLen(c.b, c.a)
if k != c.k {
t.Errorf("prefixLen(%q,%q) returned %d; want %d",
c.b, c.a, k, c.k)
}
}
}
func TestMatchLen(t *testing.T) {
buf := newBuffer(13)
const s = "abcaba"
_, err := io.WriteString(buf, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
_, err = io.WriteString(buf, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
if _, err = buf.Discard(12); err != nil {
t.Fatalf("buf.Discard(6) error %s", err)
}
_, err = io.WriteString(buf, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
tests := []struct{ d, n int }{{1, 1}, {3, 2}, {6, 6}, {5, 0}, {2, 0}}
for _, c := range tests {
n := buf.matchLen(c.d, []byte(s))
if n != c.n {
t.Errorf(
"MatchLen(%d,[]byte(%q)) returned %d; want %d",
c.d, s, n, c.n)
}
}
}

View File

@@ -1,59 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bufio"
"io"
"io/ioutil"
"os"
"testing"
)
func TestDecoder(t *testing.T) {
filename := "fox.lzma"
want := "The quick brown fox jumps over the lazy dog.\n"
for i := 0; i < 2; i++ {
f, err := os.Open(filename)
if err != nil {
t.Fatalf("os.Open(%q) error %s", filename, err)
}
p := make([]byte, 13)
_, err = io.ReadFull(f, p)
if err != nil {
t.Fatalf("io.ReadFull error %s", err)
}
props, err := PropertiesForCode(p[0])
if err != nil {
t.Fatalf("p[0] error %s", err)
}
state := newState(props)
const capacity = 0x800000
dict, err := newDecoderDict(capacity)
if err != nil {
t.Fatalf("newDecoderDict: error %s", err)
}
size := int64(-1)
if i > 0 {
size = int64(len(want))
}
br := bufio.NewReader(f)
r, err := newDecoder(br, state, dict, size)
if err != nil {
t.Fatalf("newDecoder error %s", err)
}
bytes, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("[%d] ReadAll error %s", i, err)
}
if err = f.Close(); err != nil {
t.Fatalf("Close error %s", err)
}
got := string(bytes)
if got != want {
t.Fatalf("read %q; but want %q", got, want)
}
}
}

View File

@@ -1,33 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"fmt"
"testing"
)
func peek(d *decoderDict) []byte {
p := make([]byte, d.buffered())
k, err := d.peek(p)
if err != nil {
panic(fmt.Errorf("peek: "+
"Read returned unexpected error %s", err))
}
if k != len(p) {
panic(fmt.Errorf("peek: "+
"Read returned %d; wanted %d", k, len(p)))
}
return p
}
func TestNewDecoderDict(t *testing.T) {
if _, err := newDecoderDict(0); err == nil {
t.Fatalf("no error for zero dictionary capacity")
}
if _, err := newDecoderDict(8); err != nil {
t.Fatalf("error %s", err)
}
}

View File

@@ -1,151 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"io"
"io/ioutil"
"math/rand"
"testing"
"github.com/ulikunitz/xz/internal/randtxt"
)
var testString = `LZMA decoder test example
=========================
! LZMA ! Decoder ! TEST !
=========================
! TEST ! LZMA ! Decoder !
=========================
---- Test Line 1 --------
=========================
---- Test Line 2 --------
=========================
=== End of test file ====
=========================
`
func cycle(t *testing.T, n int) {
t.Logf("cycle(t,%d)", n)
if n > len(testString) {
t.Fatalf("cycle: n=%d larger than len(testString)=%d", n,
len(testString))
}
const dictCap = MinDictCap
m, err := newHashTable(dictCap, 4)
if err != nil {
t.Fatal(err)
}
encoderDict, err := newEncoderDict(dictCap, dictCap+1024, m)
if err != nil {
t.Fatal(err)
}
props := Properties{2, 0, 2}
if err := props.verify(); err != nil {
t.Fatalf("properties error %s", err)
}
state := newState(props)
var buf bytes.Buffer
w, err := newEncoder(&buf, state, encoderDict, eosMarker)
if err != nil {
t.Fatalf("newEncoder error %s", err)
}
orig := []byte(testString)[:n]
t.Logf("len(orig) %d", len(orig))
k, err := w.Write(orig)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if k != len(orig) {
t.Fatalf("w.Write returned %d; want %d", k, len(orig))
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("buf.Len() %d len(orig) %d", buf.Len(), len(orig))
decoderDict, err := newDecoderDict(dictCap)
if err != nil {
t.Fatalf("newDecoderDict error %s", err)
}
state.Reset()
r, err := newDecoder(&buf, state, decoderDict, -1)
if err != nil {
t.Fatalf("newDecoder error %s", err)
}
decoded, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("ReadAll(lr) error %s", err)
}
t.Logf("decoded: %s", decoded)
if len(orig) != len(decoded) {
t.Fatalf("length decoded is %d; want %d", len(decoded),
len(orig))
}
if !bytes.Equal(orig, decoded) {
t.Fatalf("decoded file differs from original")
}
}
func TestEncoderCycle1(t *testing.T) {
cycle(t, len(testString))
}
func TestEncoderCycle2(t *testing.T) {
buf := new(bytes.Buffer)
const txtlen = 50000
io.CopyN(buf, randtxt.NewReader(rand.NewSource(42)), txtlen)
txt := buf.String()
buf.Reset()
const dictCap = MinDictCap
m, err := newHashTable(dictCap, 4)
if err != nil {
t.Fatal(err)
}
encoderDict, err := newEncoderDict(dictCap, dictCap+1024, m)
if err != nil {
t.Fatal(err)
}
props := Properties{3, 0, 2}
if err := props.verify(); err != nil {
t.Fatalf("properties error %s", err)
}
state := newState(props)
lbw := &LimitedByteWriter{BW: buf, N: 100}
w, err := newEncoder(lbw, state, encoderDict, 0)
if err != nil {
t.Fatalf("NewEncoder error %s", err)
}
_, err = io.WriteString(w, txt)
if err != nil && err != ErrLimit {
t.Fatalf("WriteString error %s", err)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
n := w.Compressed()
txt = txt[:n]
decoderDict, err := newDecoderDict(dictCap)
if err != nil {
t.Fatalf("NewDecoderDict error %s", err)
}
state.Reset()
r, err := newDecoder(buf, state, decoderDict, n)
if err != nil {
t.Fatalf("NewDecoder error %s", err)
}
out := new(bytes.Buffer)
if _, err = io.Copy(out, r); err != nil {
t.Fatalf("decompress copy error %s", err)
}
got := out.String()
t.Logf("%s", got)
if len(got) != int(n) {
t.Fatalf("len(got) %d; want %d", len(got), n)
}
if got != txt {
t.Fatalf("got and txt differ")
}
}

View File

@@ -1,47 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"fmt"
"testing"
)
func TestHashTable(t *testing.T) {
ht, err := newHashTable(32, 2)
if err != nil {
t.Fatalf("newHashTable: error %s", err)
}
// 01234567890123456
s := "abcabcdefghijklmn"
n, err := ht.Write([]byte(s))
if err != nil {
t.Fatalf("ht.Write: error %s", err)
}
if n != len(s) {
t.Fatalf("ht.Write returned %d; want %d", n, len(s))
}
tests := []struct {
s string
w string
}{
{"ab", "[3 0]"},
{"bc", "[4 1]"},
{"ca", "[2]"},
{"xx", "[]"},
{"gh", "[9]"},
{"mn", "[15]"},
}
distances := make([]int64, 20)
for _, c := range tests {
distances := distances[:20]
k := ht.Matches([]byte(c.s), distances)
distances = distances[:k]
o := fmt.Sprintf("%v", distances)
if o != c.w {
t.Errorf("%s: offsets %s; want %s", c.s, o, c.w)
}
}
}

View File

@@ -1,153 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"fmt"
"testing"
)
func TestChunkTypeString(t *testing.T) {
tests := [...]struct {
c chunkType
s string
}{
{cEOS, "EOS"},
{cUD, "UD"},
{cU, "U"},
{cL, "L"},
{cLR, "LR"},
{cLRN, "LRN"},
{cLRND, "LRND"},
}
for _, c := range tests {
s := fmt.Sprintf("%v", c.c)
if s != c.s {
t.Errorf("got %s; want %s", s, c.s)
}
}
}
func TestHeaderChunkType(t *testing.T) {
tests := []struct {
h byte
c chunkType
}{
{h: 0, c: cEOS},
{h: 1, c: cUD},
{h: 2, c: cU},
{h: 1<<7 | 0x1f, c: cL},
{h: 1<<7 | 1<<5 | 0x1f, c: cLR},
{h: 1<<7 | 1<<6 | 0x1f, c: cLRN},
{h: 1<<7 | 1<<6 | 1<<5 | 0x1f, c: cLRND},
{h: 1<<7 | 1<<6 | 1<<5, c: cLRND},
}
if _, err := headerChunkType(3); err == nil {
t.Fatalf("headerChunkType(%d) got %v; want %v",
3, err, errHeaderByte)
}
for _, tc := range tests {
c, err := headerChunkType(tc.h)
if err != nil {
t.Fatalf("headerChunkType error %s", err)
}
if c != tc.c {
t.Errorf("got %s; want %s", c, tc.c)
}
}
}
func TestHeaderLen(t *testing.T) {
tests := []struct {
c chunkType
n int
}{
{cEOS, 1}, {cU, 3}, {cUD, 3}, {cL, 5}, {cLR, 5}, {cLRN, 6},
{cLRND, 6},
}
for _, tc := range tests {
n := headerLen(tc.c)
if n != tc.n {
t.Errorf("header length for %s %d; want %d",
tc.c, n, tc.n)
}
}
}
func chunkHeaderSamples(t *testing.T) []chunkHeader {
props := Properties{LC: 3, LP: 0, PB: 2}
headers := make([]chunkHeader, 0, 12)
for c := cEOS; c <= cLRND; c++ {
var h chunkHeader
h.ctype = c
if c >= cUD {
h.uncompressed = 0x0304
}
if c >= cL {
h.compressed = 0x0201
}
if c >= cLRN {
h.props = props
}
headers = append(headers, h)
}
return headers
}
func TestChunkHeaderMarshalling(t *testing.T) {
for _, h := range chunkHeaderSamples(t) {
data, err := h.MarshalBinary()
if err != nil {
t.Fatalf("MarshalBinary for %v error %s", h, err)
}
var g chunkHeader
if err = g.UnmarshalBinary(data); err != nil {
t.Fatalf("UnmarshalBinary error %s", err)
}
if g != h {
t.Fatalf("got %v; want %v", g, h)
}
}
}
func TestReadChunkHeader(t *testing.T) {
for _, h := range chunkHeaderSamples(t) {
data, err := h.MarshalBinary()
if err != nil {
t.Fatalf("MarshalBinary for %v error %s", h, err)
}
r := bytes.NewReader(data)
g, err := readChunkHeader(r)
if err != nil {
t.Fatalf("readChunkHeader for %v error %s", h, err)
}
if *g != h {
t.Fatalf("got %v; want %v", g, h)
}
}
}
func TestReadEOS(t *testing.T) {
var b [1]byte
r := bytes.NewReader(b[:])
h, err := readChunkHeader(r)
if err != nil {
t.Fatalf("readChunkHeader error %s", err)
}
if h.ctype != cEOS {
t.Errorf("ctype got %s; want %s", h.ctype, cEOS)
}
if h.compressed != 0 {
t.Errorf("compressed got %d; want %d", h.compressed, 0)
}
if h.uncompressed != 0 {
t.Errorf("uncompressed got %d; want %d", h.uncompressed, 0)
}
wantProps := Properties{}
if h.props != wantProps {
t.Errorf("props got %v; want %v", h.props, wantProps)
}
}

View File

@@ -1,52 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import "testing"
func TestHeaderMarshalling(t *testing.T) {
tests := []header{
{properties: Properties{3, 0, 2}, dictCap: 8 * 1024 * 1024,
size: -1},
{properties: Properties{4, 3, 3}, dictCap: 4096,
size: 10},
}
for _, h := range tests {
data, err := h.marshalBinary()
if err != nil {
t.Fatalf("marshalBinary error %s", err)
}
var g header
if err = g.unmarshalBinary(data); err != nil {
t.Fatalf("unmarshalBinary error %s", err)
}
if h != g {
t.Errorf("got header %#v; want %#v", g, h)
}
}
}
func TestValidHeader(t *testing.T) {
tests := []header{
{properties: Properties{3, 0, 2}, dictCap: 8 * 1024 * 1024,
size: -1},
{properties: Properties{4, 3, 3}, dictCap: 4096,
size: 10},
}
for _, h := range tests {
data, err := h.marshalBinary()
if err != nil {
t.Fatalf("marshalBinary error %s", err)
}
if !ValidHeader(data) {
t.Errorf("ValidHeader returns false for header %v;"+
" want true", h)
}
}
const a = "1234567890123"
if ValidHeader([]byte(a)) {
t.Errorf("ValidHeader returns true for %s; want false", a)
}
}

View File

@@ -1,312 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bufio"
"bytes"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"testing"
"testing/iotest"
)
func TestNewReader(t *testing.T) {
f, err := os.Open("examples/a.lzma")
if err != nil {
t.Fatalf("open examples/a.lzma: %s", err)
}
defer f.Close()
_, err = NewReader(bufio.NewReader(f))
if err != nil {
t.Fatalf("NewReader: %s", err)
}
}
const (
dirname = "examples"
origname = "a.txt"
)
func readOrigFile(t *testing.T) []byte {
orig, err := ioutil.ReadFile(filepath.Join(dirname, origname))
if err != nil {
t.Fatalf("ReadFile: %s", err)
}
return orig
}
func testDecodeFile(t *testing.T, filename string, orig []byte) {
pathname := filepath.Join(dirname, filename)
f, err := os.Open(pathname)
if err != nil {
t.Fatalf("Open(%q): %s", pathname, err)
}
defer func() {
if err = f.Close(); err != nil {
t.Fatalf("f.Close() error %s", err)
}
}()
t.Logf("file %s opened", filename)
l, err := NewReader(bufio.NewReader(f))
if err != nil {
t.Fatalf("NewReader: %s", err)
}
decoded, err := ioutil.ReadAll(l)
if err != nil {
t.Fatalf("ReadAll: %s", err)
}
t.Logf("%s", decoded)
if len(orig) != len(decoded) {
t.Fatalf("length decoded is %d; want %d",
len(decoded), len(orig))
}
if !bytes.Equal(orig, decoded) {
t.Fatalf("decoded file differs from original")
}
}
func TestReaderSimple(t *testing.T) {
// DebugOn(os.Stderr)
// defer DebugOff()
testDecodeFile(t, "a.lzma", readOrigFile(t))
}
func TestReaderAll(t *testing.T) {
dirname := "examples"
dir, err := os.Open(dirname)
if err != nil {
t.Fatalf("Open: %s", err)
}
defer func() {
if err := dir.Close(); err != nil {
t.Fatalf("dir.Close() error %s", err)
}
}()
all, err := dir.Readdirnames(0)
if err != nil {
t.Fatalf("Readdirnames: %s", err)
}
// filter all files matching the pattern "a*.lzma"
files := make([]string, 0, len(all))
for _, fn := range all {
match, err := filepath.Match("a*.lzma", fn)
if err != nil {
t.Fatalf("Match: %s", err)
}
if match {
files = append(files, fn)
}
}
t.Log("files:", files)
orig := readOrigFile(t)
// actually test the files
for _, fn := range files {
testDecodeFile(t, fn, orig)
}
}
//
func Example_reader() {
f, err := os.Open("fox.lzma")
if err != nil {
log.Fatal(err)
}
// no need for defer; log.Fatal calls os.Exit(1), which doesn't execute deferred functions
r, err := NewReader(bufio.NewReader(f))
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(os.Stdout, r)
if err != nil {
log.Fatal(err)
}
if err := f.Close(); err != nil {
log.Fatal(err)
}
// Output:
// The quick brown fox jumps over the lazy dog.
}
type wrapTest struct {
name string
wrap func(io.Reader) io.Reader
}
func (w *wrapTest) testFile(t *testing.T, filename string, orig []byte) {
pathname := filepath.Join(dirname, filename)
f, err := os.Open(pathname)
if err != nil {
t.Fatalf("Open(\"%s\"): %s", pathname, err)
}
defer func() {
if err := f.Close(); err != nil {
log.Fatal(err)
}
}()
t.Logf("%s file %s opened", w.name, filename)
l, err := NewReader(w.wrap(f))
if err != nil {
t.Fatalf("%s NewReader: %s", w.name, err)
}
decoded, err := ioutil.ReadAll(l)
if err != nil {
t.Fatalf("%s ReadAll: %s", w.name, err)
}
t.Logf("%s", decoded)
if len(orig) != len(decoded) {
t.Fatalf("%s length decoded is %d; want %d",
w.name, len(decoded), len(orig))
}
if !bytes.Equal(orig, decoded) {
t.Fatalf("%s decoded file differs from original", w.name)
}
}
func TestReaderWrap(t *testing.T) {
tests := [...]wrapTest{
{"DataErrReader", iotest.DataErrReader},
{"HalfReader", iotest.HalfReader},
{"OneByteReader", iotest.OneByteReader},
// TimeOutReader would require a buffer
}
orig := readOrigFile(t)
for _, tst := range tests {
tst.testFile(t, "a.lzma", orig)
}
}
func TestReaderBadFiles(t *testing.T) {
dirname := "examples"
dir, err := os.Open(dirname)
if err != nil {
t.Fatalf("Open: %s", err)
}
defer func() {
if err := dir.Close(); err != nil {
t.Fatalf("dir.Close() error %s", err)
}
}()
all, err := dir.Readdirnames(0)
if err != nil {
t.Fatalf("Readdirnames: %s", err)
}
// filter all files matching the pattern "bad*.lzma"
files := make([]string, 0, len(all))
for _, fn := range all {
match, err := filepath.Match("bad*.lzma", fn)
if err != nil {
t.Fatalf("Match: %s", err)
}
if match {
files = append(files, fn)
}
}
t.Log("files:", files)
for _, filename := range files {
pathname := filepath.Join(dirname, filename)
f, err := os.Open(pathname)
if err != nil {
t.Fatalf("Open(\"%s\"): %s", pathname, err)
}
defer func(f *os.File) {
if err := f.Close(); err != nil {
t.Fatalf("f.Close() error %s", err)
}
}(f)
t.Logf("file %s opened", filename)
l, err := NewReader(f)
if err != nil {
t.Fatalf("NewReader: %s", err)
}
decoded, err := ioutil.ReadAll(l)
if err == nil {
t.Errorf("ReadAll for %s: no error", filename)
t.Logf("%s", decoded)
continue
}
t.Logf("%s: error %s", filename, err)
}
}
type repReader byte
func (r repReader) Read(p []byte) (n int, err error) {
for i := range p {
p[i] = byte(r)
}
return len(p), nil
}
func newRepReader(c byte, n int64) *io.LimitedReader {
return &io.LimitedReader{R: repReader(c), N: n}
}
func newCodeReader(r io.Reader) *io.PipeReader {
pr, pw := io.Pipe()
go func() {
bw := bufio.NewWriter(pw)
lw, err := NewWriter(bw)
if err != nil {
log.Fatalf("NewWriter error %s", err)
}
if _, err = io.Copy(lw, r); err != nil {
log.Fatalf("io.Copy error %s", err)
}
if err = lw.Close(); err != nil {
log.Fatalf("lw.Close error %s", err)
}
if err = bw.Flush(); err != nil {
log.Fatalf("bw.Flush() error %s", err)
}
if err = pw.CloseWithError(io.EOF); err != nil {
log.Fatalf("pw.CloseWithError(io.EOF) error %s", err)
}
}()
return pr
}
func TestReaderErrAgain(t *testing.T) {
lengths := []int64{0, 128, 1024, 4095, 4096, 4097, 8191, 8192, 8193}
buf := make([]byte, 128)
const c = 'A'
for _, n := range lengths {
t.Logf("n: %d", n)
pr := newCodeReader(newRepReader(c, n))
r, err := NewReader(pr)
if err != nil {
t.Fatalf("NewReader(pr) error %s", err)
}
k := int64(0)
for {
m, err := r.Read(buf)
k += int64(m)
if err == io.EOF {
break
}
if err != nil {
t.Errorf("r.Read(buf) error %s", err)
break
}
if m > len(buf) {
t.Fatalf("r.Read(buf) %d; want <= %d", m,
len(buf))
}
for i, b := range buf[:m] {
if b != c {
t.Fatalf("buf[%d]=%c; want %c", i, b,
c)
}
}
}
if k != n {
t.Errorf("Read %d bytes; want %d", k, n)
}
}
}

View File

@@ -1,109 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"io"
"math/rand"
"strings"
"testing"
"github.com/ulikunitz/xz/internal/randtxt"
)
func TestWriter2(t *testing.T) {
var buf bytes.Buffer
w, err := Writer2Config{DictCap: 4096}.NewWriter2(&buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
n, err := w.Write([]byte{'a'})
if err != nil {
t.Fatalf("w.Write([]byte{'a'}) error %s", err)
}
if n != 1 {
t.Fatalf("w.Write([]byte{'a'}) returned %d; want %d", n, 1)
}
if err = w.Flush(); err != nil {
t.Fatalf("w.Flush() error %s", err)
}
// check that double Flush doesn't write another chunk
if err = w.Flush(); err != nil {
t.Fatalf("w.Flush() error %s", err)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close() error %s", err)
}
p := buf.Bytes()
want := []byte{1, 0, 0, 'a', 0}
if !bytes.Equal(p, want) {
t.Fatalf("bytes written %#v; want %#v", p, want)
}
}
func TestCycle1(t *testing.T) {
var buf bytes.Buffer
w, err := Writer2Config{DictCap: 4096}.NewWriter2(&buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
n, err := w.Write([]byte{'a'})
if err != nil {
t.Fatalf("w.Write([]byte{'a'}) error %s", err)
}
if n != 1 {
t.Fatalf("w.Write([]byte{'a'}) returned %d; want %d", n, 1)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close() error %s", err)
}
r, err := Reader2Config{DictCap: 4096}.NewReader2(&buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
p := make([]byte, 3)
n, err = r.Read(p)
t.Logf("n %d error %v", n, err)
}
func TestCycle2(t *testing.T) {
buf := new(bytes.Buffer)
w, err := Writer2Config{DictCap: 4096}.NewWriter2(buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
// const txtlen = 1024
const txtlen = 2100000
io.CopyN(buf, randtxt.NewReader(rand.NewSource(42)), txtlen)
txt := buf.String()
buf.Reset()
n, err := io.Copy(w, strings.NewReader(txt))
if err != nil {
t.Fatalf("Compressing copy error %s", err)
}
if n != txtlen {
t.Fatalf("Compressing data length %d; want %d", n, txtlen)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("buf.Len() %d", buf.Len())
r, err := Reader2Config{DictCap: 4096}.NewReader2(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
out := new(bytes.Buffer)
n, err = io.Copy(out, r)
if err != nil {
t.Fatalf("Decompressing copy error %s after %d bytes", err, n)
}
if n != txtlen {
t.Fatalf("Decompression data length %d; want %d", n, txtlen)
}
if txt != out.String() {
t.Fatal("decompressed data differs from original")
}
}

View File

@@ -1,249 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bufio"
"bytes"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"testing"
"github.com/ulikunitz/xz/internal/randtxt"
)
func TestWriterCycle(t *testing.T) {
orig := readOrigFile(t)
buf := new(bytes.Buffer)
w, err := NewWriter(buf)
if err != nil {
t.Fatalf("NewWriter: error %s", err)
}
n, err := w.Write(orig)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if n != len(orig) {
t.Fatalf("w.Write returned %d; want %d", n, len(orig))
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("buf.Len() %d len(orig) %d", buf.Len(), len(orig))
if buf.Len() > len(orig) {
t.Errorf("buf.Len()=%d bigger then len(orig)=%d", buf.Len(),
len(orig))
}
lr, err := NewReader(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
decoded, err := ioutil.ReadAll(lr)
if err != nil {
t.Fatalf("ReadAll(lr) error %s", err)
}
t.Logf("%s", decoded)
if len(orig) != len(decoded) {
t.Fatalf("length decoded is %d; want %d", len(decoded),
len(orig))
}
if !bytes.Equal(orig, decoded) {
t.Fatalf("decoded file differs from original")
}
}
func TestWriterLongData(t *testing.T) {
const (
seed = 49
size = 82237
)
r := io.LimitReader(randtxt.NewReader(rand.NewSource(seed)), size)
txt, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("ReadAll error %s", err)
}
if len(txt) != size {
t.Fatalf("ReadAll read %d bytes; want %d", len(txt), size)
}
buf := &bytes.Buffer{}
w, err := WriterConfig{DictCap: 0x4000}.NewWriter(buf)
if err != nil {
t.Fatalf("WriterConfig.NewWriter error %s", err)
}
n, err := w.Write(txt)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if n != len(txt) {
t.Fatalf("w.Write wrote %d bytes; want %d", n, size)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("compressed length %d", buf.Len())
lr, err := NewReader(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
txtRead, err := ioutil.ReadAll(lr)
if err != nil {
t.Fatalf("ReadAll(lr) error %s", err)
}
if len(txtRead) != size {
t.Fatalf("ReadAll(lr) returned %d bytes; want %d",
len(txtRead), size)
}
if !bytes.Equal(txtRead, txt) {
t.Fatal("ReadAll(lr) returned txt differs from origin")
}
}
func TestWriter_Size(t *testing.T) {
buf := new(bytes.Buffer)
w, err := WriterConfig{Size: 10, EOSMarker: true}.NewWriter(buf)
if err != nil {
t.Fatalf("WriterConfig.NewWriter error %s", err)
}
q := []byte{'a'}
for i := 0; i < 9; i++ {
n, err := w.Write(q)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if n != 1 {
t.Fatalf("w.Write returned %d; want %d", n, 1)
}
q[0]++
}
if err := w.Close(); err != errSize {
t.Fatalf("expected errSize, but got %v", err)
}
n, err := w.Write(q)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if n != 1 {
t.Fatalf("w.Write returned %d; want %d", n, 1)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("compressed size %d", buf.Len())
r, err := NewReader(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
b, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("ReadAll error %s", err)
}
s := string(b)
want := "abcdefghij"
if s != want {
t.Fatalf("read %q, want %q", s, want)
}
}
// The example uses the buffered reader and writer from package bufio.
func Example_writer() {
pr, pw := io.Pipe()
go func() {
bw := bufio.NewWriter(pw)
w, err := NewWriter(bw)
if err != nil {
log.Fatal(err)
}
input := []byte("The quick brown fox jumps over the lazy dog.")
if _, err = w.Write(input); err != nil {
log.Fatal(err)
}
if err = w.Close(); err != nil {
log.Fatal(err)
}
// reader waits for the data
if err = bw.Flush(); err != nil {
log.Fatal(err)
}
}()
r, err := NewReader(pr)
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(os.Stdout, r)
if err != nil {
log.Fatal(err)
}
// Output:
// The quick brown fox jumps over the lazy dog.
}
func BenchmarkReader(b *testing.B) {
const (
seed = 49
size = 50000
)
r := io.LimitReader(randtxt.NewReader(rand.NewSource(seed)), size)
txt, err := ioutil.ReadAll(r)
if err != nil {
b.Fatalf("ReadAll error %s", err)
}
buf := &bytes.Buffer{}
w, err := WriterConfig{DictCap: 0x4000}.NewWriter(buf)
if err != nil {
b.Fatalf("WriterConfig{}.NewWriter error %s", err)
}
if _, err = w.Write(txt); err != nil {
b.Fatalf("w.Write error %s", err)
}
if err = w.Close(); err != nil {
b.Fatalf("w.Close error %s", err)
}
data, err := ioutil.ReadAll(buf)
if err != nil {
b.Fatalf("ReadAll error %s", err)
}
b.SetBytes(int64(len(txt)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
lr, err := NewReader(bytes.NewReader(data))
if err != nil {
b.Fatalf("NewReader error %s", err)
}
if _, err = ioutil.ReadAll(lr); err != nil {
b.Fatalf("ReadAll(lr) error %s", err)
}
}
}
func BenchmarkWriter(b *testing.B) {
const (
seed = 49
size = 50000
)
r := io.LimitReader(randtxt.NewReader(rand.NewSource(seed)), size)
txt, err := ioutil.ReadAll(r)
if err != nil {
b.Fatalf("ReadAll error %s", err)
}
buf := &bytes.Buffer{}
b.SetBytes(int64(len(txt)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
buf.Reset()
w, err := WriterConfig{DictCap: 0x4000}.NewWriter(buf)
if err != nil {
b.Fatalf("NewWriter error %s", err)
}
if _, err = w.Write(txt); err != nil {
b.Fatalf("w.Write error %s", err)
}
if err = w.Close(); err != nil {
b.Fatalf("w.Close error %s", err)
}
}
}

View File

@@ -1,81 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xz
import (
"bytes"
"io"
"io/ioutil"
"os"
"testing"
)
func TestReaderSimple(t *testing.T) {
const file = "fox.xz"
xz, err := os.Open(file)
if err != nil {
t.Fatalf("os.Open(%q) error %s", file, err)
}
r, err := NewReader(xz)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
var buf bytes.Buffer
if _, err = io.Copy(&buf, r); err != nil {
t.Fatalf("io.Copy error %s", err)
}
}
func TestReaderSingleStream(t *testing.T) {
data, err := ioutil.ReadFile("fox.xz")
if err != nil {
t.Fatalf("ReadFile error %s", err)
}
xz := bytes.NewReader(data)
rc := ReaderConfig{SingleStream: true}
r, err := rc.NewReader(xz)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
var buf bytes.Buffer
if _, err = io.Copy(&buf, r); err != nil {
t.Fatalf("io.Copy error %s", err)
}
buf.Reset()
data = append(data, 0)
xz = bytes.NewReader(data)
r, err = rc.NewReader(xz)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
if _, err = io.Copy(&buf, r); err != errUnexpectedData {
t.Fatalf("io.Copy returned %v; want %v", err, errUnexpectedData)
}
}
func TestReaderMultipleStreams(t *testing.T) {
data, err := ioutil.ReadFile("fox.xz")
if err != nil {
t.Fatalf("ReadFile error %s", err)
}
m := make([]byte, 0, 4*len(data)+4*4)
m = append(m, data...)
m = append(m, data...)
m = append(m, 0, 0, 0, 0)
m = append(m, data...)
m = append(m, 0, 0, 0, 0)
m = append(m, 0, 0, 0, 0)
m = append(m, data...)
m = append(m, 0, 0, 0, 0)
xz := bytes.NewReader(m)
r, err := NewReader(xz)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
var buf bytes.Buffer
if _, err = io.Copy(&buf, r); err != nil {
t.Fatalf("io.Copy error %s", err)
}
}

View File

@@ -1,138 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xz
import (
"bytes"
"io"
"log"
"math/rand"
"os"
"testing"
"github.com/ulikunitz/xz/internal/randtxt"
)
func TestWriter(t *testing.T) {
const text = "The quick brown fox jumps over the lazy dog."
var buf bytes.Buffer
w, err := NewWriter(&buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
n, err := io.WriteString(w, text)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
if n != len(text) {
t.Fatalf("Writestring wrote %d bytes; want %d", n, len(text))
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
var out bytes.Buffer
r, err := NewReader(&buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
if _, err = io.Copy(&out, r); err != nil {
t.Fatalf("io.Copy error %s", err)
}
s := out.String()
if s != text {
t.Fatalf("reader decompressed to %q; want %q", s, text)
}
}
func TestIssue12(t *testing.T) {
var buf bytes.Buffer
w, err := NewWriter(&buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
r, err := NewReader(&buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
var out bytes.Buffer
if _, err = io.Copy(&out, r); err != nil {
t.Fatalf("io.Copy error %s", err)
}
s := out.String()
if s != "" {
t.Fatalf("reader decompressed to %q; want %q", s, "")
}
}
func Example() {
const text = "The quick brown fox jumps over the lazy dog."
var buf bytes.Buffer
// compress text
w, err := NewWriter(&buf)
if err != nil {
log.Fatalf("NewWriter error %s", err)
}
if _, err := io.WriteString(w, text); err != nil {
log.Fatalf("WriteString error %s", err)
}
if err := w.Close(); err != nil {
log.Fatalf("w.Close error %s", err)
}
// decompress buffer and write result to stdout
r, err := NewReader(&buf)
if err != nil {
log.Fatalf("NewReader error %s", err)
}
if _, err = io.Copy(os.Stdout, r); err != nil {
log.Fatalf("io.Copy error %s", err)
}
// Output:
// The quick brown fox jumps over the lazy dog.
}
func TestWriter2(t *testing.T) {
const txtlen = 1023
var buf bytes.Buffer
io.CopyN(&buf, randtxt.NewReader(rand.NewSource(41)), txtlen)
txt := buf.String()
buf.Reset()
w, err := NewWriter(&buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
n, err := io.WriteString(w, txt)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
if n != len(txt) {
t.Fatalf("WriteString wrote %d bytes; want %d", n, len(txt))
}
if err = w.Close(); err != nil {
t.Fatalf("Close error %s", err)
}
t.Logf("buf.Len() %d", buf.Len())
r, err := NewReader(&buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
var out bytes.Buffer
k, err := io.Copy(&out, r)
if err != nil {
t.Fatalf("Decompressing copy error %s after %d bytes", err, n)
}
if k != txtlen {
t.Fatalf("Decompression data length %d; want %d", k, txtlen)
}
if txt != out.String() {
t.Fatal("decompressed data differs from original")
}
}