bump modules

Mikaël Cluseau
2018-12-10 18:22:47 +11:00
parent 3124a1bd36
commit e155660ab0
56 changed files with 90 additions and 26811 deletions

View File

@ -1,107 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"io"
"math/rand"
"strings"
"testing"
"github.com/ulikunitz/xz/internal/randtxt"
)
func TestBinTree_Find(t *testing.T) {
bt, err := newBinTree(30)
if err != nil {
t.Fatal(err)
}
const s = "Klopp feiert mit Liverpool seinen hoechsten SiegSieg"
n, err := io.WriteString(bt, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
if n != len(s) {
t.Fatalf("WriteString returned %d; want %d", n, len(s))
}
/* dump writes the complete tree
if err = bt.dump(os.Stdout); err != nil {
t.Fatalf("bt.dump error %s", err)
}
*/
tests := []string{"Sieg", "Sieb", "Simu"}
for _, c := range tests {
x := xval([]byte(c))
a, b := bt.search(bt.root, x)
t.Logf("%q: a, b == %d, %d", c, a, b)
}
}
func TestBinTree_PredSucc(t *testing.T) {
bt, err := newBinTree(30)
if err != nil {
t.Fatal(err)
}
const s = "Klopp feiert mit Liverpool seinen hoechsten Sieg."
n, err := io.WriteString(bt, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
if n != len(s) {
t.Fatalf("WriteString returned %d; want %d", n, len(s))
}
for v := bt.min(bt.root); v != null; v = bt.succ(v) {
t.Log(dumpX(bt.node[v].x))
}
t.Log("")
for v := bt.max(bt.root); v != null; v = bt.pred(v) {
t.Log(dumpX(bt.node[v].x))
}
}
func TestBinTree_Cycle(t *testing.T) {
buf := new(bytes.Buffer)
w, err := Writer2Config{
DictCap: 4096,
Matcher: BinaryTree,
}.NewWriter2(buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
// const txtlen = 1024
const txtlen = 10000
io.CopyN(buf, randtxt.NewReader(rand.NewSource(42)), txtlen)
txt := buf.String()
buf.Reset()
n, err := io.Copy(w, strings.NewReader(txt))
if err != nil {
t.Fatalf("Compressing copy error %s", err)
}
if n != txtlen {
t.Fatalf("Compressing data length %d; want %d", n, txtlen)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("buf.Len() %d", buf.Len())
r, err := Reader2Config{DictCap: 4096}.NewReader2(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
out := new(bytes.Buffer)
n, err = io.Copy(out, r)
if err != nil {
t.Fatalf("Decompressing copy error %s after %d bytes", err, n)
}
if n != txtlen {
t.Fatalf("Decompression data length %d; want %d", n, txtlen)
}
if txt != out.String() {
t.Fatal("decompressed data differs from original")
}
}

View File

@ -1,230 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"io"
"testing"
)
func TestBuffer_Write(t *testing.T) {
buf := newBuffer(10)
b := []byte("1234567890")
for i := range b {
n, err := buf.Write(b[i : i+1])
if err != nil {
t.Fatalf("buf.Write(b[%d:%d]) error %s", i, i+1, err)
}
if n != 1 {
t.Fatalf("buf.Write(b[%d:%d]) returned %d; want %d",
i, i+1, n, 1)
}
}
const c = 8
n, err := buf.Discard(c)
if err != nil {
t.Fatalf("Discard error %s", err)
}
if n != c {
t.Fatalf("Discard returned %d; want %d", n, c)
}
n, err = buf.Write(b)
if err == nil {
t.Fatalf("Write length exceed returned no error; n %d", n)
}
if n != c {
t.Fatalf("Write length exceeding returned %d; want %d", n, c)
}
n, err = buf.Discard(4)
if err != nil {
t.Fatalf("Discard error %s", err)
}
if n != 4 {
t.Fatalf("Discard returned %d; want %d", n, 4)
}
n, err = buf.Write(b[:3])
if err != nil {
t.Fatalf("buf.Write(b[:3]) error %s; n %d", err, n)
}
if n != 3 {
t.Fatalf("buf.Write(b[:3]) returned %d; want %d", n, 3)
}
}
func TestBuffer_Buffered_Available(t *testing.T) {
buf := newBuffer(19)
b := []byte("0123456789")
var err error
if _, err = buf.Write(b); err != nil {
t.Fatalf("buf.Write(b) error %s", err)
}
if n := buf.Buffered(); n != 10 {
t.Fatalf("buf.Buffered() returns %d; want %d", n, 10)
}
if _, err = buf.Discard(8); err != nil {
t.Fatalf("buf.Discard(8) error %s", err)
}
if _, err = buf.Write(b[:7]); err != nil {
t.Fatalf("buf.Write(b[:7]) error %s", err)
}
if n := buf.Buffered(); n != 9 {
t.Fatalf("buf.Buffered() returns %d; want %d", n, 9)
}
}
func TestBuffer_Read(t *testing.T) {
buf := newBuffer(10)
b := []byte("0123456789")
var err error
if _, err = buf.Write(b); err != nil {
t.Fatalf("buf.Write(b) error %s", err)
}
p := make([]byte, 8)
n, err := buf.Read(p)
if err != nil {
t.Fatalf("buf.Read(p) error %s", err)
}
if n != len(p) {
t.Fatalf("buf.Read(p) returned %d; want %d", n, len(p))
}
if !bytes.Equal(p, b[:8]) {
t.Fatalf("buf.Read(p) put %s into p; want %s", p, b[:8])
}
if _, err = buf.Write(b[:7]); err != nil {
t.Fatalf("buf.Write(b[:7]) error %s", err)
}
q := make([]byte, 7)
n, err = buf.Read(q)
if err != nil {
t.Fatalf("buf.Read(q) error %s", err)
}
if n != len(q) {
t.Fatalf("buf.Read(q) returns %d; want %d", n, len(q))
}
c := []byte("8901234")
if !bytes.Equal(q, c) {
t.Fatalf("buf.Read(q) put %s into q; want %s", q, c)
}
if _, err := buf.Write(b[7:]); err != nil {
t.Fatalf("buf.Write(b[7:]) error %s", err)
}
if _, err := buf.Write(b[:2]); err != nil {
t.Fatalf("buf.Write(b[:2]) error %s", err)
}
t.Logf("buf.rear %d buf.front %d", buf.rear, buf.front)
r := make([]byte, 2)
n, err = buf.Read(r)
if err != nil {
t.Fatalf("buf.Read(r) error %s", err)
}
if n != len(r) {
t.Fatalf("buf.Read(r) returns %d; want %d", n, len(r))
}
d := []byte("56")
if !bytes.Equal(r, d) {
t.Fatalf("buf.Read(r) put %s into r; want %s", r, d)
}
}
func TestBuffer_Discard(t *testing.T) {
buf := newBuffer(10)
b := []byte("0123456789")
var err error
if _, err = buf.Write(b); err != nil {
t.Fatalf("buf.Write(b) error %s", err)
}
n, err := buf.Discard(11)
if err == nil {
t.Fatalf("buf.Discard(11) didn't return error")
}
if n != 10 {
t.Fatalf("buf.Discard(11) returned %d; want %d", n, 10)
}
if _, err := buf.Write(b); err != nil {
t.Fatalf("buf.Write(b) #2 error %s", err)
}
n, err = buf.Discard(10)
if err != nil {
t.Fatalf("buf.Discard(10) error %s", err)
}
if n != 10 {
t.Fatalf("buf.Discard(11) returned %d; want %d", n, 10)
}
if _, err := buf.Write(b[:4]); err != nil {
t.Fatalf("buf.Write(b[:4]) error %s", err)
}
n, err = buf.Discard(1)
if err != nil {
t.Fatalf("buf.Discard(1) error %s", err)
}
if n != 1 {
t.Fatalf("buf.Discard(1) returned %d; want %d", n, 1)
}
}
func TestBuffer_Discard_error(t *testing.T) {
buf := newBuffer(10)
n, err := buf.Discard(-1)
if err == nil {
t.Fatal("buf.Discard(-1) didn't return an error")
}
if n != 0 {
t.Fatalf("buf.Discard(-1) returned %d; want %d", n, 0)
}
}
func TestPrefixLen(t *testing.T) {
tests := []struct {
a, b []byte
k int
}{
{[]byte("abcde"), []byte("abc"), 3},
{[]byte("abc"), []byte("uvw"), 0},
{[]byte(""), []byte("uvw"), 0},
{[]byte("abcde"), []byte("abcuvw"), 3},
}
for _, c := range tests {
k := prefixLen(c.a, c.b)
if k != c.k {
t.Errorf("prefixLen(%q,%q) returned %d; want %d",
c.a, c.b, k, c.k)
}
k = prefixLen(c.b, c.a)
if k != c.k {
t.Errorf("prefixLen(%q,%q) returned %d; want %d",
c.b, c.a, k, c.k)
}
}
}
func TestMatchLen(t *testing.T) {
buf := newBuffer(13)
const s = "abcaba"
_, err := io.WriteString(buf, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
_, err = io.WriteString(buf, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
if _, err = buf.Discard(12); err != nil {
t.Fatalf("buf.Discard(6) error %s", err)
}
_, err = io.WriteString(buf, s)
if err != nil {
t.Fatalf("WriteString error %s", err)
}
tests := []struct{ d, n int }{{1, 1}, {3, 2}, {6, 6}, {5, 0}, {2, 0}}
for _, c := range tests {
n := buf.matchLen(c.d, []byte(s))
if n != c.n {
t.Errorf(
"MatchLen(%d,[]byte(%q)) returned %d; want %d",
c.d, s, n, c.n)
}
}
}

View File

@ -1,59 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bufio"
"io"
"io/ioutil"
"os"
"testing"
)
func TestDecoder(t *testing.T) {
filename := "fox.lzma"
want := "The quick brown fox jumps over the lazy dog.\n"
for i := 0; i < 2; i++ {
f, err := os.Open(filename)
if err != nil {
t.Fatalf("os.Open(%q) error %s", filename, err)
}
p := make([]byte, 13)
_, err = io.ReadFull(f, p)
if err != nil {
t.Fatalf("io.ReadFull error %s", err)
}
props, err := PropertiesForCode(p[0])
if err != nil {
t.Fatalf("p[0] error %s", err)
}
state := newState(props)
const capacity = 0x800000
dict, err := newDecoderDict(capacity)
if err != nil {
t.Fatalf("newDecoderDict: error %s", err)
}
size := int64(-1)
if i > 0 {
size = int64(len(want))
}
br := bufio.NewReader(f)
r, err := newDecoder(br, state, dict, size)
if err != nil {
t.Fatalf("newDecoder error %s", err)
}
bytes, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("[%d] ReadAll error %s", i, err)
}
if err = f.Close(); err != nil {
t.Fatalf("Close error %s", err)
}
got := string(bytes)
if got != want {
t.Fatalf("read %q; but want %q", got, want)
}
}
}

View File

@ -1,33 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"fmt"
"testing"
)
func peek(d *decoderDict) []byte {
p := make([]byte, d.buffered())
k, err := d.peek(p)
if err != nil {
panic(fmt.Errorf("peek: "+
"Read returned unexpected error %s", err))
}
if k != len(p) {
panic(fmt.Errorf("peek: "+
"Read returned %d; wanted %d", k, len(p)))
}
return p
}
func TestNewDecoderDict(t *testing.T) {
if _, err := newDecoderDict(0); err == nil {
t.Fatalf("no error for zero dictionary capacity")
}
if _, err := newDecoderDict(8); err != nil {
t.Fatalf("error %s", err)
}
}

View File

@ -11,7 +11,7 @@ import (
// opLenMargin provides the upper limit of the number of bytes required
// to encode a single operation.
-const opLenMargin = 10
+const opLenMargin = 16
// compressFlags control the compression process.
type compressFlags uint32

View File

@ -1,151 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"io"
"io/ioutil"
"math/rand"
"testing"
"github.com/ulikunitz/xz/internal/randtxt"
)
var testString = `LZMA decoder test example
=========================
! LZMA ! Decoder ! TEST !
=========================
! TEST ! LZMA ! Decoder !
=========================
---- Test Line 1 --------
=========================
---- Test Line 2 --------
=========================
=== End of test file ====
=========================
`
func cycle(t *testing.T, n int) {
t.Logf("cycle(t,%d)", n)
if n > len(testString) {
t.Fatalf("cycle: n=%d larger than len(testString)=%d", n,
len(testString))
}
const dictCap = MinDictCap
m, err := newHashTable(dictCap, 4)
if err != nil {
t.Fatal(err)
}
encoderDict, err := newEncoderDict(dictCap, dictCap+1024, m)
if err != nil {
t.Fatal(err)
}
props := Properties{2, 0, 2}
if err := props.verify(); err != nil {
t.Fatalf("properties error %s", err)
}
state := newState(props)
var buf bytes.Buffer
w, err := newEncoder(&buf, state, encoderDict, eosMarker)
if err != nil {
t.Fatalf("newEncoder error %s", err)
}
orig := []byte(testString)[:n]
t.Logf("len(orig) %d", len(orig))
k, err := w.Write(orig)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if k != len(orig) {
t.Fatalf("w.Write returned %d; want %d", k, len(orig))
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("buf.Len() %d len(orig) %d", buf.Len(), len(orig))
decoderDict, err := newDecoderDict(dictCap)
if err != nil {
t.Fatalf("newDecoderDict error %s", err)
}
state.Reset()
r, err := newDecoder(&buf, state, decoderDict, -1)
if err != nil {
t.Fatalf("newDecoder error %s", err)
}
decoded, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("ReadAll(lr) error %s", err)
}
t.Logf("decoded: %s", decoded)
if len(orig) != len(decoded) {
t.Fatalf("length decoded is %d; want %d", len(decoded),
len(orig))
}
if !bytes.Equal(orig, decoded) {
t.Fatalf("decoded file differs from original")
}
}
func TestEncoderCycle1(t *testing.T) {
cycle(t, len(testString))
}
func TestEncoderCycle2(t *testing.T) {
buf := new(bytes.Buffer)
const txtlen = 50000
io.CopyN(buf, randtxt.NewReader(rand.NewSource(42)), txtlen)
txt := buf.String()
buf.Reset()
const dictCap = MinDictCap
m, err := newHashTable(dictCap, 4)
if err != nil {
t.Fatal(err)
}
encoderDict, err := newEncoderDict(dictCap, dictCap+1024, m)
if err != nil {
t.Fatal(err)
}
props := Properties{3, 0, 2}
if err := props.verify(); err != nil {
t.Fatalf("properties error %s", err)
}
state := newState(props)
lbw := &LimitedByteWriter{BW: buf, N: 100}
w, err := newEncoder(lbw, state, encoderDict, 0)
if err != nil {
t.Fatalf("NewEncoder error %s", err)
}
_, err = io.WriteString(w, txt)
if err != nil && err != ErrLimit {
t.Fatalf("WriteString error %s", err)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
n := w.Compressed()
txt = txt[:n]
decoderDict, err := newDecoderDict(dictCap)
if err != nil {
t.Fatalf("NewDecoderDict error %s", err)
}
state.Reset()
r, err := newDecoder(buf, state, decoderDict, n)
if err != nil {
t.Fatalf("NewDecoder error %s", err)
}
out := new(bytes.Buffer)
if _, err = io.Copy(out, r); err != nil {
t.Fatalf("decompress copy error %s", err)
}
got := out.String()
t.Logf("%s", got)
if len(got) != int(n) {
t.Fatalf("len(got) %d; want %d", len(got), n)
}
if got != txt {
t.Fatalf("got and txt differ")
}
}

View File

@ -1,47 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"fmt"
"testing"
)
func TestHashTable(t *testing.T) {
ht, err := newHashTable(32, 2)
if err != nil {
t.Fatalf("newHashTable: error %s", err)
}
// 01234567890123456
s := "abcabcdefghijklmn"
n, err := ht.Write([]byte(s))
if err != nil {
t.Fatalf("ht.Write: error %s", err)
}
if n != len(s) {
t.Fatalf("ht.Write returned %d; want %d", n, len(s))
}
tests := []struct {
s string
w string
}{
{"ab", "[3 0]"},
{"bc", "[4 1]"},
{"ca", "[2]"},
{"xx", "[]"},
{"gh", "[9]"},
{"mn", "[15]"},
}
distances := make([]int64, 20)
for _, c := range tests {
distances := distances[:20]
k := ht.Matches([]byte(c.s), distances)
distances = distances[:k]
o := fmt.Sprintf("%v", distances)
if o != c.w {
t.Errorf("%s: offsets %s; want %s", c.s, o, c.w)
}
}
}

View File

@ -1,153 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"fmt"
"testing"
)
func TestChunkTypeString(t *testing.T) {
tests := [...]struct {
c chunkType
s string
}{
{cEOS, "EOS"},
{cUD, "UD"},
{cU, "U"},
{cL, "L"},
{cLR, "LR"},
{cLRN, "LRN"},
{cLRND, "LRND"},
}
for _, c := range tests {
s := fmt.Sprintf("%v", c.c)
if s != c.s {
t.Errorf("got %s; want %s", s, c.s)
}
}
}
func TestHeaderChunkType(t *testing.T) {
tests := []struct {
h byte
c chunkType
}{
{h: 0, c: cEOS},
{h: 1, c: cUD},
{h: 2, c: cU},
{h: 1<<7 | 0x1f, c: cL},
{h: 1<<7 | 1<<5 | 0x1f, c: cLR},
{h: 1<<7 | 1<<6 | 0x1f, c: cLRN},
{h: 1<<7 | 1<<6 | 1<<5 | 0x1f, c: cLRND},
{h: 1<<7 | 1<<6 | 1<<5, c: cLRND},
}
if _, err := headerChunkType(3); err == nil {
t.Fatalf("headerChunkType(%d) got %v; want %v",
3, err, errHeaderByte)
}
for _, tc := range tests {
c, err := headerChunkType(tc.h)
if err != nil {
t.Fatalf("headerChunkType error %s", err)
}
if c != tc.c {
t.Errorf("got %s; want %s", c, tc.c)
}
}
}
func TestHeaderLen(t *testing.T) {
tests := []struct {
c chunkType
n int
}{
{cEOS, 1}, {cU, 3}, {cUD, 3}, {cL, 5}, {cLR, 5}, {cLRN, 6},
{cLRND, 6},
}
for _, tc := range tests {
n := headerLen(tc.c)
if n != tc.n {
t.Errorf("header length for %s %d; want %d",
tc.c, n, tc.n)
}
}
}
func chunkHeaderSamples(t *testing.T) []chunkHeader {
props := Properties{LC: 3, LP: 0, PB: 2}
headers := make([]chunkHeader, 0, 12)
for c := cEOS; c <= cLRND; c++ {
var h chunkHeader
h.ctype = c
if c >= cUD {
h.uncompressed = 0x0304
}
if c >= cL {
h.compressed = 0x0201
}
if c >= cLRN {
h.props = props
}
headers = append(headers, h)
}
return headers
}
func TestChunkHeaderMarshalling(t *testing.T) {
for _, h := range chunkHeaderSamples(t) {
data, err := h.MarshalBinary()
if err != nil {
t.Fatalf("MarshalBinary for %v error %s", h, err)
}
var g chunkHeader
if err = g.UnmarshalBinary(data); err != nil {
t.Fatalf("UnmarshalBinary error %s", err)
}
if g != h {
t.Fatalf("got %v; want %v", g, h)
}
}
}
func TestReadChunkHeader(t *testing.T) {
for _, h := range chunkHeaderSamples(t) {
data, err := h.MarshalBinary()
if err != nil {
t.Fatalf("MarshalBinary for %v error %s", h, err)
}
r := bytes.NewReader(data)
g, err := readChunkHeader(r)
if err != nil {
t.Fatalf("readChunkHeader for %v error %s", h, err)
}
if *g != h {
t.Fatalf("got %v; want %v", g, h)
}
}
}
func TestReadEOS(t *testing.T) {
var b [1]byte
r := bytes.NewReader(b[:])
h, err := readChunkHeader(r)
if err != nil {
t.Fatalf("readChunkHeader error %s", err)
}
if h.ctype != cEOS {
t.Errorf("ctype got %s; want %s", h.ctype, cEOS)
}
if h.compressed != 0 {
t.Errorf("compressed got %d; want %d", h.compressed, 0)
}
if h.uncompressed != 0 {
t.Errorf("uncompressed got %d; want %d", h.uncompressed, 0)
}
wantProps := Properties{}
if h.props != wantProps {
t.Errorf("props got %v; want %v", h.props, wantProps)
}
}

View File

@ -1,52 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import "testing"
func TestHeaderMarshalling(t *testing.T) {
tests := []header{
{properties: Properties{3, 0, 2}, dictCap: 8 * 1024 * 1024,
size: -1},
{properties: Properties{4, 3, 3}, dictCap: 4096,
size: 10},
}
for _, h := range tests {
data, err := h.marshalBinary()
if err != nil {
t.Fatalf("marshalBinary error %s", err)
}
var g header
if err = g.unmarshalBinary(data); err != nil {
t.Fatalf("unmarshalBinary error %s", err)
}
if h != g {
t.Errorf("got header %#v; want %#v", g, h)
}
}
}
func TestValidHeader(t *testing.T) {
tests := []header{
{properties: Properties{3, 0, 2}, dictCap: 8 * 1024 * 1024,
size: -1},
{properties: Properties{4, 3, 3}, dictCap: 4096,
size: 10},
}
for _, h := range tests {
data, err := h.marshalBinary()
if err != nil {
t.Fatalf("marshalBinary error %s", err)
}
if !ValidHeader(data) {
t.Errorf("ValidHeader returns false for header %v;"+
" want true", h)
}
}
const a = "1234567890123"
if ValidHeader([]byte(a)) {
t.Errorf("ValidHeader returns true for %s; want false", a)
}
}

View File

@ -1,312 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bufio"
"bytes"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"testing"
"testing/iotest"
)
func TestNewReader(t *testing.T) {
f, err := os.Open("examples/a.lzma")
if err != nil {
t.Fatalf("open examples/a.lzma: %s", err)
}
defer f.Close()
_, err = NewReader(bufio.NewReader(f))
if err != nil {
t.Fatalf("NewReader: %s", err)
}
}
const (
dirname = "examples"
origname = "a.txt"
)
func readOrigFile(t *testing.T) []byte {
orig, err := ioutil.ReadFile(filepath.Join(dirname, origname))
if err != nil {
t.Fatalf("ReadFile: %s", err)
}
return orig
}
func testDecodeFile(t *testing.T, filename string, orig []byte) {
pathname := filepath.Join(dirname, filename)
f, err := os.Open(pathname)
if err != nil {
t.Fatalf("Open(%q): %s", pathname, err)
}
defer func() {
if err = f.Close(); err != nil {
t.Fatalf("f.Close() error %s", err)
}
}()
t.Logf("file %s opened", filename)
l, err := NewReader(bufio.NewReader(f))
if err != nil {
t.Fatalf("NewReader: %s", err)
}
decoded, err := ioutil.ReadAll(l)
if err != nil {
t.Fatalf("ReadAll: %s", err)
}
t.Logf("%s", decoded)
if len(orig) != len(decoded) {
t.Fatalf("length decoded is %d; want %d",
len(decoded), len(orig))
}
if !bytes.Equal(orig, decoded) {
t.Fatalf("decoded file differs from original")
}
}
func TestReaderSimple(t *testing.T) {
// DebugOn(os.Stderr)
// defer DebugOff()
testDecodeFile(t, "a.lzma", readOrigFile(t))
}
func TestReaderAll(t *testing.T) {
dirname := "examples"
dir, err := os.Open(dirname)
if err != nil {
t.Fatalf("Open: %s", err)
}
defer func() {
if err := dir.Close(); err != nil {
t.Fatalf("dir.Close() error %s", err)
}
}()
all, err := dir.Readdirnames(0)
if err != nil {
t.Fatalf("Readdirnames: %s", err)
}
// now filter all files matching the pattern "a*.lzma"
files := make([]string, 0, len(all))
for _, fn := range all {
match, err := filepath.Match("a*.lzma", fn)
if err != nil {
t.Fatalf("Match: %s", err)
}
if match {
files = append(files, fn)
}
}
t.Log("files:", files)
orig := readOrigFile(t)
// actually test the files
for _, fn := range files {
testDecodeFile(t, fn, orig)
}
}
//
func Example_reader() {
f, err := os.Open("fox.lzma")
if err != nil {
log.Fatal(err)
}
// no need for defer; log.Fatal calls os.Exit(1), which doesn't run deferred functions
r, err := NewReader(bufio.NewReader(f))
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(os.Stdout, r)
if err != nil {
log.Fatal(err)
}
if err := f.Close(); err != nil {
log.Fatal(err)
}
// Output:
// The quick brown fox jumps over the lazy dog.
}
type wrapTest struct {
name string
wrap func(io.Reader) io.Reader
}
func (w *wrapTest) testFile(t *testing.T, filename string, orig []byte) {
pathname := filepath.Join(dirname, filename)
f, err := os.Open(pathname)
if err != nil {
t.Fatalf("Open(\"%s\"): %s", pathname, err)
}
defer func() {
if err := f.Close(); err != nil {
log.Fatal(err)
}
}()
t.Logf("%s file %s opened", w.name, filename)
l, err := NewReader(w.wrap(f))
if err != nil {
t.Fatalf("%s NewReader: %s", w.name, err)
}
decoded, err := ioutil.ReadAll(l)
if err != nil {
t.Fatalf("%s ReadAll: %s", w.name, err)
}
t.Logf("%s", decoded)
if len(orig) != len(decoded) {
t.Fatalf("%s length decoded is %d; want %d",
w.name, len(decoded), len(orig))
}
if !bytes.Equal(orig, decoded) {
t.Fatalf("%s decoded file differs from original", w.name)
}
}
func TestReaderWrap(t *testing.T) {
tests := [...]wrapTest{
{"DataErrReader", iotest.DataErrReader},
{"HalfReader", iotest.HalfReader},
{"OneByteReader", iotest.OneByteReader},
// TimeOutReader would require a buffer
}
orig := readOrigFile(t)
for _, tst := range tests {
tst.testFile(t, "a.lzma", orig)
}
}
func TestReaderBadFiles(t *testing.T) {
dirname := "examples"
dir, err := os.Open(dirname)
if err != nil {
t.Fatalf("Open: %s", err)
}
defer func() {
if err := dir.Close(); err != nil {
t.Fatalf("dir.Close() error %s", err)
}
}()
all, err := dir.Readdirnames(0)
if err != nil {
t.Fatalf("Readdirnames: %s", err)
}
// now filter all files matching the pattern "bad*.lzma"
files := make([]string, 0, len(all))
for _, fn := range all {
match, err := filepath.Match("bad*.lzma", fn)
if err != nil {
t.Fatalf("Match: %s", err)
}
if match {
files = append(files, fn)
}
}
t.Log("files:", files)
for _, filename := range files {
pathname := filepath.Join(dirname, filename)
f, err := os.Open(pathname)
if err != nil {
t.Fatalf("Open(\"%s\"): %s", pathname, err)
}
defer func(f *os.File) {
if err := f.Close(); err != nil {
t.Fatalf("f.Close() error %s", err)
}
}(f)
t.Logf("file %s opened", filename)
l, err := NewReader(f)
if err != nil {
t.Fatalf("NewReader: %s", err)
}
decoded, err := ioutil.ReadAll(l)
if err == nil {
t.Errorf("ReadAll for %s: no error", filename)
t.Logf("%s", decoded)
continue
}
t.Logf("%s: error %s", filename, err)
}
}
type repReader byte
func (r repReader) Read(p []byte) (n int, err error) {
for i := range p {
p[i] = byte(r)
}
return len(p), nil
}
func newRepReader(c byte, n int64) *io.LimitedReader {
return &io.LimitedReader{R: repReader(c), N: n}
}
func newCodeReader(r io.Reader) *io.PipeReader {
pr, pw := io.Pipe()
go func() {
bw := bufio.NewWriter(pw)
lw, err := NewWriter(bw)
if err != nil {
log.Fatalf("NewWriter error %s", err)
}
if _, err = io.Copy(lw, r); err != nil {
log.Fatalf("io.Copy error %s", err)
}
if err = lw.Close(); err != nil {
log.Fatalf("lw.Close error %s", err)
}
if err = bw.Flush(); err != nil {
log.Fatalf("bw.Flush() error %s", err)
}
if err = pw.CloseWithError(io.EOF); err != nil {
log.Fatalf("pw.CloseWithError(io.EOF) error %s", err)
}
}()
return pr
}
func TestReaderErrAgain(t *testing.T) {
lengths := []int64{0, 128, 1024, 4095, 4096, 4097, 8191, 8192, 8193}
buf := make([]byte, 128)
const c = 'A'
for _, n := range lengths {
t.Logf("n: %d", n)
pr := newCodeReader(newRepReader(c, n))
r, err := NewReader(pr)
if err != nil {
t.Fatalf("NewReader(pr) error %s", err)
}
k := int64(0)
for {
m, err := r.Read(buf)
k += int64(m)
if err == io.EOF {
break
}
if err != nil {
t.Errorf("r.Read(buf) error %s", err)
break
}
if m > len(buf) {
t.Fatalf("r.Read(buf) %d; want <= %d", m,
len(buf))
}
for i, b := range buf[:m] {
if b != c {
t.Fatalf("buf[%d]=%c; want %c", i, b,
c)
}
}
}
if k != n {
t.Errorf("Read %d bytes; want %d", k, n)
}
}
}

View File

@ -1,109 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bytes"
"io"
"math/rand"
"strings"
"testing"
"github.com/ulikunitz/xz/internal/randtxt"
)
func TestWriter2(t *testing.T) {
var buf bytes.Buffer
w, err := Writer2Config{DictCap: 4096}.NewWriter2(&buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
n, err := w.Write([]byte{'a'})
if err != nil {
t.Fatalf("w.Write([]byte{'a'}) error %s", err)
}
if n != 1 {
t.Fatalf("w.Write([]byte{'a'}) returned %d; want %d", n, 1)
}
if err = w.Flush(); err != nil {
t.Fatalf("w.Flush() error %s", err)
}
// check that double Flush doesn't write another chunk
if err = w.Flush(); err != nil {
t.Fatalf("w.Flush() error %s", err)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close() error %s", err)
}
p := buf.Bytes()
want := []byte{1, 0, 0, 'a', 0}
if !bytes.Equal(p, want) {
t.Fatalf("bytes written %#v; want %#v", p, want)
}
}
func TestCycle1(t *testing.T) {
var buf bytes.Buffer
w, err := Writer2Config{DictCap: 4096}.NewWriter2(&buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
n, err := w.Write([]byte{'a'})
if err != nil {
t.Fatalf("w.Write([]byte{'a'}) error %s", err)
}
if n != 1 {
t.Fatalf("w.Write([]byte{'a'}) returned %d; want %d", n, 1)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close() error %s", err)
}
r, err := Reader2Config{DictCap: 4096}.NewReader2(&buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
p := make([]byte, 3)
n, err = r.Read(p)
t.Logf("n %d error %v", n, err)
}
func TestCycle2(t *testing.T) {
buf := new(bytes.Buffer)
w, err := Writer2Config{DictCap: 4096}.NewWriter2(buf)
if err != nil {
t.Fatalf("NewWriter error %s", err)
}
// const txtlen = 1024
const txtlen = 2100000
io.CopyN(buf, randtxt.NewReader(rand.NewSource(42)), txtlen)
txt := buf.String()
buf.Reset()
n, err := io.Copy(w, strings.NewReader(txt))
if err != nil {
t.Fatalf("Compressing copy error %s", err)
}
if n != txtlen {
t.Fatalf("Compressing data length %d; want %d", n, txtlen)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("buf.Len() %d", buf.Len())
r, err := Reader2Config{DictCap: 4096}.NewReader2(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
out := new(bytes.Buffer)
n, err = io.Copy(out, r)
if err != nil {
t.Fatalf("Decompressing copy error %s after %d bytes", err, n)
}
if n != txtlen {
t.Fatalf("Decompression data length %d; want %d", n, txtlen)
}
if txt != out.String() {
t.Fatal("decompressed data differs from original")
}
}

View File

@ -1,249 +0,0 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzma
import (
"bufio"
"bytes"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"testing"
"github.com/ulikunitz/xz/internal/randtxt"
)
func TestWriterCycle(t *testing.T) {
orig := readOrigFile(t)
buf := new(bytes.Buffer)
w, err := NewWriter(buf)
if err != nil {
t.Fatalf("NewWriter: error %s", err)
}
n, err := w.Write(orig)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if n != len(orig) {
t.Fatalf("w.Write returned %d; want %d", n, len(orig))
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("buf.Len() %d len(orig) %d", buf.Len(), len(orig))
if buf.Len() > len(orig) {
t.Errorf("buf.Len()=%d bigger then len(orig)=%d", buf.Len(),
len(orig))
}
lr, err := NewReader(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
decoded, err := ioutil.ReadAll(lr)
if err != nil {
t.Fatalf("ReadAll(lr) error %s", err)
}
t.Logf("%s", decoded)
if len(orig) != len(decoded) {
t.Fatalf("length decoded is %d; want %d", len(decoded),
len(orig))
}
if !bytes.Equal(orig, decoded) {
t.Fatalf("decoded file differs from original")
}
}
func TestWriterLongData(t *testing.T) {
const (
seed = 49
size = 82237
)
r := io.LimitReader(randtxt.NewReader(rand.NewSource(seed)), size)
txt, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("ReadAll error %s", err)
}
if len(txt) != size {
t.Fatalf("ReadAll read %d bytes; want %d", len(txt), size)
}
buf := &bytes.Buffer{}
w, err := WriterConfig{DictCap: 0x4000}.NewWriter(buf)
if err != nil {
t.Fatalf("WriterConfig.NewWriter error %s", err)
}
n, err := w.Write(txt)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if n != len(txt) {
t.Fatalf("w.Write wrote %d bytes; want %d", n, size)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("compressed length %d", buf.Len())
lr, err := NewReader(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
txtRead, err := ioutil.ReadAll(lr)
if err != nil {
t.Fatalf("ReadAll(lr) error %s", err)
}
if len(txtRead) != size {
t.Fatalf("ReadAll(lr) returned %d bytes; want %d",
len(txtRead), size)
}
if !bytes.Equal(txtRead, txt) {
t.Fatal("ReadAll(lr) returned txt differs from origin")
}
}
func TestWriter_Size(t *testing.T) {
buf := new(bytes.Buffer)
w, err := WriterConfig{Size: 10, EOSMarker: true}.NewWriter(buf)
if err != nil {
t.Fatalf("WriterConfig.NewWriter error %s", err)
}
q := []byte{'a'}
for i := 0; i < 9; i++ {
n, err := w.Write(q)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if n != 1 {
t.Fatalf("w.Write returned %d; want %d", n, 1)
}
q[0]++
}
if err := w.Close(); err != errSize {
t.Fatalf("expected errSize, but got %v", err)
}
n, err := w.Write(q)
if err != nil {
t.Fatalf("w.Write error %s", err)
}
if n != 1 {
t.Fatalf("w.Write returned %d; want %d", n, 1)
}
if err = w.Close(); err != nil {
t.Fatalf("w.Close error %s", err)
}
t.Logf("compressed size %d", buf.Len())
r, err := NewReader(buf)
if err != nil {
t.Fatalf("NewReader error %s", err)
}
b, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("ReadAll error %s", err)
}
s := string(b)
want := "abcdefghij"
if s != want {
t.Fatalf("read %q, want %q", s, want)
}
}
// The example uses the buffered reader and writer from package bufio.
func Example_writer() {
pr, pw := io.Pipe()
go func() {
bw := bufio.NewWriter(pw)
w, err := NewWriter(bw)
if err != nil {
log.Fatal(err)
}
input := []byte("The quick brown fox jumps over the lazy dog.")
if _, err = w.Write(input); err != nil {
log.Fatal(err)
}
if err = w.Close(); err != nil {
log.Fatal(err)
}
// reader waits for the data
if err = bw.Flush(); err != nil {
log.Fatal(err)
}
}()
r, err := NewReader(pr)
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(os.Stdout, r)
if err != nil {
log.Fatal(err)
}
// Output:
// The quick brown fox jumps over the lazy dog.
}
func BenchmarkReader(b *testing.B) {
const (
seed = 49
size = 50000
)
r := io.LimitReader(randtxt.NewReader(rand.NewSource(seed)), size)
txt, err := ioutil.ReadAll(r)
if err != nil {
b.Fatalf("ReadAll error %s", err)
}
buf := &bytes.Buffer{}
w, err := WriterConfig{DictCap: 0x4000}.NewWriter(buf)
if err != nil {
b.Fatalf("WriterConfig{}.NewWriter error %s", err)
}
if _, err = w.Write(txt); err != nil {
b.Fatalf("w.Write error %s", err)
}
if err = w.Close(); err != nil {
b.Fatalf("w.Close error %s", err)
}
data, err := ioutil.ReadAll(buf)
if err != nil {
b.Fatalf("ReadAll error %s", err)
}
b.SetBytes(int64(len(txt)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
lr, err := NewReader(bytes.NewReader(data))
if err != nil {
b.Fatalf("NewReader error %s", err)
}
if _, err = ioutil.ReadAll(lr); err != nil {
b.Fatalf("ReadAll(lr) error %s", err)
}
}
}
func BenchmarkWriter(b *testing.B) {
const (
seed = 49
size = 50000
)
r := io.LimitReader(randtxt.NewReader(rand.NewSource(seed)), size)
txt, err := ioutil.ReadAll(r)
if err != nil {
b.Fatalf("ReadAll error %s", err)
}
buf := &bytes.Buffer{}
b.SetBytes(int64(len(txt)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
buf.Reset()
w, err := WriterConfig{DictCap: 0x4000}.NewWriter(buf)
if err != nil {
b.Fatalf("NewWriter error %s", err)
}
if _, err = w.Write(txt); err != nil {
b.Fatalf("w.Write error %s", err)
}
if err = w.Close(); err != nil {
b.Fatalf("w.Close error %s", err)
}
}
}