vendor: Mega update all dependencies

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4080
This commit is contained in:
Jakob Borg
2017-04-05 14:34:41 +00:00
parent 49c1527724
commit a1bcc15458
1354 changed files with 55066 additions and 797850 deletions

View File

@@ -1,147 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"fmt"
"testing"
"testing/quick"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// TestBatchHeader round-trips quickcheck-generated (seq, length) pairs
// through encodeBatchHeader/decodeBatchHeader and verifies both fields
// survive unchanged.
func TestBatchHeader(t *testing.T) {
	roundTrip := func(seq uint64, length uint32) bool {
		hdr := encodeBatchHeader(nil, seq, int(length))
		gotSeq, gotLength, err := decodeBatchHeader(hdr)
		if err != nil {
			return false
		}
		return gotSeq == seq && gotLength == int(length)
	}
	cfg := &quick.Config{Rand: testutil.NewRand()}
	if err := quick.Check(roundTrip, cfg); err != nil {
		t.Error(err)
	}
}
// batchKV is the reference record of one batch operation: the key type
// (put or delete), the key, and — for puts — the value.
type batchKV struct {
	kt   keyType
	k, v []byte
}
// TestBatch drives a Batch with quickcheck-generated put/delete
// operations and cross-checks several views of the same data against
// the kvs reference slice:
//   - batch itself, built incrementally;
//   - abatch, built by appending rbatch chunks every 1000 ops;
//   - a fresh batch loaded from batch.Dump() (Load round-trip);
//   - a fresh batch filled via batch.Replay every 10000 ops.
// Each view must agree on Len, internalLen and per-entry contents.
func TestBatch(t *testing.T) {
	var (
		kvs         []batchKV // reference copy of every operation applied
		internalLen int       // expected Batch.internalLen
	)
	batch := new(Batch)
	rbatch := new(Batch)
	abatch := new(Batch)
	// testBatch is the replay callback: entry i must match kvs[i].
	testBatch := func(i int, kt keyType, k, v []byte) error {
		kv := kvs[i]
		if kv.kt != kt {
			return fmt.Errorf("invalid key type, index=%d: %d vs %d", i, kv.kt, kt)
		}
		if !bytes.Equal(kv.k, k) {
			return fmt.Errorf("invalid key, index=%d", i)
		}
		if !bytes.Equal(kv.v, v) {
			return fmt.Errorf("invalid value, index=%d", i)
		}
		return nil
	}
	f := func(ktr uint8, k, v []byte) bool {
		kt := keyType(ktr % 2)
		if kt == keyTypeVal {
			batch.Put(k, v)
			rbatch.Put(k, v)
			kvs = append(kvs, batchKV{kt: kt, k: k, v: v})
			internalLen += len(k) + len(v) + 8
		} else {
			batch.Delete(k)
			rbatch.Delete(k)
			kvs = append(kvs, batchKV{kt: kt, k: k})
			internalLen += len(k) + 8
		}
		if batch.Len() != len(kvs) {
			t.Logf("batch.Len: %d vs %d", len(kvs), batch.Len())
			return false
		}
		if batch.internalLen != internalLen {
			// Fixed: this log line previously mislabeled the value as
			// "abatch.internalLen" although it checks batch.internalLen.
			t.Logf("batch.internalLen: %d vs %d", internalLen, batch.internalLen)
			return false
		}
		if len(kvs)%1000 == 0 {
			if err := batch.replayInternal(testBatch); err != nil {
				t.Logf("batch.replayInternal: %v", err)
				return false
			}

			abatch.append(rbatch)
			rbatch.Reset()
			if abatch.Len() != len(kvs) {
				t.Logf("abatch.Len: %d vs %d", len(kvs), abatch.Len())
				return false
			}
			if abatch.internalLen != internalLen {
				t.Logf("abatch.internalLen: %d vs %d", internalLen, abatch.internalLen)
				return false
			}
			if err := abatch.replayInternal(testBatch); err != nil {
				t.Logf("abatch.replayInternal: %v", err)
				return false
			}

			// Dump/Load round-trip must reproduce the batch exactly.
			nbatch := new(Batch)
			if err := nbatch.Load(batch.Dump()); err != nil {
				t.Logf("nbatch.Load: %v", err)
				return false
			}
			if nbatch.Len() != len(kvs) {
				t.Logf("nbatch.Len: %d vs %d", len(kvs), nbatch.Len())
				return false
			}
			if nbatch.internalLen != internalLen {
				t.Logf("nbatch.internalLen: %d vs %d", internalLen, nbatch.internalLen)
				return false
			}
			if err := nbatch.replayInternal(testBatch); err != nil {
				t.Logf("nbatch.replayInternal: %v", err)
				return false
			}
		}
		if len(kvs)%10000 == 0 {
			// Replay into a fresh batch must also reproduce it exactly.
			nbatch := new(Batch)
			if err := batch.Replay(nbatch); err != nil {
				t.Logf("batch.Replay: %v", err)
				return false
			}
			if nbatch.Len() != len(kvs) {
				t.Logf("nbatch.Len: %d vs %d", len(kvs), nbatch.Len())
				return false
			}
			if nbatch.internalLen != internalLen {
				t.Logf("nbatch.internalLen: %d vs %d", internalLen, nbatch.internalLen)
				return false
			}
			if err := nbatch.replayInternal(testBatch); err != nil {
				t.Logf("nbatch.replayInternal: %v", err)
				return false
			}
		}
		return true
	}
	config := &quick.Config{
		MaxCount: 40000,
		Rand:     testutil.NewRand(),
	}
	if err := quick.Check(f, config); err != nil {
		t.Error(err)
	}
	t.Logf("length=%d internalLen=%d", len(kvs), internalLen)
}

View File

@@ -1,507 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"fmt"
"math/rand"
"os"
"path/filepath"
"runtime"
"sync/atomic"
"testing"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
)
func randomString(r *rand.Rand, n int) []byte {
b := new(bytes.Buffer)
for i := 0; i < n; i++ {
b.WriteByte(' ' + byte(r.Intn(95)))
}
return b.Bytes()
}
// compressibleStr returns n bytes that compress to roughly frac of
// their size: a random chunk of frac*n bytes repeated until n bytes
// are filled.
func compressibleStr(r *rand.Rand, frac float32, n int) []byte {
	chunk := randomString(r, int(float32(n)*frac))
	out := make([]byte, 0, n+len(chunk))
	for len(out) < n {
		out = append(out, chunk...)
	}
	return out[:n]
}
// valueGen hands out fixed-size slices from a large pre-generated
// buffer of compressible data, wrapping around when exhausted.
type valueGen struct {
	src []byte // ~1 MiB of pre-generated data
	pos int    // read cursor into src
}

// newValueGen builds a generator whose data compresses to roughly frac
// of its size. The RNG seed is fixed (301) so runs are repeatable.
func newValueGen(frac float32) *valueGen {
	const target = 1048576
	r := rand.New(rand.NewSource(301))
	src := make([]byte, 0, target+100)
	for len(src) < target {
		src = append(src, compressibleStr(r, frac, 100)...)
	}
	return &valueGen{src: src}
}

// get returns the next n bytes, restarting at the beginning of the
// buffer when fewer than n bytes remain.
func (v *valueGen) get(n int) []byte {
	if v.pos+n > len(v.src) {
		v.pos = 0
	}
	start := v.pos
	v.pos = start + n
	return v.src[start:v.pos]
}
// benchDB is the on-disk location for benchmark databases; the UID is
// embedded so different users on one machine do not collide.
var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid()))

// dbBench bundles everything a DB benchmark needs: the open database,
// its storage and option sets, plus pre-generated key/value data.
type dbBench struct {
	b    *testing.B
	stor storage.Storage
	db   *DB

	o  *opt.Options
	ro *opt.ReadOptions
	wo *opt.WriteOptions

	keys, values [][]byte
}
// openDBBench removes any leftover benchmark database, opens a fresh
// file-backed store at benchDB and returns a ready-to-use harness.
// Compression is disabled when noCompress is set.
func openDBBench(b *testing.B, noCompress bool) *dbBench {
	_, err := os.Stat(benchDB)
	if err == nil {
		// A previous run left a database behind; start clean.
		err = os.RemoveAll(benchDB)
		if err != nil {
			b.Fatal("cannot remove old db: ", err)
		}
	}

	p := &dbBench{
		b:  b,
		o:  &opt.Options{},
		ro: &opt.ReadOptions{},
		wo: &opt.WriteOptions{},
	}
	p.stor, err = storage.OpenFile(benchDB, false)
	if err != nil {
		b.Fatal("cannot open stor: ", err)
	}
	if noCompress {
		p.o.Compression = opt.NoCompression
	}

	p.db, err = Open(p.stor, p.o)
	if err != nil {
		b.Fatal("cannot open db: ", err)
	}

	return p
}
// reopen closes and reopens the database with the same storage and
// options, forcing subsequent reads to come from table files.
func (p *dbBench) reopen() {
	p.db.Close()
	var err error
	p.db, err = Open(p.stor, p.o)
	if err != nil {
		p.b.Fatal("Reopen: got error: ", err)
	}
}
// populate fills p.keys with n zero-padded sequential 16-byte keys and
// p.values with n 100-byte compressible values.
func (p *dbBench) populate(n int) {
	gen := newValueGen(0.5)
	keys := make([][]byte, n)
	values := make([][]byte, n)
	for i := 0; i < n; i++ {
		keys[i] = []byte(fmt.Sprintf("%016d", i))
		values[i] = gen.get(100)
	}
	p.keys, p.values = keys, values
}
// randomize shuffles the key/value pairs in place by performing
// 2*len(p.keys) random pair swaps, keeping keys[i] paired with
// values[i]. Both RNG seeds are fixed, so the shuffle is deterministic.
func (p *dbBench) randomize() {
	m := len(p.keys)
	r1 := rand.New(rand.NewSource(0xdeadbeef))
	r2 := rand.New(rand.NewSource(0xbeefface))
	for n := 2 * m; n > 0; n-- {
		i, j := r1.Int()%m, r2.Int()%m
		if i == j {
			continue
		}
		p.keys[i], p.keys[j] = p.keys[j], p.keys[i]
		p.values[i], p.values[j] = p.values[j], p.values[i]
	}
}
// writes stores all populated key/value pairs using db.Write, grouped
// into batches of perBatch entries. Only the Write calls are timed;
// batch construction happens before the timer starts.
func (p *dbBench) writes(perBatch int) {
	b := p.b
	db := p.db

	n := len(p.keys)
	m := n / perBatch
	if n%perBatch > 0 {
		m++
	}
	batches := make([]Batch, m)
	j := 0
	for i := range batches {
		first := true
		// Fill batch i with up to perBatch consecutive pairs.
		for ; j < n && ((j+1)%perBatch != 0 || first); j++ {
			first = false
			batches[i].Put(p.keys[j], p.values[j])
		}
	}
	runtime.GC()

	b.ResetTimer()
	b.StartTimer()
	for i := range batches {
		err := db.Write(&(batches[i]), p.wo)
		if err != nil {
			b.Fatal("write failed: ", err)
		}
	}
	b.StopTimer()
	// 116 bytes per op: 16-byte key + 100-byte value (see populate).
	b.SetBytes(116)
}
// gc drops the generated key/value data and runs the garbage collector
// so retained benchmark fixtures do not skew memory-sensitive timings.
func (p *dbBench) gc() {
	p.keys, p.values = nil, nil
	runtime.GC()
}

// puts writes every key/value pair individually with db.Put, timing
// the loop and reporting 116 bytes (16-byte key + 100-byte value) per op.
func (p *dbBench) puts() {
	b := p.b
	db := p.db

	b.ResetTimer()
	b.StartTimer()
	for i := range p.keys {
		err := db.Put(p.keys[i], p.values[i], p.wo)
		if err != nil {
			b.Fatal("put failed: ", err)
		}
	}
	b.StopTimer()
	b.SetBytes(116)
}
// fill loads all key/value pairs into the database in untimed batches
// of 10000, reusing one Batch between writes. Used to prepare read
// benchmarks.
func (p *dbBench) fill() {
	b := p.b
	db := p.db

	perBatch := 10000
	batch := new(Batch)
	for i, n := 0, len(p.keys); i < n; {
		first := true
		for ; i < n && ((i+1)%perBatch != 0 || first); i++ {
			first = false
			batch.Put(p.keys[i], p.values[i])
		}
		err := db.Write(batch, p.wo)
		if err != nil {
			b.Fatal("write failed: ", err)
		}
		batch.Reset()
	}
}
// gets fetches every key with db.Get, timing the loop.
func (p *dbBench) gets() {
	b := p.b
	db := p.db

	b.ResetTimer()
	for i := range p.keys {
		_, err := db.Get(p.keys[i], p.ro)
		if err != nil {
			b.Error("got error: ", err)
		}
	}
	b.StopTimer()
}

// seeks positions a single iterator at every key with Seek, timing the
// loop; a Seek that finds nothing is reported as a failure.
func (p *dbBench) seeks() {
	b := p.b

	iter := p.newIter()
	defer iter.Release()
	b.ResetTimer()
	for i := range p.keys {
		if !iter.Seek(p.keys[i]) {
			b.Error("value not found for: ", string(p.keys[i]))
		}
	}
	b.StopTimer()
}
// newIter returns a full-range iterator, failing the benchmark if the
// iterator was created in an error state.
func (p *dbBench) newIter() iterator.Iterator {
	iter := p.db.NewIterator(nil, p.ro)
	err := iter.Error()
	if err != nil {
		p.b.Fatal("cannot create iterator: ", err)
	}
	return iter
}

// close logs block-pool statistics, shuts down the database and its
// storage, removes the on-disk files and releases fixture memory.
func (p *dbBench) close() {
	if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
		p.b.Log("Block pool stats: ", bp)
	}
	p.db.Close()
	p.stor.Close()
	os.RemoveAll(benchDB)
	p.db = nil
	p.keys = nil
	p.values = nil
	runtime.GC()
}
// BenchmarkDBWrite: one Put per Write (batch size 1), compressed.
func BenchmarkDBWrite(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1)
	p.close()
}

// BenchmarkDBWriteBatch: 1000 Puts per Write, compressed.
func BenchmarkDBWriteBatch(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1000)
	p.close()
}

// BenchmarkDBWriteUncompressed: batch size 1, compression disabled.
func BenchmarkDBWriteUncompressed(b *testing.B) {
	p := openDBBench(b, true)
	p.populate(b.N)
	p.writes(1)
	p.close()
}

// BenchmarkDBWriteBatchUncompressed: batch size 1000, compression disabled.
func BenchmarkDBWriteBatchUncompressed(b *testing.B) {
	p := openDBBench(b, true)
	p.populate(b.N)
	p.writes(1000)
	p.close()
}

// BenchmarkDBWriteRandom: keys shuffled before writing, batch size 1.
func BenchmarkDBWriteRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.randomize()
	p.writes(1)
	p.close()
}

// BenchmarkDBWriteRandomSync: like WriteRandom but with synchronous writes.
func BenchmarkDBWriteRandomSync(b *testing.B) {
	p := openDBBench(b, false)
	p.wo.Sync = true
	p.populate(b.N)
	p.writes(1)
	p.close()
}

// BenchmarkDBOverwrite: writes every key twice in sequential order.
func BenchmarkDBOverwrite(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1)
	p.writes(1)
	p.close()
}

// BenchmarkDBOverwriteRandom: second pass rewrites keys in shuffled order.
func BenchmarkDBOverwriteRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1)
	p.randomize()
	p.writes(1)
	p.close()
}
// BenchmarkDBPut: individual db.Put calls (no batching).
func BenchmarkDBPut(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.puts()
	p.close()
}

// BenchmarkDBRead: forward full-database iteration; fixtures are freed
// (p.gc) before timing so only the DB's own memory is in play.
func BenchmarkDBRead(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

// BenchmarkDBReadGC: like DBRead but keeps the fixture slices alive,
// measuring iteration with extra GC pressure.
func BenchmarkDBReadGC(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

// BenchmarkDBReadUncompressed: forward iteration with compression disabled.
func BenchmarkDBReadUncompressed(b *testing.B) {
	p := openDBBench(b, true)
	p.populate(b.N)
	p.fill()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

// BenchmarkDBReadTable: reopens the DB first so reads come from table
// files rather than the memdb.
func BenchmarkDBReadTable(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.reopen()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}
// BenchmarkDBReadReverse: backward full-database iteration (Last + Prev).
func BenchmarkDBReadReverse(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	iter.Last()
	for iter.Prev() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

// BenchmarkDBReadReverseTable: backward iteration after reopening, so
// reads come from table files.
func BenchmarkDBReadReverseTable(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.reopen()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	iter.Last()
	for iter.Prev() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

// BenchmarkDBSeek: iterator Seek over all keys in sequential order.
func BenchmarkDBSeek(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.seeks()
	p.close()
}

// BenchmarkDBSeekRandom: iterator Seek over all keys in shuffled order.
func BenchmarkDBSeekRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.randomize()
	p.seeks()
	p.close()
}

// BenchmarkDBGet: point lookups over all keys in sequential order.
func BenchmarkDBGet(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gets()
	p.close()
}

// BenchmarkDBGetRandom: point lookups over all keys in shuffled order.
func BenchmarkDBGetRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.randomize()
	p.gets()
	p.close()
}
// BenchmarkDBReadConcurrent: parallel forward iteration, one iterator
// per worker goroutine.
func BenchmarkDBReadConcurrent(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		for pb.Next() && iter.Next() {
		}
	})
}

// BenchmarkDBReadConcurrent2: parallel iteration with workers split
// between forward and backward directions via an atomic counter.
func BenchmarkDBReadConcurrent2(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	var dir uint32
	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		if atomic.AddUint32(&dir, 1)%2 == 0 {
			for pb.Next() && iter.Next() {
			}
		} else {
			if pb.Next() && iter.Last() {
				for pb.Next() && iter.Prev() {
				}
			}
		}
	})
}

View File

@@ -1,29 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cache
import (
"math/rand"
"testing"
"time"
)
// BenchmarkLRUCache measures concurrent Get throughput on an LRU cache
// of capacity 10000 using random keys in [0, 1e6); with far more keys
// than capacity, most lookups invoke the setter function.
func BenchmarkLRUCache(b *testing.B) {
	c := NewCache(NewLRU(10000))

	b.SetParallelism(10)
	b.RunParallel(func(pb *testing.PB) {
		// Per-goroutine RNG; math/rand sources are not safe for
		// concurrent use.
		r := rand.New(rand.NewSource(time.Now().UnixNano()))

		for pb.Next() {
			key := uint64(r.Intn(1000000))
			c.Get(0, key, func() (int, Value) {
				return 1, key
			}).Release()
		}
	})
}

View File

@@ -1,563 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cache
import (
"math/rand"
"runtime"
"sync"
"sync/atomic"
"testing"
"time"
"unsafe"
)
// int32o is an atomic reference counter used by the cache tests to
// verify that each object is acquired and released exactly once.
type int32o int32

// acquire increments the counter and panics unless it transitions
// exactly from 0 to 1.
func (o *int32o) acquire() {
	if n := atomic.AddInt32((*int32)(o), 1); n != 1 {
		panic("BUG: invalid ref")
	}
}

// Release decrements the counter and panics unless it returns exactly
// to 0.
func (o *int32o) Release() {
	if n := atomic.AddInt32((*int32)(o), -1); n != 0 {
		panic("BUG: invalid ref")
	}
}
// releaserFunc wraps a cache value with an optional callback that runs
// when the cache releases the value.
type releaserFunc struct {
	fn    func()
	value Value
}

// Release invokes the wrapped callback, if any.
func (r releaserFunc) Release() {
	if r.fn != nil {
		r.fn()
	}
}

// set inserts (ns, key) -> value with the given charge and returns the
// resulting handle. When relf is non-nil the value is wrapped in a
// releaserFunc so relf fires on release.
func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
	return c.Get(ns, key, func() (int, Value) {
		if relf != nil {
			return charge, releaserFunc{relf, value}
		}
		return charge, value
	})
}
// cacheMapTestParams sizes one TestCacheMap namespace: object count,
// handle-slot count, concurrent goroutines, and passes per object.
type cacheMapTestParams struct {
	nobjects, nhandles, concurrent, repeat int
}
// TestCacheMap hammers the cache from many goroutines per namespace,
// each repeatedly Get-ting random keys while helper goroutines release
// randomly parked handles and churn an unrelated namespace. Afterwards
// every int32o reference counter must be back at zero.
//
// Fix: the worker goroutines previously called t.Fatalf, but
// FailNow/Fatal must only be called from the goroutine running the
// test function (see testing package docs); they now use t.Errorf and
// return.
func TestCacheMap(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	var params []cacheMapTestParams
	if testing.Short() {
		params = []cacheMapTestParams{
			{1000, 100, 20, 3},
			{10000, 300, 50, 10},
		}
	} else {
		params = []cacheMapTestParams{
			{10000, 400, 50, 3},
			{100000, 1000, 100, 10},
		}
	}

	var (
		objects [][]int32o
		handles [][]unsafe.Pointer
	)
	for _, x := range params {
		objects = append(objects, make([]int32o, x.nobjects))
		handles = append(handles, make([]unsafe.Pointer, x.nhandles))
	}

	c := NewCache(nil)

	wg := new(sync.WaitGroup)
	var done int32

	for ns, x := range params {
		for i := 0; i < x.concurrent; i++ {
			wg.Add(1)
			go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
				defer wg.Done()
				r := rand.New(rand.NewSource(time.Now().UnixNano()))

				for j := len(objects) * repeat; j >= 0; j-- {
					key := uint64(r.Intn(len(objects)))
					h := c.Get(uint64(ns), key, func() (int, Value) {
						o := &objects[key]
						o.acquire()
						return 1, o
					})
					if v := h.Value().(*int32o); v != &objects[key] {
						t.Errorf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
						return
					}
					if objects[key] != 1 {
						t.Errorf("#%d invalid object %d: %d", ns, key, objects[key])
						return
					}
					// Park the handle in a random slot, or release it
					// immediately if the slot is taken.
					if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
						h.Release()
					}
				}
			}(ns, i, x.repeat, objects[ns], handles[ns])
		}

		// Releaser goroutine: drains parked handles until shutdown.
		go func(handles []unsafe.Pointer) {
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			for atomic.LoadInt32(&done) == 0 {
				i := r.Intn(len(handles))
				h := (*Handle)(atomic.LoadPointer(&handles[i]))
				if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
					h.Release()
				}
				time.Sleep(time.Millisecond)
			}
		}(handles[ns])
	}

	// Background churn on an unrelated namespace to add contention.
	go func() {
		handles := make([]*Handle, 100000)
		for atomic.LoadInt32(&done) == 0 {
			for i := range handles {
				handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
					return 1, 1
				})
			}
			for _, h := range handles {
				h.Release()
			}
		}
	}()

	wg.Wait()

	atomic.StoreInt32(&done, 1)

	// Release any handles still parked by the workers.
	for _, handles0 := range handles {
		for i := range handles0 {
			h := (*Handle)(atomic.LoadPointer(&handles0[i]))
			if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
				h.Release()
			}
		}
	}

	// Every reference counter must be balanced.
	for ns, objects0 := range objects {
		for i, o := range objects0 {
			if o != 0 {
				t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
			}
		}
	}
}
// TestCacheMap_NodesAndSize verifies the Nodes and Size counters after
// inserting four entries with charges 1+2+3+1 = 7 across three
// namespaces.
func TestCacheMap_NodesAndSize(t *testing.T) {
	c := NewCache(nil)
	if c.Nodes() != 0 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
	}
	if c.Size() != 0 {
		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
	}
	set(c, 0, 1, 1, 1, nil)
	set(c, 0, 2, 2, 2, nil)
	set(c, 1, 1, 3, 3, nil)
	set(c, 2, 1, 4, 1, nil)
	if c.Nodes() != 4 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
	}
	if c.Size() != 7 {
		// Fixed: the message previously reported want=4 although the
		// condition asserts a total size of 7.
		t.Errorf("invalid size counter: want=%d got=%d", 7, c.Size())
	}
}
// TestLRUCache_Capacity checks that inserting past the LRU capacity
// evicts old entries (8 inserts of total charge 13 into capacity 10
// leave 7 nodes / size 10) and that SetCapacity triggers further
// eviction.
func TestLRUCache_Capacity(t *testing.T) {
	c := NewCache(NewLRU(10))
	if c.Capacity() != 10 {
		t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
	}
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 2, nil).Release()
	set(c, 1, 1, 3, 3, nil).Release()
	set(c, 2, 1, 4, 1, nil).Release()
	set(c, 2, 2, 5, 1, nil).Release()
	set(c, 2, 3, 6, 1, nil).Release()
	set(c, 2, 4, 7, 1, nil).Release()
	set(c, 2, 5, 8, 1, nil).Release()
	if c.Nodes() != 7 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
	}
	if c.Size() != 10 {
		t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
	}
	// Shrinking capacity must evict until the size fits again.
	c.SetCapacity(9)
	if c.Capacity() != 9 {
		t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
	}
	if c.Nodes() != 6 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
	}
	if c.Size() != 8 {
		t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
	}
}

// TestCacheMap_NilValue checks that a setter returning a nil value
// yields a nil handle and leaves the cache counters untouched.
func TestCacheMap_NilValue(t *testing.T) {
	c := NewCache(NewLRU(10))
	h := c.Get(0, 0, func() (size int, value Value) {
		return 1, nil
	})
	if h != nil {
		t.Error("cache handle is non-nil")
	}
	if c.Nodes() != 0 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
	}
	if c.Size() != 0 {
		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
	}
}
// TestLRUCache_GetLatency runs slow setters (each sleeps `delay`) on 30
// goroutines while 3 reader goroutines time plain Gets, then asserts
// the readers' average latency stays below delay/3 — i.e. a slow
// setter on one key must not block lookups of other keys.
func TestLRUCache_GetLatency(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	const (
		concurrentSet = 30
		concurrentGet = 3
		duration      = 3 * time.Second
		delay         = 3 * time.Millisecond
		maxkey        = 100000
	)

	var (
		set, getHit, getAll        int32
		getMaxLatency, getDuration int64
	)

	c := NewCache(NewLRU(5000))
	wg := &sync.WaitGroup{}
	until := time.Now().Add(duration)
	// Setters: Get with a setter that sleeps before returning.
	for i := 0; i < concurrentSet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for time.Now().Before(until) {
				c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
					time.Sleep(delay)
					atomic.AddInt32(&set, 1)
					return 1, 1
				}).Release()
			}
		}(i)
	}
	// Readers: measure the latency of setter-less Gets.
	for i := 0; i < concurrentGet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for {
				mark := time.Now()
				if mark.Before(until) {
					h := c.Get(0, uint64(r.Intn(maxkey)), nil)
					latency := int64(time.Now().Sub(mark))
					m := atomic.LoadInt64(&getMaxLatency)
					if latency > m {
						atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
					}
					atomic.AddInt64(&getDuration, latency)
					if h != nil {
						atomic.AddInt32(&getHit, 1)
						h.Release()
					}
					atomic.AddInt32(&getAll, 1)
				} else {
					break
				}
			}
		}(i)
	}

	wg.Wait()
	getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
	t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
		set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)

	if getAvglatency > delay/3 {
		t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
	}
}
// TestLRUCache_HitMiss inserts the cases one by one, checking after
// each insert that exactly the already-inserted keys hit; then deletes
// them one by one, checking that exactly the not-yet-deleted keys hit.
// Finally it verifies every set finalizer ran.
func TestLRUCache_HitMiss(t *testing.T) {
	cases := []struct {
		key   uint64
		value string
	}{
		{1, "vvvvvvvvv"},
		{100, "v1"},
		{0, "v2"},
		{12346, "v3"},
		{777, "v4"},
		{999, "v5"},
		{7654, "v6"},
		{2, "v7"},
		{3, "v8"},
		{9, "v9"},
	}

	setfin := 0
	c := NewCache(NewLRU(1000))
	// Insert phase: after inserting case i, cases 0..i must hit.
	for i, x := range cases {
		set(c, 0, x.key, x.value, len(x.value), func() {
			setfin++
		}).Release()
		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j <= i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	// Delete phase: after deleting case i, only cases i+1.. must hit.
	for i, x := range cases {
		finalizerOk := false
		c.Delete(0, x.key, func() {
			finalizerOk = true
		})

		if !finalizerOk {
			t.Errorf("case %d delete finalizer not executed", i)
		}

		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j > i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	if setfin != len(cases) {
		t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
	}
}
// TestLRUCache_Eviction verifies LRU ordering and pinning: entry 1 is
// kept alive by an outstanding handle (o1) across an eviction wave, a
// Get refreshes entry 2's recency, and a large insert (charge 10)
// forces the least recently used entries out.
func TestLRUCache_Eviction(t *testing.T) {
	c := NewCache(NewLRU(12))
	o1 := set(c, 0, 1, 1, 1, nil) // pinned: handle not released yet
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	set(c, 0, 5, 5, 1, nil).Release()
	if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
		h.Release()
	}
	set(c, 0, 9, 9, 10, nil).Release() // 5,2,9

	// Survivors: the big entry, the refreshed one, the most recent,
	// and the pinned one.
	for _, key := range []uint64{9, 2, 5, 1} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	o1.Release() // unpin entry 1
	for _, key := range []uint64{1, 2, 5} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	// These were evicted and must miss.
	for _, key := range []uint64{3, 4, 9} {
		h := c.Get(0, key, nil)
		if h != nil {
			t.Errorf("hit for key '%d'", key)
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
}

// TestLRUCache_Evict exercises Evict (single key), EvictNS (whole
// namespace) and EvictAll, checking hits before and misses after.
func TestLRUCache_Evict(t *testing.T) {
	c := NewCache(NewLRU(6))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 1, 1, 4, 1, nil).Release()
	set(c, 1, 2, 5, 1, nil).Release()
	set(c, 2, 1, 6, 1, nil).Release()
	set(c, 2, 2, 7, 1, nil).Release()

	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				h.Release()
			} else {
				t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
			}
		}
	}

	if ok := c.Evict(0, 1); !ok {
		t.Error("first Cache.Evict on #0.1 return false")
	}
	if ok := c.Evict(0, 1); ok {
		t.Error("second Cache.Evict on #0.1 return true")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
	}

	c.EvictNS(1)
	if h := c.Get(1, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
	}
	if h := c.Get(1, 2, nil); h != nil {
		t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
	}

	c.EvictAll()
	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
			}
		}
	}
}
// TestLRUCache_Delete checks Delete semantics, including deleting an
// entry that is still referenced (h2): the entry stays reachable until
// the outstanding handle is released, and the delete finalizer runs
// the expected number of times.
func TestLRUCache_Delete(t *testing.T) {
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()

	if ok := c.Delete(0, 1, delFunc); !ok {
		t.Error("Cache.Delete on #1 return false")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
	}
	if ok := c.Delete(0, 1, delFunc); ok {
		t.Error("Cache.Delete on #1 return true")
	}

	// Keep #2 referenced while deleting it (twice).
	h2 := c.Get(0, 2, nil)
	if h2 == nil {
		t.Error("Cache.Get on #2 return nil")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(1) Cache.Delete on #2 return false")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(2) Cache.Delete on #2 return false")
	}

	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	// #2 is still reachable while h2 is held.
	c.Get(0, 2, nil).Release()

	for key := 2; key <= 4; key++ {
		if h := c.Get(0, uint64(key), nil); h != nil {
			h.Release()
		} else {
			t.Errorf("Cache.Get on #%d return nil", key)
		}
	}

	// Releasing the last handle completes the pending delete of #2.
	h2.Release()
	if h := c.Get(0, 2, nil); h != nil {
		t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
	}

	if delFuncCalled != 4 {
		t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
	}
}

// TestLRUCache_Close checks that Close releases all live entries (the
// release finalizer runs for each) and that a pending delete finalizer
// fires exactly once.
func TestLRUCache_Close(t *testing.T) {
	relFuncCalled := 0
	relFunc := func() {
		relFuncCalled++
	}
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, relFunc).Release()
	set(c, 0, 2, 2, 1, relFunc).Release()

	h3 := set(c, 0, 3, 3, 1, relFunc)
	if h3 == nil {
		t.Error("Cache.Get on #3 return nil")
	}
	if ok := c.Delete(0, 3, delFunc); !ok {
		t.Error("Cache.Delete on #3 return false")
	}

	c.Close()

	if relFuncCalled != 3 {
		t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
	}
	if delFuncCalled != 1 {
		t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
	}
}

View File

@@ -1,496 +0,0 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"fmt"
"io"
"math/rand"
"testing"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
)
// ctValSize is the value size (bytes) used for all corruption-test records.
const ctValSize = 1000

// dbCorruptHarness extends dbHarness with helpers that deliberately
// corrupt or remove database files.
type dbCorruptHarness struct {
	dbHarness
}

// newDbCorruptHarnessWopt creates a corruption harness using the given
// options.
func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
	h := new(dbCorruptHarness)
	h.init(t, o)
	return h
}

// newDbCorruptHarness creates a harness with a tiny block cache and
// strict journal checksums, so corruption is detected on read.
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
	return newDbCorruptHarnessWopt(t, &opt.Options{
		BlockCacheCapacity: 100,
		Strict:             opt.StrictJournalChecksum,
	})
}
// recover reopens the database via Recover, which rebuilds the
// manifest from the remaining table files; it fails the test on error.
func (h *dbCorruptHarness) recover() {
	p := &h.dbHarness
	t := p.t

	var err error
	p.db, err = Recover(h.stor, h.o)
	if err != nil {
		// Fixed: the message previously said "Repair" although this
		// method calls Recover.
		t.Fatal("Recover: got error: ", err)
	}
}
// build writes keys 0..n-1 in ascending order, one single-entry Write
// per key (the Batch is reused and Reset between writes).
func (h *dbCorruptHarness) build(n int) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	batch := new(Batch)
	for i := 0; i < n; i++ {
		batch.Reset()
		batch.Put(tkey(i), tval(i, ctValSize))
		err := db.Write(batch, p.wo)
		if err != nil {
			t.Fatal("write error: ", err)
		}
	}
}
// buildShuffled writes keys 0..n-1 in a random order determined by
// rnd.Perm, one single-entry Write per key.
func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	batch := new(Batch)
	// Fixed: this loop previously ranged over the *indices* of
	// rnd.Perm(n) ("for i := range ..."), which visits 0..n-1 in
	// ascending order and never actually shuffles. Range over the
	// permutation's values instead.
	for _, i := range rnd.Perm(n) {
		batch.Reset()
		batch.Put(tkey(i), tval(i, ctValSize))
		err := db.Write(batch, p.wo)
		if err != nil {
			t.Fatal("write error: ", err)
		}
	}
}
// deleteRand issues n single-entry delete Writes for random keys in
// [0, max); duplicates are possible, so fewer than n distinct keys may
// be removed.
func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	batch := new(Batch)
	for i := 0; i < n; i++ {
		batch.Reset()
		batch.Delete(tkey(rnd.Intn(max)))
		err := db.Write(batch, p.wo)
		if err != nil {
			t.Fatal("write error: ", err)
		}
	}
}
// corrupt flips the high bit of n bytes starting at offset in the
// fi-th file of type ft (fi == -1 selects the last file; a negative
// offset counts back from the end of the file). The file is read
// whole, mutated in memory, removed and rewritten.
func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
	p := &h.dbHarness
	t := p.t

	fds, _ := p.stor.List(ft)
	sortFds(fds)
	if fi < 0 {
		fi = len(fds) - 1
	}
	if fi >= len(fds) {
		t.Fatalf("no such file with type %q with index %d", ft, fi)
	}

	fd := fds[fi]
	r, err := h.stor.Open(fd)
	if err != nil {
		t.Fatal("cannot open file: ", err)
	}
	// Seek relative to the end to learn the file size.
	x, err := r.Seek(0, 2)
	if err != nil {
		t.Fatal("cannot query file size: ", err)
	}
	m := int(x)
	if _, err := r.Seek(0, 0); err != nil {
		t.Fatal(err)
	}

	// Clamp offset and n so the mutated range stays inside the file.
	if offset < 0 {
		if -offset > m {
			offset = 0
		} else {
			offset = m + offset
		}
	}
	if offset > m {
		offset = m
	}
	if offset+n > m {
		n = m - offset
	}

	buf := make([]byte, m)
	_, err = io.ReadFull(r, buf)
	if err != nil {
		t.Fatal("cannot read file: ", err)
	}
	r.Close()

	for i := 0; i < n; i++ {
		buf[offset+i] ^= 0x80
	}

	err = h.stor.Remove(fd)
	if err != nil {
		t.Fatal("cannot remove old file: ", err)
	}
	w, err := h.stor.Create(fd)
	if err != nil {
		t.Fatal("cannot create new file: ", err)
	}
	_, err = w.Write(buf)
	if err != nil {
		t.Fatal("cannot write new file: ", err)
	}
	w.Close()
}
// removeAll removes every file of the given type via the storage layer.
func (h *dbCorruptHarness) removeAll(ft storage.FileType) {
	fds, err := h.stor.List(ft)
	if err != nil {
		h.t.Fatal("get files: ", err)
	}
	for _, fd := range fds {
		if err := h.stor.Remove(fd); err != nil {
			h.t.Error("remove file: ", err)
		}
	}
}

// forceRemoveAll is like removeAll but uses ForceRemove, bypassing the
// storage layer's protection of in-use files.
func (h *dbCorruptHarness) forceRemoveAll(ft storage.FileType) {
	fds, err := h.stor.List(ft)
	if err != nil {
		h.t.Fatal("get files: ", err)
	}
	for _, fd := range fds {
		if err := h.stor.ForceRemove(fd); err != nil {
			h.t.Error("remove file: ", err)
		}
	}
}

// removeOne removes one randomly chosen file of the given type.
func (h *dbCorruptHarness) removeOne(ft storage.FileType) {
	fds, err := h.stor.List(ft)
	if err != nil {
		h.t.Fatal("get files: ", err)
	}
	fd := fds[rand.Intn(len(fds))]
	h.t.Logf("removing file @%d", fd.Num)
	if err := h.stor.Remove(fd); err != nil {
		h.t.Error("remove file: ", err)
	}
}
// check iterates the whole database, parses each key back to its
// integer form, and tallies good entries, out-of-order/bad keys, bad
// values and gaps; it fails unless the good count lies in [min, max].
func (h *dbCorruptHarness) check(min, max int) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	var n, badk, badv, missed, good int
	iter := db.NewIterator(nil, p.ro)
	for iter.Next() {
		k := 0
		fmt.Sscanf(string(iter.Key()), "%d", &k)
		if k < n {
			// Key went backwards relative to the expected sequence.
			badk++
			continue
		}
		// Keys skipped between the last seen key and this one.
		missed += k - n
		n = k + 1
		if !bytes.Equal(iter.Value(), tval(k, ctValSize)) {
			badv++
		} else {
			good++
		}
	}
	err := iter.Error()
	iter.Release()
	t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v",
		min, max, good, badk, badv, missed, err)
	if good < min || good > max {
		t.Errorf("good entries number not in range")
	}
}
// TestCorruptDB_Journal corrupts two spots in the journal and checks
// that only the records before the first corruption survive reopen.
func TestCorruptDB_Journal(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(100)
	h.check(100, 100)
	h.closeDB()
	h.corrupt(storage.TypeJournal, -1, 19, 1)
	h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1)

	h.openDB()
	h.check(36, 36)
}

// TestCorruptDB_Table corrupts one byte of a fully-compacted table and
// expects exactly one record to be lost.
func TestCorruptDB_Table(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(100)
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.compactRangeAt(1, "", "")
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, 100, 1)

	h.openDB()
	h.check(99, 99)
}

// TestCorruptDB_TableIndex corrupts the tail of a table (its index
// area) and expects a large but bounded loss of entries.
func TestCorruptDB_TableIndex(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(10000)
	h.compactMem()
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, -2000, 500)

	h.openDB()
	h.check(5000, 9999)
}
// TestCorruptDB_MissingManifest deletes the manifest after a busy
// write/delete workload; opening must fail, Recover must rebuild the
// database with all 1000 keys intact, twice over.
func TestCorruptDB_MissingManifest(t *testing.T) {
	rnd := rand.New(rand.NewSource(0x0badda7a))
	h := newDbCorruptHarnessWopt(t, &opt.Options{
		BlockCacheCapacity: 100,
		Strict:             opt.StrictJournalChecksum,
		WriteBuffer:        1000 * 60,
	})
	defer h.close()

	h.build(1000)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.deleteRand(500, 1000, rnd)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.deleteRand(500, 1000, rnd)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.closeDB()

	h.forceRemoveAll(storage.TypeManifest)
	h.openAssert(false)

	h.recover()
	h.check(1000, 1000)
	h.build(1000)
	h.compactMem()
	h.compactRange("", "")
	h.closeDB()

	h.recover()
	h.check(1000, 1000)
}

// TestCorruptDB_SequenceNumberRecovery checks that Recover preserves
// the sequence number so later writes still win over older versions.
func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("foo", "v1")
	h.put("foo", "v2")
	h.put("foo", "v3")
	h.put("foo", "v4")
	h.put("foo", "v5")
	h.closeDB()

	h.recover()
	h.getVal("foo", "v5")
	h.put("foo", "v6")
	h.getVal("foo", "v6")

	h.reopenDB()
	h.getVal("foo", "v6")
}

// TestCorruptDB_SequenceNumberRecoveryTable is the same scenario with
// the versions spread across table files via memdb compactions.
func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("foo", "v1")
	h.put("foo", "v2")
	h.put("foo", "v3")
	h.compactMem()
	h.put("foo", "v4")
	h.put("foo", "v5")
	h.compactMem()
	h.closeDB()

	h.recover()
	h.getVal("foo", "v5")
	h.put("foo", "v6")
	h.getVal("foo", "v6")

	h.reopenDB()
	h.getVal("foo", "v6")
}
// TestCorruptDB_CorruptedManifest corrupts the manifest; opening must
// fail but Recover must restore the data.
func TestCorruptDB_CorruptedManifest(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("foo", "hello")
	h.compactMem()
	h.compactRange("", "")
	h.closeDB()
	h.corrupt(storage.TypeManifest, -1, 0, 1000)
	h.openAssert(false)

	h.recover()
	h.getVal("foo", "hello")
}

// TestCorruptDB_CompactionInputError corrupts a table, reopens, and
// verifies new writes still work despite the damaged compaction input.
func TestCorruptDB_CompactionInputError(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(10)
	h.compactMem()
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, 100, 1)

	h.openDB()
	h.check(9, 9)

	h.build(10000)
	h.check(10000, 10000)
}

// TestCorruptDB_UnrelatedKeys verifies that a key untouched by the
// corruption stays readable before and after a memdb compaction.
func TestCorruptDB_UnrelatedKeys(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(10)
	h.compactMem()
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, 100, 1)

	h.openDB()
	h.put(string(tkey(1000)), string(tval(1000, ctValSize)))
	h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
	h.compactMem()
	h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
}

// TestCorruptDB_Level0NewerFileHasOlderSeqnum checks that Recover keeps
// the newest versions when level-0 files overlap in sequence numbers.
func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("a", "v1")
	h.put("b", "v1")
	h.compactMem()
	h.put("a", "v2")
	h.put("b", "v2")
	h.compactMem()
	h.put("a", "v3")
	h.put("b", "v3")
	h.compactMem()
	h.put("c", "v0")
	h.put("d", "v0")
	h.compactMem()
	h.compactRangeAt(1, "", "")
	h.closeDB()

	h.recover()
	h.getVal("a", "v3")
	h.getVal("b", "v3")
	h.getVal("c", "v0")
	h.getVal("d", "v0")
}
func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("a", "v1")
h.put("b", "v1")
h.compactMem()
h.put("a", "v2")
h.put("b", "v2")
h.compactMem()
h.put("a", "v3")
h.put("b", "v3")
h.compactMem()
h.put("c", "v0")
h.put("d", "v0")
h.compactMem()
h.compactRangeAt(0, "", "")
h.closeDB()
h.recover()
h.getVal("a", "v3")
h.getVal("b", "v3")
h.getVal("c", "v0")
h.getVal("d", "v0")
}
// TestCorruptDB_MissingTableFiles deletes one table file and checks that a
// normal open of the DB fails (missing files must not be silently ignored).
func TestCorruptDB_MissingTableFiles(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("a", "v1")
	h.put("b", "v1")
	h.compactMem()
	h.put("c", "v2")
	h.put("d", "v2")
	h.compactMem()
	h.put("e", "v3")
	h.put("f", "v3")
	h.closeDB()
	h.removeOne(storage.TypeTable)
	h.openAssert(false)
}
// TestCorruptDB_RecoverTable corrupts several table files (data and filter
// blocks) and checks that table recovery preserves the sequence number and
// loses only the records covered by the corrupted regions.
func TestCorruptDB_RecoverTable(t *testing.T) {
	h := newDbCorruptHarnessWopt(t, &opt.Options{
		WriteBuffer:         112 * opt.KiB,
		CompactionTableSize: 90 * opt.KiB,
		Filter:              filter.NewBloomFilter(10),
	})
	defer h.close()

	h.build(1000)
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.compactRangeAt(1, "", "")
	seq := h.db.seq
	h.closeDB()
	h.corrupt(storage.TypeTable, 0, 1000, 1)
	h.corrupt(storage.TypeTable, 3, 10000, 1)
	// Corrupted filter shouldn't affect recovery.
	h.corrupt(storage.TypeTable, 3, 113888, 10)
	h.corrupt(storage.TypeTable, -1, 20000, 1)

	h.recover()
	// Recovery must not rewind the sequence number.
	if h.db.seq != seq {
		t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
	}
	// 15 of the original 1000 records are lost to the corrupted regions.
	h.check(985, 985)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,117 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// Registers the "Leveldb external" ginkgo suite: generic write, read and
// transaction tests driven through the shared testutil harness, using a
// deliberately small block/buffer configuration so compactions happen often.
var _ = testutil.Defer(func() {
	Describe("Leveldb external", func() {
		// Tiny sizes plus StrictAll make structural errors surface quickly.
		o := &opt.Options{
			DisableBlockCache:      true,
			BlockRestartInterval:   5,
			BlockSize:              80,
			Compression:            opt.NoCompression,
			OpenFilesCacheCapacity: -1,
			Strict:                 opt.StrictAll,
			WriteBuffer:            1000,
			CompactionTableSize:    2000,
		}

		Describe("write test", func() {
			It("should do write correctly", func(done Done) {
				db := newTestingDB(o, nil, nil)
				t := testutil.DBTesting{
					DB:      db,
					Deleted: testutil.KeyValue_Generate(nil, 500, 1, 1, 50, 5, 5).Clone(),
				}
				testutil.DoDBTesting(&t)
				db.TestClose()
				done <- true
			}, 80.0)
		})

		Describe("read test", func() {
			testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB {
				// Building the DB.
				db := newTestingDB(o, nil, nil)
				kv.IterateShuffled(nil, func(i int, key, value []byte) {
					err := db.TestPut(key, value)
					Expect(err).NotTo(HaveOccurred())
				})
				return db
			}, func(db testutil.DB) {
				db.(*testingDB).TestClose()
			})
		})

		Describe("transaction test", func() {
			It("should do transaction correctly", func(done Done) {
				db := newTestingDB(o, nil, nil)

				By("creating first transaction")
				var err error
				tr := &testingTransaction{}
				tr.Transaction, err = db.OpenTransaction()
				Expect(err).NotTo(HaveOccurred())
				t0 := &testutil.DBTesting{
					DB:      tr,
					Deleted: testutil.KeyValue_Generate(nil, 200, 1, 1, 50, 5, 5).Clone(),
				}
				testutil.DoDBTesting(t0)
				testutil.TestGet(tr, t0.Present)
				testutil.TestHas(tr, t0.Present)

				By("committing first transaction")
				err = tr.Commit()
				Expect(err).NotTo(HaveOccurred())
				testutil.TestIter(db, nil, t0.Present)
				testutil.TestGet(db, t0.Present)
				testutil.TestHas(db, t0.Present)

				By("manipulating DB without transaction")
				t0.DB = db
				testutil.DoDBTesting(t0)

				By("creating second transaction")
				tr.Transaction, err = db.OpenTransaction()
				Expect(err).NotTo(HaveOccurred())
				t1 := &testutil.DBTesting{
					DB:      tr,
					Deleted: t0.Deleted.Clone(),
					Present: t0.Present.Clone(),
				}
				testutil.DoDBTesting(t1)
				testutil.TestIter(db, nil, t0.Present)

				By("discarding second transaction")
				// A discarded transaction must leave the DB unchanged.
				tr.Discard()
				testutil.TestIter(db, nil, t0.Present)

				By("creating third transaction")
				tr.Transaction, err = db.OpenTransaction()
				Expect(err).NotTo(HaveOccurred())
				t0.DB = tr
				testutil.DoDBTesting(t0)

				By("committing third transaction")
				err = tr.Commit()
				Expect(err).NotTo(HaveOccurred())
				testutil.TestIter(db, nil, t0.Present)

				db.TestClose()
				done <- true
			}, 240.0)
		})
	})
})

View File

@@ -1,142 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package filter
import (
"encoding/binary"
"github.com/syndtr/goleveldb/leveldb/util"
"testing"
)
// harness bundles a bloom filter, its generator, and the most recently
// generated filter bytes for the bloom filter tests below.
type harness struct {
	t *testing.T

	bloom     Filter          // the filter policy under test (bloom, 10 bits/key)
	generator FilterGenerator // accumulates keys until build() is called
	filter    []byte          // output of the last build()
}

// newHarness creates a harness around a 10-bits-per-key bloom filter.
func newHarness(t *testing.T) *harness {
	bloom := NewBloomFilter(10)
	return &harness{
		t:         t,
		bloom:     bloom,
		generator: bloom.NewGenerator(),
	}
}

// add feeds one key to the filter generator.
func (h *harness) add(key []byte) {
	h.generator.Add(key)
}

// addNum adds a uint32 key in little-endian encoding.
func (h *harness) addNum(key uint32) {
	var b [4]byte
	binary.LittleEndian.PutUint32(b[:], key)
	h.add(b[:])
}

// build generates the filter from all keys added so far and stores it.
func (h *harness) build() {
	b := &util.Buffer{}
	h.generator.Generate(b)
	h.filter = b.Bytes()
}

// reset discards the previously built filter.
func (h *harness) reset() {
	h.filter = nil
}

// filterLen reports the size in bytes of the built filter.
func (h *harness) filterLen() int {
	return len(h.filter)
}

// assert checks Contains(key) against want; when silent it only returns the
// result without reporting (used for false-positive-rate sampling).
func (h *harness) assert(key []byte, want, silent bool) bool {
	got := h.bloom.Contains(h.filter, key)
	if !silent && got != want {
		h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want)
	}
	return got
}

// assertNum is assert for a little-endian-encoded uint32 key.
func (h *harness) assertNum(key uint32, want, silent bool) bool {
	var b [4]byte
	binary.LittleEndian.PutUint32(b[:], key)
	return h.assert(b[:], want, silent)
}
// TestBloomFilter_Empty checks that an empty filter reports no key as present.
func TestBloomFilter_Empty(t *testing.T) {
	h := newHarness(t)
	h.build()
	h.assert([]byte("hello"), false, false)
	h.assert([]byte("world"), false, false)
}

// TestBloomFilter_Small checks basic membership: added keys are found and
// two non-added keys are (expected to be) absent.
func TestBloomFilter_Small(t *testing.T) {
	h := newHarness(t)
	h.add([]byte("hello"))
	h.add([]byte("world"))
	h.build()
	h.assert([]byte("hello"), true, false)
	h.assert([]byte("world"), true, false)
	h.assert([]byte("x"), false, false)
	h.assert([]byte("foo"), false, false)
}
// nextN returns the next test size after n, stepping by one order of
// magnitude less than n's own: 1..9 advance by 1, 10..99 by 10,
// 100..999 by 100, and anything larger by 1000.
func nextN(n int) int {
	if n < 10 {
		return n + 1
	}
	if n < 100 {
		return n + 10
	}
	if n < 1000 {
		return n + 100
	}
	return n + 1000
}
// TestBloomFilter_VaryingLengths builds filters for growing key counts and
// checks: the filter size stays within the 10-bits-per-key budget (+40 bytes
// slack), there are no false negatives, and the sampled false positive rate
// stays under 2% per size with mostly-good (<1.25%) rates overall.
func TestBloomFilter_VaryingLengths(t *testing.T) {
	h := newHarness(t)
	var mediocre, good int
	for n := 1; n < 10000; n = nextN(n) {
		h.reset()
		for i := 0; i < n; i++ {
			h.addNum(uint32(i))
		}
		h.build()

		got := h.filterLen()
		want := (n * 10 / 8) + 40
		if got > want {
			t.Errorf("filter len test failed, '%d' > '%d'", got, want)
		}

		// All added keys must be reported present (no false negatives).
		for i := 0; i < n; i++ {
			h.assertNum(uint32(i), true, false)
		}

		// Sample 10000 keys that were never added to estimate the
		// false positive rate.
		var rate float32
		for i := 0; i < 10000; i++ {
			if h.assertNum(uint32(i+1000000000), true, true) {
				rate++
			}
		}
		rate /= 10000
		if rate > 0.02 {
			t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n)
		}
		if rate > 0.0125 {
			mediocre++
		} else {
			good++
		}
	}
	t.Logf("false positive rate: %d good, %d mediocre", good, mediocre)
	if mediocre > good/5 {
		t.Error("mediocre false positive rate is more than expected")
	}
}

View File

@@ -1,30 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iterator_test
import (
. "github.com/onsi/ginkgo"
. "github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// Registers the array iterator suite: generates a key/value set and runs the
// generic iterate/seek test battery against NewArrayIterator.
var _ = testutil.Defer(func() {
	Describe("Array iterator", func() {
		It("Should iterates and seeks correctly", func() {
			// Build key/value.
			kv := testutil.KeyValue_Generate(nil, 70, 1, 1, 5, 3, 3)

			// Test the iterator.
			t := testutil.IteratorTesting{
				KeyValue: kv.Clone(),
				Iter:     NewArrayIterator(kv),
			}
			testutil.DoIteratorTesting(&t)
		})
	})
})

View File

@@ -1,83 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iterator_test
import (
"sort"
. "github.com/onsi/ginkgo"
"github.com/syndtr/goleveldb/leveldb/comparer"
. "github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// keyValue couples an index key with the key/value set stored under it.
type keyValue struct {
	key []byte
	testutil.KeyValue
}

// keyValueIndex is a sorted slice of keyValue entries implementing the
// indexer interface consumed by NewIndexedIterator.
type keyValueIndex []keyValue

// Search returns the position of the first entry whose key compares >= key.
func (x keyValueIndex) Search(key []byte) int {
	return sort.Search(x.Len(), func(i int) bool {
		return comparer.DefaultComparer.Compare(x[i].key, key) >= 0
	})
}

func (x keyValueIndex) Len() int                        { return len(x) }
func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil }
func (x keyValueIndex) Get(i int) Iterator              { return NewArrayIterator(x[i]) }
// Registers the indexed iterator suite. Test(n...) builds an index whose i-th
// entry holds n[i] consecutive generated keys; called without arguments it
// picks 3..19 entries of 1..19 keys each at random.
var _ = testutil.Defer(func() {
	Describe("Indexed iterator", func() {
		Test := func(n ...int) func() {
			if len(n) == 0 {
				rnd := testutil.NewRand()
				n = make([]int, rnd.Intn(17)+3)
				for i := range n {
					n[i] = rnd.Intn(19) + 1
				}
			}

			return func() {
				It("Should iterates and seeks correctly", func(done Done) {
					// Build key/value.
					index := make(keyValueIndex, len(n))
					sum := 0
					for _, x := range n {
						sum += x
					}
					kv := testutil.KeyValue_Generate(nil, sum, 1, 1, 10, 4, 4)
					// Distribute the generated pairs over the index
					// entries; each entry's key is its last member's key.
					for i, j := 0, 0; i < len(n); i++ {
						for x := n[i]; x > 0; x-- {
							key, value := kv.Index(j)
							index[i].key = key
							index[i].Put(key, value)
							j++
						}
					}

					// Test the iterator.
					t := testutil.IteratorTesting{
						KeyValue: kv.Clone(),
						Iter:     NewIndexedIterator(NewArrayIndexer(index), true),
					}
					testutil.DoIteratorTesting(&t)
					done <- true
				}, 15.0)
			}
		}

		Describe("with 100 keys", Test(100))
		Describe("with 50-50 keys", Test(50, 50))
		Describe("with 50-1 keys", Test(50, 1))
		Describe("with 50-1-50 keys", Test(50, 1, 50))
		Describe("with 1-50 keys", Test(1, 50))
		Describe("with random N-keys", Test())
	})
})

View File

@@ -1,11 +0,0 @@
package iterator_test
import (
"testing"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// TestIterator runs the ginkgo "Iterator Suite" registered by the Defer
// blocks in this package under the standard go test runner.
func TestIterator(t *testing.T) {
	testutil.RunSuite(t, "Iterator Suite")
}

View File

@@ -1,60 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iterator_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/comparer"
. "github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// Registers the merged iterator suite. Test(filled, empty) scatters 100
// generated pairs over `filled` array iterators, mixes in `empty` empty
// iterators in random order, and runs the generic iterate/seek battery
// against their merge.
var _ = testutil.Defer(func() {
	Describe("Merged iterator", func() {
		Test := func(filled int, empty int) func() {
			return func() {
				It("Should iterates and seeks correctly", func(done Done) {
					rnd := testutil.NewRand()

					// Build key/value.
					filledKV := make([]testutil.KeyValue, filled)
					kv := testutil.KeyValue_Generate(nil, 100, 1, 1, 10, 4, 4)
					kv.Iterate(func(i int, key, value []byte) {
						filledKV[rnd.Intn(filled)].Put(key, value)
					})

					// Create itearators.
					iters := make([]Iterator, filled+empty)
					for i := range iters {
						// Randomly interleave filled and empty
						// iterators until each pool is exhausted.
						if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) {
							filled--
							Expect(filledKV[filled].Len()).ShouldNot(BeZero())
							iters[i] = NewArrayIterator(filledKV[filled])
						} else {
							empty--
							iters[i] = NewEmptyIterator(nil)
						}
					}

					// Test the iterator.
					t := testutil.IteratorTesting{
						KeyValue: kv.Clone(),
						Iter:     NewMergedIterator(iters, comparer.DefaultComparer, true),
					}
					testutil.DoIteratorTesting(&t)
					done <- true
				}, 15.0)
			}
		}

		Describe("with three, all filled iterators", Test(3, 0))
		Describe("with one filled, one empty iterators", Test(1, 1))
		Describe("with one filled, two empty iterators", Test(1, 2))
	})
})

View File

@@ -1,818 +0,0 @@
// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135
// License, authors and contributors informations can be found at bellow URLs respectively:
// https://code.google.com/p/leveldb-go/source/browse/LICENSE
// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
package journal
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math/rand"
"strings"
"testing"
)
// dropper is a journal.Dropper that logs dropped-chunk errors to the test.
type dropper struct {
	t *testing.T
}

// Drop records a dropped-chunk error in the test log.
func (d dropper) Drop(err error) {
	d.t.Log(err)
}
func short(s string) string {
if len(s) < 64 {
return s
}
return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:])
}
// big returns a string of length n, composed of repetitions of partial.
func big(partial string, n int) string {
	reps := n/len(partial) + 1
	return strings.Repeat(partial, reps)[:n]
}
// TestEmpty checks that reading an empty journal immediately yields io.EOF.
func TestEmpty(t *testing.T) {
	buf := new(bytes.Buffer)
	r := NewReader(buf, dropper{t}, true, true)
	if _, err := r.Next(); err != io.EOF {
		t.Fatalf("got %v, want %v", err, io.EOF)
	}
}
// testGenerator writes every record produced by gen to a journal, then
// resets the generator and reads the journal back, requiring each record to
// match exactly and the reader to end with io.EOF. reset must restart gen
// so the same sequence is produced for both passes.
func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) {
	buf := new(bytes.Buffer)

	// Write pass.
	reset()
	w := NewWriter(buf)
	for {
		s, ok := gen()
		if !ok {
			break
		}
		ww, err := w.Next()
		if err != nil {
			t.Fatal(err)
		}
		if _, err := ww.Write([]byte(s)); err != nil {
			t.Fatal(err)
		}
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// Read pass over the same generated sequence.
	reset()
	r := NewReader(buf, dropper{t}, true, true)
	for {
		s, ok := gen()
		if !ok {
			break
		}
		rr, err := r.Next()
		if err != nil {
			t.Fatal(err)
		}
		x, err := ioutil.ReadAll(rr)
		if err != nil {
			t.Fatal(err)
		}
		if string(x) != s {
			t.Fatalf("got %q, want %q", short(string(x)), short(s))
		}
	}
	if _, err := r.Next(); err != io.EOF {
		t.Fatalf("got %v, want %v", err, io.EOF)
	}
}
// testLiterals round-trips a fixed list of record strings through a journal.
func testLiterals(t *testing.T, s []string) {
	var i int
	reset := func() {
		i = 0
	}
	gen := func() (string, bool) {
		if i == len(s) {
			return "", false
		}
		i++
		return s[i-1], true
	}
	testGenerator(t, reset, gen)
}

// TestMany round-trips 100k small sequential records through a journal.
func TestMany(t *testing.T) {
	const n = 1e5
	var i int
	reset := func() {
		i = 0
	}
	gen := func() (string, bool) {
		if i == n {
			return "", false
		}
		i++
		return fmt.Sprintf("%d.", i-1), true
	}
	testGenerator(t, reset, gen)
}
// TestRandom round-trips 100 records of random length (up to a bit over two
// blocks) using a fixed-seed RNG so write and read passes see identical data.
func TestRandom(t *testing.T) {
	const n = 1e2
	var (
		i int
		r *rand.Rand
	)
	reset := func() {
		i, r = 0, rand.New(rand.NewSource(0))
	}
	gen := func() (string, bool) {
		if i == n {
			return "", false
		}
		i++
		return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true
	}
	testGenerator(t, reset, gen)
}

// TestBasic round-trips three records of assorted sizes, one spanning
// multiple blocks.
func TestBasic(t *testing.T) {
	testLiterals(t, []string{
		strings.Repeat("a", 1000),
		strings.Repeat("b", 97270),
		strings.Repeat("c", 8000),
	})
}
// TestBoundary round-trips record pairs whose sizes straddle the block size
// in every combination within ±16 bytes, with and without a tiny record
// between them, to exercise chunk-boundary handling.
func TestBoundary(t *testing.T) {
	for i := blockSize - 16; i < blockSize+16; i++ {
		s0 := big("abcd", i)
		for j := blockSize - 16; j < blockSize+16; j++ {
			s1 := big("ABCDE", j)
			testLiterals(t, []string{s0, s1})
			testLiterals(t, []string{s0, "", s1})
			testLiterals(t, []string{s0, "x", s1})
		}
	}
}
// TestFlush verifies exactly how many bytes reach the underlying buffer at
// each stage of writing: nothing before Flush, header+payload after Flush,
// a full block once a record crosses the boundary, and that reading back
// yields the original record lengths.
func TestFlush(t *testing.T) {
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	// Write a couple of records. Everything should still be held
	// in the record.Writer buffer, so that buf.Len should be 0.
	w0, _ := w.Next()
	w0.Write([]byte("0"))
	w1, _ := w.Next()
	w1.Write([]byte("11"))
	if got, want := buf.Len(), 0; got != want {
		t.Fatalf("buffer length #0: got %d want %d", got, want)
	}
	// Flush the record.Writer buffer, which should yield 17 bytes.
	// 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes.
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if got, want := buf.Len(), 17; got != want {
		t.Fatalf("buffer length #1: got %d want %d", got, want)
	}
	// Do another write, one that isn't large enough to complete the block.
	// The write should not have flowed through to buf.
	w2, _ := w.Next()
	w2.Write(bytes.Repeat([]byte("2"), 10000))
	if got, want := buf.Len(), 17; got != want {
		t.Fatalf("buffer length #2: got %d want %d", got, want)
	}
	// Flushing should get us up to 10024 bytes written.
	// 10024 = 17 + 7 + 10000.
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if got, want := buf.Len(), 10024; got != want {
		t.Fatalf("buffer length #3: got %d want %d", got, want)
	}
	// Do a bigger write, one that completes the current block.
	// We should now have 32768 bytes (a complete block), without
	// an explicit flush.
	w3, _ := w.Next()
	w3.Write(bytes.Repeat([]byte("3"), 40000))
	if got, want := buf.Len(), 32768; got != want {
		t.Fatalf("buffer length #4: got %d want %d", got, want)
	}
	// Flushing should get us up to 50038 bytes written.
	// 50038 = 10024 + 2*7 + 40000. There are two headers because
	// the one record was split into two chunks.
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if got, want := buf.Len(), 50038; got != want {
		t.Fatalf("buffer length #5: got %d want %d", got, want)
	}
	// Check that reading those records give the right lengths.
	r := NewReader(buf, dropper{t}, true, true)
	wants := []int64{1, 2, 10000, 40000}
	for i, want := range wants {
		rr, _ := r.Next()
		n, err := io.Copy(ioutil.Discard, rr)
		if err != nil {
			t.Fatalf("read #%d: %v", i, err)
		}
		if n != want {
			t.Fatalf("read #%d: got %d bytes want %d", i, n, want)
		}
	}
}
// TestNonExhaustiveRead reads only the first 10 bytes of each record and
// verifies the reader still advances to the next record correctly even
// though the previous one was not fully consumed.
func TestNonExhaustiveRead(t *testing.T) {
	const n = 100
	buf := new(bytes.Buffer)
	p := make([]byte, 10)
	rnd := rand.New(rand.NewSource(1))

	w := NewWriter(buf)
	for i := 0; i < n; i++ {
		length := len(p) + rnd.Intn(3*blockSize)
		s := string(uint8(i)) + "123456789abcdefgh"
		ww, _ := w.Next()
		ww.Write([]byte(big(s, length)))
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	r := NewReader(buf, dropper{t}, true, true)
	for i := 0; i < n; i++ {
		rr, _ := r.Next()
		_, err := io.ReadFull(rr, p)
		if err != nil {
			t.Fatal(err)
		}
		want := string(uint8(i)) + "123456789"
		if got := string(p); got != want {
			t.Fatalf("read #%d: got %q want %q", i, got, want)
		}
	}
}
// TestStaleReader checks that a record reader obtained from an earlier Next
// call becomes "stale" once Next is called again: reading from it fails,
// while the current reader still works.
func TestStaleReader(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)
	w0, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	w0.Write([]byte("0"))
	w1, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	w1.Write([]byte("11"))
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	r := NewReader(buf, dropper{t}, true, true)
	r0, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	r1, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	p := make([]byte, 1)
	if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") {
		t.Fatalf("stale read #0: unexpected error: %v", err)
	}
	if _, err := r1.Read(p); err != nil {
		t.Fatalf("fresh read #1: got %v want nil error", err)
	}
	if p[0] != '1' {
		t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0])
	}
}
// TestStaleWriter checks the writer-side staleness rules: a record writer
// from an earlier Next fails after a newer one exists, and the current one
// fails after a Flush.
func TestStaleWriter(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)
	w0, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	w1, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") {
		t.Fatalf("stale write #0: unexpected error: %v", err)
	}
	if _, err := w1.Write([]byte("11")); err != nil {
		t.Fatalf("fresh write #1: got %v want nil error", err)
	}
	if err := w.Flush(); err != nil {
		t.Fatalf("flush: %v", err)
	}
	// Flush also invalidates the current record writer.
	if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") {
		t.Fatalf("stale write #1: unexpected error: %v", err)
	}
}
// TestCorrupt_MissingLastBlock truncates a two-block journal to one block
// and verifies: the first record still reads fully, the second ends with
// io.ErrUnexpectedEOF, and the reader then reports io.EOF.
func TestCorrupt_MissingLastBlock(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)

	// First record.
	ww, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
		t.Fatalf("write #0: unexpected error: %v", err)
	}

	// Second record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
		t.Fatalf("write #1: unexpected error: %v", err)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// Cut the last block.
	b := buf.Bytes()[:blockSize]

	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)

	// First read.
	rr, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err := io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #0: %v", err)
	}
	if n != blockSize-1024 {
		t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
	}

	// Second read.
	rr, err = r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err = io.Copy(ioutil.Discard, rr)
	if err != io.ErrUnexpectedEOF {
		t.Fatalf("read #1: unexpected error: %v", err)
	}

	if _, err := r.Next(); err != io.EOF {
		t.Fatalf("last next: unexpected error: %v", err)
	}
}
// TestCorrupt_CorruptedFirstBlock writes four records spanning four blocks,
// corrupts the first 1024 bytes, and verifies the reader skips the two
// records touching block #0 and still returns records #3 and #4 intact.
func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)

	// First record.
	ww, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
		t.Fatalf("write #0: unexpected error: %v", err)
	}

	// Second record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
		t.Fatalf("write #1: unexpected error: %v", err)
	}

	// Third record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
		t.Fatalf("write #2: unexpected error: %v", err)
	}

	// Fourth record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
		t.Fatalf("write #3: unexpected error: %v", err)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	b := buf.Bytes()
	// Corrupting block #0.
	for i := 0; i < 1024; i++ {
		b[i] = '1'
	}

	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)

	// First read (third record).
	rr, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err := io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #0: %v", err)
	}
	if want := int64(blockSize-headerSize) + 1; n != want {
		t.Fatalf("read #0: got %d bytes want %d", n, want)
	}

	// Second read (fourth record).
	rr, err = r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err = io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #1: %v", err)
	}
	if want := int64(blockSize-headerSize) + 2; n != want {
		t.Fatalf("read #1: got %d bytes want %d", n, want)
	}

	if _, err := r.Next(); err != io.EOF {
		t.Fatalf("last next: unexpected error: %v", err)
	}
}
// TestCorrupt_CorruptedMiddleBlock corrupts the start of block #1 and
// verifies: record #1 (entirely in block #0) reads fine, record #2 truncates
// with io.ErrUnexpectedEOF, record #3 (whose chunks start in the damaged
// block) is skipped, and record #4 reads fine.
func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)

	// First record.
	ww, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
		t.Fatalf("write #0: unexpected error: %v", err)
	}

	// Second record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
		t.Fatalf("write #1: unexpected error: %v", err)
	}

	// Third record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
		t.Fatalf("write #2: unexpected error: %v", err)
	}

	// Fourth record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
		t.Fatalf("write #3: unexpected error: %v", err)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	b := buf.Bytes()
	// Corrupting block #1.
	for i := 0; i < 1024; i++ {
		b[blockSize+i] = '1'
	}

	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)

	// First read (first record).
	rr, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err := io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #0: %v", err)
	}
	if want := int64(blockSize / 2); n != want {
		t.Fatalf("read #0: got %d bytes want %d", n, want)
	}

	// Second read (second record).
	rr, err = r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err = io.Copy(ioutil.Discard, rr)
	if err != io.ErrUnexpectedEOF {
		t.Fatalf("read #1: unexpected error: %v", err)
	}

	// Third read (fourth record).
	rr, err = r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err = io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #2: %v", err)
	}
	if want := int64(blockSize-headerSize) + 2; n != want {
		t.Fatalf("read #2: got %d bytes want %d", n, want)
	}

	if _, err := r.Next(); err != io.EOF {
		t.Fatalf("last next: unexpected error: %v", err)
	}
}
// TestCorrupt_CorruptedLastBlock corrupts the tail 1023 bytes of the journal
// and verifies the first three records read completely while the fourth ends
// with io.ErrUnexpectedEOF.
func TestCorrupt_CorruptedLastBlock(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)

	// First record.
	ww, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
		t.Fatalf("write #0: unexpected error: %v", err)
	}

	// Second record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
		t.Fatalf("write #1: unexpected error: %v", err)
	}

	// Third record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
		t.Fatalf("write #2: unexpected error: %v", err)
	}

	// Fourth record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
		t.Fatalf("write #3: unexpected error: %v", err)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	b := buf.Bytes()
	// Corrupting block #3.
	for i := len(b) - 1; i > len(b)-1024; i-- {
		b[i] = '1'
	}

	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)

	// First read (first record).
	rr, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err := io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #0: %v", err)
	}
	if want := int64(blockSize / 2); n != want {
		t.Fatalf("read #0: got %d bytes want %d", n, want)
	}

	// Second read (second record).
	rr, err = r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err = io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #1: %v", err)
	}
	if want := int64(blockSize - headerSize); n != want {
		t.Fatalf("read #1: got %d bytes want %d", n, want)
	}

	// Third read (third record).
	rr, err = r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err = io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #2: %v", err)
	}
	if want := int64(blockSize-headerSize) + 1; n != want {
		t.Fatalf("read #2: got %d bytes want %d", n, want)
	}

	// Fourth read (fourth record).
	rr, err = r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err = io.Copy(ioutil.Discard, rr)
	if err != io.ErrUnexpectedEOF {
		t.Fatalf("read #3: unexpected error: %v", err)
	}

	if _, err := r.Next(); err != io.EOF {
		t.Fatalf("last next: unexpected error: %v", err)
	}
}
// TestCorrupt_FirstChuckLengthOverflow overwrites the length field of the
// chunk at the start of block #1 with 0xffff (overflowing the block) and
// verifies record #1 still reads while record #2 fails with
// io.ErrUnexpectedEOF.
func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)

	// First record.
	ww, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
		t.Fatalf("write #0: unexpected error: %v", err)
	}

	// Second record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
		t.Fatalf("write #1: unexpected error: %v", err)
	}

	// Third record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
		t.Fatalf("write #2: unexpected error: %v", err)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	b := buf.Bytes()
	// Corrupting record #1.
	// The chunk length lives 4 bytes into the header (after the checksum).
	x := blockSize
	binary.LittleEndian.PutUint16(b[x+4:], 0xffff)

	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)

	// First read (first record).
	rr, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err := io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #0: %v", err)
	}
	if want := int64(blockSize / 2); n != want {
		t.Fatalf("read #0: got %d bytes want %d", n, want)
	}

	// Second read (second record).
	rr, err = r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err = io.Copy(ioutil.Discard, rr)
	if err != io.ErrUnexpectedEOF {
		t.Fatalf("read #1: unexpected error: %v", err)
	}

	if _, err := r.Next(); err != io.EOF {
		t.Fatalf("last next: unexpected error: %v", err)
	}
}
// TestCorrupt_MiddleChuckLengthOverflow overwrites the length field of the
// chunk that starts mid-block-#0 (record #2's first chunk) and verifies the
// reader skips record #2 entirely, returning records #1 and #3.
func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) {
	buf := new(bytes.Buffer)

	w := NewWriter(buf)

	// First record.
	ww, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
		t.Fatalf("write #0: unexpected error: %v", err)
	}

	// Second record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
		t.Fatalf("write #1: unexpected error: %v", err)
	}

	// Third record.
	ww, err = w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
		t.Fatalf("write #2: unexpected error: %v", err)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	b := buf.Bytes()
	// Corrupting record #1.
	// Record #2's first chunk header starts right after record #1's
	// payload (blockSize/2) plus its header.
	x := blockSize/2 + headerSize
	binary.LittleEndian.PutUint16(b[x+4:], 0xffff)

	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)

	// First read (first record).
	rr, err := r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err := io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #0: %v", err)
	}
	if want := int64(blockSize / 2); n != want {
		t.Fatalf("read #0: got %d bytes want %d", n, want)
	}

	// Second read (third record).
	rr, err = r.Next()
	if err != nil {
		t.Fatal(err)
	}
	n, err = io.Copy(ioutil.Discard, rr)
	if err != nil {
		t.Fatalf("read #1: %v", err)
	}
	if want := int64(blockSize-headerSize) + 1; n != want {
		t.Fatalf("read #1: got %d bytes want %d", n, want)
	}

	if _, err := r.Next(); err != io.EOF {
		t.Fatalf("last next: unexpected error: %v", err)
	}
}

View File

@@ -1,133 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"testing"
"github.com/syndtr/goleveldb/leveldb/comparer"
)
// defaultIComparer wraps the default user-key comparer as an internal-key
// comparer for the tests below.
var defaultIComparer = &iComparer{comparer.DefaultComparer}

// ikey builds an internal key from a user key, sequence number and key type.
func ikey(key string, seq uint64, kt keyType) internalKey {
	return makeInternalKey(nil, []byte(key), uint64(seq), kt)
}

// shortSep returns the shortest separator between a and b, falling back to a
// when the comparer returns nil.
func shortSep(a, b []byte) []byte {
	dst := make([]byte, len(a))
	dst = defaultIComparer.Separator(dst[:0], a, b)
	if dst == nil {
		return a
	}
	return dst
}

// shortSuccessor returns the shortest successor of b, falling back to b when
// the comparer returns nil.
func shortSuccessor(b []byte) []byte {
	dst := make([]byte, len(b))
	dst = defaultIComparer.Successor(dst[:0], b)
	if dst == nil {
		return b
	}
	return dst
}
// testSingleKey encodes one (key, seq, type) triple as an internal key and
// verifies the user key, sequence number and type decode back identically,
// via both the accessor methods and parseInternalKey.
func testSingleKey(t *testing.T, key string, seq uint64, kt keyType) {
	ik := ikey(key, seq, kt)

	if !bytes.Equal(ik.ukey(), []byte(key)) {
		t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
	}

	rseq, rt := ik.parseNum()
	if rseq != seq {
		t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
	}
	if rt != kt {
		t.Errorf("type does not equal, got %v, want %v", rt, kt)
	}

	if rukey, rseq, rt, kerr := parseInternalKey(ik); kerr == nil {
		if !bytes.Equal(rukey, []byte(key)) {
			t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
		}
		if rseq != seq {
			t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
		}
		if rt != kt {
			t.Errorf("type does not equal, got %v, want %v", rt, kt)
		}
	} else {
		t.Errorf("key error: %v", kerr)
	}
}
// TestInternalKey_EncodeDecode round-trips internal keys built from a range
// of user keys and sequence numbers, covering both value and deletion
// key types for every combination.
func TestInternalKey_EncodeDecode(t *testing.T) {
	keys := []string{"", "k", "hello", "longggggggggggggggggggggg"}
	seqs := []uint64{
		1, 2, 3,
		(1 << 8) - 1, 1 << 8, (1 << 8) + 1,
		(1 << 16) - 1, 1 << 16, (1 << 16) + 1,
		(1 << 32) - 1, 1 << 32, (1 << 32) + 1,
	}
	for _, key := range keys {
		for _, seq := range seqs {
			testSingleKey(t, key, seq, keyTypeVal)
			// Was testSingleKey(t, "hello", 1, keyTypeDel), which
			// re-tested the same fixed key every iteration and left
			// the deletion type uncovered for the generated cases.
			testSingleKey(t, key, seq, keyTypeDel)
		}
	}
}
// assertBytes reports a test error unless got equals want byte-for-byte.
func assertBytes(t *testing.T, want, got []byte) {
	if bytes.Equal(got, want) {
		return
	}
	t.Errorf("assert failed, got %v, want %v", got, want)
}
// TestInternalKeyShortSeparator checks the internal-key Separator behavior
// across the interesting cases: identical user keys (any seq/type), misordered
// keys, properly ordered distinct keys, and prefix relationships.
func TestInternalKeyShortSeparator(t *testing.T) {
	// When user keys are same
	assertBytes(t, ikey("foo", 100, keyTypeVal),
		shortSep(ikey("foo", 100, keyTypeVal),
			ikey("foo", 99, keyTypeVal)))
	assertBytes(t, ikey("foo", 100, keyTypeVal),
		shortSep(ikey("foo", 100, keyTypeVal),
			ikey("foo", 101, keyTypeVal)))
	assertBytes(t, ikey("foo", 100, keyTypeVal),
		shortSep(ikey("foo", 100, keyTypeVal),
			ikey("foo", 100, keyTypeVal)))
	assertBytes(t, ikey("foo", 100, keyTypeVal),
		shortSep(ikey("foo", 100, keyTypeVal),
			ikey("foo", 100, keyTypeDel)))

	// When user keys are misordered
	assertBytes(t, ikey("foo", 100, keyTypeVal),
		shortSep(ikey("foo", 100, keyTypeVal),
			ikey("bar", 99, keyTypeVal)))

	// When user keys are different, but correctly ordered
	assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek),
		shortSep(ikey("foo", 100, keyTypeVal),
			ikey("hello", 200, keyTypeVal)))

	// When start user key is prefix of limit user key
	assertBytes(t, ikey("foo", 100, keyTypeVal),
		shortSep(ikey("foo", 100, keyTypeVal),
			ikey("foobar", 200, keyTypeVal)))

	// When limit user key is prefix of start user key
	assertBytes(t, ikey("foobar", 100, keyTypeVal),
		shortSep(ikey("foobar", 100, keyTypeVal),
			ikey("foo", 200, keyTypeVal)))
}

// TestInternalKeyShortestSuccessor checks Successor: a normal key advances
// its first byte, while an all-0xff key has no shorter successor and is
// returned unchanged.
func TestInternalKeyShortestSuccessor(t *testing.T) {
	assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek),
		shortSuccessor(ikey("foo", 100, keyTypeVal)))
	assertBytes(t, ikey("\xff\xff", 100, keyTypeVal),
		shortSuccessor(ikey("\xff\xff", 100, keyTypeVal)))
}

View File

@@ -1,11 +0,0 @@
package leveldb
import (
"testing"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// TestLevelDB runs the ginkgo-based "LevelDB Suite" under the standard
// go test runner.
func TestLevelDB(t *testing.T) {
	testutil.RunSuite(t, "LevelDB Suite")
}

View File

@@ -1,75 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package memdb
import (
"encoding/binary"
"math/rand"
"testing"
"github.com/syndtr/goleveldb/leveldb/comparer"
)
// BenchmarkPut measures insertion of sequential 4-byte keys into a fresh
// memdb.
func BenchmarkPut(b *testing.B) {
	buf := make([][4]byte, b.N)
	for i := range buf {
		binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
	}

	// Create the DB before resetting the timer so that only the Put calls
	// are measured; the original constructed it after ResetTimer, timing
	// the allocation as well.
	p := New(comparer.DefaultComparer, 0)
	b.ResetTimer()
	for i := range buf {
		p.Put(buf[i][:], nil)
	}
}
// BenchmarkPutRandom measures insertion of randomly ordered 4-byte keys
// into a fresh memdb.
func BenchmarkPutRandom(b *testing.B) {
	buf := make([][4]byte, b.N)
	for i := range buf {
		binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int()))
	}

	// Create the DB before resetting the timer so that only the Put calls
	// are measured; the original constructed it after ResetTimer, timing
	// the allocation as well.
	p := New(comparer.DefaultComparer, 0)
	b.ResetTimer()
	for i := range buf {
		p.Put(buf[i][:], nil)
	}
}
// BenchmarkGet measures lookups of sequentially inserted 4-byte keys.
func BenchmarkGet(b *testing.B) {
	keys := make([][4]byte, b.N)
	for i := range keys {
		binary.LittleEndian.PutUint32(keys[i][:], uint32(i))
	}

	// Populate the DB outside the timed region.
	db := New(comparer.DefaultComparer, 0)
	for i := range keys {
		db.Put(keys[i][:], nil)
	}

	b.ResetTimer()
	for i := range keys {
		db.Get(keys[i][:])
	}
}
// BenchmarkGetRandom measures lookups in random order over sequentially
// inserted 4-byte keys.
func BenchmarkGetRandom(b *testing.B) {
	keys := make([][4]byte, b.N)
	for i := range keys {
		binary.LittleEndian.PutUint32(keys[i][:], uint32(i))
	}

	// Populate the DB outside the timed region.
	db := New(comparer.DefaultComparer, 0)
	for i := range keys {
		db.Put(keys[i][:], nil)
	}

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		db.Get(keys[rand.Int()%b.N][:])
	}
}

View File

@@ -1,11 +0,0 @@
package memdb
import (
"testing"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// TestMemDB runs the ginkgo-based "MemDB Suite" under the standard
// go test runner.
func TestMemDB(t *testing.T) {
	testutil.RunSuite(t, "MemDB Suite")
}

View File

@@ -1,135 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package memdb
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/testutil"
"github.com/syndtr/goleveldb/leveldb/util"
)
// TestFindLT exposes the internal findLT for testing: it returns the entry
// with the greatest key strictly less than key, or ErrNotFound when no such
// entry exists.
func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) {
	p.mu.RLock()
	defer p.mu.RUnlock()

	node := p.findLT(key)
	if node == 0 {
		return nil, nil, ErrNotFound
	}
	// nodeData[node] is the key offset into kvData; nKey/nVal hold lengths.
	n := p.nodeData[node]
	m := n + p.nodeData[node+nKey]
	return p.kvData[n:m], p.kvData[m : m+p.nodeData[node+nVal]], nil
}
// TestFindLast exposes the internal findLast for testing: it returns the
// entry with the greatest key, or ErrNotFound when the DB is empty.
func (p *DB) TestFindLast() (rkey, value []byte, err error) {
	p.mu.RLock()
	defer p.mu.RUnlock()

	node := p.findLast()
	if node == 0 {
		return nil, nil, ErrNotFound
	}
	// nodeData[node] is the key offset into kvData; nKey/nVal hold lengths.
	n := p.nodeData[node]
	m := n + p.nodeData[node+nKey]
	return p.kvData[n:m], p.kvData[m : m+p.nodeData[node+nVal]], nil
}
// TestPut adapts Put to the testutil.DB interface; it always reports nil.
func (p *DB) TestPut(key []byte, value []byte) error {
	p.Put(key, value)
	return nil
}

// TestDelete adapts Delete to the testutil.DB interface; it always reports nil.
func (p *DB) TestDelete(key []byte) error {
	p.Delete(key)
	return nil
}

// TestFind exposes Find to the testutil.DB interface.
func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) {
	return p.Find(key)
}

// TestGet exposes Get to the testutil.DB interface.
func (p *DB) TestGet(key []byte) (value []byte, err error) {
	return p.Get(key)
}

// TestNewIterator exposes NewIterator to the testutil.DB interface.
func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator {
	return p.NewIterator(slice)
}
// Registers the ginkgo specs for memdb: a randomized write test that checks
// Len/Size/Contains invariants after every action, plus read tests that
// exercise findLT and findLast against generated key/value sets.
var _ = testutil.Defer(func() {
	Describe("Memdb", func() {
		Describe("write test", func() {
			It("should do write correctly", func() {
				db := New(comparer.DefaultComparer, 0)
				t := testutil.DBTesting{
					DB:      db,
					Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 1, 30, 5, 5).Clone(),
					// PostFn runs after each randomized action, comparing the
					// DB against the harness's model of its expected state.
					PostFn: func(t *testutil.DBTesting) {
						Expect(db.Len()).Should(Equal(t.Present.Len()))
						Expect(db.Size()).Should(Equal(t.Present.Size()))
						switch t.Act {
						case testutil.DBPut, testutil.DBOverwrite:
							Expect(db.Contains(t.ActKey)).Should(BeTrue())
						default:
							Expect(db.Contains(t.ActKey)).Should(BeFalse())
						}
					},
				}
				testutil.DoDBTesting(&t)
			})
		})
		Describe("read test", func() {
			testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB {
				// Building the DB.
				db := New(comparer.DefaultComparer, 0)
				kv.IterateShuffled(nil, func(i int, key, value []byte) {
					db.Put(key, value)
				})
				if kv.Len() > 1 {
					It("Should find correct keys with findLT", func() {
						testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) {
							key_, key, _ := kv.IndexInexact(i + 1)
							expectedKey, expectedValue := kv.Index(i)
							// Using key that exist.
							rkey, rvalue, err := db.TestFindLT(key)
							Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey)
							Expect(rkey).Should(Equal(expectedKey), "Key")
							Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey)
							// Using key that doesn't exist.
							rkey, rvalue, err = db.TestFindLT(key_)
							Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey)
							Expect(rkey).Should(Equal(expectedKey))
							Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey)
						})
					})
				}
				if kv.Len() > 0 {
					It("Should find last key with findLast", func() {
						key, value := kv.Index(kv.Len() - 1)
						rkey, rvalue, err := db.TestFindLast()
						Expect(err).ShouldNot(HaveOccurred())
						Expect(rkey).Should(Equal(key))
						Expect(rvalue).Should(Equal(value))
					})
				}
				return db
			}, nil, nil)
		})
	})
})

View File

@@ -1,62 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"testing"
)
// decodeEncode encodes v, decodes the result into a fresh record, re-encodes
// that record, and reports whether the two encodings are identical.
func decodeEncode(v *sessionRecord) (res bool, err error) {
	b := new(bytes.Buffer)
	err = v.encode(b)
	if err != nil {
		return
	}
	// Snapshot the encoded form before decoding: decoding consumes the
	// buffer, after which b.Bytes() would only report the (empty) unread
	// portion.
	enc := append([]byte(nil), b.Bytes()...)
	v2 := &sessionRecord{}
	// Bug fix: decode into the fresh record v2, not back into v. The
	// original decoded into v, leaving v2 zero-valued, so the final
	// comparison checked two empty byte slices and the round-trip test was
	// vacuous.
	err = v2.decode(b)
	if err != nil {
		return
	}
	b2 := new(bytes.Buffer)
	err = v2.encode(b2)
	if err != nil {
		return
	}
	return bytes.Equal(enc, b2.Bytes()), nil
}
// TestSessionRecord_EncodeDecode round-trips a sessionRecord that is grown
// incrementally: each iteration adds a table, a deleted-table entry and a
// compaction pointer, using numbers near 2^50 to exercise wide varint
// encoding; the final iteration also sets the scalar fields.
func TestSessionRecord_EncodeDecode(t *testing.T) {
	big := int64(1) << 50
	v := &sessionRecord{}
	i := int64(0)
	// test round-trips the current state of v via decodeEncode.
	test := func() {
		res, err := decodeEncode(v)
		if err != nil {
			t.Fatalf("error when testing encode/decode sessionRecord: %v", err)
		}
		if !res {
			t.Error("encode/decode test failed at iteration:", i)
		}
	}
	for ; i < 4; i++ {
		test()
		// NOTE(review): the "+1" in the sequence numbers below looks like a
		// typo for "+i" (the C++ leveldb version of this test varies the
		// sequence per iteration) — confirm before changing; the test is
		// still valid either way.
		v.addTable(3, big+300+i, big+400+i,
			makeInternalKey(nil, []byte("foo"), uint64(big+500+1), keyTypeVal),
			makeInternalKey(nil, []byte("zoo"), uint64(big+600+1), keyTypeDel))
		v.delTable(4, big+700+i)
		v.addCompPtr(int(i), makeInternalKey(nil, []byte("x"), uint64(big+900+1), keyTypeVal))
	}
	v.setComparer("foo")
	v.setJournalNum(big + 100)
	v.setPrevJournalNum(big + 99)
	v.setNextFileNum(big + 200)
	v.setSeqNum(uint64(big + 1000))
	test()
}

View File

@@ -1,176 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package storage
import (
"fmt"
"os"
"path/filepath"
"testing"
)
// cases lists valid storage file names together with the FileDesc each
// should round-trip to; oldName holds legacy spellings (e.g. ".sst") that
// the parser must still accept even though the generator no longer emits
// them.
var cases = []struct {
	oldName []string
	name    string
	ftype   FileType
	num     int64
}{
	{nil, "000100.log", TypeJournal, 100},
	{nil, "000000.log", TypeJournal, 0},
	{[]string{"000000.sst"}, "000000.ldb", TypeTable, 0},
	{nil, "MANIFEST-000002", TypeManifest, 2},
	{nil, "MANIFEST-000007", TypeManifest, 7},
	{nil, "9223372036854775807.log", TypeJournal, 9223372036854775807},
	{nil, "000100.tmp", TypeTemp, 100},
}

// invalidCases lists names that fsParseName must reject: wrong extensions,
// truncated or extended well-known names, and numbers that overflow int64.
// NOTE(review): the empty string appears twice — presumably an accidental
// duplicate; it is harmless.
var invalidCases = []string{
	"",
	"foo",
	"foo-dx-100.log",
	".log",
	"",
	"manifest",
	"CURREN",
	"CURRENTX",
	"MANIFES",
	"MANIFEST",
	"MANIFEST-",
	"XMANIFEST-3",
	"MANIFEST-3x",
	"LOC",
	"LOCKx",
	"LO",
	"LOGx",
	"18446744073709551616.log",
	"184467440737095516150.log",
	"100",
	"100.",
	"100.lop",
}
// TestFileStorage_CreateFileName verifies that fsGenName renders every
// FileDesc from the shared cases table as the expected on-disk file name.
func TestFileStorage_CreateFileName(t *testing.T) {
	for _, tc := range cases {
		name := fsGenName(FileDesc{tc.ftype, tc.num})
		if name != tc.name {
			t.Errorf("invalid filename got '%s', want '%s'", name, tc.name)
		}
	}
}
// TestFileStorage_ParseFileName verifies that fsParseName accepts both the
// current and any legacy spellings of each file name, recovering the right
// type and number.
func TestFileStorage_ParseFileName(t *testing.T) {
	for _, tc := range cases {
		names := append([]string{tc.name}, tc.oldName...)
		for _, name := range names {
			fd, ok := fsParseName(name)
			if !ok {
				t.Errorf("cannot parse filename '%s'", name)
				continue
			}
			if fd.Type != tc.ftype {
				t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, fd.Type, tc.ftype)
			}
			if fd.Num != tc.num {
				t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, fd.Num, tc.num)
			}
		}
	}
}
// TestFileStorage_InvalidFileName verifies that every malformed name in
// invalidCases is rejected by fsParseNamePtr.
func TestFileStorage_InvalidFileName(t *testing.T) {
	for _, name := range invalidCases {
		if !fsParseNamePtr(name, nil) {
			continue
		}
		t.Errorf("filename '%s' should be invalid", name)
	}
}
// TestFileStorage_Locking verifies two layers of exclusivity: OpenFile holds
// an exclusive lock on the storage directory, and Storage.Lock is itself
// exclusive until released.
func TestFileStorage_Locking(t *testing.T) {
	path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrwlock-%d", os.Getuid()))
	if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) {
		t.Fatal("RemoveAll: got error: ", err)
	}
	defer os.RemoveAll(path)
	p1, err := OpenFile(path, false)
	if err != nil {
		t.Fatal("OpenFile(1): got error: ", err)
	}
	// A second read-write open of the same directory must fail while the
	// first is still open.
	p2, err := OpenFile(path, false)
	if err != nil {
		t.Logf("OpenFile(2): got error: %s (expected)", err)
	} else {
		p2.Close()
		p1.Close()
		t.Fatal("OpenFile(2): expect error")
	}
	p1.Close()
	// After closing, opening again must succeed.
	p3, err := OpenFile(path, false)
	if err != nil {
		t.Fatal("OpenFile(3): got error: ", err)
	}
	defer p3.Close()
	// Storage.Lock is exclusive until Unlock is called.
	l, err := p3.Lock()
	if err != nil {
		t.Fatal("storage lock failed(1): ", err)
	}
	_, err = p3.Lock()
	if err == nil {
		t.Fatal("expect error for second storage lock attempt")
	} else {
		t.Logf("storage lock got error: %s (expected)", err)
	}
	l.Unlock()
	// Once released, the lock can be acquired again.
	_, err = p3.Lock()
	if err != nil {
		t.Fatal("storage lock failed(2): ", err)
	}
}
// TestFileStorage_ReadOnlyLocking verifies lock interaction between
// read-only and read-write opens: a read-only open must fail while a
// read-write store is open, multiple read-only opens may coexist, and a
// read-write open must fail while read-only stores are open.
func TestFileStorage_ReadOnlyLocking(t *testing.T) {
	path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrolock-%d", os.Getuid()))
	if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) {
		t.Fatal("RemoveAll: got error: ", err)
	}
	defer os.RemoveAll(path)
	p1, err := OpenFile(path, false)
	if err != nil {
		t.Fatal("OpenFile(1): got error: ", err)
	}
	// Read-only open must fail while a read-write store is open.
	_, err = OpenFile(path, true)
	if err != nil {
		t.Logf("OpenFile(2): got error: %s (expected)", err)
	} else {
		t.Fatal("OpenFile(2): expect error")
	}
	p1.Close()
	// Multiple read-only opens may coexist.
	p3, err := OpenFile(path, true)
	if err != nil {
		t.Fatal("OpenFile(3): got error: ", err)
	}
	p4, err := OpenFile(path, true)
	if err != nil {
		t.Fatal("OpenFile(4): got error: ", err)
	}
	// Read-write open must fail while read-only stores are open.
	_, err = OpenFile(path, false)
	if err != nil {
		t.Logf("OpenFile(5): got error: %s (expected)", err)
	} else {
		// Bug fix: this failure message previously said "OpenFile(2)"
		// although it refers to the fifth open.
		t.Fatal("OpenFile(5): expect error")
	}
	p3.Close()
	p4.Close()
}

View File

@@ -1,65 +0,0 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package storage
import (
"bytes"
"testing"
)
// TestMemStorage exercises the in-memory Storage implementation: lock
// exclusivity, file create/list/open/read, single-reader-at-a-time open
// semantics, and removal.
func TestMemStorage(t *testing.T) {
	m := NewMemStorage()
	// Lock is exclusive until released.
	l, err := m.Lock()
	if err != nil {
		t.Fatal("storage lock failed(1): ", err)
	}
	_, err = m.Lock()
	if err == nil {
		t.Fatal("expect error for second storage lock attempt")
	} else {
		t.Logf("storage lock got error: %s (expected)", err)
	}
	l.Unlock()
	_, err = m.Lock()
	if err != nil {
		t.Fatal("storage lock failed(2): ", err)
	}
	// Create a table file and write a payload.
	w, err := m.Create(FileDesc{TypeTable, 1})
	if err != nil {
		t.Fatal("Storage.Create: ", err)
	}
	w.Write([]byte("abc"))
	w.Close()
	if fds, _ := m.List(TypeAll); len(fds) != 1 {
		t.Fatal("invalid GetFiles len")
	}
	// Read the payload back.
	buf := new(bytes.Buffer)
	r, err := m.Open(FileDesc{TypeTable, 1})
	if err != nil {
		t.Fatal("Open: got error: ", err)
	}
	buf.ReadFrom(r)
	r.Close()
	if got := buf.String(); got != "abc" {
		t.Fatalf("Read: invalid value, want=abc got=%s", got)
	}
	// Re-opening after close succeeds; opening again while the previous
	// reader is still open must fail.
	if _, err := m.Open(FileDesc{TypeTable, 1}); err != nil {
		t.Fatal("Open: got error: ", err)
	}
	if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil {
		t.Fatal("expecting error")
	}
	// After removal the file is gone and cannot be opened.
	m.Remove(FileDesc{TypeTable, 1})
	if fds, _ := m.List(TypeAll); len(fds) != 0 {
		t.Fatal("invalid GetFiles len", len(fds))
	}
	if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil {
		t.Fatal("expecting error")
	}
}

View File

@@ -1,139 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package table
import (
"encoding/binary"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/testutil"
"github.com/syndtr/goleveldb/leveldb/util"
)
// blockTesting bundles a Reader with one of its blocks so the block can be
// iterated through the testutil harness.
type blockTesting struct {
	tr *Reader
	b  *block
}

// TestNewIterator returns an iterator over the wrapped block restricted to
// slice, satisfying the testutil iterator-factory contract.
func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator {
	return t.tr.newBlockIter(t.b, nil, slice, false)
}
// Registers the ginkgo specs for table blocks: read tests over a growing
// key/value set at several restart intervals, plus iterator tests whose
// range limits fall outside the keys actually stored.
var _ = testutil.Defer(func() {
	Describe("Block", func() {
		// Build writes kv into a blockWriter and reopens the raw bytes as a
		// block, decoding the restart-array length from the trailer.
		Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting {
			// Building the block.
			bw := &blockWriter{
				restartInterval: restartInterval,
				scratch:         make([]byte, 30),
			}
			kv.Iterate(func(i int, key, value []byte) {
				bw.append(key, value)
			})
			bw.finish()
			// Opening the block.
			data := bw.buf.Bytes()
			restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
			return &blockTesting{
				tr: &Reader{cmp: comparer.DefaultComparer},
				b: &block{
					data:           data,
					restartsLen:    restartsLen,
					restartsOffset: len(data) - (restartsLen+1)*4,
				},
			}
		}
		Describe("read test", func() {
			for restartInterval := 1; restartInterval <= 5; restartInterval++ {
				Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
					// kv is grown between Describe calls, so each spec sees a
					// progressively larger key set.
					kv := &testutil.KeyValue{}
					Text := func() string {
						return fmt.Sprintf("and %d keys", kv.Len())
					}
					Test := func() {
						// Make block.
						br := Build(kv, restartInterval)
						// Do testing.
						testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil)
					}
					Describe(Text(), Test)
					kv.PutString("", "empty")
					Describe(Text(), Test)
					kv.PutString("a1", "foo")
					Describe(Text(), Test)
					kv.PutString("a2", "v")
					Describe(Text(), Test)
					kv.PutString("a3qqwrkks", "hello")
					Describe(Text(), Test)
					kv.PutString("a4", "bar")
					Describe(Text(), Test)
					kv.PutString("a5111111", "v5")
					kv.PutString("a6", "")
					kv.PutString("a7", "v7")
					kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8")
					kv.PutString("b", "v9")
					kv.PutString("c9", "v9")
					kv.PutString("c91", "v9")
					kv.PutString("d0", "v9")
					Describe(Text(), Test)
				})
			}
		})
		Describe("out-of-bound slice test", func() {
			kv := &testutil.KeyValue{}
			kv.PutString("k1", "v1")
			kv.PutString("k2", "v2")
			kv.PutString("k3abcdefgg", "v3")
			kv.PutString("k4", "v4")
			kv.PutString("k5", "v5")
			for restartInterval := 1; restartInterval <= 5; restartInterval++ {
				Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
					// Make block.
					bt := Build(kv, restartInterval)
					// Test returns an async spec body (Done-based, 2s timeout)
					// iterating the block restricted to range r.
					Test := func(r *util.Range) func(done Done) {
						return func(done Done) {
							iter := bt.TestNewIterator(r)
							Expect(iter.Error()).ShouldNot(HaveOccurred())
							t := testutil.IteratorTesting{
								KeyValue: kv.Clone(),
								Iter:     iter,
							}
							testutil.DoIteratorTesting(&t)
							iter.Release()
							done <- true
						}
					}
					It("Should do iterations and seeks correctly #0",
						Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0)
					It("Should do iterations and seeks correctly #1",
						Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0)
				})
			}
		})
	})
})

View File

@@ -1,11 +0,0 @@
package table
import (
"testing"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// TestTable runs the ginkgo-based "Table Suite" under the standard
// go test runner.
func TestTable(t *testing.T) {
	testutil.RunSuite(t, "Table Suite")
}

View File

@@ -1,123 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package table
import (
"bytes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/testutil"
"github.com/syndtr/goleveldb/leveldb/util"
)
// tableWrapper adapts *Reader to the testutil.DB interface expected by the
// shared key/value test harness.
type tableWrapper struct {
	*Reader
}

// TestFind delegates to Reader.Find with filtered=false and default read
// options.
func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) {
	return t.Reader.Find(key, false, nil)
}

// TestGet delegates to Reader.Get with default read options.
func (t tableWrapper) TestGet(key []byte) (value []byte, err error) {
	return t.Reader.Get(key, nil)
}

// TestNewIterator delegates to Reader.NewIterator with default read options.
func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator {
	return t.Reader.NewIterator(slice, nil)
}
// Registers the ginkgo specs for table files: an OffsetOf approximation test
// over a hand-built table, and key/value read tests including a table laid
// out with one key per block.
var _ = testutil.Defer(func() {
	Describe("Table", func() {
		Describe("approximate offset test", func() {
			var (
				buf = &bytes.Buffer{}
				o = &opt.Options{
					BlockSize:   1024,
					Compression: opt.NoCompression,
				}
			)
			// Building the table.
			tw := NewWriter(buf, o)
			tw.Append([]byte("k01"), []byte("hello"))
			tw.Append([]byte("k02"), []byte("hello2"))
			tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000))
			tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000))
			tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000))
			tw.Append([]byte("k06"), []byte("hello3"))
			tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000))
			err := tw.Close()
			It("Should be able to approximate offset of a key correctly", func() {
				Expect(err).ShouldNot(HaveOccurred())
				tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o)
				Expect(err).ShouldNot(HaveOccurred())
				// CheckOffset asserts OffsetOf(key) lies within threshold of
				// the expected byte offset.
				CheckOffset := func(key string, expect, threshold int) {
					offset, err := tr.OffsetOf([]byte(key))
					Expect(err).ShouldNot(HaveOccurred())
					Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key)
				}
				CheckOffset("k0", 0, 0)
				CheckOffset("k01a", 0, 0)
				CheckOffset("k02", 0, 0)
				CheckOffset("k03", 0, 0)
				CheckOffset("k04", 10000, 1000)
				CheckOffset("k04a", 210000, 1000)
				CheckOffset("k05", 210000, 1000)
				CheckOffset("k06", 510000, 1000)
				CheckOffset("k07", 510000, 1000)
				CheckOffset("xyz", 610000, 2000)
			})
		})
		Describe("read test", func() {
			// Build writes kv into an in-memory table and reopens it through
			// the tableWrapper test adapter.
			Build := func(kv testutil.KeyValue) testutil.DB {
				o := &opt.Options{
					BlockSize:            512,
					BlockRestartInterval: 3,
				}
				buf := &bytes.Buffer{}
				// Building the table.
				tw := NewWriter(buf, o)
				kv.Iterate(func(i int, key, value []byte) {
					tw.Append(key, value)
				})
				tw.Close()
				// Opening the table.
				tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o)
				return tableWrapper{tr}
			}
			Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {
				return func() {
					db := Build(*kv)
					if body != nil {
						body(db.(tableWrapper).Reader)
					}
					testutil.KeyValueTesting(nil, *kv, db, nil, nil)
				}
			}
			testutil.AllKeyValueTesting(nil, Build, nil, nil)
			// Values of 512 bytes with BlockSize 512 force one key per block,
			// so the index block should have one restart per key.
			Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 1, 10, 512, 512), func(r *Reader) {
				It("should have correct blocks number", func() {
					indexBlock, err := r.readBlock(r.indexBH, true)
					Expect(err).To(BeNil())
					Expect(indexBlock.restartsLen).Should(Equal(9))
				})
			}))
		})
	})
})

View File

@@ -1,91 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/testutil"
"github.com/syndtr/goleveldb/leveldb/util"
)
// testingDB wraps a *DB with fixed read/write options and its backing test
// storage, adapting it to the testutil.DB interface.
type testingDB struct {
	*DB
	ro   *opt.ReadOptions
	wo   *opt.WriteOptions
	stor *testutil.Storage
}

// TestPut delegates to Put with the configured write options.
func (t *testingDB) TestPut(key []byte, value []byte) error {
	return t.Put(key, value, t.wo)
}

// TestDelete delegates to Delete with the configured write options.
func (t *testingDB) TestDelete(key []byte) error {
	return t.Delete(key, t.wo)
}

// TestGet delegates to Get with the configured read options.
func (t *testingDB) TestGet(key []byte) (value []byte, err error) {
	return t.Get(key, t.ro)
}

// TestHas delegates to Has with the configured read options.
func (t *testingDB) TestHas(key []byte) (ret bool, err error) {
	return t.Has(key, t.ro)
}

// TestNewIterator delegates to NewIterator with the configured read options.
func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator {
	return t.NewIterator(slice, t.ro)
}

// TestClose closes both the DB and its backing storage, asserting neither
// close fails.
func (t *testingDB) TestClose() {
	err := t.Close()
	ExpectWithOffset(1, err).NotTo(HaveOccurred())
	err = t.stor.Close()
	ExpectWithOffset(1, err).NotTo(HaveOccurred())
}

// newTestingDB opens a DB on fresh test storage with the given options.
func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB {
	stor := testutil.NewStorage()
	db, err := Open(stor, o)
	// FIXME: This may be called from outside It, which may cause panic.
	Expect(err).NotTo(HaveOccurred())
	return &testingDB{
		DB:   db,
		ro:   ro,
		wo:   wo,
		stor: stor,
	}
}

// testingTransaction wraps a *Transaction with fixed read/write options,
// adapting it to the testutil.DB interface.
type testingTransaction struct {
	*Transaction
	ro *opt.ReadOptions
	wo *opt.WriteOptions
}

// TestPut delegates to Put with the configured write options.
func (t *testingTransaction) TestPut(key []byte, value []byte) error {
	return t.Put(key, value, t.wo)
}

// TestDelete delegates to Delete with the configured write options.
func (t *testingTransaction) TestDelete(key []byte) error {
	return t.Delete(key, t.wo)
}

// TestGet delegates to Get with the configured read options.
func (t *testingTransaction) TestGet(key []byte) (value []byte, err error) {
	return t.Get(key, t.ro)
}

// TestHas delegates to Has with the configured read options.
func (t *testingTransaction) TestHas(key []byte) (ret bool, err error) {
	return t.Has(key, t.ro)
}

// TestNewIterator delegates to NewIterator with the configured read options.
func (t *testingTransaction) TestNewIterator(slice *util.Range) iterator.Iterator {
	return t.NewIterator(slice, t.ro)
}

// TestClose is a no-op; transactions are finalized by the harness elsewhere.
func (t *testingTransaction) TestClose() {}

View File

@@ -1,369 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package util
import (
"bytes"
"io"
"math/rand"
"runtime"
"testing"
)
const N = 10000 // make this bigger for a larger (and slower) test

var data string      // test data for write tests
var testBytes []byte // test data; same as data but as a slice.

// init fills testBytes with a repeating 'a'..'z' pattern and mirrors it into
// data as a string.
func init() {
	testBytes = make([]byte, N)
	for i := range testBytes {
		testBytes[i] = 'a' + byte(i%26)
	}
	data = string(testBytes)
}
// check verifies that the contents of buf equal the string s, and that
// Len, Bytes and String all agree with each other.
func check(t *testing.T, testname string, buf *Buffer, s string) {
	raw := buf.Bytes() // renamed from "bytes": the original shadowed the bytes package
	str := buf.String()
	if buf.Len() != len(raw) {
		t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(raw))
	}
	if buf.Len() != len(str) {
		t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
	}
	if buf.Len() != len(s) {
		t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
	}
	if string(raw) != s {
		t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(raw), s)
	}
}
// fillBytes writes fub into buf n times, checking the buffer against the
// accumulated expected contents after every write. The initial contents of
// buf must correspond to s; the final expected contents are returned.
func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
	check(t, testname+" (fill 1)", buf, s)
	for i := 0; i < n; i++ {
		written, err := buf.Write(fub)
		if written != len(fub) {
			t.Errorf(testname+" (fill 2): m == %d, expected %d", written, len(fub))
		}
		if err != nil {
			t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
		}
		s += string(fub)
		check(t, testname+" (fill 4)", buf, s)
	}
	return s
}
// TestNewBuffer verifies that NewBuffer adopts the supplied byte slice as
// its initial, readable contents.
func TestNewBuffer(t *testing.T) {
	buf := NewBuffer(testBytes)
	check(t, "NewBuffer", buf, data)
}
// empty drains buf via repeated Reads into fub, checking the remaining
// contents after each read. The initial contents of buf must correspond to
// s; the buffer must end up empty.
func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
	check(t, testname+" (empty 1)", buf, s)
	for {
		nr, err := buf.Read(fub)
		if nr == 0 {
			// Nothing left to read; the drain is complete.
			break
		}
		if err != nil {
			t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
		}
		s = s[nr:]
		check(t, testname+" (empty 3)", buf, s)
	}
	check(t, testname+" (empty 4)", buf, "")
}
// TestBasicOperations exercises Reset, Truncate, Write, WriteByte, Read and
// ReadByte over five identical rounds, ensuring the buffer is fully
// reusable after being drained.
func TestBasicOperations(t *testing.T) {
	var buf Buffer
	for i := 0; i < 5; i++ {
		check(t, "TestBasicOperations (1)", &buf, "")
		buf.Reset()
		check(t, "TestBasicOperations (2)", &buf, "")
		buf.Truncate(0)
		check(t, "TestBasicOperations (3)", &buf, "")
		n, err := buf.Write([]byte(data[0:1]))
		if n != 1 {
			t.Errorf("wrote 1 byte, but n == %d", n)
		}
		if err != nil {
			t.Errorf("err should always be nil, but err == %s", err)
		}
		check(t, "TestBasicOperations (4)", &buf, "a")
		buf.WriteByte(data[1])
		check(t, "TestBasicOperations (5)", &buf, "ab")
		n, err = buf.Write([]byte(data[2:26]))
		if n != 24 {
			// Bug fix: the message previously claimed "25 bytes" although
			// data[2:26] is 24 bytes long.
			t.Errorf("wrote 24 bytes, but n == %d", n)
		}
		if err != nil {
			// Bug fix: err from this Write was previously assigned but
			// never checked.
			t.Errorf("err should always be nil, but err == %s", err)
		}
		check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
		buf.Truncate(26)
		check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
		buf.Truncate(20)
		check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
		empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
		empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
		buf.WriteByte(data[1])
		c, err := buf.ReadByte()
		if err != nil {
			t.Error("ReadByte unexpected eof")
		}
		if c != data[1] {
			t.Errorf("ReadByte wrong value c=%v", c)
		}
		// A second ReadByte on the now-empty buffer must report EOF.
		c, err = buf.ReadByte()
		if err == nil {
			t.Error("ReadByte unexpected not eof")
		}
	}
}
// TestLargeByteWrites repeatedly fills the buffer with large writes and
// drains it with progressively smaller reads, checking that it ends empty.
func TestLargeByteWrites(t *testing.T) {
	var buf Buffer
	limit := 30
	if testing.Short() {
		limit = 9
	}
	for i := 3; i < limit; i += 3 {
		// Bug fix: the test name passed to fillBytes previously said
		// "TestLargeWrites", a copy-paste inconsistency.
		s := fillBytes(t, "TestLargeByteWrites (1)", &buf, "", 5, testBytes)
		empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
	}
	check(t, "TestLargeByteWrites (3)", &buf, "")
}
// TestLargeByteReads fills the buffer with progressively smaller writes and
// drains it with one large read, checking that it ends empty.
func TestLargeByteReads(t *testing.T) {
	var buf Buffer
	for i := 3; i < 30; i += 3 {
		// Bug fix: the test name passed to fillBytes/empty previously said
		// "TestLargeReads", a copy-paste inconsistency.
		s := fillBytes(t, "TestLargeByteReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
		empty(t, "TestLargeByteReads (2)", &buf, s, make([]byte, len(data)))
	}
	check(t, "TestLargeByteReads (3)", &buf, "")
}
// TestMixedReadsAndWrites interleaves random-sized writes and reads for 50
// rounds, tracking the expected contents, then drains whatever remains.
func TestMixedReadsAndWrites(t *testing.T) {
	var buf Buffer
	expected := ""
	for round := 0; round < 50; round++ {
		wlen := rand.Intn(len(data))
		expected = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, expected, 1, testBytes[0:wlen])

		rlen := rand.Intn(len(data))
		dst := make([]byte, rlen)
		n, _ := buf.Read(dst)
		expected = expected[n:]
	}
	empty(t, "TestMixedReadsAndWrites (2)", &buf, expected, make([]byte, buf.Len()))
}
// TestNil verifies that String on a nil *Buffer reports "<nil>" rather than
// panicking.
func TestNil(t *testing.T) {
	var b *Buffer
	got := b.String()
	if got != "<nil>" {
		t.Errorf("expected <nil>; got %q", got)
	}
}
// TestReadFrom fills a source buffer and verifies that ReadFrom transfers
// its entire contents into a fresh buffer.
func TestReadFrom(t *testing.T) {
	var src Buffer
	for i := 3; i < 30; i += 3 {
		s := fillBytes(t, "TestReadFrom (1)", &src, "", 5, testBytes[0:len(testBytes)/i])
		var dst Buffer
		dst.ReadFrom(&src)
		empty(t, "TestReadFrom (2)", &dst, s, make([]byte, len(data)))
	}
}
// TestWriteTo fills a source buffer and verifies that WriteTo transfers its
// entire contents into a fresh buffer.
func TestWriteTo(t *testing.T) {
	var src Buffer
	for i := 3; i < 30; i += 3 {
		s := fillBytes(t, "TestWriteTo (1)", &src, "", 5, testBytes[0:len(testBytes)/i])
		var dst Buffer
		src.WriteTo(&dst)
		empty(t, "TestWriteTo (2)", &dst, s, make([]byte, len(data)))
	}
}
// TestNext exhaustively checks Next over every combination of initial
// buffer length j, prior read offset i, and requested count k, verifying
// both the returned length and the returned bytes.
func TestNext(t *testing.T) {
	b := []byte{0, 1, 2, 3, 4}
	tmp := make([]byte, 5)
	for i := 0; i <= 5; i++ {
		for j := i; j <= 5; j++ {
			for k := 0; k <= 6; k++ {
				// 0 <= i <= j <= 5; 0 <= k <= 6
				// Check that if we start with a buffer
				// of length j at offset i and ask for
				// Next(k), we get the right bytes.
				buf := NewBuffer(b[0:j])
				n, _ := buf.Read(tmp[0:i])
				if n != i {
					t.Fatalf("Read %d returned %d", i, n)
				}
				bb := buf.Next(k)
				// Next may return fewer than k bytes when the buffer
				// holds less; want is the clamped expectation.
				want := k
				if want > j-i {
					want = j - i
				}
				if len(bb) != want {
					t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
				}
				for l, v := range bb {
					if v != byte(l+i) {
						t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
					}
				}
			}
		}
	}
}

// readBytesTests drives TestReadBytes: each case lists the successive
// chunks ReadBytes(delim) should return and the error expected once the
// delimiter can no longer be found (io.EOF) or nil when input ends exactly
// on a delimiter.
var readBytesTests = []struct {
	buffer   string
	delim    byte
	expected []string
	err      error
}{
	{"", 0, []string{""}, io.EOF},
	{"a\x00", 0, []string{"a\x00"}, nil},
	{"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
	{"hello\x01world", 1, []string{"hello\x01"}, nil},
	{"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
	{"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
	{"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
}
// TestReadBytes runs each readBytesTests case, checking every chunk that
// ReadBytes returns and the terminal error.
func TestReadBytes(t *testing.T) {
	for _, tc := range readBytesTests {
		buf := NewBuffer([]byte(tc.buffer))
		var err error
		for _, expected := range tc.expected {
			var chunk []byte
			chunk, err = buf.ReadBytes(tc.delim)
			if string(chunk) != expected {
				t.Errorf("expected %q, got %q", expected, chunk)
			}
			if err != nil {
				// No further chunks after an error.
				break
			}
		}
		if err != tc.err {
			t.Errorf("expected error %v, got %v", tc.err, err)
		}
	}
}
// TestGrow verifies that after Grow(n) a write of n bytes performs no
// further allocation, and that prior contents and the new write land in the
// right places. The allocation check is only meaningful single-threaded.
func TestGrow(t *testing.T) {
	x := []byte{'x'}
	y := []byte{'y'}
	tmp := make([]byte, 72)
	for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
		xBytes := bytes.Repeat(x, startLen)
		for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
			buf := NewBuffer(xBytes)
			// If we read, this affects buf.off, which is good to test.
			readBytes, _ := buf.Read(tmp)
			buf.Grow(growLen)
			yBytes := bytes.Repeat(y, growLen)
			// Check no allocation occurs in write, as long as we're single-threaded.
			var m1, m2 runtime.MemStats
			runtime.ReadMemStats(&m1)
			buf.Write(yBytes)
			runtime.ReadMemStats(&m2)
			if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
				t.Errorf("allocation occurred during write")
			}
			// Check that buffer has correct data.
			if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
				t.Errorf("bad initial data at %d %d", startLen, growLen)
			}
			if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
				t.Errorf("bad written data at %d %d", startLen, growLen)
			}
		}
	}
}
// Was a bug: used to give EOF reading empty slice at EOF. Reading zero
// bytes from an empty buffer must report n == 0 with a nil error.
func TestReadEmptyAtEOF(t *testing.T) {
	var buf Buffer
	dst := make([]byte, 0)
	n, err := buf.Read(dst)
	if err != nil {
		t.Errorf("read error: %v", err)
	}
	if n != 0 {
		t.Errorf("wrong count; got %d want 0", n)
	}
}
// Tests that we occasionally compact. Issue 5154.
// With one byte permanently buffered, repeated equal-size write/read cycles
// must not grow the backing array without bound: capacity may at most
// roughly triple from its initial value.
func TestBufferGrowth(t *testing.T) {
	var b Buffer
	buf := make([]byte, 1024)
	b.Write(buf[0:1])
	var cap0 int
	for i := 0; i < 5<<10; i++ {
		b.Write(buf)
		b.Read(buf)
		if i == 0 {
			// Capacity after the first cycle is the baseline.
			cap0 = cap(b.buf)
		}
	}
	cap1 := cap(b.buf)
	// (*Buffer).grow allows for 2x capacity slop before sliding,
	// so set our error threshold at 3x.
	if cap1 > cap0*3 {
		t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
	}
}
// From Issue 5154.
// BenchmarkBufferNotEmptyWriteRead measures write/read cycles on a buffer
// that always retains one unread byte.
func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
	buf := make([]byte, 1024)
	for i := 0; i < b.N; i++ {
		// Fix: renamed from "b", which shadowed the *testing.B parameter.
		var bb Buffer
		bb.Write(buf[0:1])
		for j := 0; j < 5<<10; j++ {
			bb.Write(buf)
			bb.Read(buf)
		}
	}
}
// Check that we don't compact too often. From Issue 5154.
// BenchmarkBufferFullSmallReads measures small read/write cycles on a
// buffer filled close to its capacity.
func BenchmarkBufferFullSmallReads(b *testing.B) {
	buf := make([]byte, 1024)
	for i := 0; i < b.N; i++ {
		// Fix: renamed from "b", which shadowed the *testing.B parameter.
		var bb Buffer
		bb.Write(buf)
		// Top the buffer up until it is within 20 bytes of capacity.
		for bb.Len()+20 < cap(bb.buf) {
			bb.Write(buf[:10])
		}
		for j := 0; j < 5<<10; j++ {
			bb.Read(buf[:1])
			bb.Write(buf[:1])
		}
	}
}

View File

@@ -1,46 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package util
import (
"testing"
)
// hashTests lists precomputed vectors for Hash: input bytes, the seed, and
// the expected 32-bit hash. Inputs cover lengths 0-4 plus one 48-byte block.
var hashTests = []struct {
	data []byte
	seed uint32
	hash uint32
}{
	{nil, 0xbc9f1d34, 0xbc9f1d34},
	{[]byte{0x62}, 0xbc9f1d34, 0xef1345c4},
	{[]byte{0xc3, 0x97}, 0xbc9f1d34, 0x5b663814},
	{[]byte{0xe2, 0x99, 0xa5}, 0xbc9f1d34, 0x323c078f},
	{[]byte{0xe1, 0x80, 0xb9, 0x32}, 0xbc9f1d34, 0xed21633a},
	{[]byte{
		0x01, 0xc0, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x14, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x04, 0x00,
		0x00, 0x00, 0x00, 0x14,
		0x00, 0x00, 0x00, 0x18,
		0x28, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x02, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
	}, 0x12345678, 0xf333dabb},
}
// TestHash checks Hash against the precomputed vectors in hashTests.
func TestHash(t *testing.T) {
	for i, tc := range hashTests {
		if got := Hash(tc.data, tc.seed); got != tc.hash {
			t.Fatalf("test-%d: invalid hash, %#x vs %#x", i, got, tc.hash)
		}
	}
}

View File

@@ -1,181 +0,0 @@
package leveldb
import (
"encoding/binary"
"reflect"
"testing"
"github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
// testFileRec identifies a table file by level and file number, used to
// build sessionRecord add/delete entries in the version-staging test.
type testFileRec struct {
	level int
	num   int64
}
// TestVersionStaging drives version staging through a sequence of add/delete
// session records and verifies the per-level table layout after each commit.
// Each case is applied on top of the version produced by the previous case.
func TestVersionStaging(t *testing.T) {
	gomega.RegisterTestingT(t)
	stor := testutil.NewStorage()
	defer stor.Close()
	s, err := newSession(stor, nil)
	if err != nil {
		t.Fatal(err)
	}
	v := newVersion(s)
	v.newStaging()
	tmp := make([]byte, 4)
	// mik builds an internal key whose user key is the big-endian encoding
	// of i; it is used as both the smallest and largest key of a table.
	mik := func(i uint64) []byte {
		binary.BigEndian.PutUint32(tmp, uint32(i))
		return []byte(makeInternalKey(nil, tmp, 0, keyTypeVal))
	}
	// levels holds the expected file numbers per level after the case's
	// record has been committed; an empty outer slice means no levels left.
	for i, x := range []struct {
		add, del []testFileRec
		levels   [][]int64
	}{
		{
			add: []testFileRec{
				{1, 1},
			},
			levels: [][]int64{
				{},
				{1},
			},
		},
		{
			add: []testFileRec{
				{1, 1},
			},
			levels: [][]int64{
				{},
				{1},
			},
		},
		{
			del: []testFileRec{
				{1, 1},
			},
			levels: [][]int64{},
		},
		{
			add: []testFileRec{
				{0, 1},
				{0, 3},
				{0, 2},
				{2, 5},
				{1, 4},
			},
			levels: [][]int64{
				{3, 2, 1},
				{4},
				{5},
			},
		},
		{
			add: []testFileRec{
				{1, 6},
				{2, 5},
			},
			del: []testFileRec{
				{0, 1},
				{0, 4},
			},
			levels: [][]int64{
				{3, 2},
				{4, 6},
				{5},
			},
		},
		{
			del: []testFileRec{
				{0, 3},
				{0, 2},
				{1, 4},
				{1, 6},
				{2, 5},
			},
			levels: [][]int64{},
		},
		{
			add: []testFileRec{
				{0, 1},
			},
			levels: [][]int64{
				{1},
			},
		},
		{
			add: []testFileRec{
				{1, 2},
			},
			levels: [][]int64{
				{1},
				{2},
			},
		},
		{
			add: []testFileRec{
				{0, 3},
			},
			levels: [][]int64{
				{3, 1},
				{2},
			},
		},
		{
			add: []testFileRec{
				{6, 9},
			},
			levels: [][]int64{
				{3, 1},
				{2},
				{},
				{},
				{},
				{},
				{9},
			},
		},
		{
			del: []testFileRec{
				{6, 9},
			},
			levels: [][]int64{
				{3, 1},
				{2},
			},
		},
	} {
		rec := &sessionRecord{}
		for _, f := range x.add {
			ik := mik(uint64(f.num))
			rec.addTable(f.level, f.num, 1, ik, ik)
		}
		for _, f := range x.del {
			rec.delTable(f.level, f.num)
		}
		// Commit the record on top of the current version and compare the
		// resulting layout level by level.
		vs := v.newStaging()
		vs.commit(rec)
		v = vs.finish()
		if len(v.levels) != len(x.levels) {
			t.Fatalf("#%d: invalid level count: want=%d got=%d", i, len(x.levels), len(v.levels))
		}
		for j, want := range x.levels {
			tables := v.levels[j]
			if len(want) != len(tables) {
				t.Fatalf("#%d.%d: invalid tables count: want=%d got=%d", i, j, len(want), len(tables))
			}
			got := make([]int64, len(tables))
			for k, t := range tables {
				got[k] = t.fd.Num
			}
			if !reflect.DeepEqual(want, got) {
				t.Fatalf("#%d.%d: invalid tables: want=%v got=%v", i, j, want, got)
			}
		}
	}
}

View File

@@ -1,137 +0,0 @@
package main
import (
"encoding/binary"
"fmt"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/storage"
)
// ErrIkeyCorrupted reports an internal key that could not be parsed.
type ErrIkeyCorrupted struct {
	Ikey   []byte // the offending key bytes
	Reason string // why the key was rejected
}

// Error formats the corrupted key together with the rejection reason.
func (e *ErrIkeyCorrupted) Error() string {
	return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
}
// newErrIkeyCorrupted wraps an ErrIkeyCorrupted (holding a defensive copy of
// the key bytes) in the generic leveldb corruption error type.
func newErrIkeyCorrupted(ikey []byte, reason string) error {
	dup := make([]byte, len(ikey))
	copy(dup, ikey)
	return errors.NewErrCorrupted(storage.FileDesc{}, &ErrIkeyCorrupted{dup, reason})
}
// kType distinguishes the kinds of entries encoded in internal keys.
type kType int

// String returns a one-letter mnemonic: "d" for deletions, "v" for values,
// and "x" for anything unrecognized.
func (kt kType) String() string {
	if kt == ktDel {
		return "d"
	}
	if kt == ktVal {
		return "v"
	}
	return "x"
}

// Value types encoded as the last component of internal keys.
// Don't modify; these values are saved to disk.
const (
	ktDel kType = iota
	ktVal
)
// ktSeek defines the kType that should be passed when constructing an
// internal key for seeking to a particular sequence number (since we
// sort sequence numbers in decreasing order and the value type is
// embedded as the low 8 bits in the sequence number in internal keys,
// we need to use the highest-numbered ValueType, not the lowest).
const ktSeek = ktVal

const (
	// Maximum value possible for a sequence number; the low 8 bits are
	// used by the value type, so both can be packed together into a
	// single 64-bit integer.
	kMaxSeq uint64 = (uint64(1) << 56) - 1
	// Maximum value possible for a packed sequence number and type.
	kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
)

// kMaxNumBytes is kMaxNum pre-encoded in little-endian byte form (filled in
// by init below).
var kMaxNumBytes = make([]byte, 8)

// init encodes kMaxNum into kMaxNumBytes once at startup.
func init() {
	binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
}
// iKey is an internal key: the user key followed by an 8-byte trailer that
// packs the sequence number (high 56 bits) with the key type (low 8 bits).
type iKey []byte

// newIkey builds an internal key from ukey, seq and kt, panicking when the
// sequence number or key type is out of range.
func newIkey(ukey []byte, seq uint64, kt kType) iKey {
	switch {
	case seq > kMaxSeq:
		panic("leveldb: invalid sequence number")
	case kt > ktVal:
		panic("leveldb: invalid type")
	}
	dst := make(iKey, len(ukey)+8)
	copy(dst, ukey)
	binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
	return dst
}
// parseIkey splits an internal key into its user key, sequence number and
// key type, returning a corruption error for malformed input.
func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
	if len(ik) < 8 {
		err = newErrIkeyCorrupted(ik, "invalid length")
		return
	}
	packed := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	seq = packed >> 8
	kt = kType(packed & 0xff)
	if kt > ktVal {
		return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
	}
	ukey = ik[:len(ik)-8]
	return
}
// validIkey reports whether ik parses as a well-formed internal key.
func validIkey(ik []byte) bool {
	if _, _, _, err := parseIkey(ik); err != nil {
		return false
	}
	return true
}
// assert panics when ik is nil or too short to contain the 8-byte trailer.
func (ik iKey) assert() {
	switch {
	case ik == nil:
		panic("leveldb: nil iKey")
	case len(ik) < 8:
		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", ik, len(ik)))
	}
}
// ukey returns the user-key portion of ik (everything before the trailer).
func (ik iKey) ukey() []byte {
	ik.assert()
	end := len(ik) - 8
	return ik[:end]
}

// num returns the raw 64-bit trailer packing sequence number and key type.
func (ik iKey) num() uint64 {
	ik.assert()
	trailer := ik[len(ik)-8:]
	return binary.LittleEndian.Uint64(trailer)
}
// parseNum unpacks the trailer into its sequence number and key type,
// panicking on an out-of-range type.
func (ik iKey) parseNum() (seq uint64, kt kType) {
	packed := ik.num()
	seq = packed >> 8
	kt = kType(packed & 0xff)
	if kt > ktVal {
		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", ik, len(ik), kt))
	}
	return
}
// String renders ik as "<hex ukey>,<type><seq>", or a placeholder for nil or
// unparsable keys.
func (ik iKey) String() string {
	if ik == nil {
		return "<nil>"
	}
	ukey, seq, kt, err := parseIkey(ik)
	if err != nil {
		return "<invalid>"
	}
	return fmt.Sprintf("%x,%s%d", ukey, kt, seq)
}

View File

@@ -1,628 +0,0 @@
package main
import (
"crypto/rand"
"encoding/binary"
"flag"
"fmt"
"log"
mrand "math/rand"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"path"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/table"
"github.com/syndtr/goleveldb/leveldb/util"
)
// Stress-test knobs and shared global state; the tunables are exposed as
// command-line flags by init below.
var (
	dbPath                 = path.Join(os.TempDir(), "goleveldb-testdb")              // on-disk test DB location (wiped at startup)
	openFilesCacheCapacity = 500                                                      // leveldb open-files cache size
	keyLen                 = 63                                                       // generated key length in bytes
	valueLen               = 256                                                      // generated value length in bytes
	numKeys                = arrayInt{100000, 1332, 531, 1234, 9553, 1024, 35743}     // keys per namespace; one worker group per entry
	httpProf               = "127.0.0.1:5454"                                         // pprof HTTP listen address ("" disables)
	transactionProb        = 0.5                                                      // probability a batch is written via a transaction
	enableBlockCache       = false
	enableCompression      = false
	enableBufferPool       = false
	wg                     = new(sync.WaitGroup) // tracks writer/reader/scanner goroutines
	done, fail             uint32                // atomic flags: shutting down / failure detected
	bpool                  *util.BufferPool      // shared buffer pool (nil unless enableBufferPool)
)
// arrayInt is a comma-separated list of ints, usable as a flag.Value.
type arrayInt []int

// String renders the list as comma-separated decimal numbers (flag.Value).
// The original built the result with string concatenation in a loop, which
// is quadratic; strings.Join produces the identical output in one pass.
func (a arrayInt) String() string {
	parts := make([]string, len(a))
	for i, n := range a {
		parts[i] = strconv.Itoa(n)
	}
	return strings.Join(parts, ",")
}

// Set parses a comma-separated list of ints (flag.Value). Elements are
// trimmed of surrounding whitespace; blank elements are skipped. On a parse
// error the receiver is left unmodified and the error is returned.
func (a *arrayInt) Set(str string) error {
	var na arrayInt
	for _, s := range strings.Split(str, ",") {
		s = strings.TrimSpace(s)
		if s == "" {
			continue
		}
		n, err := strconv.Atoi(s)
		if err != nil {
			return err
		}
		na = append(na, n)
	}
	*a = na
	return nil
}
// init registers the command-line flags that tune the stress-test workload
// and feature toggles declared above.
// NOTE(review): "probablity" below is a user-visible typo in the flag help
// text; it is a runtime string and is deliberately left unchanged here.
func init() {
	flag.StringVar(&dbPath, "db", dbPath, "testdb path")
	flag.IntVar(&openFilesCacheCapacity, "openfilescachecap", openFilesCacheCapacity, "open files cache capacity")
	flag.IntVar(&keyLen, "keylen", keyLen, "key length")
	flag.IntVar(&valueLen, "valuelen", valueLen, "value length")
	flag.Var(&numKeys, "numkeys", "num keys")
	flag.StringVar(&httpProf, "httpprof", httpProf, "http pprof listen addr")
	flag.Float64Var(&transactionProb, "transactionprob", transactionProb, "probablity of writes using transaction")
	flag.BoolVar(&enableBufferPool, "enablebufferpool", enableBufferPool, "enable buffer pool")
	flag.BoolVar(&enableBlockCache, "enableblockcache", enableBlockCache, "enable block cache")
	flag.BoolVar(&enableCompression, "enablecompression", enableCompression, "enable block compression")
}
// randomData fills dst with a self-checking record of dataLen bytes. The
// first half carries the namespace byte, the prefix byte, random filler, the
// 32-bit counter i, and a CRC over the half; the second half mirrors the
// first; any remaining space is random filler; the final 4 bytes are a CRC
// over everything before them. dst is reused when it has sufficient
// capacity, otherwise a new slice is allocated. Panics when dataLen cannot
// hold the layout, or if the random source fails.
func randomData(dst []byte, ns, prefix byte, i uint32, dataLen int) []byte {
	if dataLen < (2+4+4)*2+4 {
		panic("dataLen is too small")
	}
	if cap(dst) < dataLen {
		dst = make([]byte, dataLen)
	} else {
		dst = dst[:dataLen]
	}
	half := (dataLen - 4) / 2
	if _, err := rand.Reader.Read(dst[2 : half-8]); err != nil {
		panic(err)
	}
	dst[0] = ns
	dst[1] = prefix
	// Write the counter once; the original code contained the identical
	// PutUint32 call twice in a row, which was redundant.
	binary.LittleEndian.PutUint32(dst[half-8:], i)
	binary.LittleEndian.PutUint32(dst[half-4:], util.NewCRC(dst[:half-4]).Value())
	full := half * 2
	copy(dst[half:full], dst[:half])
	if full < dataLen-4 {
		if _, err := rand.Reader.Read(dst[full : dataLen-4]); err != nil {
			panic(err)
		}
	}
	binary.LittleEndian.PutUint32(dst[dataLen-4:], util.NewCRC(dst[:dataLen-4]).Value())
	return dst
}
// dataSplit returns the two mirrored halves of a record produced by
// randomData, excluding the trailing checksum.
func dataSplit(data []byte) (data0, data1 []byte) {
	half := (len(data) - 4) / 2
	data0 = data[:half]
	data1 = data[half : 2*half]
	return
}
// dataNS extracts the namespace byte of a record (its first byte).
func dataNS(data []byte) byte {
	ns := data[0]
	return ns
}
// dataPrefix extracts the prefix byte of a record (its second byte).
func dataPrefix(data []byte) byte {
	prefix := data[1]
	return prefix
}
// dataI extracts the 32-bit counter stored just before the first-half CRC.
func dataI(data []byte) uint32 {
	off := (len(data)-4)/2 - 8
	return binary.LittleEndian.Uint32(data[off:])
}
// dataChecksum returns the stored trailing CRC of a record alongside a
// freshly computed CRC over everything before it.
func dataChecksum(data []byte) (uint32, uint32) {
	body, trailer := data[:len(data)-4], data[len(data)-4:]
	return binary.LittleEndian.Uint32(trailer), util.NewCRC(body).Value()
}
// dataPrefixSlice bounds a key range to a single namespace/prefix pair.
func dataPrefixSlice(ns, prefix byte) *util.Range {
	key := []byte{ns, prefix}
	return util.BytesPrefix(key)
}

// dataNsSlice bounds a key range to an entire namespace.
func dataNsSlice(ns byte) *util.Range {
	key := []byte{ns}
	return util.BytesPrefix(key)
}
// testingStorage wraps a storage.Storage so table files can be scanned for
// corruption before they are removed (see Remove below).
type testingStorage struct {
	storage.Storage
}
// scanTable opens table file fd and iterates every key/value pair, verifying
// the self-checksums embedded by randomData (and, when checksum is true, the
// table's own block checksums as well). On any corruption it sets the global
// fail/done flags, logs details, and reports true. Unrecoverable I/O errors
// abort the process via log.Fatal.
func (ts *testingStorage) scanTable(fd storage.FileDesc, checksum bool) (corrupted bool) {
	r, err := ts.Open(fd)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	size, err := r.Seek(0, os.SEEK_END)
	if err != nil {
		log.Fatal(err)
	}
	o := &opt.Options{
		DisableLargeBatchTransaction: true,
		Strict:                       opt.NoStrict,
	}
	if checksum {
		o.Strict = opt.StrictBlockChecksum | opt.StrictReader
	}
	tr, err := table.NewReader(r, size, fd, nil, bpool, o)
	if err != nil {
		log.Fatal(err)
	}
	defer tr.Release()
	// checkData validates the record checksum of one key or value; on a
	// mismatch it flags failure and logs a breakdown of both record halves.
	checkData := func(i int, t string, data []byte) bool {
		if len(data) == 0 {
			panic(fmt.Sprintf("[%v] nil data: i=%d t=%s", fd, i, t))
		}
		checksum0, checksum1 := dataChecksum(data)
		if checksum0 != checksum1 {
			atomic.StoreUint32(&fail, 1)
			atomic.StoreUint32(&done, 1)
			corrupted = true
			data0, data1 := dataSplit(data)
			data0c0, data0c1 := dataChecksum(data0)
			data1c0, data1c1 := dataChecksum(data1)
			log.Printf("FATAL: [%v] Corrupted data i=%d t=%s (%#x != %#x): %x(%v) vs %x(%v)",
				fd, i, t, checksum0, checksum1, data0, data0c0 == data0c1, data1, data1c0 == data1c1)
			return true
		}
		return false
	}
	iter := tr.NewIterator(nil, nil)
	defer iter.Release()
	for i := 0; iter.Next(); i++ {
		ukey, _, kt, kerr := parseIkey(iter.Key())
		if kerr != nil {
			atomic.StoreUint32(&fail, 1)
			atomic.StoreUint32(&done, 1)
			corrupted = true
			log.Printf("FATAL: [%v] Corrupted ikey i=%d: %v", fd, i, kerr)
			return
		}
		if checkData(i, "key", ukey) {
			return
		}
		// Only value-type entries carry a full record in the value slot.
		if kt == ktVal && checkData(i, "value", iter.Value()) {
			return
		}
	}
	if err := iter.Error(); err != nil {
		if errors.IsCorrupted(err) {
			atomic.StoreUint32(&fail, 1)
			atomic.StoreUint32(&done, 1)
			corrupted = true
			log.Printf("FATAL: [%v] Corruption detected: %v", fd, err)
		} else {
			log.Fatal(err)
		}
	}
	return
}
// Remove intercepts file deletion: once a failure has been flagged, or when
// a table file scans as corrupted, the file is kept on disk for post-mortem
// inspection instead of being removed.
func (ts *testingStorage) Remove(fd storage.FileDesc) error {
	if atomic.LoadUint32(&fail) == 1 {
		return nil
	}
	if fd.Type == storage.TypeTable && ts.scanTable(fd, true) {
		return nil
	}
	return ts.Storage.Remove(fd)
}
// latencyStats accumulates simple latency statistics — total duration, event
// count and per-event min/max — measured between start() and record() calls.
type latencyStats struct {
	mark          time.Time     // timestamp of the last start(); zero when idle
	dur, min, max time.Duration // total duration and per-event extremes
	num           int           // number of recorded events
}

// start marks the beginning of a measured interval.
func (s *latencyStats) start() {
	s.mark = time.Now()
}

// record ends the interval begun by start, attributing the elapsed time
// evenly across n events. It panics if start was not called first.
func (s *latencyStats) record(n int) {
	if s.mark.IsZero() {
		panic("not started")
	}
	// time.Since replaces the time.Now().Sub(...) form (same semantics,
	// idiomatic since Go 1).
	dur := time.Since(s.mark)
	perEvent := dur / time.Duration(n)
	if perEvent < s.min || s.min == 0 {
		s.min = perEvent
	}
	if perEvent > s.max {
		s.max = perEvent
	}
	s.dur += dur
	s.num += n
	s.mark = time.Time{}
}

// ratePerSec returns the approximate events per second, falling back to the
// raw count when less than one full second has been accumulated.
func (s *latencyStats) ratePerSec() int {
	durSec := s.dur / time.Second
	if durSec > 0 {
		return s.num / int(durSec)
	}
	return s.num
}

// avg returns the mean duration per event, or 0 when nothing was recorded.
func (s *latencyStats) avg() time.Duration {
	if s.num > 0 {
		return s.dur / time.Duration(s.num)
	}
	return 0
}

// add merges the statistics accumulated in x into s.
func (s *latencyStats) add(x *latencyStats) {
	if x.min < s.min || s.min == 0 {
		s.min = x.min
	}
	if x.max > s.max {
		s.max = x.max
	}
	s.dur += x.dur
	s.num += x.num
}
// main runs the stress test: per namespace, a writer goroutine feeds batches
// of self-checksummed keys/values through a single write loop (randomly via
// transactions), snapshot readers verify every key they can see, and a
// scanner sweeps and deletes records while table files are corruption-scanned
// before removal. The test runs until interrupted or a corruption is found.
//
// Fixes versus the original: the signal channel is buffered (signal.Notify
// on an unbuffered channel can drop the signal — go vet SA1017), the
// FileDesc composite literal uses keyed fields, and time.Since replaces
// time.Now().Sub.
func main() {
	flag.Parse()

	if enableBufferPool {
		bpool = util.NewBufferPool(opt.DefaultBlockSize + 128)
	}

	log.Printf("Test DB stored at %q", dbPath)
	if httpProf != "" {
		log.Printf("HTTP pprof listening at %q", httpProf)
		runtime.SetBlockProfileRate(1)
		go func() {
			if err := http.ListenAndServe(httpProf, nil); err != nil {
				log.Fatalf("HTTPPROF: %v", err)
			}
		}()
	}

	runtime.GOMAXPROCS(runtime.NumCPU())
	os.RemoveAll(dbPath)
	stor, err := storage.OpenFile(dbPath, false)
	if err != nil {
		log.Fatal(err)
	}
	tstor := &testingStorage{stor}
	defer tstor.Close()

	// fatalf flags global failure; for table-corruption errors it re-scans
	// the offending table to pinpoint the broken key/value pair, then
	// unwinds the calling goroutine.
	fatalf := func(err error, format string, v ...interface{}) {
		atomic.StoreUint32(&fail, 1)
		atomic.StoreUint32(&done, 1)
		log.Printf("FATAL: "+format, v...)
		if err != nil && errors.IsCorrupted(err) {
			cerr := err.(*errors.ErrCorrupted)
			if !cerr.Fd.Zero() && cerr.Fd.Type == storage.TypeTable {
				log.Print("FATAL: corruption detected, scanning...")
				if !tstor.scanTable(storage.FileDesc{Type: storage.TypeTable, Num: cerr.Fd.Num}, false) {
					log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.Fd)
				}
			}
		}
		runtime.Goexit()
	}

	if openFilesCacheCapacity == 0 {
		openFilesCacheCapacity = -1
	}
	o := &opt.Options{
		OpenFilesCacheCapacity: openFilesCacheCapacity,
		DisableBufferPool:      !enableBufferPool,
		DisableBlockCache:      !enableBlockCache,
		ErrorIfExist:           true,
		Compression:            opt.NoCompression,
	}
	if enableCompression {
		o.Compression = opt.DefaultCompression
	}

	db, err := leveldb.Open(tstor, o)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var (
		mu              = &sync.Mutex{}
		gGetStat        = &latencyStats{}
		gIterStat       = &latencyStats{}
		gWriteStat      = &latencyStats{}
		gTrasactionStat = &latencyStats{}
		startTime       = time.Now()

		writeReq    = make(chan *leveldb.Batch)
		writeAck    = make(chan error)
		writeAckAck = make(chan struct{})
	)

	// Single write loop: each batch is applied either directly or inside a
	// transaction, chosen randomly per batch; the submitter waits on
	// writeAck and then releases the loop via writeAckAck.
	go func() {
		for b := range writeReq {
			var err error
			if mrand.Float64() < transactionProb {
				log.Print("> Write using transaction")
				gTrasactionStat.start()
				var tr *leveldb.Transaction
				if tr, err = db.OpenTransaction(); err == nil {
					if err = tr.Write(b, nil); err == nil {
						if err = tr.Commit(); err == nil {
							gTrasactionStat.record(b.Len())
						}
					} else {
						tr.Discard()
					}
				}
			} else {
				gWriteStat.start()
				if err = db.Write(b, nil); err == nil {
					gWriteStat.record(b.Len())
				}
			}
			writeAck <- err
			<-writeAckAck
		}
	}()

	// Periodic statistics reporter.
	go func() {
		for {
			time.Sleep(3 * time.Second)
			log.Print("------------------------")
			log.Printf("> Elapsed=%v", time.Since(startTime))
			mu.Lock()
			log.Printf("> GetLatencyMin=%v GetLatencyMax=%v GetLatencyAvg=%v GetRatePerSec=%d",
				gGetStat.min, gGetStat.max, gGetStat.avg(), gGetStat.ratePerSec())
			log.Printf("> IterLatencyMin=%v IterLatencyMax=%v IterLatencyAvg=%v IterRatePerSec=%d",
				gIterStat.min, gIterStat.max, gIterStat.avg(), gIterStat.ratePerSec())
			log.Printf("> WriteLatencyMin=%v WriteLatencyMax=%v WriteLatencyAvg=%v WriteRatePerSec=%d",
				gWriteStat.min, gWriteStat.max, gWriteStat.avg(), gWriteStat.ratePerSec())
			log.Printf("> TransactionLatencyMin=%v TransactionLatencyMax=%v TransactionLatencyAvg=%v TransactionRatePerSec=%d",
				gTrasactionStat.min, gTrasactionStat.max, gTrasactionStat.avg(), gTrasactionStat.ratePerSec())
			mu.Unlock()
			cachedblock, _ := db.GetProperty("leveldb.cachedblock")
			openedtables, _ := db.GetProperty("leveldb.openedtables")
			alivesnaps, _ := db.GetProperty("leveldb.alivesnaps")
			aliveiters, _ := db.GetProperty("leveldb.aliveiters")
			blockpool, _ := db.GetProperty("leveldb.blockpool")
			log.Printf("> BlockCache=%s OpenedTables=%s AliveSnaps=%s AliveIter=%s BlockPool=%q",
				cachedblock, openedtables, alivesnaps, aliveiters, blockpool)
			log.Print("------------------------")
		}
	}()

	for ns, numKey := range numKeys {
		func(ns, numKey int) {
			log.Printf("[%02d] STARTING: numKey=%d", ns, numKey)
			keys := make([][]byte, numKey)
			for i := range keys {
				keys[i] = randomData(nil, byte(ns), 1, uint32(i), keyLen)
			}

			// Writer: each iteration rewrites every key with a fresh value
			// tagged with the iteration number wi, then spawns a reader
			// pinned to a snapshot of that state.
			wg.Add(1)
			go func() {
				var wi uint32
				defer func() {
					log.Printf("[%02d] WRITER DONE #%d", ns, wi)
					wg.Done()
				}()

				var (
					b       = new(leveldb.Batch)
					k2, v2  []byte
					nReader int32
				)
				for atomic.LoadUint32(&done) == 0 {
					log.Printf("[%02d] WRITER #%d", ns, wi)

					b.Reset()
					for _, k1 := range keys {
						k2 = randomData(k2, byte(ns), 2, wi, keyLen)
						v2 = randomData(v2, byte(ns), 3, wi, valueLen)
						b.Put(k2, v2)
						b.Put(k1, k2)
					}
					writeReq <- b
					if err := <-writeAck; err != nil {
						writeAckAck <- struct{}{}
						fatalf(err, "[%02d] WRITER #%d db.Write: %v", ns, wi, err)
					}

					snap, err := db.GetSnapshot()
					if err != nil {
						writeAckAck <- struct{}{}
						fatalf(err, "[%02d] WRITER #%d db.GetSnapshot: %v", ns, wi, err)
					}
					writeAckAck <- struct{}{}

					wg.Add(1)
					atomic.AddInt32(&nReader, 1)
					go func(snapwi uint32, snap *leveldb.Snapshot) {
						var (
							ri       int
							iterStat = &latencyStats{}
							getStat  = &latencyStats{}
						)
						defer func() {
							mu.Lock()
							gGetStat.add(getStat)
							gIterStat.add(iterStat)
							mu.Unlock()

							atomic.AddInt32(&nReader, -1)
							log.Printf("[%02d] READER #%d.%d DONE Snap=%v Alive=%d IterLatency=%v GetLatency=%v", ns, snapwi, ri, snap, atomic.LoadInt32(&nReader), iterStat.avg(), getStat.avg())
							snap.Release()
							wg.Done()
						}()

						// Keep re-reading the snapshot until the writer is
						// three iterations ahead (at least three passes).
						stopi := snapwi + 3
						for (ri < 3 || atomic.LoadUint32(&wi) < stopi) && atomic.LoadUint32(&done) == 0 {
							var n int
							iter := snap.NewIterator(dataPrefixSlice(byte(ns), 1), nil)
							iterStat.start()
							for iter.Next() {
								k1 := iter.Key()
								k2 := iter.Value()
								iterStat.record(1)

								if dataNS(k2) != byte(ns) {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key NS: want=%d got=%d", ns, snapwi, ri, n, ns, dataNS(k2))
								}

								kwritei := dataI(k2)
								if kwritei != snapwi {
									fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key iter num: %d", ns, snapwi, ri, n, kwritei)
								}

								getStat.start()
								v2, err := snap.Get(k2, nil)
								if err != nil {
									fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
								}
								getStat.record(1)

								if checksum0, checksum1 := dataChecksum(v2); checksum0 != checksum1 {
									err := &errors.ErrCorrupted{Fd: storage.FileDesc{Type: 0xff, Num: 0}, Err: fmt.Errorf("v2: %x: checksum mismatch: %v vs %v", v2, checksum0, checksum1)}
									fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
								}

								n++
								iterStat.start()
							}
							iter.Release()
							if err := iter.Error(); err != nil {
								fatalf(err, "[%02d] READER #%d.%d K%d iter.Error: %v", ns, snapwi, ri, numKey, err)
							}

							if n != numKey {
								fatalf(nil, "[%02d] READER #%d.%d missing keys: want=%d got=%d", ns, snapwi, ri, numKey, n)
							}

							ri++
						}
					}(wi, snap)

					atomic.AddUint32(&wi, 1)
				}
			}()

			// Scanner: sweeps the whole namespace, verifying key and value
			// checksums, and deletes derived keys plus a random sample.
			delB := new(leveldb.Batch)
			wg.Add(1)
			go func() {
				var (
					i        int
					iterStat = &latencyStats{}
				)
				defer func() {
					log.Printf("[%02d] SCANNER DONE #%d", ns, i)
					wg.Done()
				}()

				time.Sleep(2 * time.Second)

				for atomic.LoadUint32(&done) == 0 {
					var n int
					delB.Reset()
					iter := db.NewIterator(dataNsSlice(byte(ns)), nil)
					iterStat.start()
					for iter.Next() && atomic.LoadUint32(&done) == 0 {
						k := iter.Key()
						v := iter.Value()
						iterStat.record(1)

						for ci, x := range [...][]byte{k, v} {
							checksum0, checksum1 := dataChecksum(x)
							if checksum0 != checksum1 {
								if ci == 0 {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid key checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								} else {
									fatalf(nil, "[%02d] SCANNER %d.%d invalid value checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
								}
							}
						}

						if dataPrefix(k) == 2 || mrand.Int()%999 == 0 {
							delB.Delete(k)
						}

						n++
						iterStat.start()
					}
					iter.Release()
					if err := iter.Error(); err != nil {
						fatalf(err, "[%02d] SCANNER #%d.%d iter.Error: %v", ns, i, n, err)
					}

					if n > 0 {
						log.Printf("[%02d] SCANNER #%d IterLatency=%v", ns, i, iterStat.avg())
					}

					if delB.Len() > 0 && atomic.LoadUint32(&done) == 0 {
						t := time.Now()
						writeReq <- delB
						if err := <-writeAck; err != nil {
							writeAckAck <- struct{}{}
							fatalf(err, "[%02d] SCANNER #%d db.Write: %v", ns, i, err)
						} else {
							writeAckAck <- struct{}{}
						}
						log.Printf("[%02d] SCANNER #%d Deleted=%d Time=%v", ns, i, delB.Len(), time.Since(t))
					}

					i++
				}
			}()
		}(ns, numKey)
	}

	go func() {
		// Buffered so signal.Notify cannot drop the signal if we are not
		// yet receiving. Note os.Kill cannot actually be trapped; it is
		// kept for parity with the original registration.
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, os.Interrupt, os.Kill)
		log.Printf("Got signal: %v, exiting...", <-sig)
		atomic.StoreUint32(&done, 1)
	}()

	wg.Wait()
}

View File

@@ -1,85 +0,0 @@
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/syndtr/goleveldb/leveldb/storage"
)
// Command-line configuration, populated by the flags registered in init.
var (
	filename string // storage path used for the lock test
	child    bool   // set when this process is the spawned child
)
// init registers the flags selecting the test path and the parent/child
// role of this process.
func init() {
	flag.StringVar(&filename, "filename", filepath.Join(os.TempDir(), "goleveldb_filelock_test"), "Filename used for testing")
	flag.BoolVar(&child, "child", false, "This is the child")
}
// runChild re-executes the current binary with the same arguments plus
// -child, echoing the child's captured stdout line by line with a "[Child]"
// prefix, and returns the error from running the child (nil on success).
//
// The original used bufio.Reader.ReadLine and discarded its isPrefix result,
// which could split very long lines and silently swallowed read errors;
// bufio.Scanner yields whole lines directly.
func runChild() error {
	args := append(append([]string{}, os.Args[1:]...), "-child")
	cmd := exec.Command(os.Args[0], args...)
	var out bytes.Buffer
	cmd.Stdout = &out
	err := cmd.Run()
	sc := bufio.NewScanner(&out)
	for sc.Scan() {
		fmt.Println("[Child]", sc.Text())
	}
	return err
}
// main exercises the storage file lock. The parent process opens the
// storage, then spawns a child (this same binary with -child) which must
// fail to open the locked storage with exit status 10; after the parent
// closes the storage, a second child must succeed. The child role simply
// opens and closes the storage, exiting non-zero on failure.
func main() {
	flag.Parse()
	fmt.Printf("Using path: %s\n", filename)
	if child {
		fmt.Println("Child flag set.")
	}
	stor, err := storage.OpenFile(filename, false)
	if err != nil {
		fmt.Printf("Could not open storage: %s", err)
		os.Exit(10)
	}
	if !child {
		// While we hold the lock the child must fail with exit status 10.
		fmt.Println("Executing child -- first test (expecting error)")
		err := runChild()
		if err == nil {
			fmt.Println("Expecting error from child")
		} else if err.Error() != "exit status 10" {
			fmt.Println("Got unexpected error from child:", err)
		} else {
			fmt.Printf("Got error from child: %s (expected)\n", err)
		}
	}
	err = stor.Close()
	if err != nil {
		fmt.Printf("Error when closing storage: %s", err)
		os.Exit(11)
	}
	if !child {
		// Lock released; the child should now succeed.
		fmt.Println("Executing child -- second test")
		err := runChild()
		if err != nil {
			fmt.Println("Got unexpected error from child:", err)
		}
	}
	os.RemoveAll(filename)
}