Use Go 1.5 vendoring instead of Godeps
Change made by:

- running "gvt fetch" on each of the packages mentioned in Godeps/Godeps.json
- `rm -rf Godeps`
- tweaking the build scripts to not mention Godeps
- tweaking the build scripts to test `./lib/...`, `./cmd/...` explicitly (to avoid testing vendor)
- tweaking the build scripts to not juggle GOPATH for Godeps and instead set GO15VENDOREXPERIMENT

This also results in some updated packages at the same time, I bet.

Building with Go 1.3 and 1.4 still *works* but won't use our vendored dependencies - the user needs to have the actual packages in their GOPATH then, which they'll get with a normal "go get". Building with Go 1.6+ will get our vendored dependencies by default even when not using our build script, which is nice.

By doing this we gain some freedom in that we can pick and choose manually what to include in vendor, as it's not based on just dependency analysis of our own code. This is also a risk, as we might pick up dependencies we are unaware of, since the build may work locally with those packages present in GOPATH. On the other hand, the build server will detect this, as it has no packages in its GOPATH beyond what is included in the repo.

The recommended tool to manage dependencies is github.com/FiloSottile/gvt.
vendor/github.com/syndtr/goleveldb/leveldb/table/block_test.go (new file, generated, vendored, 139 lines)
@@ -0,0 +1,139 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package table

import (
	"encoding/binary"
	"fmt"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/testutil"
	"github.com/syndtr/goleveldb/leveldb/util"
)

type blockTesting struct {
	tr *Reader
	b  *block
}

func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator {
	return t.tr.newBlockIter(t.b, nil, slice, false)
}

var _ = testutil.Defer(func() {
	Describe("Block", func() {
		Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting {
			// Building the block.
			bw := &blockWriter{
				restartInterval: restartInterval,
				scratch:         make([]byte, 30),
			}
			kv.Iterate(func(i int, key, value []byte) {
				bw.append(key, value)
			})
			bw.finish()

			// Opening the block.
			data := bw.buf.Bytes()
			restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
			return &blockTesting{
				tr: &Reader{cmp: comparer.DefaultComparer},
				b: &block{
					data:           data,
					restartsLen:    restartsLen,
					restartsOffset: len(data) - (restartsLen+1)*4,
				},
			}
		}

		Describe("read test", func() {
			for restartInterval := 1; restartInterval <= 5; restartInterval++ {
				Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
					kv := &testutil.KeyValue{}
					Text := func() string {
						return fmt.Sprintf("and %d keys", kv.Len())
					}

					Test := func() {
						// Make block.
						br := Build(kv, restartInterval)
						// Do testing.
						testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil)
					}

					Describe(Text(), Test)

					kv.PutString("", "empty")
					Describe(Text(), Test)

					kv.PutString("a1", "foo")
					Describe(Text(), Test)

					kv.PutString("a2", "v")
					Describe(Text(), Test)

					kv.PutString("a3qqwrkks", "hello")
					Describe(Text(), Test)

					kv.PutString("a4", "bar")
					Describe(Text(), Test)

					kv.PutString("a5111111", "v5")
					kv.PutString("a6", "")
					kv.PutString("a7", "v7")
					kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8")
					kv.PutString("b", "v9")
					kv.PutString("c9", "v9")
					kv.PutString("c91", "v9")
					kv.PutString("d0", "v9")
					Describe(Text(), Test)
				})
			}
		})

		Describe("out-of-bound slice test", func() {
			kv := &testutil.KeyValue{}
			kv.PutString("k1", "v1")
			kv.PutString("k2", "v2")
			kv.PutString("k3abcdefgg", "v3")
			kv.PutString("k4", "v4")
			kv.PutString("k5", "v5")
			for restartInterval := 1; restartInterval <= 5; restartInterval++ {
				Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
					// Make block.
					bt := Build(kv, restartInterval)

					Test := func(r *util.Range) func(done Done) {
						return func(done Done) {
							iter := bt.TestNewIterator(r)
							Expect(iter.Error()).ShouldNot(HaveOccurred())

							t := testutil.IteratorTesting{
								KeyValue: kv.Clone(),
								Iter:     iter,
							}

							testutil.DoIteratorTesting(&t)
							iter.Release()
							done <- true
						}
					}

					It("Should do iterations and seeks correctly #0",
						Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0)

					It("Should do iterations and seeks correctly #1",
						Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0)
				})
			}
		})
	})
})
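The Build helper above reopens a freshly written block by decoding its trailer: the final four bytes hold the restart count, and the restart array of that many little-endian uint32 offsets sits immediately before it. A minimal standalone sketch of that arithmetic, using a hand-assembled fake block rather than a real blockWriter (the entry bytes and restart values are made up for illustration):

// Sketch of the trailer math used in Build: restartsLen comes from the
// last 4 bytes, and the restart array sits just before it.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A fake block: 16 bytes of entry data, two restart points (0 and 16),
	// then the restart count. All fixed-width integers are little-endian.
	data := make([]byte, 16+2*4+4)
	binary.LittleEndian.PutUint32(data[16:], 0)
	binary.LittleEndian.PutUint32(data[20:], 16)
	binary.LittleEndian.PutUint32(data[24:], 2)

	restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
	restartsOffset := len(data) - (restartsLen+1)*4
	fmt.Println(restartsLen, restartsOffset) // 2 16
}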
vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go (new file, generated, vendored, 1107 lines)
(file diff suppressed because it is too large)
vendor/github.com/syndtr/goleveldb/leveldb/table/table.go (new file, generated, vendored, 177 lines)
@@ -0,0 +1,177 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package table allows read and write sorted key/value.
package table

import (
	"encoding/binary"
)

/*
Table:

Table is consist of one or more data blocks, an optional filter block
a metaindex block, an index block and a table footer. Metaindex block
is a special block used to keep parameters of the table, such as filter
block name and its block handle. Index block is a special block used to
keep record of data blocks offset and length, index block use one as
restart interval. The key used by index block are the last key of preceding
block, shorter separator of adjacent blocks or shorter successor of the
last key of the last block. Filter block is an optional block contains
sequence of filter data generated by a filter generator.

Table data structure:
                                              + optional
                                             /
+--------------+--------------+--------------+------+-------+-----------------+-------------+--------+
| data block 1 |      ...     | data block n | filter block | metaindex block | index block | footer |
+--------------+--------------+--------------+--------------+-----------------+-------------+--------+

Each block followed by a 5-bytes trailer contains compression type and checksum.

Table block trailer:

+---------------------------+-------------------+
| compression type (1-byte) | checksum (4-byte) |
+---------------------------+-------------------+

The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression
type also included in the checksum.

Table footer:

  +------------------- 40-bytes -------------------+
 /                                                   \
+------------------------+--------------------+------+-----------------+
| metaindex block handle / index block handle / ---- | magic (8-bytes) |
+------------------------+--------------------+------+-----------------+

The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/".

NOTE: All fixed-length integer are little-endian.
*/

/*
Block:

Block is consist of one or more key/value entries and a block trailer.
Block entry shares key prefix with its preceding key until a restart
point reached. A block should contains at least one restart point.
First restart point are always zero.

Block data structure:

  + restart point                 + restart point (depends on restart interval)
 /                               /
+---------------+---------------+---------------+---------------+---------+
| block entry 1 | block entry 2 |      ...      | block entry n | trailer |
+---------------+---------------+---------------+---------------+---------+

Key/value entry:

          +---- key len ----+
         /                   \
+-------+---------+-----------+---------+--------------------+--------------+----------------+
| shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) |
+-----------------+---------------------+--------------------+--------------+----------------+

Block entry shares key prefix with its preceding key:
Conditions:
	restart_interval=2
	entry one  : key=deck,value=v1
	entry two  : key=dock,value=v2
	entry three: key=duck,value=v3
The entries will be encoded as follow:

  + restart point (offset=0)                                      + restart point (offset=16)
 /                                                               /
+-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
|  0  |  4  |  2  |  "deck"  |  "v1"  |  1  |  3  |  2  |  "ock"  |  "v2"  |  0  |  4  |  2  |  "duck"  |  "v3"  |
+-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
 \                                   / \                                  / \                                   /
  +----------- entry one -----------+   +----------- entry two ----------+   +---------- entry three ----------+

The block trailer will contains two restart points:

+------------+-----------+--------+
|     0      |    16     |   2    |
+------------+-----------+---+----+
 \                      /     \
  +-- restart points --+       + restart points length

Block trailer:

  +-- 4-bytes --+
 /               \
+-----------------+-----------------+-----------------+------------------------------+
| restart point 1 |       ....      | restart point n | restart points len (4-bytes) |
+-----------------+-----------------+-----------------+------------------------------+


NOTE: All fixed-length integer are little-endian.
*/

/*
Filter block:

Filter block consist of one or more filter data and a filter block trailer.
The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg.

Filter block data structure:

  + offset 1      + offset 2      + offset n      + trailer offset
 /               /               /               /
+---------------+---------------+---------------+---------+
| filter data 1 |      ...      | filter data n | trailer |
+---------------+---------------+---------------+---------+

Filter block trailer:

  +- 4-bytes -+
 /             \
+---------------+---------------+---------------+-------------------------------+------------------+
| data 1 offset |      ....     | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
+---------------+---------------+---------------+-------------------------------+------------------+


NOTE: All fixed-length integer are little-endian.
*/

const (
	blockTrailerLen = 5
	footerLen       = 48

	magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"

	// The block type gives the per-block compression format.
	// These constants are part of the file format and should not be changed.
	blockTypeNoCompression     = 0
	blockTypeSnappyCompression = 1

	// Generate new filter every 2KB of data
	filterBaseLg = 11
	filterBase   = 1 << filterBaseLg
)

type blockHandle struct {
	offset, length uint64
}

func decodeBlockHandle(src []byte) (blockHandle, int) {
	offset, n := binary.Uvarint(src)
	length, m := binary.Uvarint(src[n:])
	if n == 0 || m == 0 {
		return blockHandle{}, 0
	}
	return blockHandle{offset, length}, n + m
}

func encodeBlockHandle(dst []byte, b blockHandle) int {
	n := binary.PutUvarint(dst, b.offset)
	m := binary.PutUvarint(dst[n:], b.length)
	return n + m
}
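decodeBlockHandle and encodeBlockHandle above store a block's offset and length as two consecutive unsigned varints. A small self-contained round-trip sketch of that encoding (the handle values are made up for illustration):

// Round-trip sketch of the varint blockHandle encoding defined in table.go.
package main

import (
	"encoding/binary"
	"fmt"
)

type blockHandle struct {
	offset, length uint64
}

func main() {
	bh := blockHandle{offset: 210000, length: 4096}
	buf := make([]byte, 2*binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, bh.offset)
	n += binary.PutUvarint(buf[n:], bh.length)

	// Decode the same bytes back; i and j are the varint widths read.
	offset, i := binary.Uvarint(buf)
	length, j := binary.Uvarint(buf[i:])
	fmt.Println(offset, length, i+j == n) // 210000 4096 true
}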
vendor/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go (new file, generated, vendored, 11 lines)
@@ -0,0 +1,11 @@
package table

import (
	"testing"

	"github.com/syndtr/goleveldb/leveldb/testutil"
)

func TestTable(t *testing.T) {
	testutil.RunSuite(t, "Table Suite")
}
vendor/github.com/syndtr/goleveldb/leveldb/table/table_test.go (new file, generated, vendored, 123 lines)
@@ -0,0 +1,123 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package table

import (
	"bytes"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/testutil"
	"github.com/syndtr/goleveldb/leveldb/util"
)

type tableWrapper struct {
	*Reader
}

func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) {
	return t.Reader.Find(key, false, nil)
}

func (t tableWrapper) TestGet(key []byte) (value []byte, err error) {
	return t.Reader.Get(key, nil)
}

func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator {
	return t.Reader.NewIterator(slice, nil)
}

var _ = testutil.Defer(func() {
	Describe("Table", func() {
		Describe("approximate offset test", func() {
			var (
				buf = &bytes.Buffer{}
				o   = &opt.Options{
					BlockSize:   1024,
					Compression: opt.NoCompression,
				}
			)

			// Building the table.
			tw := NewWriter(buf, o)
			tw.Append([]byte("k01"), []byte("hello"))
			tw.Append([]byte("k02"), []byte("hello2"))
			tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000))
			tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000))
			tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000))
			tw.Append([]byte("k06"), []byte("hello3"))
			tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000))
			err := tw.Close()

			It("Should be able to approximate offset of a key correctly", func() {
				Expect(err).ShouldNot(HaveOccurred())

				tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o)
				Expect(err).ShouldNot(HaveOccurred())
				CheckOffset := func(key string, expect, threshold int) {
					offset, err := tr.OffsetOf([]byte(key))
					Expect(err).ShouldNot(HaveOccurred())
					Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key)
				}

				CheckOffset("k0", 0, 0)
				CheckOffset("k01a", 0, 0)
				CheckOffset("k02", 0, 0)
				CheckOffset("k03", 0, 0)
				CheckOffset("k04", 10000, 1000)
				CheckOffset("k04a", 210000, 1000)
				CheckOffset("k05", 210000, 1000)
				CheckOffset("k06", 510000, 1000)
				CheckOffset("k07", 510000, 1000)
				CheckOffset("xyz", 610000, 2000)
			})
		})

		Describe("read test", func() {
			Build := func(kv testutil.KeyValue) testutil.DB {
				o := &opt.Options{
					BlockSize:            512,
					BlockRestartInterval: 3,
				}
				buf := &bytes.Buffer{}

				// Building the table.
				tw := NewWriter(buf, o)
				kv.Iterate(func(i int, key, value []byte) {
					tw.Append(key, value)
				})
				tw.Close()

				// Opening the table.
				tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o)
				return tableWrapper{tr}
			}
			Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {
				return func() {
					db := Build(*kv)
					if body != nil {
						body(db.(tableWrapper).Reader)
					}
					testutil.KeyValueTesting(nil, *kv, db, nil, nil)
				}
			}

			testutil.AllKeyValueTesting(nil, Build, nil, nil)
			Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) {
				It("should have correct blocks number", func() {
					indexBlock, err := r.readBlock(r.indexBH, true)
					Expect(err).To(BeNil())
					Expect(indexBlock.restartsLen).Should(Equal(9))
				})
			}))
		})
	})
})
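The tests above exercise the package's exported entry points (NewWriter, Append, Close, NewReader, Get). For reference, a sketch of the same write-then-read cycle outside the test suite, assembled only from the calls used in the tests; it assumes the vendored goleveldb packages from this commit are importable, and the keys, values and options are illustrative:

// Write a tiny table into a buffer, reopen it, and look up one key.
package main

import (
	"bytes"
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/table"
)

func main() {
	o := &opt.Options{BlockSize: 1024, Compression: opt.NoCompression}

	// Build the table; keys must be appended in increasing order.
	buf := &bytes.Buffer{}
	tw := table.NewWriter(buf, o)
	for _, kv := range [][2]string{{"k01", "hello"}, {"k02", "world"}} {
		if err := tw.Append([]byte(kv[0]), []byte(kv[1])); err != nil {
			panic(err)
		}
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// Reopen from the in-memory bytes and read a key back.
	tr, err := table.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o)
	if err != nil {
		panic(err)
	}
	v, err := tr.Get([]byte("k02"), nil)
	fmt.Println(string(v), err) // world <nil>
}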
vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go (new file, generated, vendored, 375 lines)
@@ -0,0 +1,375 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package table

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"

	"github.com/golang/snappy"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func sharedPrefixLen(a, b []byte) int {
	i, n := 0, len(a)
	if n > len(b) {
		n = len(b)
	}
	for i < n && a[i] == b[i] {
		i++
	}
	return i
}

type blockWriter struct {
	restartInterval int
	buf             util.Buffer
	nEntries        int
	prevKey         []byte
	restarts        []uint32
	scratch         []byte
}

func (w *blockWriter) append(key, value []byte) {
	nShared := 0
	if w.nEntries%w.restartInterval == 0 {
		w.restarts = append(w.restarts, uint32(w.buf.Len()))
	} else {
		nShared = sharedPrefixLen(w.prevKey, key)
	}
	n := binary.PutUvarint(w.scratch[0:], uint64(nShared))
	n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared))
	n += binary.PutUvarint(w.scratch[n:], uint64(len(value)))
	w.buf.Write(w.scratch[:n])
	w.buf.Write(key[nShared:])
	w.buf.Write(value)
	w.prevKey = append(w.prevKey[:0], key...)
	w.nEntries++
}

func (w *blockWriter) finish() {
	// Write restarts entry.
	if w.nEntries == 0 {
		// Must have at least one restart entry.
		w.restarts = append(w.restarts, 0)
	}
	w.restarts = append(w.restarts, uint32(len(w.restarts)))
	for _, x := range w.restarts {
		buf4 := w.buf.Alloc(4)
		binary.LittleEndian.PutUint32(buf4, x)
	}
}

func (w *blockWriter) reset() {
	w.buf.Reset()
	w.nEntries = 0
	w.restarts = w.restarts[:0]
}

func (w *blockWriter) bytesLen() int {
	restartsLen := len(w.restarts)
	if restartsLen == 0 {
		restartsLen = 1
	}
	return w.buf.Len() + 4*restartsLen + 4
}

type filterWriter struct {
	generator filter.FilterGenerator
	buf       util.Buffer
	nKeys     int
	offsets   []uint32
}

func (w *filterWriter) add(key []byte) {
	if w.generator == nil {
		return
	}
	w.generator.Add(key)
	w.nKeys++
}

func (w *filterWriter) flush(offset uint64) {
	if w.generator == nil {
		return
	}
	for x := int(offset / filterBase); x > len(w.offsets); {
		w.generate()
	}
}

func (w *filterWriter) finish() {
	if w.generator == nil {
		return
	}
	// Generate last keys.

	if w.nKeys > 0 {
		w.generate()
	}
	w.offsets = append(w.offsets, uint32(w.buf.Len()))
	for _, x := range w.offsets {
		buf4 := w.buf.Alloc(4)
		binary.LittleEndian.PutUint32(buf4, x)
	}
	w.buf.WriteByte(filterBaseLg)
}

func (w *filterWriter) generate() {
	// Record offset.
	w.offsets = append(w.offsets, uint32(w.buf.Len()))
	// Generate filters.
	if w.nKeys > 0 {
		w.generator.Generate(&w.buf)
		w.nKeys = 0
	}
}

// Writer is a table writer.
type Writer struct {
	writer io.Writer
	err    error
	// Options
	cmp         comparer.Comparer
	filter      filter.Filter
	compression opt.Compression
	blockSize   int

	dataBlock   blockWriter
	indexBlock  blockWriter
	filterBlock filterWriter
	pendingBH   blockHandle
	offset      uint64
	nEntries    int
	// Scratch allocated enough for 5 uvarint. Block writer should not use
	// first 20-bytes since it will be used to encode block handle, which
	// then passed to the block writer itself.
	scratch            [50]byte
	comparerScratch    []byte
	compressionScratch []byte
}

func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
	// Compress the buffer if necessary.
	var b []byte
	if compression == opt.SnappyCompression {
		// Allocate scratch enough for compression and block trailer.
		if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
			w.compressionScratch = make([]byte, n)
		}
		compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
		n := len(compressed)
		b = compressed[:n+blockTrailerLen]
		b[n] = blockTypeSnappyCompression
	} else {
		tmp := buf.Alloc(blockTrailerLen)
		tmp[0] = blockTypeNoCompression
		b = buf.Bytes()
	}

	// Calculate the checksum.
	n := len(b) - 4
	checksum := util.NewCRC(b[:n]).Value()
	binary.LittleEndian.PutUint32(b[n:], checksum)

	// Write the buffer to the file.
	_, err = w.writer.Write(b)
	if err != nil {
		return
	}
	bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
	w.offset += uint64(len(b))
	return
}

func (w *Writer) flushPendingBH(key []byte) {
	if w.pendingBH.length == 0 {
		return
	}
	var separator []byte
	if len(key) == 0 {
		separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey)
	} else {
		separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key)
	}
	if separator == nil {
		separator = w.dataBlock.prevKey
	} else {
		w.comparerScratch = separator
	}
	n := encodeBlockHandle(w.scratch[:20], w.pendingBH)
	// Append the block handle to the index block.
	w.indexBlock.append(separator, w.scratch[:n])
	// Reset prev key of the data block.
	w.dataBlock.prevKey = w.dataBlock.prevKey[:0]
	// Clear pending block handle.
	w.pendingBH = blockHandle{}
}

func (w *Writer) finishBlock() error {
	w.dataBlock.finish()
	bh, err := w.writeBlock(&w.dataBlock.buf, w.compression)
	if err != nil {
		return err
	}
	w.pendingBH = bh
	// Reset the data block.
	w.dataBlock.reset()
	// Flush the filter block.
	w.filterBlock.flush(w.offset)
	return nil
}

// Append appends key/value pair to the table. The keys passed must
// be in increasing order.
//
// It is safe to modify the contents of the arguments after Append returns.
func (w *Writer) Append(key, value []byte) error {
	if w.err != nil {
		return w.err
	}
	if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 {
		w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key)
		return w.err
	}

	w.flushPendingBH(key)
	// Append key/value pair to the data block.
	w.dataBlock.append(key, value)
	// Add key to the filter block.
	w.filterBlock.add(key)

	// Finish the data block if block size target reached.
	if w.dataBlock.bytesLen() >= w.blockSize {
		if err := w.finishBlock(); err != nil {
			w.err = err
			return w.err
		}
	}
	w.nEntries++
	return nil
}

// BlocksLen returns number of blocks written so far.
func (w *Writer) BlocksLen() int {
	n := w.indexBlock.nEntries
	if w.pendingBH.length > 0 {
		// Includes the pending block.
		n++
	}
	return n
}

// EntriesLen returns number of entries added so far.
func (w *Writer) EntriesLen() int {
	return w.nEntries
}

// BytesLen returns number of bytes written so far.
func (w *Writer) BytesLen() int {
	return int(w.offset)
}

// Close will finalize the table. Calling Append is not possible
// after Close, but calling BlocksLen, EntriesLen and BytesLen
// is still possible.
func (w *Writer) Close() error {
	if w.err != nil {
		return w.err
	}

	// Write the last data block. Or empty data block if there
	// aren't any data blocks at all.
	if w.dataBlock.nEntries > 0 || w.nEntries == 0 {
		if err := w.finishBlock(); err != nil {
			w.err = err
			return w.err
		}
	}
	w.flushPendingBH(nil)

	// Write the filter block.
	var filterBH blockHandle
	w.filterBlock.finish()
	if buf := &w.filterBlock.buf; buf.Len() > 0 {
		filterBH, w.err = w.writeBlock(buf, opt.NoCompression)
		if w.err != nil {
			return w.err
		}
	}

	// Write the metaindex block.
	if filterBH.length > 0 {
		key := []byte("filter." + w.filter.Name())
		n := encodeBlockHandle(w.scratch[:20], filterBH)
		w.dataBlock.append(key, w.scratch[:n])
	}
	w.dataBlock.finish()
	metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression)
	if err != nil {
		w.err = err
		return w.err
	}

	// Write the index block.
	w.indexBlock.finish()
	indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression)
	if err != nil {
		w.err = err
		return w.err
	}

	// Write the table footer.
	footer := w.scratch[:footerLen]
	for i := range footer {
		footer[i] = 0
	}
	n := encodeBlockHandle(footer, metaindexBH)
	encodeBlockHandle(footer[n:], indexBH)
	copy(footer[footerLen-len(magic):], magic)
	if _, err := w.writer.Write(footer); err != nil {
		w.err = err
		return w.err
	}
	w.offset += footerLen

	w.err = errors.New("leveldb/table: writer is closed")
	return nil
}

// NewWriter creates a new initialized table writer for the file.
//
// Table writer is not goroutine-safe.
func NewWriter(f io.Writer, o *opt.Options) *Writer {
	w := &Writer{
		writer:          f,
		cmp:             o.GetComparer(),
		filter:          o.GetFilter(),
		compression:     o.GetCompression(),
		blockSize:       o.GetBlockSize(),
		comparerScratch: make([]byte, 0),
	}
	// data block
	w.dataBlock.restartInterval = o.GetBlockRestartInterval()
	// The first 20-bytes are used for encoding block handle.
	w.dataBlock.scratch = w.scratch[20:]
	// index block
	w.indexBlock.restartInterval = 1
	w.indexBlock.scratch = w.scratch[20:]
	// filter block
	if w.filter != nil {
		w.filterBlock.generator = w.filter.NewGenerator()
		w.filterBlock.flush(0)
	}
	return w
}
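blockWriter.append implements the prefix compression described in the table.go format comment: each entry stores the shared-prefix length, the unshared suffix length, and the value length as varints, followed by the suffix and value bytes; every restartInterval-th entry starts a restart point and stores its full key. A standalone sketch reproducing the deck/dock/duck example from that comment (restart interval 2, so the third entry begins a new restart point):

// Encode three entries the way blockWriter.append does.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func sharedPrefixLen(a, b []byte) int {
	i, n := 0, len(a)
	if n > len(b) {
		n = len(b)
	}
	for i < n && a[i] == b[i] {
		i++
	}
	return i
}

func main() {
	const restartInterval = 2
	entries := []struct{ key, value string }{
		{"deck", "v1"}, {"dock", "v2"}, {"duck", "v3"},
	}

	var buf bytes.Buffer
	var prevKey []byte
	scratch := make([]byte, 3*binary.MaxVarintLen64)
	for i, e := range entries {
		nShared := 0
		if i%restartInterval == 0 {
			fmt.Printf("restart point at offset %d\n", buf.Len())
		} else {
			nShared = sharedPrefixLen(prevKey, []byte(e.key))
		}
		n := binary.PutUvarint(scratch[0:], uint64(nShared))
		n += binary.PutUvarint(scratch[n:], uint64(len(e.key)-nShared))
		n += binary.PutUvarint(scratch[n:], uint64(len(e.value)))
		buf.Write(scratch[:n])
		buf.WriteString(e.key[nShared:])
		buf.WriteString(e.value)
		prevKey = append(prevKey[:0], e.key...)
	}
	// Entry two shares "d" with its predecessor, so only "ock" is stored;
	// entry three begins a new restart point and stores its full key.
	fmt.Printf("%q\n", buf.Bytes())
}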