mv internal lib
1 lib/db/.gitignore (vendored, new normal file)
@@ -0,0 +1 @@
testdata/*.db
228 lib/db/blockmap.go (new normal file)
@@ -0,0 +1,228 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// Package db provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
package db

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "sort"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/lib/config"
    "github.com/syncthing/syncthing/lib/osutil"
    "github.com/syncthing/syncthing/lib/sync"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/util"
)

var blockFinder *BlockFinder

type BlockMap struct {
    db     *leveldb.DB
    folder string
}

func NewBlockMap(db *leveldb.DB, folder string) *BlockMap {
    return &BlockMap{
        db:     db,
        folder: folder,
    }
}

// Add files to the block map, ignoring any deleted or invalid files.
func (m *BlockMap) Add(files []protocol.FileInfo) error {
    batch := new(leveldb.Batch)
    buf := make([]byte, 4)
    for _, file := range files {
        if file.IsDirectory() || file.IsDeleted() || file.IsInvalid() {
            continue
        }

        for i, block := range file.Blocks {
            binary.BigEndian.PutUint32(buf, uint32(i))
            batch.Put(m.blockKey(block.Hash, file.Name), buf)
        }
    }
    return m.db.Write(batch, nil)
}

// Update block map state, removing any deleted or invalid files.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
    batch := new(leveldb.Batch)
    buf := make([]byte, 4)
    for _, file := range files {
        if file.IsDirectory() {
            continue
        }

        if file.IsDeleted() || file.IsInvalid() {
            for _, block := range file.Blocks {
                batch.Delete(m.blockKey(block.Hash, file.Name))
            }
            continue
        }

        for i, block := range file.Blocks {
            binary.BigEndian.PutUint32(buf, uint32(i))
            batch.Put(m.blockKey(block.Hash, file.Name), buf)
        }
    }
    return m.db.Write(batch, nil)
}

// Discard block map state, removing the given files.
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
    batch := new(leveldb.Batch)
    for _, file := range files {
        for _, block := range file.Blocks {
            batch.Delete(m.blockKey(block.Hash, file.Name))
        }
    }
    return m.db.Write(batch, nil)
}

// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
    batch := new(leveldb.Batch)
    iter := m.db.NewIterator(util.BytesPrefix(m.blockKey(nil, "")[:1+64]), nil)
    defer iter.Release()
    for iter.Next() {
        batch.Delete(iter.Key())
    }
    if iter.Error() != nil {
        return iter.Error()
    }
    return m.db.Write(batch, nil)
}

func (m *BlockMap) blockKey(hash []byte, file string) []byte {
    return toBlockKey(hash, m.folder, file)
}

type BlockFinder struct {
    db      *leveldb.DB
    folders []string
    mut     sync.RWMutex
}

func NewBlockFinder(db *leveldb.DB, cfg *config.Wrapper) *BlockFinder {
    if blockFinder != nil {
        return blockFinder
    }

    f := &BlockFinder{
        db:  db,
        mut: sync.NewRWMutex(),
    }

    f.CommitConfiguration(config.Configuration{}, cfg.Raw())
    cfg.Subscribe(f)

    return f
}

// VerifyConfiguration implements the config.Committer interface
func (f *BlockFinder) VerifyConfiguration(from, to config.Configuration) error {
    return nil
}

// CommitConfiguration implements the config.Committer interface
func (f *BlockFinder) CommitConfiguration(from, to config.Configuration) bool {
    folders := make([]string, len(to.Folders))
    for i, folder := range to.Folders {
        folders[i] = folder.ID
    }

    sort.Strings(folders)

    f.mut.Lock()
    f.folders = folders
    f.mut.Unlock()

    return true
}

func (f *BlockFinder) String() string {
    return fmt.Sprintf("BlockFinder@%p", f)
}

// Iterate takes an iterator function which is called for all matching blocks
// for the given hash. The iterator function returns true if it is satisfied
// with the block, or false to continue iterating. Iterate itself returns
// whether a satisfying block was eventually found.
func (f *BlockFinder) Iterate(hash []byte, iterFn func(string, string, int32) bool) bool {
    f.mut.RLock()
    folders := f.folders
    f.mut.RUnlock()
    for _, folder := range folders {
        key := toBlockKey(hash, folder, "")
        iter := f.db.NewIterator(util.BytesPrefix(key), nil)
        defer iter.Release()

        for iter.Next() && iter.Error() == nil {
            folder, file := fromBlockKey(iter.Key())
            index := int32(binary.BigEndian.Uint32(iter.Value()))
            if iterFn(folder, osutil.NativeFilename(file), index) {
                return true
            }
        }
    }
    return false
}

// Fix repairs incorrect blockmap entries, removing the old entry and
// replacing it with a new entry for the given block.
func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []byte) error {
    buf := make([]byte, 4)
    binary.BigEndian.PutUint32(buf, uint32(index))

    batch := new(leveldb.Batch)
    batch.Delete(toBlockKey(oldHash, folder, file))
    batch.Put(toBlockKey(newHash, folder, file), buf)
    return f.db.Write(batch, nil)
}

// toBlockKey returns a byte slice encoding the following information:
//     keyTypeBlock (1 byte)
//     folder (64 bytes)
//     block hash (32 bytes)
//     file name (variable size)
func toBlockKey(hash []byte, folder, file string) []byte {
    o := make([]byte, 1+64+32+len(file))
    o[0] = KeyTypeBlock
    copy(o[1:], []byte(folder))
    copy(o[1+64:], []byte(hash))
    copy(o[1+64+32:], []byte(file))
    return o
}

func fromBlockKey(data []byte) (string, string) {
    if len(data) < 1+64+32+1 {
        panic("Incorrect key length")
    }
    if data[0] != KeyTypeBlock {
        panic("Incorrect key type")
    }

    file := string(data[1+64+32:])

    slice := data[1 : 1+64]
    izero := bytes.IndexByte(slice, 0)
    if izero > -1 {
        return string(slice[:izero]), file
    }
    return string(slice), file
}
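The block map above is keyed by block hash, so a puller can ask whether any local folder already holds a wanted block before fetching it over the network. As a minimal usage sketch, not part of this commit: it assumes an already-open *leveldb.DB (ldb) and a *config.Wrapper (cfg) like the ones the tests below construct, and a hypothetical helper name.

package main

import (
    "log"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/lib/config"
    "github.com/syncthing/syncthing/lib/db"
    "github.com/syndtr/goleveldb/leveldb"
)

// findBlock is a hypothetical helper: index the local files of "folder1",
// then report whether a block with the given hash is available somewhere.
func findBlock(ldb *leveldb.DB, cfg *config.Wrapper, files []protocol.FileInfo, hash []byte) bool {
    bm := db.NewBlockMap(ldb, "folder1")
    if err := bm.Add(files); err != nil {
        log.Fatal(err)
    }

    finder := db.NewBlockFinder(ldb, cfg)
    return finder.Iterate(hash, func(folder, file string, index int32) bool {
        // Returning true stops the iteration: this copy is good enough.
        log.Printf("block available in %s/%s at index %d", folder, file, index)
        return true
    })
}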
259 lib/db/blockmap_test.go (new normal file)
@@ -0,0 +1,259 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package db

import (
    "testing"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/lib/config"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/storage"
)

func genBlocks(n int) []protocol.BlockInfo {
    b := make([]protocol.BlockInfo, n)
    for i := range b {
        h := make([]byte, 32)
        for j := range h {
            h[j] = byte(i + j)
        }
        b[i].Size = int32(i)
        b[i].Hash = h
    }
    return b
}

var f1, f2, f3 protocol.FileInfo

func init() {
    blocks := genBlocks(30)

    f1 = protocol.FileInfo{
        Name:   "f1",
        Blocks: blocks[:10],
    }

    f2 = protocol.FileInfo{
        Name:   "f2",
        Blocks: blocks[10:20],
    }

    f3 = protocol.FileInfo{
        Name:   "f3",
        Blocks: blocks[20:],
    }
}

func setup() (*leveldb.DB, *BlockFinder) {
    // Setup

    db, err := leveldb.Open(storage.NewMemStorage(), nil)
    if err != nil {
        panic(err)
    }

    wrapper := config.Wrap("", config.Configuration{})
    wrapper.SetFolder(config.FolderConfiguration{
        ID: "folder1",
    })
    wrapper.SetFolder(config.FolderConfiguration{
        ID: "folder2",
    })

    return db, NewBlockFinder(db, wrapper)
}

func dbEmpty(db *leveldb.DB) bool {
    iter := db.NewIterator(nil, nil)
    defer iter.Release()
    if iter.Next() {
        return false
    }
    return true
}

func TestBlockMapAddUpdateWipe(t *testing.T) {
    db, f := setup()

    if !dbEmpty(db) {
        t.Fatal("db not empty")
    }

    m := NewBlockMap(db, "folder1")

    f3.Flags |= protocol.FlagDirectory

    err := m.Add([]protocol.FileInfo{f1, f2, f3})
    if err != nil {
        t.Fatal(err)
    }

    f.Iterate(f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
        if folder != "folder1" || file != "f1" || index != 0 {
            t.Fatal("Mismatch")
        }
        return true
    })

    f.Iterate(f2.Blocks[0].Hash, func(folder, file string, index int32) bool {
        if folder != "folder1" || file != "f2" || index != 0 {
            t.Fatal("Mismatch")
        }
        return true
    })

    f.Iterate(f3.Blocks[0].Hash, func(folder, file string, index int32) bool {
        t.Fatal("Unexpected block")
        return true
    })

    f3.Flags = f1.Flags
    f1.Flags |= protocol.FlagDeleted
    f2.Flags |= protocol.FlagInvalid

    // Should remove
    err = m.Update([]protocol.FileInfo{f1, f2, f3})
    if err != nil {
        t.Fatal(err)
    }

    f.Iterate(f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
        t.Fatal("Unexpected block")
        return false
    })

    f.Iterate(f2.Blocks[0].Hash, func(folder, file string, index int32) bool {
        t.Fatal("Unexpected block")
        return false
    })

    f.Iterate(f3.Blocks[0].Hash, func(folder, file string, index int32) bool {
        if folder != "folder1" || file != "f3" || index != 0 {
            t.Fatal("Mismatch")
        }
        return true
    })

    err = m.Drop()
    if err != nil {
        t.Fatal(err)
    }

    if !dbEmpty(db) {
        t.Fatal("db not empty")
    }

    // Should not add
    err = m.Add([]protocol.FileInfo{f1, f2})
    if err != nil {
        t.Fatal(err)
    }

    if !dbEmpty(db) {
        t.Fatal("db not empty")
    }

    f1.Flags = 0
    f2.Flags = 0
    f3.Flags = 0
}

func TestBlockFinderLookup(t *testing.T) {
    db, f := setup()

    m1 := NewBlockMap(db, "folder1")
    m2 := NewBlockMap(db, "folder2")

    err := m1.Add([]protocol.FileInfo{f1})
    if err != nil {
        t.Fatal(err)
    }
    err = m2.Add([]protocol.FileInfo{f1})
    if err != nil {
        t.Fatal(err)
    }

    counter := 0
    f.Iterate(f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
        counter++
        switch counter {
        case 1:
            if folder != "folder1" || file != "f1" || index != 0 {
                t.Fatal("Mismatch")
            }
        case 2:
            if folder != "folder2" || file != "f1" || index != 0 {
                t.Fatal("Mismatch")
            }
        default:
            t.Fatal("Unexpected block")
        }
        return false
    })
    if counter != 2 {
        t.Fatal("Incorrect count", counter)
    }

    f1.Flags |= protocol.FlagDeleted

    err = m1.Update([]protocol.FileInfo{f1})
    if err != nil {
        t.Fatal(err)
    }

    counter = 0
    f.Iterate(f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
        counter++
        switch counter {
        case 1:
            if folder != "folder2" || file != "f1" || index != 0 {
                t.Fatal("Mismatch")
            }
        default:
            t.Fatal("Unexpected block")
        }
        return false
    })
    if counter != 1 {
        t.Fatal("Incorrect count")
    }

    f1.Flags = 0
}

func TestBlockFinderFix(t *testing.T) {
    db, f := setup()

    iterFn := func(folder, file string, index int32) bool {
        return true
    }

    m := NewBlockMap(db, "folder1")
    err := m.Add([]protocol.FileInfo{f1})
    if err != nil {
        t.Fatal(err)
    }

    if !f.Iterate(f1.Blocks[0].Hash, iterFn) {
        t.Fatal("Block not found")
    }

    err = f.Fix("folder1", f1.Name, 0, f1.Blocks[0].Hash, f2.Blocks[0].Hash)
    if err != nil {
        t.Fatal(err)
    }

    if f.Iterate(f1.Blocks[0].Hash, iterFn) {
        t.Fatal("Unexpected block")
    }

    if !f.Iterate(f2.Blocks[0].Hash, iterFn) {
        t.Fatal("Block not found")
    }
}
236 lib/db/concurrency_test.go (new normal file)
@@ -0,0 +1,236 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// +build ignore // this is a really tedious test for an old issue

package db_test

import (
    "crypto/rand"
    "log"
    "os"
    "testing"
    "time"

    "github.com/syncthing/syncthing/lib/sync"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
    "github.com/syndtr/goleveldb/leveldb/util"
)

var keys [][]byte

func init() {
    for i := 0; i < nItems; i++ {
        keys = append(keys, randomData(1))
    }
}

const nItems = 10000

func randomData(prefix byte) []byte {
    data := make([]byte, 1+32+64+32)
    _, err := rand.Reader.Read(data)
    if err != nil {
        panic(err)
    }
    return append([]byte{prefix}, data...)
}

func setItems(db *leveldb.DB) error {
    batch := new(leveldb.Batch)
    for _, k1 := range keys {
        k2 := randomData(2)
        // k2 -> data
        batch.Put(k2, randomData(42))
        // k1 -> k2
        batch.Put(k1, k2)
    }
    if testing.Verbose() {
        log.Printf("batch write (set) %p", batch)
    }
    return db.Write(batch, nil)
}

func clearItems(db *leveldb.DB) error {
    snap, err := db.GetSnapshot()
    if err != nil {
        return err
    }
    defer snap.Release()

    // Iterate over k2

    it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
    defer it.Release()

    batch := new(leveldb.Batch)
    for it.Next() {
        k1 := it.Key()
        k2 := it.Value()

        // k2 should exist
        _, err := snap.Get(k2, nil)
        if err != nil {
            return err
        }

        // Delete the k1 => k2 mapping first
        batch.Delete(k1)
        // Then the k2 => data mapping
        batch.Delete(k2)
    }
    if testing.Verbose() {
        log.Printf("batch write (clear) %p", batch)
    }
    return db.Write(batch, nil)
}

func scanItems(db *leveldb.DB) error {
    snap, err := db.GetSnapshot()
    if testing.Verbose() {
        log.Printf("snap create %p", snap)
    }
    if err != nil {
        return err
    }
    defer func() {
        if testing.Verbose() {
            log.Printf("snap release %p", snap)
        }
        snap.Release()
    }()

    // Iterate from the start of k2 space to the end
    it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
    defer it.Release()

    i := 0
    for it.Next() {
        // k2 => k1 => data
        k1 := it.Key()
        k2 := it.Value()
        _, err := snap.Get(k2, nil)
        if err != nil {
            log.Printf("k1: %x", k1)
            log.Printf("k2: %x (missing)", k2)
            return err
        }
        i++
    }
    if testing.Verbose() {
        log.Println("scanned", i)
    }
    return nil
}

func TestConcurrentSetClear(t *testing.T) {
    if testing.Short() {
        return
    }

    dur := 30 * time.Second
    t0 := time.Now()
    wg := sync.NewWaitGroup()

    os.RemoveAll("testdata/concurrent-set-clear.db")
    db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{OpenFilesCacheCapacity: 10})
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll("testdata/concurrent-set-clear.db")

    errChan := make(chan error, 3)

    wg.Add(1)
    go func() {
        defer wg.Done()
        for time.Since(t0) < dur {
            if err := setItems(db); err != nil {
                errChan <- err
                return
            }
            if err := clearItems(db); err != nil {
                errChan <- err
                return
            }
        }
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        for time.Since(t0) < dur {
            if err := scanItems(db); err != nil {
                errChan <- err
                return
            }
        }
    }()

    go func() {
        wg.Wait()
        errChan <- nil
    }()

    err = <-errChan
    if err != nil {
        t.Error(err)
    }
    db.Close()
}

func TestConcurrentSetOnly(t *testing.T) {
    if testing.Short() {
        return
    }

    dur := 30 * time.Second
    t0 := time.Now()
    wg := sync.NewWaitGroup()

    os.RemoveAll("testdata/concurrent-set-only.db")
    db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", &opt.Options{OpenFilesCacheCapacity: 10})
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll("testdata/concurrent-set-only.db")

    errChan := make(chan error, 3)

    wg.Add(1)
    go func() {
        defer wg.Done()
        for time.Since(t0) < dur {
            if err := setItems(db); err != nil {
                errChan <- err
                return
            }
        }
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        for time.Since(t0) < dur {
            if err := scanItems(db); err != nil {
                errChan <- err
                return
            }
        }
    }()

    go func() {
        wg.Wait()
        errChan <- nil
    }()

    err = <-errChan
    if err != nil {
        t.Error(err)
    }
}
20 lib/db/debug.go (new normal file)
@@ -0,0 +1,20 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package db

import (
    "os"
    "strings"

    "github.com/calmh/logger"
)

var (
    debug   = strings.Contains(os.Getenv("STTRACE"), "files") || os.Getenv("STTRACE") == "all"
    debugDB = strings.Contains(os.Getenv("STTRACE"), "db") || os.Getenv("STTRACE") == "all"
    l       = logger.DefaultLogger
)
1058 lib/db/leveldb.go (new normal file)
File diff suppressed because it is too large
49 lib/db/leveldb_test.go (new normal file)
@@ -0,0 +1,49 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package db

import (
    "bytes"
    "testing"
)

func TestDeviceKey(t *testing.T) {
    fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
    dev := []byte("device67890123456789012345678901")
    name := []byte("name")

    key := deviceKey(fld, dev, name)

    fld2 := deviceKeyFolder(key)
    if bytes.Compare(fld2, fld) != 0 {
        t.Errorf("wrong folder %q != %q", fld2, fld)
    }
    dev2 := deviceKeyDevice(key)
    if bytes.Compare(dev2, dev) != 0 {
        t.Errorf("wrong device %q != %q", dev2, dev)
    }
    name2 := deviceKeyName(key)
    if bytes.Compare(name2, name) != 0 {
        t.Errorf("wrong name %q != %q", name2, name)
    }
}

func TestGlobalKey(t *testing.T) {
    fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
    name := []byte("name")

    key := globalKey(fld, name)

    fld2 := globalKeyFolder(key)
    if bytes.Compare(fld2, fld) != 0 {
        t.Errorf("wrong folder %q != %q", fld2, fld)
    }
    name2 := globalKeyName(key)
    if bytes.Compare(name2, name) != 0 {
        t.Errorf("wrong name %q != %q", name2, name)
    }
}
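The key widths exercised above (a 64-byte folder field, a 32-byte device ID, then a variable-length name) mirror the layout toBlockKey uses in blockmap.go. The helpers under test (deviceKey, deviceKeyFolder, and friends) live in leveldb.go, whose diff is suppressed above, so the following is only an illustrative guess at what such a key looks like, not the actual implementation; the name sketchDeviceKey and the exact constant are assumptions.

// Hypothetical sketch of a device key with the same fixed-width layout as
// toBlockKey: one key-type byte, 64 bytes of folder, 32 bytes of device ID,
// then the file name. The real code is in the suppressed leveldb.go diff.
func sketchDeviceKey(folder, device, name []byte) []byte {
    k := make([]byte, 1+64+32+len(name))
    k[0] = KeyTypeBlock // placeholder; the real key uses its own type constant
    copy(k[1:], folder)
    copy(k[1+64:], device)
    copy(k[1+64+32:], name)
    return k
}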
167 lib/db/leveldb_xdr.go (new normal file)
@@ -0,0 +1,167 @@
// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************

package db

import (
    "bytes"
    "io"

    "github.com/calmh/xdr"
)

/*

fileVersion Structure:

 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                        Vector Structure                       \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                        Length of device                       |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                    device (variable length)                   \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


struct fileVersion {
    Vector version;
    opaque device<>;
}

*/

func (o fileVersion) EncodeXDR(w io.Writer) (int, error) {
    var xw = xdr.NewWriter(w)
    return o.EncodeXDRInto(xw)
}

func (o fileVersion) MarshalXDR() ([]byte, error) {
    return o.AppendXDR(make([]byte, 0, 128))
}

func (o fileVersion) MustMarshalXDR() []byte {
    bs, err := o.MarshalXDR()
    if err != nil {
        panic(err)
    }
    return bs
}

func (o fileVersion) AppendXDR(bs []byte) ([]byte, error) {
    var aw = xdr.AppendWriter(bs)
    var xw = xdr.NewWriter(&aw)
    _, err := o.EncodeXDRInto(xw)
    return []byte(aw), err
}

func (o fileVersion) EncodeXDRInto(xw *xdr.Writer) (int, error) {
    _, err := o.version.EncodeXDRInto(xw)
    if err != nil {
        return xw.Tot(), err
    }
    xw.WriteBytes(o.device)
    return xw.Tot(), xw.Error()
}

func (o *fileVersion) DecodeXDR(r io.Reader) error {
    xr := xdr.NewReader(r)
    return o.DecodeXDRFrom(xr)
}

func (o *fileVersion) UnmarshalXDR(bs []byte) error {
    var br = bytes.NewReader(bs)
    var xr = xdr.NewReader(br)
    return o.DecodeXDRFrom(xr)
}

func (o *fileVersion) DecodeXDRFrom(xr *xdr.Reader) error {
    (&o.version).DecodeXDRFrom(xr)
    o.device = xr.ReadBytes()
    return xr.Error()
}

/*

versionList Structure:

 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                       Number of versions                      |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\              Zero or more fileVersion Structures              \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


struct versionList {
    fileVersion versions<>;
}

*/

func (o versionList) EncodeXDR(w io.Writer) (int, error) {
    var xw = xdr.NewWriter(w)
    return o.EncodeXDRInto(xw)
}

func (o versionList) MarshalXDR() ([]byte, error) {
    return o.AppendXDR(make([]byte, 0, 128))
}

func (o versionList) MustMarshalXDR() []byte {
    bs, err := o.MarshalXDR()
    if err != nil {
        panic(err)
    }
    return bs
}

func (o versionList) AppendXDR(bs []byte) ([]byte, error) {
    var aw = xdr.AppendWriter(bs)
    var xw = xdr.NewWriter(&aw)
    _, err := o.EncodeXDRInto(xw)
    return []byte(aw), err
}

func (o versionList) EncodeXDRInto(xw *xdr.Writer) (int, error) {
    xw.WriteUint32(uint32(len(o.versions)))
    for i := range o.versions {
        _, err := o.versions[i].EncodeXDRInto(xw)
        if err != nil {
            return xw.Tot(), err
        }
    }
    return xw.Tot(), xw.Error()
}

func (o *versionList) DecodeXDR(r io.Reader) error {
    xr := xdr.NewReader(r)
    return o.DecodeXDRFrom(xr)
}

func (o *versionList) UnmarshalXDR(bs []byte) error {
    var br = bytes.NewReader(bs)
    var xr = xdr.NewReader(br)
    return o.DecodeXDRFrom(xr)
}

func (o *versionList) DecodeXDRFrom(xr *xdr.Reader) error {
    _versionsSize := int(xr.ReadUint32())
    if _versionsSize < 0 {
        return xdr.ElementSizeExceeded("versions", _versionsSize, 0)
    }
    o.versions = make([]fileVersion, _versionsSize)
    for i := range o.versions {
        (&o.versions[i]).DecodeXDRFrom(xr)
    }
    return xr.Error()
}
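The generated code round-trips these structures through the xdr writer/reader pair. A small in-package sketch (not part of the commit; it assumes the zero value of the embedded version field, the "Vector Structure" in the diagram, encodes cleanly):

// roundTripVersionList encodes a versionList to XDR bytes and decodes it
// back, using only the generated methods shown above.
func roundTripVersionList(device []byte) (versionList, error) {
    in := versionList{versions: []fileVersion{{device: device}}}
    bs, err := in.MarshalXDR()
    if err != nil {
        return versionList{}, err
    }
    var out versionList
    err = out.UnmarshalXDR(bs)
    return out, err
}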
159 lib/db/namespaced.go (new normal file)
@@ -0,0 +1,159 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package db

import (
    "encoding/binary"
    "time"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/util"
)

// NamespacedKV is a simple key-value store using a specific namespace within
// a leveldb.
type NamespacedKV struct {
    db     *leveldb.DB
    prefix []byte
}

// NewNamespacedKV returns a new NamespacedKV that lives in the namespace
// specified by the prefix.
func NewNamespacedKV(db *leveldb.DB, prefix string) *NamespacedKV {
    return &NamespacedKV{
        db:     db,
        prefix: []byte(prefix),
    }
}

// Reset removes all entries in this namespace.
func (n *NamespacedKV) Reset() {
    it := n.db.NewIterator(util.BytesPrefix(n.prefix), nil)
    defer it.Release()
    batch := new(leveldb.Batch)
    for it.Next() {
        batch.Delete(it.Key())
        if batch.Len() > batchFlushSize {
            if err := n.db.Write(batch, nil); err != nil {
                panic(err)
            }
            batch.Reset()
        }
    }
    if batch.Len() > 0 {
        if err := n.db.Write(batch, nil); err != nil {
            panic(err)
        }
    }
}

// PutInt64 stores a new int64. Any existing value (even if of another type)
// is overwritten.
func (n *NamespacedKV) PutInt64(key string, val int64) {
    keyBs := append(n.prefix, []byte(key)...)
    var valBs [8]byte
    binary.BigEndian.PutUint64(valBs[:], uint64(val))
    n.db.Put(keyBs, valBs[:], nil)
}

// Int64 returns the stored value interpreted as an int64 and a boolean that
// is false if no value was stored at the key.
func (n *NamespacedKV) Int64(key string) (int64, bool) {
    keyBs := append(n.prefix, []byte(key)...)
    valBs, err := n.db.Get(keyBs, nil)
    if err != nil {
        return 0, false
    }
    val := binary.BigEndian.Uint64(valBs)
    return int64(val), true
}

// PutTime stores a new time.Time. Any existing value (even if of another
// type) is overwritten.
func (n *NamespacedKV) PutTime(key string, val time.Time) {
    keyBs := append(n.prefix, []byte(key)...)
    valBs, _ := val.MarshalBinary() // never returns an error
    n.db.Put(keyBs, valBs, nil)
}

// Time returns the stored value interpreted as a time.Time and a boolean
// that is false if no value was stored at the key.
func (n NamespacedKV) Time(key string) (time.Time, bool) {
    var t time.Time
    keyBs := append(n.prefix, []byte(key)...)
    valBs, err := n.db.Get(keyBs, nil)
    if err != nil {
        return t, false
    }
    err = t.UnmarshalBinary(valBs)
    return t, err == nil
}

// PutString stores a new string. Any existing value (even if of another type)
// is overwritten.
func (n *NamespacedKV) PutString(key, val string) {
    keyBs := append(n.prefix, []byte(key)...)
    n.db.Put(keyBs, []byte(val), nil)
}

// String returns the stored value interpreted as a string and a boolean that
// is false if no value was stored at the key.
func (n NamespacedKV) String(key string) (string, bool) {
    keyBs := append(n.prefix, []byte(key)...)
    valBs, err := n.db.Get(keyBs, nil)
    if err != nil {
        return "", false
    }
    return string(valBs), true
}

// PutBytes stores a new byte slice. Any existing value (even if of another type)
// is overwritten.
func (n *NamespacedKV) PutBytes(key string, val []byte) {
    keyBs := append(n.prefix, []byte(key)...)
    n.db.Put(keyBs, val, nil)
}

// Bytes returns the stored value as a raw byte slice and a boolean that
// is false if no value was stored at the key.
func (n NamespacedKV) Bytes(key string) ([]byte, bool) {
    keyBs := append(n.prefix, []byte(key)...)
    valBs, err := n.db.Get(keyBs, nil)
    if err != nil {
        return nil, false
    }
    return valBs, true
}

// PutBool stores a new boolean. Any existing value (even if of another type)
// is overwritten.
func (n *NamespacedKV) PutBool(key string, val bool) {
    keyBs := append(n.prefix, []byte(key)...)
    if val {
        n.db.Put(keyBs, []byte{0x0}, nil)
    } else {
        n.db.Put(keyBs, []byte{0x1}, nil)
    }
}

// Bool returns the stored value as a boolean and a boolean that
// is false if no value was stored at the key.
func (n NamespacedKV) Bool(key string) (bool, bool) {
    keyBs := append(n.prefix, []byte(key)...)
    valBs, err := n.db.Get(keyBs, nil)
    if err != nil {
        return false, false
    }
    return valBs[0] == 0x0, true
}

// Delete deletes the specified key. It is allowed to delete a nonexistent
// key.
func (n NamespacedKV) Delete(key string) {
    keyBs := append(n.prefix, []byte(key)...)
    n.db.Delete(keyBs, nil)
}
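Because every key is stored under the namespace prefix, two NamespacedKV instances on the same database never see each other's entries, and Reset only clears its own namespace. A short in-package usage sketch (not part of the commit; the function name and "demo" namespace are made up, and it uses only the API shown above):

// namespacedExample persists a couple of typed values under the "demo"
// namespace, reads them back, and then wipes only that namespace.
func namespacedExample(ldb *leveldb.DB) (int64, string, bool) {
    kv := NewNamespacedKV(ldb, "demo")

    kv.PutInt64("counter", 42)
    kv.PutString("name", "syncthing")

    counter, _ := kv.Int64("counter") // 42, true
    name, ok := kv.String("name")     // "syncthing", true

    // Reset removes the "demo" entries but leaves the rest of the database alone.
    kv.Reset()

    return counter, name, ok
}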
127 lib/db/namespaced_test.go (new normal file)
@@ -0,0 +1,127 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package db

import (
    "testing"
    "time"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/storage"
)

func TestNamespacedInt(t *testing.T) {
    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
    if err != nil {
        t.Fatal(err)
    }

    n1 := NewNamespacedKV(ldb, "foo")
    n2 := NewNamespacedKV(ldb, "bar")

    // Key is missing to start with

    if v, ok := n1.Int64("test"); v != 0 || ok {
        t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
    }

    n1.PutInt64("test", 42)

    // It should now exist in n1

    if v, ok := n1.Int64("test"); v != 42 || !ok {
        t.Errorf("Incorrect return v %v != 42 || ok %v != true", v, ok)
    }

    // ... but not in n2, which is in a different namespace

    if v, ok := n2.Int64("test"); v != 0 || ok {
        t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
    }

    n1.Delete("test")

    // It should no longer exist

    if v, ok := n1.Int64("test"); v != 0 || ok {
        t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
    }
}

func TestNamespacedTime(t *testing.T) {
    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
    if err != nil {
        t.Fatal(err)
    }

    n1 := NewNamespacedKV(ldb, "foo")

    if v, ok := n1.Time("test"); v != (time.Time{}) || ok {
        t.Errorf("Incorrect return v %v != %v || ok %v != false", v, time.Time{}, ok)
    }

    now := time.Now()
    n1.PutTime("test", now)

    if v, ok := n1.Time("test"); v != now || !ok {
        t.Errorf("Incorrect return v %v != %v || ok %v != true", v, now, ok)
    }
}

func TestNamespacedString(t *testing.T) {
    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
    if err != nil {
        t.Fatal(err)
    }

    n1 := NewNamespacedKV(ldb, "foo")

    if v, ok := n1.String("test"); v != "" || ok {
        t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
    }

    n1.PutString("test", "yo")

    if v, ok := n1.String("test"); v != "yo" || !ok {
        t.Errorf("Incorrect return v %q != \"yo\" || ok %v != true", v, ok)
    }
}

func TestNamespacedReset(t *testing.T) {
    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
    if err != nil {
        t.Fatal(err)
    }

    n1 := NewNamespacedKV(ldb, "foo")

    n1.PutString("test1", "yo1")
    n1.PutString("test2", "yo2")
    n1.PutString("test3", "yo3")

    if v, ok := n1.String("test1"); v != "yo1" || !ok {
        t.Errorf("Incorrect return v %q != \"yo1\" || ok %v != true", v, ok)
    }
    if v, ok := n1.String("test2"); v != "yo2" || !ok {
        t.Errorf("Incorrect return v %q != \"yo2\" || ok %v != true", v, ok)
    }
    if v, ok := n1.String("test3"); v != "yo3" || !ok {
        t.Errorf("Incorrect return v %q != \"yo3\" || ok %v != true", v, ok)
    }

    n1.Reset()

    if v, ok := n1.String("test1"); v != "" || ok {
        t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
    }
    if v, ok := n1.String("test2"); v != "" || ok {
        t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
    }
    if v, ok := n1.String("test3"); v != "" || ok {
        t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
    }
}
237 lib/db/set.go (new normal file)
@@ -0,0 +1,237 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// Package db provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
package db

import (
    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/lib/osutil"
    "github.com/syncthing/syncthing/lib/sync"
    "github.com/syndtr/goleveldb/leveldb"
)

type FileSet struct {
    localVersion map[protocol.DeviceID]int64
    mutex        sync.Mutex
    folder       string
    db           *leveldb.DB
    blockmap     *BlockMap
}

// FileIntf is the set of methods implemented by both protocol.FileInfo and
// protocol.FileInfoTruncated.
type FileIntf interface {
    Size() int64
    IsDeleted() bool
    IsInvalid() bool
    IsDirectory() bool
    IsSymlink() bool
    HasPermissionBits() bool
}

// The Iterator is called with either a protocol.FileInfo or a
// protocol.FileInfoTruncated (depending on the method) and returns true to
// continue iteration, false to stop.
type Iterator func(f FileIntf) bool

func NewFileSet(folder string, db *leveldb.DB) *FileSet {
    var s = FileSet{
        localVersion: make(map[protocol.DeviceID]int64),
        folder:       folder,
        db:           db,
        blockmap:     NewBlockMap(db, folder),
        mutex:        sync.NewMutex(),
    }

    ldbCheckGlobals(db, []byte(folder))

    var deviceID protocol.DeviceID
    ldbWithAllFolderTruncated(db, []byte(folder), func(device []byte, f FileInfoTruncated) bool {
        copy(deviceID[:], device)
        if f.LocalVersion > s.localVersion[deviceID] {
            s.localVersion[deviceID] = f.LocalVersion
        }
        return true
    })
    if debug {
        l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion)
    }
    clock(s.localVersion[protocol.LocalDeviceID])

    return &s
}

func (s *FileSet) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
    if debug {
        l.Debugf("%s Replace(%v, [%d])", s.folder, device, len(fs))
    }
    normalizeFilenames(fs)
    s.mutex.Lock()
    defer s.mutex.Unlock()
    s.localVersion[device] = ldbReplace(s.db, []byte(s.folder), device[:], fs)
    if len(fs) == 0 {
        // Reset the local version if all files were removed.
        s.localVersion[device] = 0
    }
    if device == protocol.LocalDeviceID {
        s.blockmap.Drop()
        s.blockmap.Add(fs)
    }
}

func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
    if debug {
        l.Debugf("%s Update(%v, [%d])", s.folder, device, len(fs))
    }
    normalizeFilenames(fs)
    s.mutex.Lock()
    defer s.mutex.Unlock()
    if device == protocol.LocalDeviceID {
        discards := make([]protocol.FileInfo, 0, len(fs))
        updates := make([]protocol.FileInfo, 0, len(fs))
        for _, newFile := range fs {
            existingFile, ok := ldbGet(s.db, []byte(s.folder), device[:], []byte(newFile.Name))
            if !ok || !existingFile.Version.Equal(newFile.Version) {
                discards = append(discards, existingFile)
                updates = append(updates, newFile)
            }
        }
        s.blockmap.Discard(discards)
        s.blockmap.Update(updates)
    }
    if lv := ldbUpdate(s.db, []byte(s.folder), device[:], fs); lv > s.localVersion[device] {
        s.localVersion[device] = lv
    }
}

func (s *FileSet) WithNeed(device protocol.DeviceID, fn Iterator) {
    if debug {
        l.Debugf("%s WithNeed(%v)", s.folder, device)
    }
    ldbWithNeed(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
}

func (s *FileSet) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
    if debug {
        l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
    }
    ldbWithNeed(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
}

func (s *FileSet) WithHave(device protocol.DeviceID, fn Iterator) {
    if debug {
        l.Debugf("%s WithHave(%v)", s.folder, device)
    }
    ldbWithHave(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
}

func (s *FileSet) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
    if debug {
        l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
    }
    ldbWithHave(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
}

func (s *FileSet) WithGlobal(fn Iterator) {
    if debug {
        l.Debugf("%s WithGlobal()", s.folder)
    }
    ldbWithGlobal(s.db, []byte(s.folder), nil, false, nativeFileIterator(fn))
}

func (s *FileSet) WithGlobalTruncated(fn Iterator) {
    if debug {
        l.Debugf("%s WithGlobalTruncated()", s.folder)
    }
    ldbWithGlobal(s.db, []byte(s.folder), nil, true, nativeFileIterator(fn))
}

func (s *FileSet) WithPrefixedGlobalTruncated(prefix string, fn Iterator) {
    if debug {
        l.Debugf("%s WithPrefixedGlobalTruncated(%q)", s.folder, prefix)
    }
    ldbWithGlobal(s.db, []byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn))
}

func (s *FileSet) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) {
    f, ok := ldbGet(s.db, []byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file)))
    f.Name = osutil.NativeFilename(f.Name)
    return f, ok
}

func (s *FileSet) GetGlobal(file string) (protocol.FileInfo, bool) {
    fi, ok := ldbGetGlobal(s.db, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), false)
    if !ok {
        return protocol.FileInfo{}, false
    }
    f := fi.(protocol.FileInfo)
    f.Name = osutil.NativeFilename(f.Name)
    return f, true
}

func (s *FileSet) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
    fi, ok := ldbGetGlobal(s.db, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), true)
    if !ok {
        return FileInfoTruncated{}, false
    }
    f := fi.(FileInfoTruncated)
    f.Name = osutil.NativeFilename(f.Name)
    return f, true
}

func (s *FileSet) Availability(file string) []protocol.DeviceID {
    return ldbAvailability(s.db, []byte(s.folder), []byte(osutil.NormalizedFilename(file)))
}

func (s *FileSet) LocalVersion(device protocol.DeviceID) int64 {
    s.mutex.Lock()
    defer s.mutex.Unlock()
    return s.localVersion[device]
}

// ListFolders returns the folder IDs seen in the database.
func ListFolders(db *leveldb.DB) []string {
    return ldbListFolders(db)
}

// DropFolder clears out all information related to the given folder from the
// database.
func DropFolder(db *leveldb.DB, folder string) {
    ldbDropFolder(db, []byte(folder))
    bm := &BlockMap{
        db:     db,
        folder: folder,
    }
    bm.Drop()
    NewVirtualMtimeRepo(db, folder).Drop()
}

func normalizeFilenames(fs []protocol.FileInfo) {
    for i := range fs {
        fs[i].Name = osutil.NormalizedFilename(fs[i].Name)
    }
}

func nativeFileIterator(fn Iterator) Iterator {
    return func(fi FileIntf) bool {
        switch f := fi.(type) {
        case protocol.FileInfo:
            f.Name = osutil.NativeFilename(f.Name)
            return fn(f)
        case FileInfoTruncated:
            f.Name = osutil.NativeFilename(f.Name)
            return fn(f)
        default:
            panic("unknown interface type")
        }
    }
}
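FileSet ties the pieces together: it normalizes names on the way in and out, tracks per-device local version counters, and keeps the BlockMap in sync for the local device. A brief usage sketch from outside the package (not part of the commit; the helper name and the "default" folder ID are assumptions, and the *leveldb.DB plus the scanned protocol.FileInfo slice are assumed to come from the caller):

package main

import (
    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/lib/db"
    "github.com/syndtr/goleveldb/leveldb"
)

// updateAndListNeeds records a local scan in the "default" folder, then
// collects the names of files this device still needs from the global set.
func updateAndListNeeds(ldb *leveldb.DB, scanned []protocol.FileInfo) []string {
    fs := db.NewFileSet("default", ldb)
    fs.Update(protocol.LocalDeviceID, scanned)

    var need []string
    fs.WithNeedTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
        need = append(need, fi.(db.FileInfoTruncated).Name)
        return true // keep iterating
    })
    return need
}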
783 lib/db/set_test.go (new normal file)
@@ -0,0 +1,783 @@
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package db_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
var remoteDevice0, remoteDevice1 protocol.DeviceID
|
||||
|
||||
func init() {
|
||||
remoteDevice0, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
|
||||
remoteDevice1, _ = protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
|
||||
}
|
||||
|
||||
const myID = 1
|
||||
|
||||
func genBlocks(n int) []protocol.BlockInfo {
|
||||
b := make([]protocol.BlockInfo, n)
|
||||
for i := range b {
|
||||
h := make([]byte, 32)
|
||||
for j := range h {
|
||||
h[j] = byte(i + j)
|
||||
}
|
||||
b[i].Size = int32(i)
|
||||
b[i].Hash = h
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func globalList(s *db.FileSet) []protocol.FileInfo {
|
||||
var fs []protocol.FileInfo
|
||||
s.WithGlobal(func(fi db.FileIntf) bool {
|
||||
f := fi.(protocol.FileInfo)
|
||||
fs = append(fs, f)
|
||||
return true
|
||||
})
|
||||
return fs
|
||||
}
|
||||
|
||||
func haveList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
|
||||
var fs []protocol.FileInfo
|
||||
s.WithHave(n, func(fi db.FileIntf) bool {
|
||||
f := fi.(protocol.FileInfo)
|
||||
fs = append(fs, f)
|
||||
return true
|
||||
})
|
||||
return fs
|
||||
}
|
||||
|
||||
func needList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
|
||||
var fs []protocol.FileInfo
|
||||
s.WithNeed(n, func(fi db.FileIntf) bool {
|
||||
f := fi.(protocol.FileInfo)
|
||||
fs = append(fs, f)
|
||||
return true
|
||||
})
|
||||
return fs
|
||||
}
|
||||
|
||||
type fileList []protocol.FileInfo
|
||||
|
||||
func (l fileList) Len() int {
|
||||
return len(l)
|
||||
}
|
||||
|
||||
func (l fileList) Less(a, b int) bool {
|
||||
return l[a].Name < l[b].Name
|
||||
}
|
||||
|
||||
func (l fileList) Swap(a, b int) {
|
||||
l[a], l[b] = l[b], l[a]
|
||||
}
|
||||
|
||||
func (l fileList) String() string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("[]protocol.FileList{\n")
|
||||
for _, f := range l {
|
||||
fmt.Fprintf(&b, " %q: #%d, %d bytes, %d blocks, flags=%o\n", f.Name, f.Version, f.Size(), len(f.Blocks), f.Flags)
|
||||
}
|
||||
b.WriteString("}")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func TestGlobalSet(t *testing.T) {
|
||||
|
||||
ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
m := db.NewFileSet("test", ldb)
|
||||
|
||||
local0 := fileList{
|
||||
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
|
||||
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(2)},
|
||||
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(3)},
|
||||
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(4)},
|
||||
protocol.FileInfo{Name: "z", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(8)},
|
||||
}
|
||||
local1 := fileList{
|
||||
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
|
||||
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(2)},
|
||||
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(3)},
|
||||
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(4)},
|
||||
protocol.FileInfo{Name: "z", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted},
|
||||
}
|
||||
localTot := fileList{
|
||||
local0[0],
|
||||
local0[1],
|
||||
local0[2],
|
||||
local0[3],
|
||||
protocol.FileInfo{Name: "z", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted},
|
||||
}
|
||||
|
||||
remote0 := fileList{
|
||||
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
|
||||
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(2)},
|
||||
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(5)},
|
||||
}
|
||||
remote1 := fileList{
|
||||
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(6)},
|
||||
protocol.FileInfo{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(7)},
|
||||
}
|
||||
remoteTot := fileList{
|
||||
remote0[0],
|
||||
remote1[0],
|
||||
remote0[2],
|
||||
remote1[1],
|
||||
}
|
||||
|
||||
expectedGlobal := fileList{
|
||||
remote0[0], // a
|
||||
remote1[0], // b
|
||||
remote0[2], // c
|
||||
localTot[3], // d
|
||||
remote1[1], // e
|
||||
localTot[4], // z
|
||||
}
|
||||
|
||||
expectedLocalNeed := fileList{
|
||||
remote1[0],
|
||||
remote0[2],
|
||||
remote1[1],
|
||||
}
|
||||
|
||||
expectedRemoteNeed := fileList{
|
||||
local0[3],
|
||||
}
|
||||
|
||||
m.Replace(protocol.LocalDeviceID, local0)
|
||||
m.Replace(protocol.LocalDeviceID, local1)
|
||||
m.Replace(remoteDevice0, remote0)
|
||||
m.Update(remoteDevice0, remote1)
|
||||
|
||||
g := fileList(globalList(m))
|
||||
sort.Sort(g)
|
||||
|
||||
if fmt.Sprint(g) != fmt.Sprint(expectedGlobal) {
|
||||
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
|
||||
}
|
||||
|
||||
h := fileList(haveList(m, protocol.LocalDeviceID))
|
||||
sort.Sort(h)
|
||||
|
||||
if fmt.Sprint(h) != fmt.Sprint(localTot) {
|
||||
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, localTot)
|
||||
}
|
||||
|
||||
h = fileList(haveList(m, remoteDevice0))
|
||||
sort.Sort(h)
|
||||
|
||||
if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
|
||||
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, remoteTot)
|
||||
}
|
||||
|
||||
n := fileList(needList(m, protocol.LocalDeviceID))
|
||||
sort.Sort(n)
|
||||
|
||||
if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
|
||||
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedLocalNeed)
|
||||
}
|
||||
|
||||
n = fileList(needList(m, remoteDevice0))
|
||||
sort.Sort(n)
|
||||
|
||||
if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
|
||||
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
|
||||
}
|
||||
|
||||
f, ok := m.Get(protocol.LocalDeviceID, "b")
|
||||
if !ok {
|
||||
t.Error("Unexpectedly not OK")
|
||||
}
|
||||
if fmt.Sprint(f) != fmt.Sprint(localTot[1]) {
|
||||
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1])
|
||||
}
|
||||
|
||||
f, ok = m.Get(remoteDevice0, "b")
|
||||
if !ok {
|
||||
t.Error("Unexpectedly not OK")
|
||||
}
|
||||
if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
|
||||
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
|
||||
}
|
||||
|
||||
f, ok = m.GetGlobal("b")
|
||||
if !ok {
|
||||
t.Error("Unexpectedly not OK")
|
||||
}
|
||||
if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
|
||||
t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, remote1[0])
|
||||
}
|
||||
|
||||
f, ok = m.Get(protocol.LocalDeviceID, "zz")
|
||||
if ok {
|
||||
t.Error("Unexpectedly OK")
|
||||
}
|
||||
if f.Name != "" {
|
||||
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
|
||||
}
|
||||
|
||||
f, ok = m.GetGlobal("zz")
|
||||
if ok {
|
||||
t.Error("Unexpectedly OK")
|
||||
}
|
||||
if f.Name != "" {
|
||||
t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
|
||||
}
|
||||
|
||||
av := []protocol.DeviceID{protocol.LocalDeviceID, remoteDevice0}
|
||||
a := m.Availability("a")
|
||||
if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) {
|
||||
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
|
||||
}
|
||||
a = m.Availability("b")
|
||||
if len(a) != 1 || a[0] != remoteDevice0 {
|
||||
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteDevice0)
|
||||
}
|
||||
a = m.Availability("d")
|
||||
if len(a) != 1 || a[0] != protocol.LocalDeviceID {
|
||||
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalDeviceID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeedWithInvalid(t *testing.T) {
|
||||
ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := db.NewFileSet("test", ldb)
|
||||
|
||||
localHave := fileList{
|
||||
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
|
||||
}
|
||||
remote0Have := fileList{
|
||||
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
|
||||
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
|
||||
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(7)},
|
||||
}
|
||||
remote1Have := fileList{
|
||||
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(7)},
|
||||
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
|
||||
protocol.FileInfo{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1004}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
|
||||
}
|
||||
|
||||
expectedNeed := fileList{
|
||||
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
|
||||
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(7)},
|
||||
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(7)},
|
||||
}
|
||||
|
||||
s.Replace(protocol.LocalDeviceID, localHave)
|
||||
s.Replace(remoteDevice0, remote0Have)
|
||||
s.Replace(remoteDevice1, remote1Have)
|
||||
|
||||
need := fileList(needList(s, protocol.LocalDeviceID))
|
||||
sort.Sort(need)
|
||||
|
||||
if fmt.Sprint(need) != fmt.Sprint(expectedNeed) {
|
||||
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, expectedNeed)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateToInvalid(t *testing.T) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	s := db.NewFileSet("test", ldb)

	localHave := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(7)},
	}

	s.Replace(protocol.LocalDeviceID, localHave)

	have := fileList(haveList(s, protocol.LocalDeviceID))
	sort.Sort(have)

	if fmt.Sprint(have) != fmt.Sprint(localHave) {
		t.Errorf("Have incorrect before invalidation;\n A: %v !=\n E: %v", have, localHave)
	}

	localHave[1] = protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagInvalid}
	s.Update(protocol.LocalDeviceID, localHave[1:2])

	have = fileList(haveList(s, protocol.LocalDeviceID))
	sort.Sort(have)

	if fmt.Sprint(have) != fmt.Sprint(localHave) {
		t.Errorf("Have incorrect after invalidation;\n A: %v !=\n E: %v", have, localHave)
	}
}

func TestInvalidAvailability(t *testing.T) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	s := db.NewFileSet("test", ldb)

	remote0Have := fileList{
		protocol.FileInfo{Name: "both", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "r1only", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
		protocol.FileInfo{Name: "r0only", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(7)},
		protocol.FileInfo{Name: "none", Version: protocol.Vector{{ID: myID, Value: 1004}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
	}
	remote1Have := fileList{
		protocol.FileInfo{Name: "both", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "r1only", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(7)},
		protocol.FileInfo{Name: "r0only", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
		protocol.FileInfo{Name: "none", Version: protocol.Vector{{ID: myID, Value: 1004}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
	}

	s.Replace(remoteDevice0, remote0Have)
	s.Replace(remoteDevice1, remote1Have)

	if av := s.Availability("both"); len(av) != 2 {
		t.Error("Incorrect availability for 'both':", av)
	}

	if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteDevice0 {
		t.Error("Incorrect availability for 'r0only':", av)
	}

	if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteDevice1 {
		t.Error("Incorrect availability for 'r1only':", av)
	}

	if av := s.Availability("none"); len(av) != 0 {
		t.Error("Incorrect availability for 'none':", av)
	}
}

func Benchmark10kReplace(b *testing.B) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	var local []protocol.FileInfo
	for i := 0; i < 10000; i++ {
		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m := db.NewFileSet("test", ldb)
		m.Replace(protocol.LocalDeviceID, local)
	}
}

func Benchmark10kUpdateChg(b *testing.B) {
	var remote []protocol.FileInfo
	for i := 0; i < 10000; i++ {
		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}

	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	m := db.NewFileSet("test", ldb)
	m.Replace(remoteDevice0, remote)

	var local []protocol.FileInfo
	for i := 0; i < 10000; i++ {
		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}

	m.Replace(protocol.LocalDeviceID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		for j := range local {
			local[j].Version = local[j].Version.Update(myID)
		}
		b.StartTimer()
		m.Update(protocol.LocalDeviceID, local)
	}
}

func Benchmark10kUpdateSme(b *testing.B) {
	var remote []protocol.FileInfo
	for i := 0; i < 10000; i++ {
		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}

	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}
	m := db.NewFileSet("test", ldb)
	m.Replace(remoteDevice0, remote)

	var local []protocol.FileInfo
	for i := 0; i < 10000; i++ {
		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}

	m.Replace(protocol.LocalDeviceID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Update(protocol.LocalDeviceID, local)
	}
}

func Benchmark10kNeed2k(b *testing.B) {
	var remote []protocol.FileInfo
	for i := 0; i < 10000; i++ {
		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}

	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	m := db.NewFileSet("test", ldb)
	m.Replace(remoteDevice0, remote)

	var local []protocol.FileInfo
	for i := 0; i < 8000; i++ {
		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}
	for i := 8000; i < 10000; i++ {
		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{1, 980}}})
	}

	m.Replace(protocol.LocalDeviceID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fs := needList(m, protocol.LocalDeviceID)
		if l := len(fs); l != 2000 {
			b.Errorf("wrong length %d != 2k", l)
		}
	}
}

func Benchmark10kHaveFullList(b *testing.B) {
	var remote []protocol.FileInfo
	for i := 0; i < 10000; i++ {
		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}

	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	m := db.NewFileSet("test", ldb)
	m.Replace(remoteDevice0, remote)

	var local []protocol.FileInfo
	for i := 0; i < 2000; i++ {
		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}
	for i := 2000; i < 10000; i++ {
		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{1, 980}}})
	}

	m.Replace(protocol.LocalDeviceID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fs := haveList(m, protocol.LocalDeviceID)
		if l := len(fs); l != 10000 {
			b.Errorf("wrong length %d != 10k", l)
		}
	}
}

func Benchmark10kGlobal(b *testing.B) {
	var remote []protocol.FileInfo
	for i := 0; i < 10000; i++ {
		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}

	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	m := db.NewFileSet("test", ldb)
	m.Replace(remoteDevice0, remote)

	var local []protocol.FileInfo
	for i := 0; i < 2000; i++ {
		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
	}
	for i := 2000; i < 10000; i++ {
		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{1, 980}}})
	}

	m.Replace(protocol.LocalDeviceID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fs := globalList(m)
		if l := len(fs); l != 10000 {
			b.Errorf("wrong length %d != 10k", l)
		}
	}
}

func TestGlobalReset(t *testing.T) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	m := db.NewFileSet("test", ldb)

	local := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}},
	}

	remote := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}},
		{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}},
		{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}},
	}

	m.Replace(protocol.LocalDeviceID, local)
	g := globalList(m)
	sort.Sort(fileList(g))

	if fmt.Sprint(g) != fmt.Sprint(local) {
		t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
	}

	m.Replace(remoteDevice0, remote)
	m.Replace(remoteDevice0, nil)

	g = globalList(m)
	sort.Sort(fileList(g))

	if fmt.Sprint(g) != fmt.Sprint(local) {
		t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
	}
}

func TestNeed(t *testing.T) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	m := db.NewFileSet("test", ldb)

	local := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}},
	}

	remote := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}},
		{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}},
		{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}},
	}

	shouldNeed := []protocol.FileInfo{
		{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}},
		{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}},
		{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}},
	}

	m.Replace(protocol.LocalDeviceID, local)
	m.Replace(remoteDevice0, remote)

	need := needList(m, protocol.LocalDeviceID)

	sort.Sort(fileList(need))
	sort.Sort(fileList(shouldNeed))

	if fmt.Sprint(need) != fmt.Sprint(shouldNeed) {
		t.Errorf("Need incorrect;\n%v !=\n%v", need, shouldNeed)
	}
}

func TestLocalVersion(t *testing.T) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	m := db.NewFileSet("test", ldb)

	local1 := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}},
	}

	local2 := []protocol.FileInfo{
		local1[0],
		// [1] deleted
		local1[2],
		{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1002}}},
		{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}},
	}

	m.Replace(protocol.LocalDeviceID, local1)
	c0 := m.LocalVersion(protocol.LocalDeviceID)

	m.Replace(protocol.LocalDeviceID, local2)
	c1 := m.LocalVersion(protocol.LocalDeviceID)
	if !(c1 > c0) {
		t.Fatal("Local version number should have incremented")
	}
}

func TestListDropFolder(t *testing.T) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	s0 := db.NewFileSet("test0", ldb)
	local1 := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}},
		{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}},
	}
	s0.Replace(protocol.LocalDeviceID, local1)

	s1 := db.NewFileSet("test1", ldb)
	local2 := []protocol.FileInfo{
		{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1002}}},
		{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1002}}},
		{Name: "f", Version: protocol.Vector{{ID: myID, Value: 1002}}},
	}
	s1.Replace(remoteDevice0, local2)

	// Check that we have both folders and their data is in the global list

	expectedFolderList := []string{"test0", "test1"}
	if actualFolderList := db.ListFolders(ldb); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
		t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
	}
	if l := len(globalList(s0)); l != 3 {
		t.Errorf("Incorrect global length %d != 3 for s0", l)
	}
	if l := len(globalList(s1)); l != 3 {
		t.Errorf("Incorrect global length %d != 3 for s1", l)
	}

	// Drop one of them and check that it's gone.

	db.DropFolder(ldb, "test1")

	expectedFolderList = []string{"test0"}
	if actualFolderList := db.ListFolders(ldb); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
		t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
	}
	if l := len(globalList(s0)); l != 3 {
		t.Errorf("Incorrect global length %d != 3 for s0", l)
	}
	if l := len(globalList(s1)); l != 0 {
		t.Errorf("Incorrect global length %d != 0 for s1", l)
	}
}

func TestGlobalNeedWithInvalid(t *testing.T) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	s := db.NewFileSet("test1", ldb)

	rem0 := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1002}}, Flags: protocol.FlagInvalid},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
	}
	s.Replace(remoteDevice0, rem0)

	rem1 := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Flags: protocol.FlagInvalid},
	}
	s.Replace(remoteDevice1, rem1)

	total := fileList{
		// There's a valid copy of each file, so it should be merged
		protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
	}

	need := fileList(needList(s, protocol.LocalDeviceID))
	if fmt.Sprint(need) != fmt.Sprint(total) {
		t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, total)
	}

	global := fileList(globalList(s))
	if fmt.Sprint(global) != fmt.Sprint(total) {
		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", global, total)
	}
}

func TestLongPath(t *testing.T) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	s := db.NewFileSet("test", ldb)

	var b bytes.Buffer
	for i := 0; i < 100; i++ {
		b.WriteString("012345678901234567890123456789012345678901234567890")
	}
	name := b.String() // ~5000 characters

	local := []protocol.FileInfo{
		{Name: string(name), Version: protocol.Vector{{ID: myID, Value: 1000}}},
	}

	s.Replace(protocol.LocalDeviceID, local)

	gf := globalList(s)
	if l := len(gf); l != 1 {
		t.Fatalf("Incorrect len %d != 1 for global list", l)
	}
	if gf[0].Name != local[0].Name {
		t.Errorf("Incorrect long filename;\n%q !=\n%q",
			gf[0].Name, local[0].Name)
	}
}
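The Benchmark10k* functions above use the standard Go testing harness, so they can presumably be run on their own from the repository root with something like the following (the exact invocation depends on how the workspace and dependencies are set up):

go test -bench 10k ./lib/db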
1
lib/db/testdata/.gitignore
vendored
Normal file
1
lib/db/testdata/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
index.db
32
lib/db/truncated.go
Normal file
32
lib/db/truncated.go
Normal file
@@ -0,0 +1,32 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package db

import "github.com/syncthing/protocol"

type FileInfoTruncated struct {
	protocol.FileInfo
	ActualSize int64
}

func (f *FileInfoTruncated) UnmarshalXDR(bs []byte) error {
	err := f.FileInfo.UnmarshalXDR(bs)
	f.ActualSize = f.FileInfo.Size()
	f.FileInfo.Blocks = nil
	return err
}

func (f FileInfoTruncated) Size() int64 {
	return f.ActualSize
}

func BlocksToSize(num int) int64 {
	if num < 2 {
		return protocol.BlockSize / 2
	}
	return int64(num-1)*protocol.BlockSize + protocol.BlockSize/2
}
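The size estimate in BlocksToSize assumes every block except the last is full and the last holds half a block. A minimal stand-alone sketch of the same arithmetic (blockSize here is a stand-in constant; the real value comes from protocol.BlockSize and may differ):

package main

import "fmt"

// blockSize is a stand-in for protocol.BlockSize, used only for this sketch.
const blockSize = 128 << 10

func blocksToSize(num int) int64 {
	if num < 2 {
		return blockSize / 2
	}
	return int64(num-1)*blockSize + blockSize/2
}

func main() {
	fmt.Println(blocksToSize(1)) // half a block
	fmt.Println(blocksToSize(3)) // two full blocks plus half a block
}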
84
lib/db/virtualmtime.go
Normal file
84
lib/db/virtualmtime.go
Normal file
@@ -0,0 +1,84 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package db

import (
	"fmt"
	"time"

	"github.com/syndtr/goleveldb/leveldb"
)

// This type encapsulates a repository of mtimes for platforms where file mtimes
// can't be set to arbitrary values. For this to work, we need to store both
// the mtime we tried to set (the "actual" mtime) as well as the mtime the file
// has when we're done touching it (the "disk" mtime) so that we can tell if it
// was changed. So in GetMtime(), it's not sufficient that the record exists --
// the argument must also equal the "disk" mtime in the record, otherwise it's
// been touched locally and the "disk" mtime is actually correct.

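// As a concrete, hypothetical sequence (an illustrative sketch, not part of
// the stored record format):
//
//	repo.UpdateMtime("a.txt", diskMtime, wantedMtime)
//	v := repo.GetMtime("a.txt", diskMtime)  // v == wantedMtime; the argument matches the stored "disk" mtime
//	v = repo.GetMtime("a.txt", laterMtime)  // v == laterMtime; the file was touched since, so the record is ignored
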
type VirtualMtimeRepo struct {
	ns *NamespacedKV
}

func NewVirtualMtimeRepo(ldb *leveldb.DB, folder string) *VirtualMtimeRepo {
	prefix := string(KeyTypeVirtualMtime) + folder

	return &VirtualMtimeRepo{
		ns: NewNamespacedKV(ldb, prefix),
	}
}

func (r *VirtualMtimeRepo) UpdateMtime(path string, diskMtime, actualMtime time.Time) {
	if debug {
		l.Debugf("virtual mtime: storing values for path:%s disk:%v actual:%v", path, diskMtime, actualMtime)
	}

	diskBytes, _ := diskMtime.MarshalBinary()
	actualBytes, _ := actualMtime.MarshalBinary()

	data := append(diskBytes, actualBytes...)

	r.ns.PutBytes(path, data)
}

func (r *VirtualMtimeRepo) GetMtime(path string, diskMtime time.Time) time.Time {
	data, exists := r.ns.Bytes(path)
	if !exists {
		// Absence of a debug print is significant enough in itself here
		return diskMtime
	}

	var mtime time.Time
	if err := mtime.UnmarshalBinary(data[:len(data)/2]); err != nil {
		panic(fmt.Sprintf("Can't unmarshal stored mtime at path %s: %v", path, err))
	}

	if mtime.Equal(diskMtime) {
		if err := mtime.UnmarshalBinary(data[len(data)/2:]); err != nil {
			panic(fmt.Sprintf("Can't unmarshal stored mtime at path %s: %v", path, err))
		}

		if debug {
			l.Debugf("virtual mtime: return %v instead of %v for path: %s", mtime, diskMtime, path)
		}
		return mtime
	}

	if debug {
		l.Debugf("virtual mtime: record exists, but mismatch inDisk: %v dbDisk: %v for path: %s", diskMtime, mtime, path)
	}
	return diskMtime
}

func (r *VirtualMtimeRepo) DeleteMtime(path string) {
	r.ns.Delete(path)
}

func (r *VirtualMtimeRepo) Drop() {
	r.ns.Reset()
}
80
lib/db/virtualmtime_test.go
Normal file
80
lib/db/virtualmtime_test.go
Normal file
@@ -0,0 +1,80 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package db

import (
	"testing"
	"time"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func TestVirtualMtimeRepo(t *testing.T) {
	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	// A few repos so we can ensure they don't pollute each other
	repo1 := NewVirtualMtimeRepo(ldb, "folder1")
	repo2 := NewVirtualMtimeRepo(ldb, "folder2")

	// Since GetMtime() returns its argument if the key isn't found or is outdated, we need a dummy to test with.
	dummyTime := time.Date(2001, time.February, 3, 4, 5, 6, 0, time.UTC)

	// Some times to test with
	time1 := time.Date(2001, time.February, 3, 4, 5, 7, 0, time.UTC)
	time2 := time.Date(2010, time.February, 3, 4, 5, 6, 0, time.UTC)

	file1 := "file1.txt"

	// Files are not present at the start

	if v := repo1.GetMtime(file1, dummyTime); !v.Equal(dummyTime) {
		t.Errorf("Mtime should be missing (%v) from repo 1 but it's %v", dummyTime, v)
	}

	if v := repo2.GetMtime(file1, dummyTime); !v.Equal(dummyTime) {
		t.Errorf("Mtime should be missing (%v) from repo 2 but it's %v", dummyTime, v)
	}

	repo1.UpdateMtime(file1, time1, time2)

	// Now it should return time2 only when time1 is passed as the argument

	if v := repo1.GetMtime(file1, time1); !v.Equal(time2) {
		t.Errorf("Mtime should be %v for disk time %v but we got %v", time2, time1, v)
	}

	if v := repo1.GetMtime(file1, dummyTime); !v.Equal(dummyTime) {
		t.Errorf("Mtime should be %v for disk time %v but we got %v", dummyTime, dummyTime, v)
	}

	// repo2 shouldn't know about this file

	if v := repo2.GetMtime(file1, time1); !v.Equal(time1) {
		t.Errorf("Mtime should be %v for disk time %v in repo 2 but we got %v", time1, time1, v)
	}

	repo1.DeleteMtime(file1)

	// Now it should be gone

	if v := repo1.GetMtime(file1, time1); !v.Equal(time1) {
		t.Errorf("Mtime should be %v for disk time %v but we got %v", time1, time1, v)
	}

	// Try again but with Drop()

	repo1.UpdateMtime(file1, time1, time2)
	repo1.Drop()

	if v := repo1.GetMtime(file1, time1); !v.Equal(time1) {
		t.Errorf("Mtime should be %v for disk time %v but we got %v", time1, time1, v)
	}
}