Use LevelDB storage backend
files/cmd/pidx/main.go (Normal file, 48 lines)
@@ -0,0 +1,48 @@
package main

import (
	"flag"
	"fmt"
	"log"
	"os"

	"github.com/calmh/syncthing/files"
	"github.com/calmh/syncthing/protocol"
	"github.com/calmh/syncthing/scanner"
	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	log.SetFlags(0)
	log.SetOutput(os.Stdout)

	repo := flag.String("repo", "default", "Repository ID")
	node := flag.String("node", "", "Node ID (blank for global)")
	flag.Parse()

	db, err := leveldb.OpenFile(flag.Arg(0), nil)
	if err != nil {
		log.Fatal(err)
	}

	fs := files.NewSet(*repo, db)

	if *node == "" {
		log.Printf("*** Global index for repo %q", *repo)
		fs.WithGlobal(func(f scanner.File) bool {
			fmt.Println(f)
			fmt.Println("\t", fs.Availability(f.Name))
			return true
		})
	} else {
		n, err := protocol.NodeIDFromString(*node)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("*** Have index for repo %q node %q", *repo, n)
		fs.WithHave(n, func(f scanner.File) bool {
			fmt.Println(f)
			return true
		})
	}
}
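The WithGlobal and WithHave iterators used above drive the callback until it returns false, so a caller can stop early instead of materializing a full list. A minimal sketch of that pattern, under the same imports as main.go; the firstN helper is hypothetical and not part of this commit:

// firstN is a hypothetical helper: collect at most n files from the global
// index of fs. Returning false from the callback stops the iteration early.
func firstN(fs *files.Set, n int) []scanner.File {
	var out []scanner.File
	fs.WithGlobal(func(f scanner.File) bool {
		if len(out) >= n {
			return false
		}
		out = append(out, f)
		return len(out) < n
	})
	return out
}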
files/leveldb.go (Normal file, 563 lines)
@@ -0,0 +1,563 @@
package files

import (
	"bytes"
	"sort"

	"github.com/calmh/syncthing/lamport"
	"github.com/calmh/syncthing/protocol"
	"github.com/calmh/syncthing/scanner"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	keyTypeNode = iota
	keyTypeGlobal
)

type fileVersion struct {
	version uint64
	node    []byte
}

type versionList struct {
	versions []fileVersion
}

type fileList []scanner.File

func (l fileList) Len() int {
	return len(l)
}

func (l fileList) Swap(a, b int) {
	l[a], l[b] = l[b], l[a]
}

func (l fileList) Less(a, b int) bool {
	return l[a].Name < l[b].Name
}

type dbReader interface {
	Get([]byte, *opt.ReadOptions) ([]byte, error)
}

type dbWriter interface {
	Put([]byte, []byte)
	Delete([]byte)
}

/*

keyTypeNode (1 byte)
	repository (64 bytes)
		node (32 bytes)
			name (variable size)
				|
				scanner.File

keyTypeGlobal (1 byte)
	repository (64 bytes)
		name (variable size)
			|
			[]fileVersion (sorted)

*/
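As a concrete reading of the layout above: with a hypothetical repository name "default", a 32-byte node ID and the file name "foo/bar", a per-node key is 1+64+32+7 = 104 bytes, and the file name is whatever follows the fixed 97-byte prefix, which is exactly what nodeKeyName below slices off. A standalone sketch using only the standard library:

package main

import "fmt"

func main() {
	repo := []byte("default") // stored left-aligned in a fixed 64-byte field
	node := make([]byte, 32)  // 32-byte node ID
	name := []byte("foo/bar") // variable-length file name

	k := make([]byte, 1+64+32+len(name))
	k[0] = 0 // keyTypeNode
	copy(k[1:], repo)
	copy(k[1+64:], node)
	copy(k[1+64+32:], name)

	fmt.Println(len(k))              // 104
	fmt.Println(string(k[1+64+32:])) // "foo/bar", the same slice nodeKeyName returns
}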

func nodeKey(repo, node, file []byte) []byte {
	k := make([]byte, 1+64+32+len(file))
	k[0] = keyTypeNode
	copy(k[1:], []byte(repo))
	copy(k[1+64:], node[:])
	copy(k[1+64+32:], []byte(file))
	return k
}

func globalKey(repo, file []byte) []byte {
	k := make([]byte, 1+64+len(file))
	k[0] = keyTypeGlobal
	copy(k[1:], []byte(repo))
	copy(k[1+64:], []byte(file))
	return k
}

func nodeKeyName(key []byte) []byte {
	return key[1+64+32:]
}

func globalKeyName(key []byte) []byte {
	return key[1+64:]
}

type deletionHandler func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) bool

type fileIterator func(f scanner.File) bool

func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []scanner.File, deleteFn deletionHandler) bool {
	sort.Sort(fileList(fs)) // sort list on name, same as on disk

	start := nodeKey(repo, node, nil)                            // before all repo/node files
	limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files

	batch := new(leveldb.Batch)
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	defer snap.Release()
	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	moreDb := dbi.Next()
	fsi := 0
	changed := false

	for {
		var newName, oldName []byte
		moreFs := fsi < len(fs)

		if !moreDb && !moreFs {
			break
		}

		if !moreFs && deleteFn == nil {
			// We don't have any more updated files to process and deletion
			// has not been requested, so we can exit early
			break
		}

		if moreFs {
			newName = []byte(fs[fsi].Name)
		}

		if moreDb {
			oldName = nodeKeyName(dbi.Key())
		}

		cmp := bytes.Compare(newName, oldName)

		if debug {
			l.Debugf("generic replace; repo=%q node=%x moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", repo, node, moreFs, moreDb, cmp, newName, oldName)
		}

		switch {
		case moreFs && (!moreDb || cmp == -1):
			changed = true
			// Disk is missing this file. Insert it.
			ldbInsert(batch, repo, node, newName, fs[fsi])
			ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
			fsi++

		case cmp == 0:
			// File exists on both sides - compare versions.
			var ef scanner.File
			ef.UnmarshalXDR(dbi.Value())
			if fs[fsi].Version > ef.Version {
				ldbInsert(batch, repo, node, newName, fs[fsi])
				ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
				changed = true
			}
			// Iterate both sides.
			fsi++
			moreDb = dbi.Next()

		case moreDb && (!moreFs || cmp == 1):
			if deleteFn != nil {
				if deleteFn(snap, batch, repo, node, oldName, dbi) {
					changed = true
				}
			}
			moreDb = dbi.Next()
		}
	}

	err = db.Write(batch, nil)
	if err != nil {
		panic(err)
	}

	return changed
}

func ldbReplace(db *leveldb.DB, repo, node []byte, fs []scanner.File) bool {
	return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) bool {
		// Disk has files that we are missing. Remove it.
		if debug {
			l.Debugf("delete; repo=%q node=%x name=%q", repo, node, name)
		}
		batch.Delete(dbi.Key())
		ldbRemoveFromGlobal(db, batch, repo, node, name)
		return true
	})
}

func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []scanner.File) bool {
	return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) bool {
		var f scanner.File
		err := f.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}
		if !protocol.IsDeleted(f.Flags) {
			if debug {
				l.Debugf("mark deleted; repo=%q node=%x name=%q", repo, node, name)
			}
			f.Blocks = nil
			f.Version = lamport.Default.Tick(f.Version)
			f.Flags |= protocol.FlagDeleted
			batch.Put(dbi.Key(), f.MarshalXDR())
			ldbUpdateGlobal(db, batch, repo, node, nodeKeyName(dbi.Key()), f.Version)
			return true
		}
		return false
	})
}

func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []scanner.File) bool {
	batch := new(leveldb.Batch)
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	defer snap.Release()

	for _, f := range fs {
		name := []byte(f.Name)
		fk := nodeKey(repo, node, name)
		bs, err := snap.Get(fk, nil)
		if err == leveldb.ErrNotFound {
			ldbInsert(batch, repo, node, name, f)
			ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
			continue
		}

		var ef scanner.File
		err = ef.UnmarshalXDR(bs)
		if err != nil {
			panic(err)
		}
		if ef.Version != f.Version {
			ldbInsert(batch, repo, node, name, f)
			ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
		}
	}

	err = db.Write(batch, nil)
	if err != nil {
		panic(err)
	}

	return true
}

func ldbInsert(batch dbWriter, repo, node, name []byte, file scanner.File) {
	if debug {
		l.Debugf("insert; repo=%q node=%x %v", repo, node, file)
	}

	nk := nodeKey(repo, node, name)
	batch.Put(nk, file.MarshalXDR())
}

// ldbUpdateGlobal adds this node+version to the version list for the given
// file. If the node is already present in the list, the version is updated.
// If the file does not have an entry in the global list, it is created.
func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, version uint64) bool {
	if debug {
		l.Debugf("update global; repo=%q node=%x file=%q version=%d", repo, node, file, version)
	}
	gk := globalKey(repo, file)
	svl, err := db.Get(gk, nil)
	if err != nil && err != leveldb.ErrNotFound {
		panic(err)
	}

	var fl versionList
	nv := fileVersion{
		node:    node,
		version: version,
	}
	if svl != nil {
		err = fl.UnmarshalXDR(svl)
		if err != nil {
			panic(err)
		}

		for i := range fl.versions {
			if bytes.Compare(fl.versions[i].node, node) == 0 {
				if fl.versions[i].version == version {
					// No need to do anything
					return false
				}
				fl.versions = append(fl.versions[:i], fl.versions[i+1:]...)
				break
			}
		}
	}

	for i := range fl.versions {
		if fl.versions[i].version <= version {
			t := append(fl.versions, fileVersion{})
			copy(t[i+1:], t[i:])
			t[i] = nv
			fl.versions = t
			goto done
		}
	}

	fl.versions = append(fl.versions, nv)

done:
	batch.Put(gk, fl.MarshalXDR())

	return true
}
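The insertion loop above keeps the version list sorted in descending version order, so versions[0] is always the newest entry; ldbGetGlobal, ldbWithGlobal and ldbAvailability below all rely on that invariant. A minimal standalone sketch of just the ordering rule, with the per-node bookkeeping stripped out (insertSorted is an illustration, not code from this commit):

package main

import "fmt"

// insertSorted mimics the ordering rule in ldbUpdateGlobal: the new version is
// placed before the first entry that is <= it, so the slice stays sorted in
// descending order and index 0 is always the newest ("global") version.
func insertSorted(versions []uint64, v uint64) []uint64 {
	for i := range versions {
		if versions[i] <= v {
			versions = append(versions, 0)
			copy(versions[i+1:], versions[i:])
			versions[i] = v
			return versions
		}
	}
	return append(versions, v)
}

func main() {
	var vl []uint64
	for _, v := range []uint64{1000, 1002, 1001} {
		vl = insertSorted(vl, v)
	}
	fmt.Println(vl) // [1002 1001 1000]
}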

// ldbRemoveFromGlobal removes the node from the global version list for the
// given file. If the version list is empty after this, the file entry is
// removed entirely.
func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
	if debug {
		l.Debugf("remove from global; repo=%q node=%x file=%q", repo, node, file)
	}

	gk := globalKey(repo, file)
	svl, err := db.Get(gk, nil)
	if err != nil {
		panic(err)
	}

	var fl versionList
	err = fl.UnmarshalXDR(svl)
	if err != nil {
		panic(err)
	}

	for i := range fl.versions {
		if bytes.Compare(fl.versions[i].node, node) == 0 {
			fl.versions = append(fl.versions[:i], fl.versions[i+1:]...)
			break
		}
	}

	if len(fl.versions) == 0 {
		batch.Delete(gk)
	} else {
		batch.Put(gk, fl.MarshalXDR())
	}
}

func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) {
	start := nodeKey(repo, node, nil)                            // before all repo/node files
	limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	defer snap.Release()
	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	for dbi.Next() {
		var f scanner.File
		err := f.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}
		if cont := fn(f); !cont {
			return
		}
	}
}

func ldbGet(db *leveldb.DB, repo, node, file []byte) scanner.File {
	nk := nodeKey(repo, node, file)
	bs, err := db.Get(nk, nil)
	if err == leveldb.ErrNotFound {
		return scanner.File{}
	}
	if err != nil {
		panic(err)
	}

	var f scanner.File
	err = f.UnmarshalXDR(bs)
	if err != nil {
		panic(err)
	}
	return f
}

func ldbGetGlobal(db *leveldb.DB, repo, file []byte) scanner.File {
	k := globalKey(repo, file)
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	defer snap.Release()

	bs, err := snap.Get(k, nil)
	if err == leveldb.ErrNotFound {
		return scanner.File{}
	}
	if err != nil {
		panic(err)
	}

	var vl versionList
	err = vl.UnmarshalXDR(bs)
	if err != nil {
		panic(err)
	}
	if len(vl.versions) == 0 {
		l.Debugln(k)
		panic("no versions?")
	}

	k = nodeKey(repo, vl.versions[0].node, file)
	bs, err = snap.Get(k, nil)
	if err != nil {
		panic(err)
	}

	var f scanner.File
	err = f.UnmarshalXDR(bs)
	if err != nil {
		panic(err)
	}
	return f
}

func ldbWithGlobal(db *leveldb.DB, repo []byte, fn fileIterator) {
	start := globalKey(repo, nil)
	limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	defer snap.Release()
	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	for dbi.Next() {
		var vl versionList
		err := vl.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}
		if len(vl.versions) == 0 {
			l.Debugln(dbi.Key())
			panic("no versions?")
		}
		fk := nodeKey(repo, vl.versions[0].node, globalKeyName(dbi.Key()))
		bs, err := snap.Get(fk, nil)
		if err != nil {
			panic(err)
		}

		var f scanner.File
		err = f.UnmarshalXDR(bs)
		if err != nil {
			panic(err)
		}

		if cont := fn(f); !cont {
			return
		}
	}
}

func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
	k := globalKey(repo, file)
	bs, err := db.Get(k, nil)
	if err == leveldb.ErrNotFound {
		return nil
	}
	if err != nil {
		panic(err)
	}

	var vl versionList
	err = vl.UnmarshalXDR(bs)
	if err != nil {
		panic(err)
	}

	var nodes []protocol.NodeID
	for _, v := range vl.versions {
		if v.version != vl.versions[0].version {
			break
		}
		var n protocol.NodeID
		copy(n[:], v.node)
		nodes = append(nodes, n)
	}

	return nodes
}

func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
	start := globalKey(repo, nil)
	limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	defer snap.Release()
	dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
	defer dbi.Release()

	for dbi.Next() {
		var vl versionList
		err := vl.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}
		if len(vl.versions) == 0 {
			l.Debugln(dbi.Key())
			panic("no versions?")
		}

		have := false // If we have the file, any version
		need := false // If we have a lower version of the file
		var haveVersion uint64
		for _, v := range vl.versions {
			if bytes.Compare(v.node, node) == 0 {
				have = true
				haveVersion = v.version
				need = v.version < vl.versions[0].version
				break
			}
		}

		if need || !have {
			name := globalKeyName(dbi.Key())
			if debug {
				l.Debugf("need repo=%q node=%x name=%q need=%v have=%v haveV=%d globalV=%d", repo, node, name, need, have, haveVersion, vl.versions[0].version)
			}
			fk := nodeKey(repo, vl.versions[0].node, name)
			bs, err := snap.Get(fk, nil)
			if err != nil {
				panic(err)
			}

			var gf scanner.File
			err = gf.UnmarshalXDR(bs)
			if err != nil {
				panic(err)
			}

			if protocol.IsDeleted(gf.Flags) && !have {
				// We don't need deleted files that we don't have
				continue
			}

			if cont := fn(gf); !cont {
				return
			}
		}
	}
}
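Stripped of the database plumbing, the decision in ldbWithNeed is a small predicate: report a file when this node has an older version than the global one, or does not have it at all, except that a globally deleted file the node never had is skipped. A rough standalone restatement of that rule; needsFile is hypothetical and uses haveVersion == 0 to mean "not present", where the real loop tracks a separate have flag:

package main

import "fmt"

// needsFile restates the test in ldbWithNeed under the assumption that a zero
// haveVersion means the node has no record of the file at all.
func needsFile(haveVersion, globalVersion uint64, globalDeleted bool) bool {
	have := haveVersion != 0
	need := have && haveVersion < globalVersion
	if globalDeleted && !have {
		return false // no point fetching a deletion for a file we never had
	}
	return need || !have
}

func main() {
	fmt.Println(needsFile(1000, 1002, false)) // true: ours is older
	fmt.Println(needsFile(0, 1002, false))    // true: we do not have it
	fmt.Println(needsFile(0, 1002, true))     // false: deleted and never had
	fmt.Println(needsFile(1002, 1002, false)) // false: up to date
}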
files/leveldb_xdr.go (Normal file, 83 lines)
@@ -0,0 +1,83 @@
package files

import (
	"bytes"
	"io"

	"github.com/calmh/syncthing/xdr"
)

func (o fileVersion) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}

func (o fileVersion) MarshalXDR() []byte {
	var aw = make(xdr.AppendWriter, 0, 128)
	var xw = xdr.NewWriter(&aw)
	o.encodeXDR(xw)
	return []byte(aw)
}

func (o fileVersion) encodeXDR(xw *xdr.Writer) (int, error) {
	xw.WriteUint64(o.version)
	xw.WriteBytes(o.node)
	return xw.Tot(), xw.Error()
}

func (o *fileVersion) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}

func (o *fileVersion) UnmarshalXDR(bs []byte) error {
	var br = bytes.NewReader(bs)
	var xr = xdr.NewReader(br)
	return o.decodeXDR(xr)
}

func (o *fileVersion) decodeXDR(xr *xdr.Reader) error {
	o.version = xr.ReadUint64()
	o.node = xr.ReadBytes()
	return xr.Error()
}

func (o versionList) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}

func (o versionList) MarshalXDR() []byte {
	var aw = make(xdr.AppendWriter, 0, 128)
	var xw = xdr.NewWriter(&aw)
	o.encodeXDR(xw)
	return []byte(aw)
}

func (o versionList) encodeXDR(xw *xdr.Writer) (int, error) {
	xw.WriteUint32(uint32(len(o.versions)))
	for i := range o.versions {
		o.versions[i].encodeXDR(xw)
	}
	return xw.Tot(), xw.Error()
}

func (o *versionList) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}

func (o *versionList) UnmarshalXDR(bs []byte) error {
	var br = bytes.NewReader(bs)
	var xr = xdr.NewReader(br)
	return o.decodeXDR(xr)
}

func (o *versionList) decodeXDR(xr *xdr.Reader) error {
	_versionsSize := int(xr.ReadUint32())
	o.versions = make([]fileVersion, _versionsSize)
	for i := range o.versions {
		(&o.versions[i]).decodeXDR(xr)
	}
	return xr.Error()
}
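Assuming the calmh/syncthing/xdr package follows standard XDR framing (big-endian fixed-width integers, byte strings as a 4-byte length plus data padded to a multiple of four), a marshalled fileVersion for a 32-byte node ID would be 8+4+32 = 44 bytes. A standalone sketch of that assumed layout, not of the xdr package itself:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeFileVersion sketches the wire layout that fileVersion.encodeXDR is
// assumed to produce under standard XDR rules: an 8-byte big-endian version,
// a 4-byte length, then the node bytes padded to a multiple of four.
func encodeFileVersion(version uint64, node []byte) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, version)
	binary.Write(&buf, binary.BigEndian, uint32(len(node)))
	buf.Write(node)
	if pad := len(node) % 4; pad != 0 {
		buf.Write(make([]byte, 4-pad))
	}
	return buf.Bytes()
}

func main() {
	node := make([]byte, 32) // a 32-byte node ID
	fmt.Println(len(encodeFileVersion(1000, node))) // 44 = 8 + 4 + 32
}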
files/set.go (315 lines)
@@ -8,10 +8,9 @@ package files
import (
	"sync"

	"github.com/calmh/syncthing/cid"
	"github.com/calmh/syncthing/lamport"
	"github.com/calmh/syncthing/protocol"
	"github.com/calmh/syncthing/scanner"
	"github.com/syndtr/goleveldb/leveldb"
)

type fileRecord struct {
@@ -23,297 +22,97 @@ type fileRecord struct {
type bitset uint64

type Set struct {
	sync.Mutex
	files map[key]fileRecord
	remoteKey [64]map[string]key
	changes [64]uint64
	globalAvailability map[string]bitset
	globalKey map[string]key
	changes map[protocol.NodeID]uint64
	mutex sync.RWMutex
	repo string
	db *leveldb.DB
}

func NewSet() *Set {
	var m = Set{
		files: make(map[key]fileRecord),
		globalAvailability: make(map[string]bitset),
		globalKey: make(map[string]key),
func NewSet(repo string, db *leveldb.DB) *Set {
	var s = Set{
		changes: make(map[protocol.NodeID]uint64),
		repo: repo,
		db: db,
	}
	return &m
	return &s
}

func (m *Set) Replace(id uint, fs []scanner.File) {
func (s *Set) Replace(node protocol.NodeID, fs []scanner.File) {
	if debug {
		l.Debugf("Replace(%d, [%d])", id, len(fs))
		l.Debugf("%s Replace(%v, [%d])", s.repo, node, len(fs))
	}
	if id > 63 {
		panic("Connection ID must be in the range 0 - 63 inclusive")
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if ldbReplace(s.db, []byte(s.repo), node[:], fs) {
		s.changes[node]++
	}

	m.Lock()
	if len(fs) == 0 || !m.equals(id, fs) {
		m.changes[id]++
		m.replace(id, fs)
	}
	m.Unlock()
}

func (m *Set) ReplaceWithDelete(id uint, fs []scanner.File) {
func (s *Set) ReplaceWithDelete(node protocol.NodeID, fs []scanner.File) {
	if debug {
		l.Debugf("ReplaceWithDelete(%d, [%d])", id, len(fs))
		l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.repo, node, len(fs))
	}
	if id > 63 {
		panic("Connection ID must be in the range 0 - 63 inclusive")
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if ldbReplaceWithDelete(s.db, []byte(s.repo), node[:], fs) {
		s.changes[node]++
	}

	m.Lock()
	if len(fs) == 0 || !m.equals(id, fs) {
		m.changes[id]++

		var nf = make(map[string]key, len(fs))
		for _, f := range fs {
			nf[f.Name] = keyFor(f)
		}

		// For previously existing files not in the list, add them to the list
		// with the relevant delete flags etc set. Previously existing files
		// with the delete bit already set are not modified.

		for _, ck := range m.remoteKey[cid.LocalID] {
			if _, ok := nf[ck.Name]; !ok {
				cf := m.files[ck].File
				if !protocol.IsDeleted(cf.Flags) {
					cf.Flags |= protocol.FlagDeleted
					cf.Blocks = nil
					cf.Size = 0
					cf.Version = lamport.Default.Tick(cf.Version)
				}
				fs = append(fs, cf)
				if debug {
					l.Debugln("deleted:", ck.Name)
				}
			}
		}

		m.replace(id, fs)
	}
	m.Unlock()
}

func (m *Set) Update(id uint, fs []scanner.File) {
func (s *Set) Update(node protocol.NodeID, fs []scanner.File) {
	if debug {
		l.Debugf("Update(%d, [%d])", id, len(fs))
		l.Debugf("%s Update(%v, [%d])", s.repo, node, len(fs))
	}
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if ldbUpdate(s.db, []byte(s.repo), node[:], fs) {
		s.changes[node]++
	}
	m.Lock()
	m.update(id, fs)
	m.changes[id]++
	m.Unlock()
}

func (m *Set) Need(id uint) []scanner.File {
func (s *Set) WithNeed(node protocol.NodeID, fn fileIterator) {
	if debug {
		l.Debugf("Need(%d)", id)
		l.Debugf("%s Need(%v)", s.repo, node)
	}
	m.Lock()
	var fs = make([]scanner.File, 0, len(m.globalKey)/2) // Just a guess, but avoids too many reallocations
	rkID := m.remoteKey[id]
	for gk, gf := range m.files {
		if !gf.Global || gf.File.Suppressed {
			continue
		}

		if rk, ok := rkID[gk.Name]; gk.newerThan(rk) {
			if protocol.IsDeleted(gf.File.Flags) && (!ok || protocol.IsDeleted(m.files[rk].File.Flags)) {
				// We don't need to delete files we don't have or that are already deleted
				continue
			}

			fs = append(fs, gf.File)
		}
	}
	m.Unlock()
	return fs
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	ldbWithNeed(s.db, []byte(s.repo), node[:], fn)
}

func (m *Set) Have(id uint) []scanner.File {
func (s *Set) WithHave(node protocol.NodeID, fn fileIterator) {
	if debug {
		l.Debugf("Have(%d)", id)
		l.Debugf("%s WithHave(%v)", s.repo, node)
	}
	var fs = make([]scanner.File, 0, len(m.remoteKey[id]))
	m.Lock()
	for _, rk := range m.remoteKey[id] {
		fs = append(fs, m.files[rk].File)
	}
	m.Unlock()
	return fs
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	ldbWithHave(s.db, []byte(s.repo), node[:], fn)
}

func (m *Set) Global() []scanner.File {
func (s *Set) WithGlobal(fn fileIterator) {
	if debug {
		l.Debugf("Global()")
		l.Debugf("%s WithGlobal()", s.repo)
	}
	m.Lock()
	var fs = make([]scanner.File, 0, len(m.globalKey))
	for _, file := range m.files {
		if file.Global {
			fs = append(fs, file.File)
		}
	}
	m.Unlock()
	return fs
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	ldbWithGlobal(s.db, []byte(s.repo), fn)
}

func (m *Set) Get(id uint, file string) scanner.File {
	m.Lock()
	defer m.Unlock()
	if debug {
		l.Debugf("Get(%d, %q)", id, file)
	}
	return m.files[m.remoteKey[id][file]].File
func (s *Set) Get(node protocol.NodeID, file string) scanner.File {
	return ldbGet(s.db, []byte(s.repo), node[:], []byte(file))
}

func (m *Set) GetGlobal(file string) scanner.File {
	m.Lock()
	defer m.Unlock()
	if debug {
		l.Debugf("GetGlobal(%q)", file)
	}
	return m.files[m.globalKey[file]].File
func (s *Set) GetGlobal(file string) scanner.File {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	return ldbGetGlobal(s.db, []byte(s.repo), []byte(file))
}

func (m *Set) Availability(name string) bitset {
	m.Lock()
	defer m.Unlock()
	av := m.globalAvailability[name]
	if debug {
		l.Debugf("Availability(%q) = %0x", name, av)
	}
	return av
func (s *Set) Availability(file string) []protocol.NodeID {
	return ldbAvailability(s.db, []byte(s.repo), []byte(file))
}

func (m *Set) Changes(id uint) uint64 {
	m.Lock()
	defer m.Unlock()
	if debug {
		l.Debugf("Changes(%d)", id)
	}
	return m.changes[id]
}

func (m *Set) equals(id uint, fs []scanner.File) bool {
	curWithoutDeleted := make(map[string]key)
	for _, k := range m.remoteKey[id] {
		f := m.files[k].File
		if !protocol.IsDeleted(f.Flags) {
			curWithoutDeleted[f.Name] = k
		}
	}
	if len(curWithoutDeleted) != len(fs) {
		return false
	}
	for _, f := range fs {
		if curWithoutDeleted[f.Name] != keyFor(f) {
			return false
		}
	}
	return true
}

func (m *Set) update(cid uint, fs []scanner.File) {
	remFiles := m.remoteKey[cid]
	if remFiles == nil {
		l.Fatalln("update before replace for cid", cid)
	}
	for _, f := range fs {
		n := f.Name
		fk := keyFor(f)

		if ck, ok := remFiles[n]; ok && ck == fk {
			// The remote already has exactly this file, skip it
			continue
		}

		remFiles[n] = fk

		// Keep the block list or increment the usage
		if br, ok := m.files[fk]; !ok {
			m.files[fk] = fileRecord{
				Usage: 1,
				File: f,
			}
		} else {
			br.Usage++
			m.files[fk] = br
		}

		// Update global view
		gk, ok := m.globalKey[n]
		switch {
		case ok && fk == gk:
			av := m.globalAvailability[n]
			av |= 1 << cid
			m.globalAvailability[n] = av
		case fk.newerThan(gk):
			if ok {
				f := m.files[gk]
				f.Global = false
				m.files[gk] = f
			}
			f := m.files[fk]
			f.Global = true
			m.files[fk] = f
			m.globalKey[n] = fk
			m.globalAvailability[n] = 1 << cid
		}
	}
}

func (m *Set) replace(cid uint, fs []scanner.File) {
	// Decrement usage for all files belonging to this remote, and remove
	// those that are no longer needed.
	for _, fk := range m.remoteKey[cid] {
		br, ok := m.files[fk]
		switch {
		case ok && br.Usage == 1:
			delete(m.files, fk)
		case ok && br.Usage > 1:
			br.Usage--
			m.files[fk] = br
		}
	}

	// Clear existing remote remoteKey
	m.remoteKey[cid] = make(map[string]key)

	// Recalculate global based on all remaining remoteKey
	for n := range m.globalKey {
		var nk key // newest key
		var na bitset // newest availability

		for i, rem := range m.remoteKey {
			if rk, ok := rem[n]; ok {
				switch {
				case rk == nk:
					na |= 1 << uint(i)
				case rk.newerThan(nk):
					nk = rk
					na = 1 << uint(i)
				}
			}
		}

		if na != 0 {
			// Someone had the file
			f := m.files[nk]
			f.Global = true
			m.files[nk] = f
			m.globalKey[n] = nk
			m.globalAvailability[n] = na
		} else {
			// Noone had the file
			delete(m.globalKey, n)
			delete(m.globalAvailability, n)
		}
	}

	// Add new remote remoteKey to the mix
	m.update(cid, fs)
func (s *Set) Changes(node protocol.NodeID) uint64 {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	return s.changes[node]
}
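The new Set is constructed around a repository name and an open LevelDB handle, and the former slice-returning accessors become callback iterators. A minimal usage sketch with an in-memory database, mirroring the helpers in the tests below (the repository name "default" is arbitrary):

package main

import (
	"fmt"

	"github.com/calmh/syncthing/files"
	"github.com/calmh/syncthing/protocol"
	"github.com/calmh/syncthing/scanner"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	// An in-memory database is enough for experiments and tests.
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		panic(err)
	}

	s := files.NewSet("default", db)
	s.ReplaceWithDelete(protocol.LocalNodeID, []scanner.File{
		{Name: "a", Version: 1000},
	})

	// Iterate instead of collecting: WithHave calls the function once per
	// file and stops if it returns false.
	s.WithHave(protocol.LocalNodeID, func(f scanner.File) bool {
		fmt.Println(f.Name, f.Version)
		return true
	})
}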
@@ -1,48 +0,0 @@
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.

//+build anal

package files

import (
	"crypto/md5"

	"github.com/calmh/syncthing/scanner"
)

type key struct {
	Name     string
	Version  uint64
	Modified int64
	Hash     [md5.Size]byte
}

func keyFor(f scanner.File) key {
	h := md5.New()
	for _, b := range f.Blocks {
		h.Write(b.Hash)
	}
	return key{
		Name:     f.Name,
		Version:  f.Version,
		Modified: f.Modified,
		Hash:     md5.Sum(nil),
	}
}

func (a key) newerThan(b key) bool {
	if a.Version != b.Version {
		return a.Version > b.Version
	}
	if a.Modified != b.Modified {
		return a.Modified > b.Modified
	}
	for i := 0; i < md5.Size; i++ {
		if a.Hash[i] != b.Hash[i] {
			return a.Hash[i] > b.Hash[i]
		}
	}
	return false
}
@@ -1,25 +0,0 @@
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.

//+build !anal

package files

import "github.com/calmh/syncthing/scanner"

type key struct {
	Name    string
	Version uint64
}

func keyFor(f scanner.File) key {
	return key{
		Name:    f.Name,
		Version: f.Version,
	}
}

func (a key) newerThan(b key) bool {
	return a.Version > b.Version
}
@@ -6,17 +6,64 @@ package files_test

import (
	"fmt"
	"reflect"
	"sort"
	"testing"

	"github.com/calmh/syncthing/cid"
	"github.com/calmh/syncthing/files"
	"github.com/calmh/syncthing/lamport"
	"github.com/calmh/syncthing/protocol"
	"github.com/calmh/syncthing/scanner"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

var remoteNode protocol.NodeID

func init() {
	remoteNode, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
}

func genBlocks(n int) []scanner.Block {
	b := make([]scanner.Block, n)
	for i := range b {
		h := make([]byte, 32)
		for j := range h {
			h[j] = byte(i + j)
		}
		b[i].Size = uint32(i)
		b[i].Offset = int64(i)
		b[i].Hash = h
	}
	return b
}

func globalList(s *files.Set) []scanner.File {
	var fs []scanner.File
	s.WithGlobal(func(f scanner.File) bool {
		fs = append(fs, f)
		return true
	})
	return fs
}

func haveList(s *files.Set, n protocol.NodeID) []scanner.File {
	var fs []scanner.File
	s.WithHave(n, func(f scanner.File) bool {
		fs = append(fs, f)
		return true
	})
	return fs
}

func needList(s *files.Set, n protocol.NodeID) []scanner.File {
	var fs []scanner.File
	s.WithNeed(n, func(f scanner.File) bool {
		fs = append(fs, f)
		return true
	})
	return fs
}

type fileList []scanner.File

func (l fileList) Len() int {
@@ -32,118 +79,157 @@ func (l fileList) Swap(a, b int) {
}

func TestGlobalSet(t *testing.T) {
	m := files.NewSet()
	lamport.Default = lamport.Clock{}

	local := []scanner.File{
		scanner.File{Name: "a", Version: 1000},
		scanner.File{Name: "b", Version: 1000},
		scanner.File{Name: "c", Version: 1000},
		scanner.File{Name: "d", Version: 1000},
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	m := files.NewSet("test", db)

	local0 := []scanner.File{
		scanner.File{Name: "a", Version: 1000, Blocks: genBlocks(1)},
		scanner.File{Name: "b", Version: 1000, Blocks: genBlocks(2)},
		scanner.File{Name: "c", Version: 1000, Blocks: genBlocks(3)},
		scanner.File{Name: "d", Version: 1000, Blocks: genBlocks(4)},
		scanner.File{Name: "z", Version: 1000, Blocks: genBlocks(8)},
	}
	local1 := []scanner.File{
		scanner.File{Name: "a", Version: 1000, Blocks: genBlocks(1)},
		scanner.File{Name: "b", Version: 1000, Blocks: genBlocks(2)},
		scanner.File{Name: "c", Version: 1000, Blocks: genBlocks(3)},
		scanner.File{Name: "d", Version: 1000, Blocks: genBlocks(4)},
	}
	localTot := []scanner.File{
		local0[0],
		local0[1],
		local0[2],
		local0[3],
		scanner.File{Name: "z", Version: 1001, Flags: protocol.FlagDeleted},
	}

	remote0 := []scanner.File{
		scanner.File{Name: "a", Version: 1000},
		scanner.File{Name: "c", Version: 1002},
		scanner.File{Name: "a", Version: 1000, Blocks: genBlocks(1)},
		scanner.File{Name: "b", Version: 1000, Blocks: genBlocks(2)},
		scanner.File{Name: "c", Version: 1002, Blocks: genBlocks(5)},
	}
	remote1 := []scanner.File{
		scanner.File{Name: "b", Version: 1001},
		scanner.File{Name: "e", Version: 1000},
		scanner.File{Name: "b", Version: 1001, Blocks: genBlocks(6)},
		scanner.File{Name: "e", Version: 1000, Blocks: genBlocks(7)},
	}
	remoteTot := []scanner.File{
		scanner.File{Name: "a", Version: 1000},
		scanner.File{Name: "b", Version: 1001},
		scanner.File{Name: "c", Version: 1002},
		scanner.File{Name: "e", Version: 1000},
		remote0[0],
		remote1[0],
		remote0[2],
		remote1[1],
	}

	expectedGlobal := []scanner.File{
		scanner.File{Name: "a", Version: 1000},
		scanner.File{Name: "b", Version: 1001},
		scanner.File{Name: "c", Version: 1002},
		scanner.File{Name: "d", Version: 1000},
		scanner.File{Name: "e", Version: 1000},
		remote0[0],
		remote1[0],
		remote0[2],
		localTot[3],
		remote1[1],
		localTot[4],
	}

	expectedLocalNeed := []scanner.File{
		scanner.File{Name: "b", Version: 1001},
		scanner.File{Name: "c", Version: 1002},
		scanner.File{Name: "e", Version: 1000},
		remote1[0],
		remote0[2],
		remote1[1],
	}

	expectedRemoteNeed := []scanner.File{
		scanner.File{Name: "d", Version: 1000},
		local0[3],
	}

	m.ReplaceWithDelete(cid.LocalID, local)
	m.Replace(1, remote0)
	m.Update(1, remote1)
	m.ReplaceWithDelete(protocol.LocalNodeID, local0)
	m.ReplaceWithDelete(protocol.LocalNodeID, local1)
	m.Replace(remoteNode, remote0)
	m.Update(remoteNode, remote1)

	g := m.Global()
	g := globalList(m)
	sort.Sort(fileList(g))

	if !reflect.DeepEqual(g, expectedGlobal) {
	if fmt.Sprint(g) != fmt.Sprint(expectedGlobal) {
		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
	}

	h := m.Have(cid.LocalID)
	h := haveList(m, protocol.LocalNodeID)
	sort.Sort(fileList(h))

	if !reflect.DeepEqual(h, local) {
		t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, local)
	if fmt.Sprint(h) != fmt.Sprint(localTot) {
		t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, localTot)
	}

	h = m.Have(1)
	h = haveList(m, remoteNode)
	sort.Sort(fileList(h))

	if !reflect.DeepEqual(h, remoteTot) {
	if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
		t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, remoteTot)
	}

	n := m.Need(cid.LocalID)
	n := needList(m, protocol.LocalNodeID)
	sort.Sort(fileList(n))

	if !reflect.DeepEqual(n, expectedLocalNeed) {
	if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
		t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedLocalNeed)
	}

	n = m.Need(1)
	n = needList(m, remoteNode)
	sort.Sort(fileList(n))

	if !reflect.DeepEqual(n, expectedRemoteNeed) {
	if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
		t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
	}

	f := m.Get(cid.LocalID, "b")
	if !reflect.DeepEqual(f, local[1]) {
		t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, local[1])
	f := m.Get(protocol.LocalNodeID, "b")
	if fmt.Sprint(f) != fmt.Sprint(localTot[1]) {
		t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1])
	}

	f = m.Get(1, "b")
	if !reflect.DeepEqual(f, remote1[0]) {
	f = m.Get(remoteNode, "b")
	if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
		t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
	}

	f = m.GetGlobal("b")
	if !reflect.DeepEqual(f, remote1[0]) {
		t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
	if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
		t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, remote1[0])
	}

	a := int(m.Availability("a"))
	if av := 1<<0 + 1<<1; a != av {
	f = m.Get(protocol.LocalNodeID, "zz")
	if f.Name != "" {
		t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, scanner.File{})
	}

	f = m.GetGlobal("zz")
	if f.Name != "" {
		t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, scanner.File{})
	}

	av := []protocol.NodeID{protocol.LocalNodeID, remoteNode}
	a := m.Availability("a")
	if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) {
		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
	}
	a = int(m.Availability("b"))
	if av := 1 << 1; a != av {
		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
	a = m.Availability("b")
	if len(a) != 1 || a[0] != remoteNode {
		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteNode)
	}
	a = int(m.Availability("d"))
	if av := 1 << 0; a != av {
		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
	a = m.Availability("d")
	if len(a) != 1 || a[0] != protocol.LocalNodeID {
		t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalNodeID)
	}
}

func TestLocalDeleted(t *testing.T) {
	m := files.NewSet()
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}
	m := files.NewSet("test", db)
	lamport.Default = lamport.Clock{}

	local1 := []scanner.File{
@@ -154,22 +240,22 @@ func TestLocalDeleted(t *testing.T) {
		scanner.File{Name: "z", Version: 1000, Flags: protocol.FlagDirectory},
	}

	m.ReplaceWithDelete(cid.LocalID, local1)
	m.ReplaceWithDelete(protocol.LocalNodeID, local1)

	m.ReplaceWithDelete(cid.LocalID, []scanner.File{
	m.ReplaceWithDelete(protocol.LocalNodeID, []scanner.File{
		local1[0],
		// [1] removed
		local1[2],
		local1[3],
		local1[4],
	})
	m.ReplaceWithDelete(cid.LocalID, []scanner.File{
	m.ReplaceWithDelete(protocol.LocalNodeID, []scanner.File{
		local1[0],
		local1[2],
		// [3] removed
		local1[4],
	})
	m.ReplaceWithDelete(cid.LocalID, []scanner.File{
	m.ReplaceWithDelete(protocol.LocalNodeID, []scanner.File{
		local1[0],
		local1[2],
		// [4] removed
@@ -183,15 +269,15 @@ func TestLocalDeleted(t *testing.T) {
		scanner.File{Name: "z", Version: 1003, Flags: protocol.FlagDeleted | protocol.FlagDirectory},
	}

	g := m.Global()
	g := globalList(m)
	sort.Sort(fileList(g))
	sort.Sort(fileList(expectedGlobal1))

	if !reflect.DeepEqual(g, expectedGlobal1) {
	if fmt.Sprint(g) != fmt.Sprint(expectedGlobal1) {
		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal1)
	}

	m.ReplaceWithDelete(cid.LocalID, []scanner.File{
	m.ReplaceWithDelete(protocol.LocalNodeID, []scanner.File{
		local1[0],
		// [2] removed
	})
@@ -204,16 +290,21 @@ func TestLocalDeleted(t *testing.T) {
		scanner.File{Name: "z", Version: 1003, Flags: protocol.FlagDeleted | protocol.FlagDirectory},
	}

	g = m.Global()
	g = globalList(m)
	sort.Sort(fileList(g))
	sort.Sort(fileList(expectedGlobal2))

	if !reflect.DeepEqual(g, expectedGlobal2) {
	if fmt.Sprint(g) != fmt.Sprint(expectedGlobal2) {
		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal2)
	}
}

func Benchmark10kReplace(b *testing.B) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	var local []scanner.File
	for i := 0; i < 10000; i++ {
		local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
@@ -221,8 +312,8 @@ func Benchmark10kReplace(b *testing.B) {

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m := files.NewSet()
		m.ReplaceWithDelete(cid.LocalID, local)
		m := files.NewSet("test", db)
		m.ReplaceWithDelete(protocol.LocalNodeID, local)
	}
}

@@ -232,15 +323,20 @@ func Benchmark10kUpdateChg(b *testing.B) {
		remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
	}

	m := files.NewSet()
	m.Replace(1, remote)
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	m := files.NewSet("test", db)
	m.Replace(remoteNode, remote)

	var local []scanner.File
	for i := 0; i < 10000; i++ {
		local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
	}

	m.ReplaceWithDelete(cid.LocalID, local)
	m.ReplaceWithDelete(protocol.LocalNodeID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
@@ -249,7 +345,7 @@ func Benchmark10kUpdateChg(b *testing.B) {
			local[j].Version++
		}
		b.StartTimer()
		m.Update(cid.LocalID, local)
		m.Update(protocol.LocalNodeID, local)
	}
}

@@ -259,19 +355,23 @@ func Benchmark10kUpdateSme(b *testing.B) {
		remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
	}

	m := files.NewSet()
	m.Replace(1, remote)
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}
	m := files.NewSet("test", db)
	m.Replace(remoteNode, remote)

	var local []scanner.File
	for i := 0; i < 10000; i++ {
		local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
	}

	m.ReplaceWithDelete(cid.LocalID, local)
	m.ReplaceWithDelete(protocol.LocalNodeID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Update(cid.LocalID, local)
		m.Update(protocol.LocalNodeID, local)
	}
}

@@ -281,8 +381,13 @@ func Benchmark10kNeed2k(b *testing.B) {
		remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
	}

	m := files.NewSet()
	m.Replace(cid.LocalID+1, remote)
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	m := files.NewSet("test", db)
	m.Replace(remoteNode, remote)

	var local []scanner.File
	for i := 0; i < 8000; i++ {
@@ -292,25 +397,30 @@ func Benchmark10kNeed2k(b *testing.B) {
		local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 980})
	}

	m.ReplaceWithDelete(cid.LocalID, local)
	m.ReplaceWithDelete(protocol.LocalNodeID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fs := m.Need(cid.LocalID)
		fs := needList(m, protocol.LocalNodeID)
		if l := len(fs); l != 2000 {
			b.Errorf("wrong length %d != 2k", l)
		}
	}
}

func Benchmark10kHave(b *testing.B) {
func Benchmark10kHaveFullList(b *testing.B) {
	var remote []scanner.File
	for i := 0; i < 10000; i++ {
		remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
	}

	m := files.NewSet()
	m.Replace(cid.LocalID+1, remote)
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	m := files.NewSet("test", db)
	m.Replace(remoteNode, remote)

	var local []scanner.File
	for i := 0; i < 2000; i++ {
@@ -320,11 +430,11 @@ func Benchmark10kHave(b *testing.B) {
		local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 980})
	}

	m.ReplaceWithDelete(cid.LocalID, local)
	m.ReplaceWithDelete(protocol.LocalNodeID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fs := m.Have(cid.LocalID)
		fs := haveList(m, protocol.LocalNodeID)
		if l := len(fs); l != 10000 {
			b.Errorf("wrong length %d != 10k", l)
		}
@@ -337,8 +447,13 @@ func Benchmark10kGlobal(b *testing.B) {
		remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
	}

	m := files.NewSet()
	m.Replace(cid.LocalID+1, remote)
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		b.Fatal(err)
	}

	m := files.NewSet("test", db)
	m.Replace(remoteNode, remote)

	var local []scanner.File
	for i := 0; i < 2000; i++ {
@@ -348,11 +463,11 @@ func Benchmark10kGlobal(b *testing.B) {
		local = append(local, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 980})
	}

	m.ReplaceWithDelete(cid.LocalID, local)
	m.ReplaceWithDelete(protocol.LocalNodeID, local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fs := m.Global()
		fs := globalList(m)
		if l := len(fs); l != 10000 {
			b.Errorf("wrong length %d != 10k", l)
		}
@@ -360,7 +475,12 @@ func Benchmark10kGlobal(b *testing.B) {
}

func TestGlobalReset(t *testing.T) {
	m := files.NewSet()
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	m := files.NewSet("test", db)

	local := []scanner.File{
		scanner.File{Name: "a", Version: 1000},
@@ -376,27 +496,32 @@ func TestGlobalReset(t *testing.T) {
		scanner.File{Name: "e", Version: 1000},
	}

	m.ReplaceWithDelete(cid.LocalID, local)
	g := m.Global()
	m.ReplaceWithDelete(protocol.LocalNodeID, local)
	g := globalList(m)
	sort.Sort(fileList(g))

	if !reflect.DeepEqual(g, local) {
	if fmt.Sprint(g) != fmt.Sprint(local) {
		t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
	}

	m.Replace(1, remote)
	m.Replace(1, nil)
	m.Replace(remoteNode, remote)
	m.Replace(remoteNode, nil)

	g = m.Global()
	g = globalList(m)
	sort.Sort(fileList(g))

	if !reflect.DeepEqual(g, local) {
	if fmt.Sprint(g) != fmt.Sprint(local) {
		t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
	}
}

func TestNeed(t *testing.T) {
	m := files.NewSet()
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	m := files.NewSet("test", db)

	local := []scanner.File{
		scanner.File{Name: "a", Version: 1000},
@@ -418,21 +543,26 @@ func TestNeed(t *testing.T) {
		scanner.File{Name: "e", Version: 1000},
	}

	m.ReplaceWithDelete(cid.LocalID, local)
	m.Replace(1, remote)
	m.ReplaceWithDelete(protocol.LocalNodeID, local)
	m.Replace(remoteNode, remote)

	need := m.Need(0)
	need := needList(m, protocol.LocalNodeID)

	sort.Sort(fileList(need))
	sort.Sort(fileList(shouldNeed))

	if !reflect.DeepEqual(need, shouldNeed) {
	if fmt.Sprint(need) != fmt.Sprint(shouldNeed) {
		t.Errorf("Need incorrect;\n%v !=\n%v", need, shouldNeed)
	}
}

func TestChanges(t *testing.T) {
	m := files.NewSet()
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		t.Fatal(err)
	}

	m := files.NewSet("test", db)

	local1 := []scanner.File{
		scanner.File{Name: "a", Version: 1000},
@@ -449,17 +579,17 @@ func TestChanges(t *testing.T) {
		scanner.File{Name: "e", Version: 1000},
	}

	m.ReplaceWithDelete(cid.LocalID, local1)
	c0 := m.Changes(cid.LocalID)
	m.ReplaceWithDelete(protocol.LocalNodeID, local1)
	c0 := m.Changes(protocol.LocalNodeID)

	m.ReplaceWithDelete(cid.LocalID, local2)
	c1 := m.Changes(cid.LocalID)
	m.ReplaceWithDelete(protocol.LocalNodeID, local2)
	c1 := m.Changes(protocol.LocalNodeID)
	if !(c1 > c0) {
		t.Fatal("Change number should have incremented")
	}

	m.ReplaceWithDelete(cid.LocalID, local2)
	c2 := m.Changes(cid.LocalID)
	m.ReplaceWithDelete(protocol.LocalNodeID, local2)
	c2 := m.Changes(protocol.LocalNodeID)
	if c2 != c1 {
		t.Fatal("Change number should be unchanged")
	}
files/testdata/.gitignore (vendored, Normal file, 1 line)
@@ -0,0 +1 @@
index.db
files/testdata/index.db (vendored, binary file not shown)