Refactor out file scanner into separate package

This commit is contained in:
Jakob Borg
2014-03-08 23:02:01 +01:00
parent d6c9afd07f
commit 1448cfe66a
21 changed files with 471 additions and 338 deletions

View File

@@ -1,74 +0,0 @@
package main
import (
"bytes"
"crypto/sha256"
"io"
)
// Block describes one hashed section of a file.
type Block struct {
	Offset int64  // byte offset of the block within the file
	Size   uint32 // length of the block in bytes
	Hash   []byte // SHA-256 digest of the block contents
}

// Blocks returns the blockwise hash of the reader.
// The reader is consumed in chunks of blocksize bytes; only the final
// block may be shorter. An empty reader yields a single zero-length
// block carrying the SHA-256 digest of the empty string, so empty files
// still have a well-defined, non-empty block list.
func Blocks(r io.Reader, blocksize int) ([]Block, error) {
	var blocks []Block
	var offset int64
	for {
		// Limit each round to at most one block's worth of data.
		lr := &io.LimitedReader{R: r, N: int64(blocksize)}
		hf := sha256.New()
		n, err := io.Copy(hf, lr)
		if err != nil {
			return nil, err
		}
		if n == 0 {
			// Reader exhausted.
			break
		}

		b := Block{
			Offset: offset,
			Size:   uint32(n),
			Hash:   hf.Sum(nil),
		}
		blocks = append(blocks, b)
		offset += int64(n)
	}

	if len(blocks) == 0 {
		// Empty file; synthesize a single block holding the SHA-256 of
		// the empty string so the file is still representable.
		blocks = append(blocks, Block{
			Offset: 0,
			Size:   0,
			Hash:   []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55},
		})
	}

	return blocks, nil
}

// BlockDiff returns lists of common and missing (to transform src into tgt)
// blocks. Both block lists must have been created with the same block size.
func BlockDiff(src, tgt []Block) (have, need []Block) {
	if len(tgt) == 0 && len(src) != 0 {
		// Target is empty: nothing is needed and nothing is in common.
		return nil, nil
	}
	if len(tgt) != 0 && len(src) == 0 {
		// Copy the entire file
		return nil, tgt
	}
	for i := range tgt {
		// Idiomatic bytes.Equal instead of bytes.Compare(...) != 0
		// (staticcheck S1004); same result, clearer intent.
		if i >= len(src) || !bytes.Equal(tgt[i].Hash, src[i].Hash) {
			// Copy differing block
			need = append(need, tgt[i])
		} else {
			have = append(have, tgt[i])
		}
	}
	return have, need
}

View File

@@ -1,116 +0,0 @@
package main
import (
"bytes"
"fmt"
"testing"
)
// blocksTestData drives TestBlocks. Each case gives the raw file data,
// the block size to hash it with, and the expected SHA-256 hex digest of
// every resulting block, in order.
var blocksTestData = []struct {
	data      []byte
	blocksize int
	hash      []string
}{
	// Empty data still yields one block: the hash of the empty string.
	{[]byte(""), 1024, []string{
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}},
	// Data shorter than, equal to and one byte shorter than the block
	// size all produce a single block.
	{[]byte("contents"), 1024, []string{
		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
	{[]byte("contents"), 9, []string{
		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
	{[]byte("contents"), 8, []string{
		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
	// Larger data splits into multiple blocks; the last may be short.
	{[]byte("contents"), 7, []string{
		"ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
		"043a718774c572bd8a25adbeb1bfcd5c0256ae11cecf9f9c3f925d0e52beaf89"},
	},
	{[]byte("contents"), 3, []string{
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
		"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
	},
	// Identical 3-byte chunks ("con", "con") hash identically.
	{[]byte("conconts"), 3, []string{
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
	},
	{[]byte("contenten"), 3, []string{
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3"},
	},
}
// TestBlocks verifies that Blocks produces the expected number of
// blocks for every blocksTestData case, with correct offsets, sizes and
// SHA-256 hashes.
func TestBlocks(t *testing.T) {
	for _, test := range blocksTestData {
		buf := bytes.NewBuffer(test.data)
		blocks, err := Blocks(buf, test.blocksize)

		if err != nil {
			t.Fatal(err)
		}

		if l := len(blocks); l != len(test.hash) {
			t.Fatalf("Incorrect number of blocks %d != %d", l, len(test.hash))
		} else {
			// i indexes blocks in lockstep with the expected-hash list
			// while off walks the data in blocksize strides.
			i := 0
			for off := int64(0); off < int64(len(test.data)); off += int64(test.blocksize) {
				if blocks[i].Offset != off {
					t.Errorf("Incorrect offset for block %d: %d != %d", i, blocks[i].Offset, off)
				}

				// Expected size is a full block, except possibly a
				// shorter tail block at the end of the data.
				bs := test.blocksize
				if rem := len(test.data) - int(off); bs > rem {
					bs = rem
				}
				if int(blocks[i].Size) != bs {
					t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Size, bs)
				}
				if h := fmt.Sprintf("%x", blocks[i].Hash); h != test.hash[i] {
					t.Errorf("Incorrect block hash %q != %q", h, test.hash[i])
				}

				i++
			}
		}
	}
}
// diffTestData drives TestDiff: a is the source content, b the target
// content, s the block size, and d the blocks BlockDiff is expected to
// report as needed (hashes are not compared, only offset and size).
var diffTestData = []struct {
	a string
	b string
	s int
	d []Block
}{
	{"contents", "contents", 1024, []Block{}},
	{"", "", 1024, []Block{}},
	{"contents", "contents", 3, []Block{}},
	{"contents", "cantents", 3, []Block{{0, 3, nil}}},
	{"contents", "contants", 3, []Block{{3, 3, nil}}},
	{"contents", "cantants", 3, []Block{{0, 3, nil}, {3, 3, nil}}},
	// Empty target: the single synthetic empty block is needed.
	{"contents", "", 3, []Block{{0, 0, nil}}},
	// Empty source: every target block is needed.
	{"", "contents", 3, []Block{{0, 3, nil}, {3, 3, nil}, {6, 2, nil}}},
	{"con", "contents", 3, []Block{{3, 3, nil}, {6, 2, nil}}},
	// Target shorter than source: nothing reported as needed.
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []Block{{3, 1, nil}}},
	{"cont", "contents", 3, []Block{{3, 3, nil}, {6, 2, nil}}},
}
// TestDiff hashes each diffTestData pair with Blocks, runs BlockDiff,
// and checks the offset and size of every block reported as needed.
func TestDiff(t *testing.T) {
	for i, tc := range diffTestData {
		src, _ := Blocks(bytes.NewBufferString(tc.a), tc.s)
		tgt, _ := Blocks(bytes.NewBufferString(tc.b), tc.s)
		_, need := BlockDiff(src, tgt)

		if len(need) != len(tc.d) {
			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(need), len(tc.d))
		}
		for j := range tc.d {
			got, want := need[j], tc.d[j]
			if got.Offset != want.Offset {
				t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, got.Offset, want.Offset)
			}
			if got.Size != want.Size {
				t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, got.Size, want.Size)
			}
		}
	}
}

View File

@@ -11,6 +11,7 @@ import (
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/scanner"
)
type fileMonitor struct {
@@ -18,8 +19,8 @@ type fileMonitor struct {
path string // full path
writeDone sync.WaitGroup
model *Model
global File
localBlocks []Block
global scanner.File
localBlocks []scanner.Block
copyError error
writeError error
}
@@ -29,7 +30,7 @@ func (m *fileMonitor) FileBegins(cc <-chan content) error {
log.Printf("FILE: FileBegins: " + m.name)
}
tmp := tempName(m.path, m.global.Modified)
tmp := defTempNamer.TempName(m.path)
dir := path.Dir(tmp)
_, err := os.Stat(dir)
@@ -115,7 +116,7 @@ func (m *fileMonitor) FileDone() error {
m.writeDone.Wait()
tmp := tempName(m.path, m.global.Modified)
tmp := defTempNamer.TempName(m.path)
defer os.Remove(tmp)
if m.copyError != nil {
@@ -149,14 +150,14 @@ func (m *fileMonitor) FileDone() error {
return nil
}
func hashCheck(name string, correct []Block) error {
func hashCheck(name string, correct []scanner.Block) error {
rf, err := os.Open(name)
if err != nil {
return err
}
defer rf.Close()
current, err := Blocks(rf, BlockSize)
current, err := scanner.Blocks(rf, BlockSize)
if err != nil {
return err
}

View File

@@ -5,6 +5,8 @@ import (
"sort"
"sync"
"time"
"github.com/calmh/syncthing/scanner"
)
type Monitor interface {
@@ -23,7 +25,7 @@ type FileQueue struct {
type queuedFile struct {
name string
blocks []Block
blocks []scanner.Block
activeBlocks []bool
given int
remaining int
@@ -54,7 +56,7 @@ func (l queuedFileList) Less(a, b int) bool {
type queuedBlock struct {
name string
block Block
block scanner.Block
index int
}
@@ -65,7 +67,7 @@ func NewFileQueue() *FileQueue {
}
}
func (q *FileQueue) Add(name string, blocks []Block, monitor Monitor) {
func (q *FileQueue) Add(name string, blocks []scanner.Block, monitor Monitor) {
q.fmut.Lock()
defer q.fmut.Unlock()

View File

@@ -5,6 +5,8 @@ import (
"sync"
"sync/atomic"
"testing"
"github.com/calmh/syncthing/scanner"
)
func TestFileQueueAdd(t *testing.T) {
@@ -17,8 +19,8 @@ func TestFileQueueAddSorting(t *testing.T) {
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ := q.Get("nodeID")
if b.name != "aaa" {
t.Errorf("Incorrectly sorted get: %+v", b)
@@ -28,12 +30,12 @@ func TestFileQueueAddSorting(t *testing.T) {
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID") // Start on zzzz
if b.name != "zzz" {
t.Errorf("Incorrectly sorted get: %+v", b)
}
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID")
if b.name != "zzz" {
// Continue rather than starting a new file
@@ -56,12 +58,12 @@ func TestFileQueueGet(t *testing.T) {
q.SetAvailable("foo", []string{"nodeID"})
q.SetAvailable("bar", []string{"nodeID"})
q.Add("foo", []Block{
q.Add("foo", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("bar", []Block{
q.Add("bar", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
@@ -70,7 +72,7 @@ func TestFileQueueGet(t *testing.T) {
expected := queuedBlock{
name: "bar",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
@@ -89,7 +91,7 @@ func TestFileQueueGet(t *testing.T) {
expected = queuedBlock{
name: "bar",
block: Block{
block: scanner.Block{
Offset: 128,
Size: 128,
Hash: []byte("some other bar hash bytes"),
@@ -109,7 +111,7 @@ func TestFileQueueGet(t *testing.T) {
expected = queuedBlock{
name: "foo",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
@@ -150,7 +152,7 @@ func TestFileQueueDone(t *testing.T) {
}()
q := FileQueue{resolver: fakeResolver{}}
q.Add("foo", []Block{
q.Add("foo", []scanner.Block{
{Offset: 0, Length: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Length: 128, Hash: []byte("some other foo hash bytes")},
}, ch)
@@ -181,19 +183,19 @@ func TestFileQueueGetNodeIDs(t *testing.T) {
q.SetAvailable("a-foo", []string{"nodeID", "a"})
q.SetAvailable("b-bar", []string{"nodeID", "b"})
q.Add("a-foo", []Block{
q.Add("a-foo", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("b-bar", []Block{
q.Add("b-bar", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
expected := queuedBlock{
name: "b-bar",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
@@ -209,7 +211,7 @@ func TestFileQueueGetNodeIDs(t *testing.T) {
expected = queuedBlock{
name: "a-foo",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
@@ -225,7 +227,7 @@ func TestFileQueueGetNodeIDs(t *testing.T) {
expected = queuedBlock{
name: "a-foo",
block: Block{
block: scanner.Block{
Offset: 128,
Size: 128,
Hash: []byte("some other foo hash bytes"),
@@ -246,9 +248,9 @@ func TestFileQueueThreadHandling(t *testing.T) {
const n = 100
var total int
var blocks []Block
var blocks []scanner.Block
for i := 1; i <= n; i++ {
blocks = append(blocks, Block{Offset: int64(i), Size: 1})
blocks = append(blocks, scanner.Block{Offset: int64(i), Size: 1})
total += i
}

View File

@@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/calmh/syncthing/scanner"
"github.com/codegangsta/martini"
)
@@ -107,7 +108,7 @@ func restPostRestart(req *http.Request) {
restart()
}
type guiFile File
type guiFile scanner.File
func (f guiFile) MarshalJSON() ([]byte, error) {
type t struct {
@@ -116,7 +117,7 @@ func (f guiFile) MarshalJSON() ([]byte, error) {
}
return json.Marshal(t{
Name: f.Name,
Size: File(f).Size,
Size: scanner.File(f).Size,
})
}

View File

@@ -21,8 +21,11 @@ import (
"github.com/calmh/ini"
"github.com/calmh/syncthing/discover"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
const BlockSize = 128 * 1024
var cfg Configuration
var Version = "unknown-dev"
@@ -217,7 +220,17 @@ func main() {
infoln("Populating repository index")
}
loadIndex(m)
updateLocalModel(m)
sup := &suppressor{threshold: int64(cfg.Options.MaxChangeKbps)}
w := &scanner.Walker{
Dir: m.dir,
IgnoreFile: ".stignore",
FollowSymlinks: cfg.Options.FollowSymlinks,
BlockSize: BlockSize,
Suppressor: sup,
TempNamer: defTempNamer,
}
updateLocalModel(m, w)
connOpts := map[string]string{
"clientId": "syncthing",
@@ -263,7 +276,7 @@ func main() {
for {
time.Sleep(td)
if m.LocalAge() > (td / 2).Seconds() {
updateLocalModel(m)
updateLocalModel(m, w)
}
}
}()
@@ -502,8 +515,8 @@ func connect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Confi
}
}
func updateLocalModel(m *Model) {
files, _ := m.Walk(cfg.Options.FollowSymlinks)
func updateLocalModel(m *Model, w *scanner.Walker) {
files, _ := w.Walk()
m.ReplaceLocal(files)
saveIndex(m)
}

View File

@@ -13,16 +13,17 @@ import (
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
type Model struct {
dir string
global map[string]File // the latest version of each file as it exists in the cluster
gmut sync.RWMutex // protects global
local map[string]File // the files we currently have locally on disk
lmut sync.RWMutex // protects local
remote map[string]map[string]File
global map[string]scanner.File // the latest version of each file as it exists in the cluster
gmut sync.RWMutex // protects global
local map[string]scanner.File // the files we currently have locally on disk
lmut sync.RWMutex // protects local
remote map[string]map[string]scanner.File
rmut sync.RWMutex // protects remote
protoConn map[string]Connection
rawConn map[string]io.Closer
@@ -31,7 +32,7 @@ type Model struct {
// Queue for files to fetch. fq can call back into the model, so we must ensure
// to hold no locks when calling methods on fq.
fq *FileQueue
dq chan File // queue for files to delete
dq chan scanner.File // queue for files to delete
updatedLocal int64 // timestamp of last update to local
updateGlobal int64 // timestamp of last update to remote
@@ -77,16 +78,16 @@ var (
func NewModel(dir string, maxChangeBw int) *Model {
m := &Model{
dir: dir,
global: make(map[string]File),
local: make(map[string]File),
remote: make(map[string]map[string]File),
global: make(map[string]scanner.File),
local: make(map[string]scanner.File),
remote: make(map[string]map[string]scanner.File),
protoConn: make(map[string]Connection),
rawConn: make(map[string]io.Closer),
lastIdxBcast: time.Now(),
trace: make(map[string]bool),
sup: suppressor{threshold: int64(maxChangeBw)},
fq: NewFileQueue(),
dq: make(chan File),
dq: make(chan scanner.File),
}
go m.broadcastIndexLoop()
@@ -128,7 +129,6 @@ func (m *Model) StartRW(del bool, threads int) {
m.delete = del
m.parallelRequests = threads
go m.cleanTempFiles()
if del {
go m.deleteLoop()
}
@@ -260,7 +260,7 @@ func (m *Model) InSyncSize() (files, bytes int64) {
}
// NeedFiles returns the list of currently needed files and the total size.
func (m *Model) NeedFiles() (files []File, bytes int64) {
func (m *Model) NeedFiles() (files []scanner.File, bytes int64) {
qf := m.fq.QueuedFiles()
m.gmut.RLock()
@@ -278,7 +278,7 @@ func (m *Model) NeedFiles() (files []File, bytes int64) {
// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
var files = make([]scanner.File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
@@ -290,7 +290,7 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
debugf("NET IDX(in): %s: %d files", nodeID, len(fs))
}
repo := make(map[string]File)
repo := make(map[string]scanner.File)
for _, f := range files {
m.indexUpdate(repo, f)
}
@@ -306,7 +306,7 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
var files = make([]scanner.File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
@@ -335,7 +335,7 @@ func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
m.recomputeNeedForFiles(files)
}
func (m *Model) indexUpdate(repo map[string]File, f File) {
func (m *Model) indexUpdate(repo map[string]scanner.File, f scanner.File) {
if m.trace["idx"] {
var flagComment string
if f.Flags&protocol.FlagDeleted != 0 {
@@ -431,9 +431,9 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
}
// ReplaceLocal replaces the local repository index with the given list of files.
func (m *Model) ReplaceLocal(fs []File) {
func (m *Model) ReplaceLocal(fs []scanner.File) {
var updated bool
var newLocal = make(map[string]File)
var newLocal = make(map[string]scanner.File)
m.lmut.RLock()
for _, f := range fs {
@@ -474,7 +474,7 @@ func (m *Model) ReplaceLocal(fs []File) {
// the local index from a cache file at startup.
func (m *Model) SeedLocal(fs []protocol.FileInfo) {
m.lmut.Lock()
m.local = make(map[string]File)
m.local = make(map[string]scanner.File)
for _, f := range fs {
m.local[f.Name] = fileFromFileInfo(f)
}
@@ -628,7 +628,7 @@ func (m *Model) broadcastIndexLoop() {
}
// markDeletedLocals sets the deleted flag on files that have gone missing locally.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
func (m *Model) markDeletedLocals(newLocal map[string]scanner.File) bool {
// For every file in the existing local table, check if they are also
// present in the new local table. If they are not, check that we already
// had the newest version available according to the global table and if so
@@ -658,7 +658,7 @@ func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
return updated
}
func (m *Model) updateLocal(f File) {
func (m *Model) updateLocal(f scanner.File) {
var updated bool
m.lmut.Lock()
@@ -685,7 +685,7 @@ func (m *Model) updateLocal(f File) {
/*
XXX: Not done, needs elegant handling of availability
func (m *Model) recomputeGlobalFor(files []File) bool {
func (m *Model) recomputeGlobalFor(files []scanner.File) bool {
m.gmut.Lock()
defer m.gmut.Unlock()
@@ -702,7 +702,7 @@ func (m *Model) recomputeGlobalFor(files []File) bool {
*/
func (m *Model) recomputeGlobal() {
var newGlobal = make(map[string]File)
var newGlobal = make(map[string]scanner.File)
m.lmut.RLock()
for n, f := range m.local {
@@ -761,12 +761,12 @@ func (m *Model) recomputeGlobal() {
type addOrder struct {
n string
remote []Block
remote []scanner.Block
fm *fileMonitor
}
func (m *Model) recomputeNeedForGlobal() {
var toDelete []File
var toDelete []scanner.File
var toAdd []addOrder
m.gmut.RLock()
@@ -785,8 +785,8 @@ func (m *Model) recomputeNeedForGlobal() {
}
}
func (m *Model) recomputeNeedForFiles(files []File) {
var toDelete []File
func (m *Model) recomputeNeedForFiles(files []scanner.File) {
var toDelete []scanner.File
var toAdd []addOrder
m.gmut.RLock()
@@ -805,7 +805,7 @@ func (m *Model) recomputeNeedForFiles(files []File) {
}
}
func (m *Model) recomputeNeedForFile(gf File, toAdd []addOrder, toDelete []File) ([]addOrder, []File) {
func (m *Model) recomputeNeedForFile(gf scanner.File, toAdd []addOrder, toDelete []scanner.File) ([]addOrder, []scanner.File) {
m.lmut.RLock()
lf, ok := m.local[gf.Name]
m.lmut.RUnlock()
@@ -830,7 +830,7 @@ func (m *Model) recomputeNeedForFile(gf File, toAdd []addOrder, toDelete []File)
if gf.Flags&protocol.FlagDeleted != 0 {
toDelete = append(toDelete, gf)
} else {
local, remote := BlockDiff(lf.Blocks, gf.Blocks)
local, remote := scanner.BlockDiff(lf.Blocks, gf.Blocks)
fm := fileMonitor{
name: gf.Name,
path: path.Clean(path.Join(m.dir, gf.Name)),
@@ -878,18 +878,18 @@ func (m *Model) deleteLoop() {
}
}
func fileFromFileInfo(f protocol.FileInfo) File {
var blocks = make([]Block, len(f.Blocks))
func fileFromFileInfo(f protocol.FileInfo) scanner.File {
var blocks = make([]scanner.Block, len(f.Blocks))
var offset int64
for i, b := range f.Blocks {
blocks[i] = Block{
blocks[i] = scanner.Block{
Offset: offset,
Size: b.Size,
Hash: b.Hash,
}
offset += int64(b.Size)
}
return File{
return scanner.File{
Name: f.Name,
Size: offset,
Flags: f.Flags,
@@ -899,7 +899,7 @@ func fileFromFileInfo(f protocol.FileInfo) File {
}
}
func fileInfoFromFile(f File) protocol.FileInfo {
func fileInfoFromFile(f scanner.File) protocol.FileInfo {
var blocks = make([]protocol.BlockInfo, len(f.Blocks))
for i, b := range f.Blocks {
blocks[i] = protocol.BlockInfo{

View File

@@ -9,6 +9,7 @@ import (
"time"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
func TestNewModel(t *testing.T) {
@@ -27,27 +28,27 @@ func TestNewModel(t *testing.T) {
}
}
var testDataExpected = map[string]File{
"foo": File{
var testDataExpected = map[string]scanner.File{
"foo": scanner.File{
Name: "foo",
Flags: 0,
Modified: 0,
Size: 7,
Blocks: []Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
Blocks: []scanner.Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
},
"empty": File{
"empty": scanner.File{
Name: "empty",
Flags: 0,
Modified: 0,
Size: 0,
Blocks: []Block{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
Blocks: []scanner.Block{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
},
"bar": File{
"bar": scanner.File{
Name: "bar",
Flags: 0,
Modified: 0,
Size: 10,
Blocks: []Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
Blocks: []scanner.Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
},
}
@@ -63,7 +64,8 @@ func init() {
func TestUpdateLocal(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
if fs, _ := m.NeedFiles(); len(fs) > 0 {
@@ -105,7 +107,8 @@ func TestUpdateLocal(t *testing.T) {
func TestRemoteUpdateExisting(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
@@ -122,7 +125,8 @@ func TestRemoteUpdateExisting(t *testing.T) {
func TestRemoteAddNew(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
@@ -139,7 +143,8 @@ func TestRemoteAddNew(t *testing.T) {
func TestRemoteUpdateOld(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
oldTimeStamp := int64(1234)
@@ -157,7 +162,8 @@ func TestRemoteUpdateOld(t *testing.T) {
func TestRemoteIndexUpdate(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
foo := protocol.FileInfo{
@@ -190,7 +196,8 @@ func TestRemoteIndexUpdate(t *testing.T) {
func TestDelete(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
@@ -201,10 +208,10 @@ func TestDelete(t *testing.T) {
}
ot := time.Now().Unix()
newFile := File{
newFile := scanner.File{
Name: "a new file",
Modified: ot,
Blocks: []Block{{0, 100, []byte("some hash bytes")}},
Blocks: []scanner.Block{{0, 100, []byte("some hash bytes")}},
}
m.updateLocal(newFile)
@@ -292,7 +299,8 @@ func TestDelete(t *testing.T) {
func TestForgetNode(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
@@ -345,7 +353,8 @@ func TestForgetNode(t *testing.T) {
func TestRequest(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
bs, err := m.Request("some node", "default", "foo", 0, 6)
@@ -367,7 +376,8 @@ func TestRequest(t *testing.T) {
func TestIgnoreWithUnknownFlags(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
valid := protocol.FileInfo{
@@ -410,7 +420,8 @@ func genFiles(n int) []protocol.FileInfo {
func BenchmarkIndex10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
@@ -422,7 +433,8 @@ func BenchmarkIndex10000(b *testing.B) {
func BenchmarkIndex00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(100)
@@ -434,7 +446,8 @@ func BenchmarkIndex00100(b *testing.B) {
func BenchmarkIndexUpdate10000f10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
@@ -447,7 +460,8 @@ func BenchmarkIndexUpdate10000f10000(b *testing.B) {
func BenchmarkIndexUpdate10000f00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
@@ -461,7 +475,8 @@ func BenchmarkIndexUpdate10000f00100(b *testing.B) {
func BenchmarkIndexUpdate10000f00001(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
@@ -506,7 +521,8 @@ func (FakeConnection) Statistics() protocol.Statistics {
func BenchmarkRequest(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
const n = 1000

View File

@@ -1,6 +1,7 @@
package main
import (
"os"
"sync"
"time"
)
@@ -51,6 +52,11 @@ func (h *changeHistory) append(size int64, t time.Time) {
h.changes = append(h.changes, c)
}
func (s *suppressor) Suppress(name string, fi os.FileInfo) bool {
sup, _ := s.suppress(name, fi.Size(), time.Now())
return sup
}
func (s *suppressor) suppress(name string, size int64, t time.Time) (bool, bool) {
s.Lock()

28
cmd/syncthing/tempname.go Normal file
View File

@@ -0,0 +1,28 @@
package main
import (
"fmt"
"path"
"path/filepath"
"runtime"
"strings"
)
// tempNamer creates and recognizes the temporary file names used while
// a file is being transferred.
type tempNamer struct {
	prefix string // marker prepended to the base name of temp files
}

// defTempNamer is the namer used throughout the package.
var defTempNamer = tempNamer{".syncthing"}

// IsTemporary reports whether name is one of our temporary files.
func (t tempNamer) IsTemporary(name string) bool {
	p := name
	if runtime.GOOS == "windows" {
		// Normalize to forward slashes so path.Base works.
		p = filepath.ToSlash(p)
	}
	return strings.HasPrefix(path.Base(p), t.prefix)
}

// TempName returns the temporary file name corresponding to name, in
// the same directory.
func (t tempNamer) TempName(name string) string {
	dir, base := path.Dir(name), path.Base(name)
	return path.Join(dir, fmt.Sprintf("%s.%s", t.prefix, base))
}

View File

@@ -1,242 +0,0 @@
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/calmh/syncthing/protocol"
)
const BlockSize = 128 * 1024
// File is the scanner's view of one file in the repository: its
// metadata plus the blockwise hash of its contents.
type File struct {
	Name     string  // repository-relative, slash-separated path
	Flags    uint32  // file mode / status bits (protocol flags may be set)
	Modified int64   // modification time, Unix seconds
	Version  uint32  // bumped when content/flags change without a new mtime
	Size     int64   // file size in bytes
	Blocks   []Block // blockwise hashes of the content
}
// String returns a compact, human-readable summary of the file's
// metadata, reporting the block count rather than the blocks themselves.
func (f File) String() string {
	const format = "File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}"
	return fmt.Sprintf(format, f.Name, f.Flags, f.Modified, f.Version, f.Size, len(f.Blocks))
}
// Equals reports whether f and o denote the same version of a file,
// judged by modification time and version counter only.
func (f File) Equals(o File) bool {
	if f.Modified != o.Modified {
		return false
	}
	return f.Version == o.Version
}
// NewerThan reports whether f is a strictly newer version than o:
// a later modification time wins, with the version counter as the
// tie-breaker at equal times.
func (f File) NewerThan(o File) bool {
	if f.Modified != o.Modified {
		return f.Modified > o.Modified
	}
	return f.Version > o.Version
}
// isTempName reports whether name refers to one of our in-flight
// temporary files (base name starting with ".syncthing.").
func isTempName(name string) bool {
	p := name
	if runtime.GOOS == "windows" {
		// Normalize separators so path.Base sees slash-separated input.
		p = filepath.ToSlash(p)
	}
	return strings.HasPrefix(path.Base(p), ".syncthing.")
}
// tempName returns the temporary file name for name, embedding the
// given modification time, placed in the same directory.
func tempName(name string, modified int64) string {
	base := fmt.Sprintf(".syncthing.%s.%d", path.Base(name), modified)
	return path.Join(path.Dir(name), base)
}
// loadIgnoreFiles returns a filepath.WalkFunc that collects the
// patterns from every .stignore file under the model's directory into
// ign, keyed by the repository-relative directory containing the file
// ("" for the repository root).
func (m *Model) loadIgnoreFiles(ign map[string][]string) filepath.WalkFunc {
	return func(p string, info os.FileInfo, err error) error {
		if err != nil {
			// Unreadable entries are skipped silently; walking continues.
			return nil
		}

		rn, err := filepath.Rel(m.dir, p)
		if err != nil {
			return nil
		}

		if pn, sn := path.Split(rn); sn == ".stignore" {
			// Key patterns by the directory that holds the .stignore,
			// with surrounding slashes removed ("" for the root).
			pn := strings.Trim(pn, "/")
			// Read errors are ignored; bs is then empty. One pattern
			// per non-empty line.
			bs, _ := ioutil.ReadFile(p)
			lines := bytes.Split(bs, []byte("\n"))
			var patterns []string
			for _, line := range lines {
				if len(line) > 0 {
					patterns = append(patterns, string(line))
				}
			}
			ign[pn] = patterns
		}

		return nil
	}
}
// walkAndHashFiles returns a filepath.WalkFunc that appends a File
// entry (with blockwise hashes) to *res for every regular file it
// visits, skipping temp files, .stignore files and ignored paths.
// Unchanged files (same mtime as the cached local entry) reuse their
// cached block list instead of being rehashed, and rapidly-changing
// files may be suppressed via m.sup.
func (m *Model) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
	return func(p string, info os.FileInfo, err error) error {
		if err != nil {
			if m.trace["file"] {
				log.Printf("FILE: %q: %v", p, err)
			}
			// Errors never abort the walk; the entry is just skipped.
			return nil
		}

		if isTempName(p) {
			return nil
		}

		rn, err := filepath.Rel(m.dir, p)
		if err != nil {
			return nil
		}

		if _, sn := path.Split(rn); sn == ".stignore" {
			// We never sync the .stignore files
			return nil
		}

		if ignoreFile(ign, rn) {
			if m.trace["file"] {
				log.Println("FILE: IGNORE:", rn)
			}
			return nil
		}

		// Only regular files are indexed (no dirs, symlinks, devices...).
		if info.Mode()&os.ModeType == 0 {
			modified := info.ModTime().Unix()

			m.lmut.RLock()
			lf, ok := m.local[rn]
			m.lmut.RUnlock()

			if ok && lf.Modified == modified {
				// Unchanged mtime: reuse the cached entry, but pick up
				// permission-only changes and bump the version for them.
				if nf := uint32(info.Mode()); nf != lf.Flags {
					lf.Flags = nf
					lf.Version++
				}
				*res = append(*res, lf)
			} else {
				if cur, prev := m.sup.suppress(rn, info.Size(), time.Now()); cur {
					// File is changing too fast; don't rehash it now.
					if m.trace["file"] {
						log.Printf("FILE: SUPPRESS: %q change bw over threshold", rn)
					}
					if !prev {
						log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", rn)
					}
					if ok {
						// Publish the previous entry flagged invalid so
						// peers know not to trust it.
						lf.Flags = protocol.FlagInvalid
						lf.Version++
						*res = append(*res, lf)
					}
					return nil
				} else if prev && !cur {
					log.Printf("INFO: Changes to %q are no longer suppressed.", rn)
				}

				if m.trace["file"] {
					log.Printf("FILE: Hash %q", p)
				}

				fd, err := os.Open(p)
				if err != nil {
					if m.trace["file"] {
						log.Printf("FILE: %q: %v", p, err)
					}
					return nil
				}
				// defer runs when this per-file closure returns, so the
				// descriptor is released before the next file is visited.
				defer fd.Close()

				blocks, err := Blocks(fd, BlockSize)
				if err != nil {
					if m.trace["file"] {
						log.Printf("FILE: %q: %v", p, err)
					}
					return nil
				}
				f := File{
					Name:     rn,
					Size:     info.Size(),
					Flags:    uint32(info.Mode()),
					Modified: modified,
					Blocks:   blocks,
				}
				*res = append(*res, f)
			}
		}

		return nil
	}
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed. It also returns the collected
// .stignore patterns, keyed by directory. When followSymlinks is true,
// symlinked directories directly under the repository root are scanned
// as well (each with its own ignore-file pass).
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
	ignore = make(map[string][]string)

	hashFiles := m.walkAndHashFiles(&files, ignore)

	// Two passes: ignore patterns must be fully loaded before any file
	// is considered for hashing.
	filepath.Walk(m.dir, m.loadIgnoreFiles(ignore))
	filepath.Walk(m.dir, hashFiles)

	if followSymlinks {
		d, err := os.Open(m.dir)
		if err != nil {
			// Best effort: return what the main walk already gathered.
			return
		}
		defer d.Close()

		fis, err := d.Readdir(-1)
		if err != nil {
			return
		}

		for _, info := range fis {
			if info.Mode()&os.ModeSymlink != 0 {
				// Trailing slash makes filepath.Walk follow the link
				// target as a directory.
				dir := path.Join(m.dir, info.Name()) + "/"
				filepath.Walk(dir, m.loadIgnoreFiles(ignore))
				filepath.Walk(dir, hashFiles)
			}
		}
	}

	return
}
// cleanTempFile is a filepath.WalkFunc that deletes a single leftover
// temporary file; all other entries pass through untouched.
func (m *Model) cleanTempFile(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	// Act only on regular files whose name marks them as temporary.
	if info.Mode()&os.ModeType != 0 || !isTempName(path) {
		return nil
	}
	if m.trace["file"] {
		log.Printf("FILE: Remove %q", path)
	}
	os.Remove(path)
	return nil
}
// cleanTempFiles removes stale temporary files left anywhere under the
// model's directory by previously interrupted transfers.
func (m *Model) cleanTempFiles() {
	filepath.Walk(m.dir, m.cleanTempFile)
}
// ignoreFile reports whether file matches any ignore pattern. Patterns
// are keyed by the directory they were declared in ("" for the root)
// and apply to that directory and everything below it; matching is done
// with path.Match against the file's base name.
func ignoreFile(patterns map[string][]string, file string) bool {
	dir, base := path.Split(file)
	for prefix, pats := range patterns {
		// A pattern set applies if it is global, or its directory is a
		// (possibly equal) ancestor of the file's directory.
		applies := len(prefix) == 0 || prefix == dir || strings.HasPrefix(dir, prefix+"/")
		if !applies {
			continue
		}
		for _, pat := range pats {
			if ok, _ := path.Match(pat, base); ok {
				return true
			}
		}
	}
	return false
}

View File

@@ -1,83 +0,0 @@
package main
import (
"fmt"
"reflect"
"testing"
"time"
)
// testdata describes the files expected under the testdata directory:
// name, size in bytes, and the SHA-256 hex digest of the first block.
var testdata = []struct {
	name string
	size int
	hash string
}{
	{"bar", 10, "2f72cc11a6fcd0271ecef8c61056ee1eb1243be3805bf9a9df98f92f7636b05c"},
	{"empty", 0, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
	{"foo", 7, "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"},
}

// correctIgnores is the expected result of loading the .stignore file
// at the testdata root.
var correctIgnores = map[string][]string{
	"": {".*", "quux"},
}
// TestWalk scans the testdata directory and checks that the walked
// files match testdata (names in sorted order, first-block hashes,
// plausible modtimes) and that the ignore patterns match correctIgnores.
func TestWalk(t *testing.T) {
	m := NewModel("testdata", 1e6)
	files, ignores := m.Walk(false)

	if l1, l2 := len(files), len(testdata); l1 != l2 {
		t.Fatalf("Incorrect number of walked files %d != %d", l1, l2)
	}

	for i := range testdata {
		if n1, n2 := testdata[i].name, files[i].Name; n1 != n2 {
			t.Errorf("Incorrect file name %q != %q for case #%d", n1, n2, i)
		}

		if h1, h2 := fmt.Sprintf("%x", files[i].Blocks[0].Hash), testdata[i].hash; h1 != h2 {
			t.Errorf("Incorrect hash %q != %q for case #%d", h1, h2, i)
		}

		// Sanity window for the checkout's file modtimes, not an exact
		// value: anything between 2010 and 2020 passes.
		t0 := time.Date(2010, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
		t1 := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
		if mt := files[i].Modified; mt < t0 || mt > t1 {
			t.Errorf("Unrealistic modtime %d for test %d", mt, i)
		}
	}

	if !reflect.DeepEqual(ignores, correctIgnores) {
		t.Errorf("Incorrect ignores\n %v\n %v", correctIgnores, ignores)
	}
}
// TestIgnore exercises ignoreFile with exact names, globs and dot-file
// patterns scoped to different directory prefixes.
func TestIgnore(t *testing.T) {
	patterns := map[string][]string{
		"":        {"t2"},
		"foo":     {"bar", "z*"},
		"foo/baz": {"quux", ".*"},
	}

	cases := []struct {
		f string
		r bool
	}{
		{"foo/bar", true},
		{"foo/quux", false},
		{"foo/zuux", true},
		{"foo/qzuux", false},
		{"foo/baz/t1", false},
		{"foo/baz/t2", true},
		{"foo/baz/bar", true},
		{"foo/baz/quuxa", false},
		{"foo/baz/aquux", false},
		{"foo/baz/.quux", true},
		{"foo/baz/zquux", true},
		{"foo/baz/quux", true},
		{"foo/bazz/quux", false},
	}

	for i, c := range cases {
		if got := ignoreFile(patterns, c.f); got != c.r {
			t.Errorf("Incorrect ignoreFile() #%d; E: %v, A: %v", i, c.r, got)
		}
	}
}