mv internal lib
78
lib/scanner/blockqueue.go
Normal file
@@ -0,0 +1,78 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package scanner

import (
	"os"
	"path/filepath"

	"github.com/syncthing/protocol"
	"github.com/syncthing/syncthing/lib/sync"
)

// The parallel hasher reads FileInfo structures from the inbox, hashes the
// file to populate the Blocks element and sends it to the outbox. A number of
// workers are used in parallel. The outbox will become closed when the inbox
// is closed and all items handled.

func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo) {
	wg := sync.NewWaitGroup()
	wg.Add(workers)

	for i := 0; i < workers; i++ {
		go func() {
			hashFiles(dir, blockSize, outbox, inbox)
			wg.Done()
		}()
	}

	go func() {
		wg.Wait()
		close(outbox)
	}()
}

func HashFile(path string, blockSize int) ([]protocol.BlockInfo, error) {
	fd, err := os.Open(path)
	if err != nil {
		if debug {
			l.Debugln("open:", err)
		}
		return []protocol.BlockInfo{}, err
	}

	fi, err := fd.Stat()
	if err != nil {
		fd.Close()
		if debug {
			l.Debugln("stat:", err)
		}
		return []protocol.BlockInfo{}, err
	}
	defer fd.Close()
	return Blocks(fd, blockSize, fi.Size())
}

func hashFiles(dir string, blockSize int, outbox, inbox chan protocol.FileInfo) {
	for f := range inbox {
		if f.IsDirectory() || f.IsDeleted() || f.IsSymlink() {
			outbox <- f
			continue
		}

		blocks, err := HashFile(filepath.Join(dir, f.Name), blockSize)
		if err != nil {
			if debug {
				l.Debugln("hash error:", f.Name, err)
			}
			continue
		}

		f.Blocks = blocks
		outbox <- f
	}
}
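For context, a minimal usage sketch of the inbox/outbox pipeline described in the comment above (not part of the commit; the folder path, block size, worker count and the example function name are illustrative): the caller owns both channels, newParallelHasher fans the hashing out over the workers, and the outbox can simply be ranged over until the workers close it.

package scanner

// Illustrative sketch only, not part of the commit.

import "github.com/syncthing/protocol"

func exampleParallelHash() {
	files := make(chan protocol.FileInfo)       // inbox: entries still to be hashed
	hashedFiles := make(chan protocol.FileInfo) // outbox: entries with Blocks populated

	// Start the workers; they drain the inbox and close the outbox when done.
	newParallelHasher("/some/folder", 128*1024, 4, hashedFiles, files)

	go func() {
		// Producer: queue entries to hash, then close the inbox so the
		// workers can finish and, in turn, close the outbox.
		files <- protocol.FileInfo{Name: "afile"}
		close(files)
	}()

	for f := range hashedFiles {
		_ = f // consume hashed entries until the outbox is closed
	}
}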
155
lib/scanner/blocks.go
Normal file
@@ -0,0 +1,155 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package scanner

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"

	"github.com/syncthing/protocol"
)

var SHA256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}

// Blocks returns the blockwise hash of the reader.
func Blocks(r io.Reader, blocksize int, sizehint int64) ([]protocol.BlockInfo, error) {
	var blocks []protocol.BlockInfo
	if sizehint > 0 {
		blocks = make([]protocol.BlockInfo, 0, int(sizehint/int64(blocksize)))
	}
	var offset int64
	hf := sha256.New()
	for {
		lr := &io.LimitedReader{R: r, N: int64(blocksize)}
		n, err := io.Copy(hf, lr)
		if err != nil {
			return nil, err
		}

		if n == 0 {
			break
		}

		b := protocol.BlockInfo{
			Size:   int32(n),
			Offset: offset,
			Hash:   hf.Sum(nil),
		}
		blocks = append(blocks, b)
		offset += int64(n)

		hf.Reset()
	}

	if len(blocks) == 0 {
		// Empty file
		blocks = append(blocks, protocol.BlockInfo{
			Offset: 0,
			Size:   0,
			Hash:   SHA256OfNothing,
		})
	}

	return blocks, nil
}

// PopulateOffsets sets the Offset field on each block.
func PopulateOffsets(blocks []protocol.BlockInfo) {
	var offset int64
	for i := range blocks {
		blocks[i].Offset = offset
		offset += int64(blocks[i].Size)
	}
}

// BlockDiff returns lists of common and missing (to transform src into tgt)
// blocks. Both block lists must have been created with the same block size.
func BlockDiff(src, tgt []protocol.BlockInfo) (have, need []protocol.BlockInfo) {
	if len(tgt) == 0 && len(src) != 0 {
		return nil, nil
	}

	if len(tgt) != 0 && len(src) == 0 {
		// Copy the entire file
		return nil, tgt
	}

	for i := range tgt {
		if i >= len(src) || bytes.Compare(tgt[i].Hash, src[i].Hash) != 0 {
			// Copy differing block
			need = append(need, tgt[i])
		} else {
			have = append(have, tgt[i])
		}
	}

	return have, need
}

// Verify returns nil or an error describing the mismatch between the block
// list and actual reader contents.
func Verify(r io.Reader, blocksize int, blocks []protocol.BlockInfo) error {
	hf := sha256.New()
	for i, block := range blocks {
		lr := &io.LimitedReader{R: r, N: int64(blocksize)}
		_, err := io.Copy(hf, lr)
		if err != nil {
			return err
		}

		hash := hf.Sum(nil)
		hf.Reset()

		if bytes.Compare(hash, block.Hash) != 0 {
			return fmt.Errorf("hash mismatch %x != %x for block %d", hash, block.Hash, i)
		}
	}

	// We should have reached the end now
	bs := make([]byte, 1)
	n, err := r.Read(bs)
	if n != 0 || err != io.EOF {
		return fmt.Errorf("file continues past end of blocks")
	}

	return nil
}

func VerifyBuffer(buf []byte, block protocol.BlockInfo) ([]byte, error) {
	if len(buf) != int(block.Size) {
		return nil, fmt.Errorf("length mismatch %d != %d", len(buf), block.Size)
	}
	hf := sha256.New()
	_, err := hf.Write(buf)
	if err != nil {
		return nil, err
	}
	hash := hf.Sum(nil)

	if !bytes.Equal(hash, block.Hash) {
		return hash, fmt.Errorf("hash mismatch %x != %x", hash, block.Hash)
	}

	return hash, nil
}

// BlocksEqual returns whether two slices of blocks are exactly the same hash
// and index pairwise.
func BlocksEqual(src, tgt []protocol.BlockInfo) bool {
	if len(tgt) != len(src) {
		return false
	}

	for i, sblk := range src {
		if !bytes.Equal(sblk.Hash, tgt[i].Hash) {
			return false
		}
	}
	return true
}
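As a quick illustration (a sketch, not part of the commit; the inputs, block size and example function name are made up and mirror the test data further down), Blocks and BlockDiff combine to work out which blocks of a changed file would need to be transferred.

package scanner

// Illustrative sketch only, not part of the commit.

import (
	"fmt"
	"strings"
)

func exampleBlockDiff() error {
	// Hash two versions of some content blockwise with a 3-byte block size.
	src, err := Blocks(strings.NewReader("contents"), 3, 0)
	if err != nil {
		return err
	}
	tgt, err := Blocks(strings.NewReader("cantents"), 3, 0)
	if err != nil {
		return err
	}

	// "have" are blocks already present in src; "need" are the blocks to
	// fetch in order to transform src into tgt.
	have, need := BlockDiff(src, tgt)
	fmt.Println(len(have), len(need)) // for these inputs: 2 common blocks, 1 differing

	return nil
}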
124
lib/scanner/blocks_test.go
Normal file
@@ -0,0 +1,124 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package scanner

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/syncthing/protocol"
)

var blocksTestData = []struct {
	data      []byte
	blocksize int
	hash      []string
}{
	{[]byte(""), 1024, []string{
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}},
	{[]byte("contents"), 1024, []string{
		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
	{[]byte("contents"), 9, []string{
		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
	{[]byte("contents"), 8, []string{
		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
	{[]byte("contents"), 7, []string{
		"ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
		"043a718774c572bd8a25adbeb1bfcd5c0256ae11cecf9f9c3f925d0e52beaf89"},
	},
	{[]byte("contents"), 3, []string{
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
		"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
	},
	{[]byte("conconts"), 3, []string{
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
	},
	{[]byte("contenten"), 3, []string{
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3"},
	},
}

func TestBlocks(t *testing.T) {
	for _, test := range blocksTestData {
		buf := bytes.NewBuffer(test.data)
		blocks, err := Blocks(buf, test.blocksize, 0)

		if err != nil {
			t.Fatal(err)
		}

		if l := len(blocks); l != len(test.hash) {
			t.Fatalf("Incorrect number of blocks %d != %d", l, len(test.hash))
		} else {
			i := 0
			for off := int64(0); off < int64(len(test.data)); off += int64(test.blocksize) {
				if blocks[i].Offset != off {
					t.Errorf("Incorrect offset for block %d: %d != %d", i, blocks[i].Offset, off)
				}

				bs := test.blocksize
				if rem := len(test.data) - int(off); bs > rem {
					bs = rem
				}
				if int(blocks[i].Size) != bs {
					t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Size, bs)
				}
				if h := fmt.Sprintf("%x", blocks[i].Hash); h != test.hash[i] {
					t.Errorf("Incorrect block hash %q != %q", h, test.hash[i])
				}

				i++
			}
		}
	}
}

var diffTestData = []struct {
	a string
	b string
	s int
	d []protocol.BlockInfo
}{
	{"contents", "contents", 1024, []protocol.BlockInfo{}},
	{"", "", 1024, []protocol.BlockInfo{}},
	{"contents", "contents", 3, []protocol.BlockInfo{}},
	{"contents", "cantents", 3, []protocol.BlockInfo{{0, 3, nil}}},
	{"contents", "contants", 3, []protocol.BlockInfo{{3, 3, nil}}},
	{"contents", "cantants", 3, []protocol.BlockInfo{{0, 3, nil}, {3, 3, nil}}},
	{"contents", "", 3, []protocol.BlockInfo{{0, 0, nil}}},
	{"", "contents", 3, []protocol.BlockInfo{{0, 3, nil}, {3, 3, nil}, {6, 2, nil}}},
	{"con", "contents", 3, []protocol.BlockInfo{{3, 3, nil}, {6, 2, nil}}},
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []protocol.BlockInfo{{3, 1, nil}}},
	{"cont", "contents", 3, []protocol.BlockInfo{{3, 3, nil}, {6, 2, nil}}},
}

func TestDiff(t *testing.T) {
	for i, test := range diffTestData {
		a, _ := Blocks(bytes.NewBufferString(test.a), test.s, 0)
		b, _ := Blocks(bytes.NewBufferString(test.b), test.s, 0)
		_, d := BlockDiff(a, b)
		if len(d) != len(test.d) {
			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
		} else {
			for j := range test.d {
				if d[j].Offset != test.d[j].Offset {
					t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
				}
				if d[j].Size != test.d[j].Size {
					t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
				}
			}
		}
	}
}
19
lib/scanner/debug.go
Normal file
@@ -0,0 +1,19 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package scanner

import (
	"os"
	"strings"

	"github.com/calmh/logger"
)

var (
	debug = strings.Contains(os.Getenv("STTRACE"), "scanner") || os.Getenv("STTRACE") == "all"
	l     = logger.DefaultLogger
)
8
lib/scanner/doc.go
Normal file
@@ -0,0 +1,8 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// Package scanner implements a file system scanner and hasher.
package scanner
4
lib/scanner/testdata/.stignore
vendored
Normal file
@@ -0,0 +1,4 @@
#include excludes

bfile
dir1/cfile
1
lib/scanner/testdata/afile
vendored
Normal file
@@ -0,0 +1 @@
foo
1
lib/scanner/testdata/bfile
vendored
Normal file
@@ -0,0 +1 @@
bar
1
lib/scanner/testdata/dir1/cfile
vendored
Normal file
@@ -0,0 +1 @@
baz
1
lib/scanner/testdata/dir1/dfile
vendored
Normal file
@@ -0,0 +1 @@
quux
1
lib/scanner/testdata/dir2/cfile
vendored
Normal file
@@ -0,0 +1 @@
baz
1
lib/scanner/testdata/dir2/dfile
vendored
Normal file
@@ -0,0 +1 @@
quux
1
lib/scanner/testdata/dir3/cfile
vendored
Normal file
@@ -0,0 +1 @@
baz
1
lib/scanner/testdata/dir3/dfile
vendored
Normal file
@@ -0,0 +1 @@
quux
2
lib/scanner/testdata/excludes
vendored
Normal file
@@ -0,0 +1,2 @@
dir2/dfile
#include further-excludes
1
lib/scanner/testdata/further-excludes
vendored
Normal file
@@ -0,0 +1 @@
dir3
403
lib/scanner/walk.go
Normal file
@@ -0,0 +1,403 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package scanner

import (
	"errors"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"
	"unicode/utf8"

	"github.com/syncthing/protocol"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/symlinks"
	"golang.org/x/text/unicode/norm"
)

var maskModePerm os.FileMode

func init() {
	if runtime.GOOS == "windows" {
		// There is no user/group/others in Windows' read-only
		// attribute, and all "w" bits are set in os.FileInfo
		// if the file is not read-only. Do not send these
		// group/others-writable bits to other devices in order to
		// avoid unexpected world-writable files on other platforms.
		maskModePerm = os.ModePerm & 0755
	} else {
		maskModePerm = os.ModePerm
	}
}

type Walker struct {
	// Dir is the base directory for the walk
	Dir string
	// Limit walking to these paths within Dir, or no limit if Subs is empty
	Subs []string
	// BlockSize controls the size of the block used when hashing.
	BlockSize int
	// If Matcher is not nil, it is used to identify files to ignore which were specified by the user.
	Matcher *ignore.Matcher
	// If TempNamer is not nil, it is used to ignore temporary files when walking.
	TempNamer TempNamer
	// Number of hours to keep temporary files for
	TempLifetime time.Duration
	// If CurrentFiler is not nil, it is queried for the current file before rescanning.
	CurrentFiler CurrentFiler
	// If MtimeRepo is not nil, it is used to provide mtimes on systems that don't support setting arbitrary mtimes.
	MtimeRepo *db.VirtualMtimeRepo
	// If IgnorePerms is true, changes to permission bits will not be
	// detected. Scanned files will get zero permission bits and the
	// NoPermissionBits flag set.
	IgnorePerms bool
	// When AutoNormalize is set, file names that are in UTF8 but incorrect
	// normalization form will be corrected.
	AutoNormalize bool
	// Number of routines to use for hashing
	Hashers int
	// Our vector clock id
	ShortID uint64
}

type TempNamer interface {
	// TempName returns a temporary name for the file referred to by path.
	TempName(path string) string
	// IsTemporary returns true if path refers to the name of a temporary file.
	IsTemporary(path string) bool
}

type CurrentFiler interface {
	// CurrentFile returns the file as seen at last scan.
	CurrentFile(name string) (protocol.FileInfo, bool)
}

// Walk returns the list of files found in the local folder by scanning the
// file system. Files are blockwise hashed.
func (w *Walker) Walk() (chan protocol.FileInfo, error) {
	if debug {
		l.Debugln("Walk", w.Dir, w.Subs, w.BlockSize, w.Matcher)
	}

	err := checkDir(w.Dir)
	if err != nil {
		return nil, err
	}

	files := make(chan protocol.FileInfo)
	hashedFiles := make(chan protocol.FileInfo)
	newParallelHasher(w.Dir, w.BlockSize, w.Hashers, hashedFiles, files)

	go func() {
		hashFiles := w.walkAndHashFiles(files)
		if len(w.Subs) == 0 {
			filepath.Walk(w.Dir, hashFiles)
		} else {
			for _, sub := range w.Subs {
				filepath.Walk(filepath.Join(w.Dir, sub), hashFiles)
			}
		}
		close(files)
	}()

	return hashedFiles, nil
}

func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFunc {
	now := time.Now()
	return func(p string, info os.FileInfo, err error) error {
		// Return value used when we are returning early and don't want to
		// process the item. For directories, this means do-not-descend.
		var skip error // nil
		// info nil when error is not nil
		if info != nil && info.IsDir() {
			skip = filepath.SkipDir
		}

		if err != nil {
			if debug {
				l.Debugln("error:", p, info, err)
			}
			return skip
		}

		rn, err := filepath.Rel(w.Dir, p)
		if err != nil {
			if debug {
				l.Debugln("rel error:", p, err)
			}
			return skip
		}

		if rn == "." {
			return nil
		}

		mtime := info.ModTime()
		if w.MtimeRepo != nil {
			mtime = w.MtimeRepo.GetMtime(rn, mtime)
		}

		if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) {
			// A temporary file
			if debug {
				l.Debugln("temporary:", rn)
			}
			if info.Mode().IsRegular() && mtime.Add(w.TempLifetime).Before(now) {
				os.Remove(p)
				if debug {
					l.Debugln("removing temporary:", rn, mtime)
				}
			}
			return nil
		}

		if sn := filepath.Base(rn); sn == ".stignore" || sn == ".stfolder" ||
			strings.HasPrefix(rn, ".stversions") || w.Matcher.Match(rn) {
			// An ignored file
			if debug {
				l.Debugln("ignored:", rn)
			}
			return skip
		}

		if !utf8.ValidString(rn) {
			l.Warnf("File name %q is not in UTF8 encoding; skipping.", rn)
			return skip
		}

		var normalizedRn string
		if runtime.GOOS == "darwin" {
			// Mac OS X file names should always be NFD normalized.
			normalizedRn = norm.NFD.String(rn)
		} else {
			// Every other OS in the known universe uses NFC or just plain
			// doesn't bother to define an encoding. In our case *we* do care,
			// so we enforce NFC regardless.
			normalizedRn = norm.NFC.String(rn)
		}

		if rn != normalizedRn {
			// The file name was not normalized.

			if !w.AutoNormalize {
				// We're not authorized to do anything about it, so complain and skip.

				l.Warnf("File name %q is not in the correct UTF8 normalization form; skipping.", rn)
				return skip
			}

			// We will attempt to normalize it.
			normalizedPath := filepath.Join(w.Dir, normalizedRn)
			if _, err := osutil.Lstat(normalizedPath); os.IsNotExist(err) {
				// Nothing exists with the normalized filename. Good.
				if err = os.Rename(p, normalizedPath); err != nil {
					l.Infof(`Error normalizing UTF8 encoding of file "%s": %v`, rn, err)
					return skip
				}
				l.Infof(`Normalized UTF8 encoding of file name "%s".`, rn)
			} else {
				// There is something already in the way at the normalized
				// file name.
				l.Infof(`File "%s" has UTF8 encoding conflict with another file; ignoring.`, rn)
				return skip
			}

			rn = normalizedRn
		}

		var cf protocol.FileInfo
		var ok bool

		// Index-wise, symlinks are always files, regardless of what the target
		// is, because symlinks carry their target path as their content.
		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
			// If the target is a directory, do NOT descend down there. This
			// will cause files to get tracked, and removing the symlink will
			// as a result remove files in their real location.
			if !symlinks.Supported {
				return skip
			}

			// We always rehash symlinks as they have no modtime or
			// permissions. We check if they point to the old target by
			// checking that their existing blocks match with the blocks in
			// the index.

			target, flags, err := symlinks.Read(p)
			flags = flags & protocol.SymlinkTypeMask
			if err != nil {
				if debug {
					l.Debugln("readlink error:", p, err)
				}
				return skip
			}

			blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0)
			if err != nil {
				if debug {
					l.Debugln("hash link error:", p, err)
				}
				return skip
			}

			if w.CurrentFiler != nil {
				// A symlink is "unchanged", if
				//  - it exists
				//  - it wasn't deleted (because it isn't now)
				//  - it was a symlink
				//  - it wasn't invalid
				//  - the symlink type (file/dir) was the same
				//  - the block list (i.e. hash of target) was the same
				cf, ok = w.CurrentFiler.CurrentFile(rn)
				if ok && !cf.IsDeleted() && cf.IsSymlink() && !cf.IsInvalid() && SymlinkTypeEqual(flags, cf.Flags) && BlocksEqual(cf.Blocks, blocks) {
					return skip
				}
			}

			f := protocol.FileInfo{
				Name:     rn,
				Version:  cf.Version.Update(w.ShortID),
				Flags:    protocol.FlagSymlink | flags | protocol.FlagNoPermBits | 0666,
				Modified: 0,
				Blocks:   blocks,
			}

			if debug {
				l.Debugln("symlink to hash:", p, f)
			}

			fchan <- f

			return skip
		}

		if info.Mode().IsDir() {
			if w.CurrentFiler != nil {
				// A directory is "unchanged", if it
				//  - exists
				//  - has the same permissions as previously, unless we are ignoring permissions
				//  - was not marked deleted (since it apparently exists now)
				//  - was a directory previously (not a file or something else)
				//  - was not a symlink (since it's a directory now)
				//  - was not invalid (since it looks valid now)
				cf, ok = w.CurrentFiler.CurrentFile(rn)
				permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, uint32(info.Mode()))
				if ok && permUnchanged && !cf.IsDeleted() && cf.IsDirectory() && !cf.IsSymlink() && !cf.IsInvalid() {
					return nil
				}
			}

			flags := uint32(protocol.FlagDirectory)
			if w.IgnorePerms {
				flags |= protocol.FlagNoPermBits | 0777
			} else {
				flags |= uint32(info.Mode() & maskModePerm)
			}
			f := protocol.FileInfo{
				Name:     rn,
				Version:  cf.Version.Update(w.ShortID),
				Flags:    flags,
				Modified: mtime.Unix(),
			}
			if debug {
				l.Debugln("dir:", p, f)
			}
			fchan <- f
			return nil
		}

		if info.Mode().IsRegular() {
			curMode := uint32(info.Mode())
			if runtime.GOOS == "windows" && osutil.IsWindowsExecutable(rn) {
				curMode |= 0111
			}

			if w.CurrentFiler != nil {
				// A file is "unchanged", if it
				//  - exists
				//  - has the same permissions as previously, unless we are ignoring permissions
				//  - was not marked deleted (since it apparently exists now)
				//  - had the same modification time as it has now
				//  - was not a directory previously (since it's a file now)
				//  - was not a symlink (since it's a file now)
				//  - was not invalid (since it looks valid now)
				//  - has the same size as previously
				cf, ok = w.CurrentFiler.CurrentFile(rn)
				permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, curMode)
				if ok && permUnchanged && !cf.IsDeleted() && cf.Modified == mtime.Unix() && !cf.IsDirectory() &&
					!cf.IsSymlink() && !cf.IsInvalid() && cf.Size() == info.Size() {
					return nil
				}

				if debug {
					l.Debugln("rescan:", cf, mtime.Unix(), info.Mode()&os.ModePerm)
				}
			}

			var flags = curMode & uint32(maskModePerm)
			if w.IgnorePerms {
				flags = protocol.FlagNoPermBits | 0666
			}

			f := protocol.FileInfo{
				Name:     rn,
				Version:  cf.Version.Update(w.ShortID),
				Flags:    flags,
				Modified: mtime.Unix(),
			}
			if debug {
				l.Debugln("to hash:", p, f)
			}
			fchan <- f
		}

		return nil
	}
}

func checkDir(dir string) error {
	if info, err := osutil.Lstat(dir); err != nil {
		return err
	} else if !info.IsDir() {
		return errors.New(dir + ": not a directory")
	} else if debug {
		l.Debugln("checkDir", dir, info)
	}
	return nil
}

func PermsEqual(a, b uint32) bool {
	switch runtime.GOOS {
	case "windows":
		// There is only writeable and read only, represented for user, group
		// and other equally. We only compare against user.
		return a&0600 == b&0600
	default:
		// All bits count
		return a&0777 == b&0777
	}
}

func SymlinkTypeEqual(disk, index uint32) bool {
	// If the target is missing, Unix never knows what type of symlink it is
	// and Windows always knows even if there is no target. Which means that
	// without this special check a Unix node would be fighting with a Windows
	// node about whether or not the target is known. Basically, if you don't
	// know and someone else knows, just accept it. The fact that you don't
	// know means you are on Unix, and on Unix you don't really care what the
	// target type is. The moment you do know, and if something doesn't match,
	// that will propagate through the cluster.
	if disk&protocol.FlagSymlinkMissingTarget != 0 && index&protocol.FlagSymlinkMissingTarget == 0 {
		return true
	}
	return disk&protocol.SymlinkTypeMask == index&protocol.SymlinkTypeMask
}
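For reference, a minimal sketch of driving the Walker from calling code (not part of the commit; the folder path and the example function name are hypothetical, while BlockSize and Hashers mirror the tests below): Walk returns a channel that is closed once walking and hashing have finished.

package scanner

// Illustrative sketch only, not part of the commit.

import "github.com/syncthing/protocol"

func exampleWalk() ([]protocol.FileInfo, error) {
	w := Walker{
		Dir:       "/path/to/folder",
		BlockSize: 128 * 1024,
		Hashers:   2,
	}

	fchan, err := w.Walk()
	if err != nil {
		// e.g. the directory does not exist or is not a directory
		return nil, err
	}

	var files []protocol.FileInfo
	for f := range fchan {
		// The channel is closed once walking and hashing are done.
		files = append(files, f)
	}
	return files, nil
}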
340
lib/scanner/walk_test.go
Normal file
@@ -0,0 +1,340 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package scanner

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	rdebug "runtime/debug"
	"sort"
	"testing"

	"github.com/syncthing/protocol"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/osutil"
	"golang.org/x/text/unicode/norm"
)

type testfile struct {
	name string
	size int
	hash string
}

type testfileList []testfile

var testdata = testfileList{
	{"afile", 4, "b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"},
	{"dir1", 128, ""},
	{filepath.Join("dir1", "dfile"), 5, "49ae93732fcf8d63fe1cce759664982dbd5b23161f007dba8561862adc96d063"},
	{"dir2", 128, ""},
	{filepath.Join("dir2", "cfile"), 4, "bf07a7fbb825fc0aae7bf4a1177b2b31fcf8a3feeaf7092761e18c859ee52a9c"},
	{"excludes", 37, "df90b52f0c55dba7a7a940affe482571563b1ac57bd5be4d8a0291e7de928e06"},
	{"further-excludes", 5, "7eb0a548094fa6295f7fd9200d69973e5f5ec5c04f2a86d998080ac43ecf89f1"},
}

var correctIgnores = map[string][]string{
	".": {".*", "quux"},
}

func init() {
	// This test runs the risk of entering infinite recursion if it fails.
	// Limit the stack size to 10 megs to crash early in that case instead of
	// potentially taking down the box...
	rdebug.SetMaxStack(10 * 1 << 20)
}

func TestWalkSub(t *testing.T) {
	ignores := ignore.New(false)
	err := ignores.Load("testdata/.stignore")
	if err != nil {
		t.Fatal(err)
	}

	w := Walker{
		Dir:       "testdata",
		Subs:      []string{"dir2"},
		BlockSize: 128 * 1024,
		Matcher:   ignores,
		Hashers:   2,
	}
	fchan, err := w.Walk()
	var files []protocol.FileInfo
	for f := range fchan {
		files = append(files, f)
	}
	if err != nil {
		t.Fatal(err)
	}

	// The directory contains two files, where one is ignored from a higher
	// level. We should see only the directory and one of the files.

	if len(files) != 2 {
		t.Fatalf("Incorrect length %d != 2", len(files))
	}
	if files[0].Name != "dir2" {
		t.Errorf("Incorrect file %v != dir2", files[0])
	}
	if files[1].Name != filepath.Join("dir2", "cfile") {
		t.Errorf("Incorrect file %v != dir2/cfile", files[1])
	}
}

func TestWalk(t *testing.T) {
	ignores := ignore.New(false)
	err := ignores.Load("testdata/.stignore")
	if err != nil {
		t.Fatal(err)
	}
	t.Log(ignores)

	w := Walker{
		Dir:       "testdata",
		BlockSize: 128 * 1024,
		Matcher:   ignores,
		Hashers:   2,
	}

	fchan, err := w.Walk()
	if err != nil {
		t.Fatal(err)
	}

	var tmp []protocol.FileInfo
	for f := range fchan {
		tmp = append(tmp, f)
	}
	sort.Sort(fileList(tmp))
	files := fileList(tmp).testfiles()

	if !reflect.DeepEqual(files, testdata) {
		t.Errorf("Walk returned unexpected data\nExpected: %v\nActual: %v", testdata, files)
	}
}

func TestWalkError(t *testing.T) {
	w := Walker{
		Dir:       "testdata-missing",
		BlockSize: 128 * 1024,
		Hashers:   2,
	}
	_, err := w.Walk()

	if err == nil {
		t.Error("no error from missing directory")
	}

	w = Walker{
		Dir:       "testdata/bar",
		BlockSize: 128 * 1024,
	}
	_, err = w.Walk()

	if err == nil {
		t.Error("no error from non-directory")
	}
}

func TestVerify(t *testing.T) {
	blocksize := 16
	// data should be an even multiple of blocksize long
	data := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut e")
	buf := bytes.NewBuffer(data)

	blocks, err := Blocks(buf, blocksize, 0)
	if err != nil {
		t.Fatal(err)
	}
	if exp := len(data) / blocksize; len(blocks) != exp {
		t.Fatalf("Incorrect number of blocks %d != %d", len(blocks), exp)
	}

	buf = bytes.NewBuffer(data)
	err = Verify(buf, blocksize, blocks)
	t.Log(err)
	if err != nil {
		t.Fatal("Unexpected verify failure", err)
	}

	buf = bytes.NewBuffer(append(data, '\n'))
	err = Verify(buf, blocksize, blocks)
	t.Log(err)
	if err == nil {
		t.Fatal("Unexpected verify success")
	}

	buf = bytes.NewBuffer(data[:len(data)-1])
	err = Verify(buf, blocksize, blocks)
	t.Log(err)
	if err == nil {
		t.Fatal("Unexpected verify success")
	}

	data[42] = 42
	buf = bytes.NewBuffer(data)
	err = Verify(buf, blocksize, blocks)
	t.Log(err)
	if err == nil {
		t.Fatal("Unexpected verify success")
	}
}

func TestNormalization(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("Normalization test not possible on darwin")
		return
	}

	os.RemoveAll("testdata/normalization")
	defer os.RemoveAll("testdata/normalization")

	tests := []string{
		"0-A",            // ASCII A -- accepted
		"1-\xC3\x84",     // NFC 'Ä' -- conflicts with the entry below, accepted
		"1-\x41\xCC\x88", // NFD 'Ä' -- conflicts with the entry above, ignored
		"2-\xC3\x85",     // NFC 'Å' -- accepted
		"3-\x41\xCC\x83", // NFD 'Ã' -- converted to NFC
		"4-\xE2\x98\x95", // U+2615 HOT BEVERAGE (☕) -- accepted
		"5-\xCD\xE2",     // EUC-CN "wài" (外) -- ignored (not UTF8)
	}
	numInvalid := 2

	if runtime.GOOS == "windows" {
		// On Windows, in case 5 the character gets replaced with a
		// replacement character \xEF\xBF\xBD at the point it's written to disk,
		// which means it suddenly becomes valid (sort of).
		numInvalid--
	}

	numValid := len(tests) - numInvalid

	for _, s1 := range tests {
		// Create a directory for each of the interesting strings above
		if err := osutil.MkdirAll(filepath.Join("testdata/normalization", s1), 0755); err != nil {
			t.Fatal(err)
		}

		for _, s2 := range tests {
			// Within each dir, create a file with each of the interesting
			// file names. Ensure that the file doesn't exist when it's
			// created. This detects and fails if there's file name
			// normalization stuff at the filesystem level.
			if fd, err := os.OpenFile(filepath.Join("testdata/normalization", s1, s2), os.O_CREATE|os.O_EXCL, 0644); err != nil {
				t.Fatal(err)
			} else {
				fd.WriteString("test")
				fd.Close()
			}
		}
	}

	// We can normalize a directory name, but we can't descend into it in the
	// same pass due to how filepath.Walk works. So we run the scan twice to
	// make sure it all gets done. In production, things will be correct
	// eventually...

	_, err := walkDir("testdata/normalization")
	if err != nil {
		t.Fatal(err)
	}
	tmp, err := walkDir("testdata/normalization")
	if err != nil {
		t.Fatal(err)
	}

	files := fileList(tmp).testfiles()

	// We should have one file per combination, plus the directories
	// themselves

	expectedNum := numValid*numValid + numValid
	if len(files) != expectedNum {
		t.Errorf("Expected %d files, got %d", expectedNum, len(files))
	}

	// The file names should all be in NFC form.

	for _, f := range files {
		t.Logf("%q (% x) %v", f.name, f.name, norm.NFC.IsNormalString(f.name))
		if !norm.NFC.IsNormalString(f.name) {
			t.Errorf("File name %q is not NFC normalized", f.name)
		}
	}
}

func TestIssue1507(t *testing.T) {
	w := Walker{}
	c := make(chan protocol.FileInfo, 100)
	fn := w.walkAndHashFiles(c)

	fn("", nil, protocol.ErrClosed)
}

func walkDir(dir string) ([]protocol.FileInfo, error) {
	w := Walker{
		Dir:           dir,
		BlockSize:     128 * 1024,
		AutoNormalize: true,
		Hashers:       2,
	}

	fchan, err := w.Walk()
	if err != nil {
		return nil, err
	}

	var tmp []protocol.FileInfo
	for f := range fchan {
		tmp = append(tmp, f)
	}
	sort.Sort(fileList(tmp))

	return tmp, nil
}

type fileList []protocol.FileInfo

func (l fileList) Len() int {
	return len(l)
}

func (l fileList) Less(a, b int) bool {
	return l[a].Name < l[b].Name
}

func (l fileList) Swap(a, b int) {
	l[a], l[b] = l[b], l[a]
}

func (l fileList) testfiles() testfileList {
	testfiles := make(testfileList, len(l))
	for i, f := range l {
		if len(f.Blocks) > 1 {
			panic("simple test case stuff only supports a single block per file")
		}
		testfiles[i] = testfile{name: f.Name, size: int(f.Size())}
		if len(f.Blocks) == 1 {
			testfiles[i].hash = fmt.Sprintf("%x", f.Blocks[0].Hash)
		}
	}
	return testfiles
}

func (l testfileList) String() string {
	var b bytes.Buffer
	b.WriteString("{\n")
	for _, f := range l {
		fmt.Fprintf(&b, " %s (%d bytes): %s\n", f.name, f.size, f.hash)
	}
	b.WriteString("}")
	return b.String()
}