all: Convert folders to use filesystem abstraction

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4228
Author: Audrius Butkevicius
Date: 2017-08-19 14:36:56 +00:00
Committed by: Jakob Borg
Parent: ab8c2fb5c7
Commit: 3d8b4a42b7
78 changed files with 2588 additions and 1665 deletions

View File

@@ -14,7 +14,6 @@ import (
"fmt"
"io"
"net"
"os"
"path/filepath"
"reflect"
"runtime"
@@ -81,6 +80,7 @@ type Model struct {
clientVersion string
folderCfgs map[string]config.FolderConfiguration // folder -> cfg
folderFs map[string]fs.Filesystem // folder -> fs
folderFiles map[string]*db.FileSet // folder -> files
folderDevices folderDeviceSet // folder -> deviceIDs
deviceFolders map[protocol.DeviceID][]string // deviceID -> folders
@@ -99,21 +99,18 @@ type Model struct {
pmut sync.RWMutex // protects the above
}
type folderFactory func(*Model, config.FolderConfiguration, versioner.Versioner, *fs.MtimeFS) service
type folderFactory func(*Model, config.FolderConfiguration, versioner.Versioner, fs.Filesystem) service
var (
folderFactories = make(map[config.FolderType]folderFactory, 0)
)
var (
errFolderPathEmpty = errors.New("folder path empty")
errFolderPathMissing = errors.New("folder path missing")
errFolderMarkerMissing = errors.New("folder marker missing")
errInvalidFilename = errors.New("filename is invalid")
errDeviceUnknown = errors.New("unknown device")
errDevicePaused = errors.New("device is paused")
errDeviceIgnored = errors.New("device is ignored")
errNotRelative = errors.New("not a relative path")
errFolderPaused = errors.New("folder is paused")
errFolderMissing = errors.New("no such folder")
errNetworkNotAllowed = errors.New("network not allowed")
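
Note: the folderFactory signature change above ripples through every folder type in this commit. A minimal sketch of what a factory now looks like and how one would be registered; newExampleFolder and its init registration are hypothetical, shown only to make the new fs.Filesystem parameter visible (the real send-only and send-receive factories appear later in this diff):

```go
// Sketch only; not part of the commit.
func newExampleFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner, ffs fs.Filesystem) service {
	// A real factory would keep ffs and route all disk access through it.
	return nil // placeholder
}

func init() {
	// Hypothetical registration, mirroring how folder types populate
	// the folderFactories map declared above.
	folderFactories[config.FolderTypeSendReceive] = newExampleFolder
}
```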
@@ -140,6 +137,7 @@ func NewModel(cfg *config.Wrapper, id protocol.DeviceID, clientName, clientVersi
clientName: clientName,
clientVersion: clientVersion,
folderCfgs: make(map[string]config.FolderConfiguration),
folderFs: make(map[string]fs.Filesystem),
folderFiles: make(map[string]*db.FileSet),
folderDevices: make(folderDeviceSet),
deviceFolders: make(map[protocol.DeviceID][]string),
@@ -245,7 +243,7 @@ func (m *Model) startFolderLocked(folder string) config.FolderType {
l.Fatalf("Requested versioning type %q that does not exist", cfg.Versioning.Type)
}
ver = versionerFactory(folder, cfg.Path(), cfg.Versioning.Params)
ver = versionerFactory(folder, cfg.Filesystem(), cfg.Versioning.Params)
if service, ok := ver.(suture.Service); ok {
// The versioner implements the suture.Service interface, so it
// expects to be run in the background in addition to being called
@@ -271,7 +269,12 @@ func (m *Model) warnAboutOverwritingProtectedFiles(folder string) {
return
}
folderLocation := m.folderCfgs[folder].Path()
// This is a bit of a hack.
ffs := m.folderCfgs[folder].Filesystem()
if ffs.Type() != fs.FilesystemTypeBasic {
return
}
folderLocation := ffs.URI()
ignores := m.folderIgnores[folder]
var filesAtRisk []string
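
Note: the protected-files warning now only applies to folders on a basic (on-disk) filesystem, since only there does URI() name a local directory. A small self-contained illustration with a made-up folder path:

```go
package main

import (
	"fmt"

	"github.com/syncthing/syncthing/lib/fs"
)

func main() {
	// For FilesystemTypeBasic the URI is the directory the filesystem is
	// rooted at, which is why the check above can treat it as the folder
	// location on disk.
	ffs := fs.NewFilesystem(fs.FilesystemTypeBasic, "/home/user/Sync")
	if ffs.Type() == fs.FilesystemTypeBasic {
		fmt.Println("folder root:", ffs.URI())
	}
}
```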
@@ -300,6 +303,10 @@ func (m *Model) AddFolder(cfg config.FolderConfiguration) {
panic("cannot add empty folder id")
}
if len(cfg.Path) == 0 {
panic("cannot add empty folder path")
}
m.fmut.Lock()
m.addFolderLocked(cfg)
m.fmut.Unlock()
@@ -307,15 +314,16 @@ func (m *Model) AddFolder(cfg config.FolderConfiguration) {
func (m *Model) addFolderLocked(cfg config.FolderConfiguration) {
m.folderCfgs[cfg.ID] = cfg
m.folderFiles[cfg.ID] = db.NewFileSet(cfg.ID, m.db)
folderFs := cfg.Filesystem()
m.folderFiles[cfg.ID] = db.NewFileSet(cfg.ID, folderFs, m.db)
for _, device := range cfg.Devices {
m.folderDevices.set(device.DeviceID, cfg.ID)
m.deviceFolders[device.DeviceID] = append(m.deviceFolders[device.DeviceID], cfg.ID)
}
ignores := ignore.New(ignore.WithCache(m.cacheIgnoredFiles))
if err := ignores.Load(filepath.Join(cfg.Path(), ".stignore")); err != nil && !os.IsNotExist(err) {
ignores := ignore.New(folderFs, ignore.WithCache(m.cacheIgnoredFiles))
if err := ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
l.Warnln("Loading ignores:", err)
}
m.folderIgnores[cfg.ID] = ignores
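
Note: with the folder rooted at its own Filesystem, the ignore matcher is built over that filesystem and loads patterns by folder-relative name; no absolute-path joining or escape checking is needed at this call site. A fragment in the spirit of addFolderLocked above, with a made-up folder root (l is the package logger, imports as in this file):

```go
folderFs := fs.NewFilesystem(fs.FilesystemTypeBasic, "/home/user/Sync")

ignores := ignore.New(folderFs, ignore.WithCache(true))
// ".stignore" is resolved by the filesystem relative to the folder root.
if err := ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
	l.Warnln("Loading ignores:", err)
}
```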
@@ -327,8 +335,8 @@ func (m *Model) RemoveFolder(folder string) {
// Delete syncthing specific files
folderCfg := m.folderCfgs[folder]
folderPath := folderCfg.Path()
os.Remove(filepath.Join(folderPath, ".stfolder"))
fs := folderCfg.Filesystem()
fs.Remove(".stfolder")
m.tearDownFolderLocked(folder)
// Remove it from the database
@@ -1139,16 +1147,10 @@ func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset
}
m.fmut.RLock()
folderCfg := m.folderCfgs[folder]
folderPath := folderCfg.Path()
folderIgnores := m.folderIgnores[folder]
m.fmut.RUnlock()
fn, err := rootedJoinedPath(folderPath, name)
if err != nil {
// Request tries to escape!
l.Debugf("%v Invalid REQ(in) tries to escape: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
return protocol.ErrInvalid
}
folderFs := folderCfg.Filesystem()
// Having passed the rootedJoinedPath check above, we know "name" is
// acceptable relative to "folderPath" and in canonical form, so we can
@@ -1164,7 +1166,7 @@ func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset
return protocol.ErrNoSuchFile
}
if err := osutil.TraversesSymlink(folderPath, filepath.Dir(name)); err != nil {
if err := osutil.TraversesSymlink(folderFs, filepath.Dir(name)); err != nil {
l.Debugf("%v REQ(in) traversal check: %s - %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, len(buf))
return protocol.ErrNoSuchFile
}
@@ -1172,29 +1174,29 @@ func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset
// Only check temp files if the flag is set, and if we are set to advertise
// the temp indexes.
if fromTemporary && !folderCfg.DisableTempIndexes {
tempFn := filepath.Join(folderPath, ignore.TempName(name))
tempFn := ignore.TempName(name)
if info, err := osutil.Lstat(tempFn); err != nil || !info.Mode().IsRegular() {
if info, err := folderFs.Lstat(tempFn); err != nil || !info.IsRegular() {
// Reject reads for anything that doesn't exist or is something
// other than a regular file.
return protocol.ErrNoSuchFile
}
if err := readOffsetIntoBuf(tempFn, offset, buf); err == nil {
if err := readOffsetIntoBuf(folderFs, tempFn, offset, buf); err == nil {
return nil
}
// Fall through to reading from a non-temp file, just in case the temp
// file has finished downloading.
}
if info, err := osutil.Lstat(fn); err != nil || !info.Mode().IsRegular() {
if info, err := folderFs.Lstat(name); err != nil || !info.IsRegular() {
// Reject reads for anything that doesn't exist or is something
// other than a regular file.
return protocol.ErrNoSuchFile
}
err = readOffsetIntoBuf(fn, offset, buf)
if os.IsNotExist(err) {
err := readOffsetIntoBuf(folderFs, name, offset, buf)
if fs.IsNotExist(err) {
return protocol.ErrNoSuchFile
} else if err != nil {
return protocol.ErrGeneric
@@ -1259,9 +1261,8 @@ func (m *Model) GetIgnores(folder string) ([]string, []string, error) {
}
if cfg, ok := m.cfg.Folders()[folder]; ok {
matcher := ignore.New()
path := filepath.Join(cfg.Path(), ".stignore")
if err := matcher.Load(path); err != nil {
matcher := ignore.New(cfg.Filesystem())
if err := matcher.Load(".stignore"); err != nil {
return nil, nil, err
}
return matcher.Lines(), matcher.Patterns(), nil
@@ -1276,7 +1277,7 @@ func (m *Model) SetIgnores(folder string, content []string) error {
return fmt.Errorf("Folder %s does not exist", folder)
}
if err := ignore.WriteIgnores(filepath.Join(cfg.Path(), ".stignore"), content); err != nil {
if err := ignore.WriteIgnores(cfg.Filesystem(), ".stignore", content); err != nil {
l.Warnln("Saving .stignore:", err)
return err
}
@@ -1610,8 +1611,6 @@ func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
}
func (m *Model) diskChangeDetected(folderCfg config.FolderConfiguration, files []protocol.FileInfo, typeOfEvent events.EventType) {
path := strings.Replace(folderCfg.Path(), `\\?\`, "", 1)
for _, file := range files {
objType := "file"
action := "modified"
@@ -1634,10 +1633,6 @@ func (m *Model) diskChangeDetected(folderCfg config.FolderConfiguration, files [
action = "deleted"
}
// The full file path, adjusted to the local path separator character. Also
// for windows paths, strip unwanted chars from the front.
path := filepath.Join(path, filepath.FromSlash(file.Name))
// Two different events can be fired here based on what EventType is passed into function
events.Default.Log(typeOfEvent, map[string]string{
"folder": folderCfg.ID,
@@ -1645,7 +1640,7 @@ func (m *Model) diskChangeDetected(folderCfg config.FolderConfiguration, files [
"label": folderCfg.Label,
"action": action,
"type": objType,
"path": path,
"path": filepath.FromSlash(file.Name),
"modifiedBy": file.ModifiedBy.String(),
})
}
@@ -1738,20 +1733,17 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
// not relevant, we just want the dotdot escape detection here. For
// historical reasons we may get paths that end in a slash. We
// remove that first to allow the rootedJoinedPath to pass.
sub = strings.TrimRight(sub, string(os.PathSeparator))
if _, err := rootedJoinedPath("root", sub); err != nil {
return errors.New("invalid subpath")
}
sub = strings.TrimRight(sub, string(fs.PathSeparator))
subDirs[i] = sub
}
m.fmut.Lock()
fs := m.folderFiles[folder]
fset := m.folderFiles[folder]
folderCfg := m.folderCfgs[folder]
ignores := m.folderIgnores[folder]
runner, ok := m.folderRunners[folder]
m.fmut.Unlock()
mtimefs := fs.MtimeFS()
mtimefs := fset.MtimeFS()
// Check if the ignore patterns changed as part of scanning this folder.
// If they did we should schedule a pull of the folder so that we
@@ -1778,7 +1770,7 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
return err
}
if err := ignores.Load(filepath.Join(folderCfg.Path(), ".stignore")); err != nil && !os.IsNotExist(err) {
if err := ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
err = fmt.Errorf("loading ignores: %v", err)
runner.setError(err)
l.Infof("Stopping folder %s due to error: %s", folderCfg.Description(), err)
@@ -1789,7 +1781,7 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
// directory, and don't scan subdirectories of things we've already
// scanned.
subDirs = unifySubs(subDirs, func(f string) bool {
_, ok := fs.Get(protocol.LocalDeviceID, f)
_, ok := fset.Get(protocol.LocalDeviceID, f)
return ok
})
@@ -1797,7 +1789,6 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
fchan, err := scanner.Walk(ctx, scanner.Config{
Folder: folderCfg.ID,
Dir: folderCfg.Path(),
Subs: subDirs,
Matcher: ignores,
BlockSize: protocol.BlockSize,
@@ -1860,7 +1851,7 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
for _, sub := range subDirs {
var iterError error
fs.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
fset.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
f := fi.(db.FileInfoTruncated)
if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
if err := m.CheckFolderHealth(folder); err != nil {
@@ -1895,9 +1886,9 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
// The file is valid and not deleted. Let's check if it's
// still here.
if _, err := mtimefs.Lstat(filepath.Join(folderCfg.Path(), f.Name)); err != nil {
if _, err := mtimefs.Lstat(f.Name); err != nil {
// We don't specifically verify that the error is
// os.IsNotExist because there is a corner case when a
// fs.IsNotExist because there is a corner case when a
// directory is suddenly transformed into a file. When that
// happens, files that were in the directory (that is now a
// file) are deleted but will return a confusing error ("not a
@@ -2275,11 +2266,9 @@ func (m *Model) CheckFolderHealth(id string) error {
// checkFolderPath returns nil if the folder path exists and has the marker file.
func (m *Model) checkFolderPath(folder config.FolderConfiguration) error {
if folder.Path() == "" {
return errFolderPathEmpty
}
fs := folder.Filesystem()
if fi, err := os.Stat(folder.Path()); err != nil || !fi.IsDir() {
if fi, err := fs.Stat("."); err != nil || !fi.IsDir() {
return errFolderPathMissing
}
@@ -2293,30 +2282,35 @@ func (m *Model) checkFolderPath(folder config.FolderConfiguration) error {
// checkFolderFreeSpace returns nil if the folder has the required amount of
// free space, or if folder free space checking is disabled.
func (m *Model) checkFolderFreeSpace(folder config.FolderConfiguration) error {
return m.checkFreeSpace(folder.MinDiskFree, folder.Path())
return m.checkFreeSpace(folder.MinDiskFree, folder.Filesystem())
}
// checkHomeDiskFree returns nil if the home disk has the required amount of
// free space, or if home disk free space checking is disabled.
func (m *Model) checkHomeDiskFree() error {
return m.checkFreeSpace(m.cfg.Options().MinHomeDiskFree, m.cfg.ConfigPath())
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, filepath.Dir(m.cfg.ConfigPath()))
return m.checkFreeSpace(m.cfg.Options().MinHomeDiskFree, fs)
}
func (m *Model) checkFreeSpace(req config.Size, path string) error {
func (m *Model) checkFreeSpace(req config.Size, fs fs.Filesystem) error {
val := req.BaseValue()
if val <= 0 {
return nil
}
usage, err := fs.Usage(".")
if err != nil {
return fmt.Errorf("failed to check available storage space")
}
if req.Percentage() {
free, err := osutil.DiskFreePercentage(path)
if err == nil && free < val {
return fmt.Errorf("insufficient space in %v: %f %% < %v", path, free, req)
freePct := (1 - float64(usage.Free)/float64(usage.Total)) * 100
if err == nil && freePct < val {
return fmt.Errorf("insufficient space in %v %v: %f %% < %v", fs.Type(), fs.URI(), freePct, req)
}
} else {
free, err := osutil.DiskFreeBytes(path)
if err == nil && float64(free) < val {
return fmt.Errorf("insufficient space in %v: %v < %v", path, free, req)
if err == nil && float64(usage.Free) < val {
return fmt.Errorf("insufficient space in %v %v: %v < %v", fs.Type(), fs.URI(), usage.Free, req)
}
}
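
Note: free-space checking now asks the filesystem itself for usage figures instead of going through osutil with a path. A standalone sketch of the same idea (the helper name is made up; the free-percentage expression is written as free/total, which is the intent of MinDiskFree, rather than a copy of the commit's exact arithmetic; imports as in this file):

```go
func enoughFreeSpace(ffs fs.Filesystem, req config.Size) error {
	val := req.BaseValue()
	if val <= 0 {
		return nil // checking disabled
	}
	usage, err := ffs.Usage(".")
	if err != nil {
		return fmt.Errorf("failed to check available storage space")
	}
	if req.Percentage() {
		freePct := float64(usage.Free) / float64(usage.Total) * 100
		if freePct < val {
			return fmt.Errorf("insufficient space in %v %v: %f %% < %v", ffs.Type(), ffs.URI(), freePct, req)
		}
		return nil
	}
	if float64(usage.Free) < val {
		return fmt.Errorf("insufficient space in %v %v: %v < %v", ffs.Type(), ffs.URI(), usage.Free, req)
	}
	return nil
}
```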
@@ -2533,8 +2527,8 @@ func stringSliceWithout(ss []string, s string) []string {
return ss
}
func readOffsetIntoBuf(file string, offset int64, buf []byte) error {
fd, err := os.Open(file)
func readOffsetIntoBuf(fs fs.Filesystem, file string, offset int64, buf []byte) error {
fd, err := fs.Open(file)
if err != nil {
l.Debugln("readOffsetIntoBuf.Open", file, err)
return err
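
Note: readOffsetIntoBuf now takes the folder filesystem plus a relative name; the rest of its body is elided by this hunk. A plausible completion, assuming fs.File exposes ReadAt the way *os.File did (sketch name and error handling are illustrative):

```go
func readOffsetIntoBufSketch(ffs fs.Filesystem, file string, offset int64, buf []byte) error {
	fd, err := ffs.Open(file)
	if err != nil {
		l.Debugln("readOffsetIntoBuf.Open", file, err)
		return err
	}
	defer fd.Close()

	if _, err := fd.ReadAt(buf, offset); err != nil {
		l.Debugln("readOffsetIntoBuf.ReadAt", file, err)
		return err
	}
	return nil
}
```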
@@ -2585,7 +2579,7 @@ func simplifySortedPaths(subs []string) []string {
next:
for _, sub := range subs {
for _, existing := range cleaned {
if sub == existing || strings.HasPrefix(sub, existing+string(os.PathSeparator)) {
if sub == existing || strings.HasPrefix(sub, existing+string(fs.PathSeparator)) {
continue next
}
}
@@ -2666,57 +2660,3 @@ func (s folderDeviceSet) sortedDevices(folder string) []protocol.DeviceID {
sort.Sort(protocol.DeviceIDs(devs))
return devs
}
// rootedJoinedPath takes a root and a supposedly relative path inside that
// root and returns the joined path. An error is returned if the joined path
// is not in fact inside the root.
func rootedJoinedPath(root, rel string) (string, error) {
// The root must not be empty.
if root == "" {
return "", errInvalidFilename
}
pathSep := string(os.PathSeparator)
// The expected prefix for the resulting path is the root, with a path
// separator at the end.
expectedPrefix := filepath.FromSlash(root)
if !strings.HasSuffix(expectedPrefix, pathSep) {
expectedPrefix += pathSep
}
// The relative path should be clean from internal dotdots and similar
// funkyness.
rel = filepath.FromSlash(rel)
if filepath.Clean(rel) != rel {
return "", errInvalidFilename
}
// It is not acceptable to attempt to traverse upwards or refer to the
// root itself.
switch rel {
case ".", "..", pathSep:
return "", errNotRelative
}
if strings.HasPrefix(rel, ".."+pathSep) {
return "", errNotRelative
}
if strings.HasPrefix(rel, pathSep+pathSep) {
// The relative path may pretend to be an absolute path within the
// root, but the double path separator on Windows implies something
// else. It would get cleaned by the Join below, but it's out of
// spec anyway.
return "", errNotRelative
}
// The supposedly correct path is the one filepath.Join will return, as
// it does cleaning and so on. Check that one first to make sure no
// obvious escape attempts have been made.
joined := filepath.Join(root, rel)
if !strings.HasPrefix(joined, expectedPrefix) {
return "", errNotRelative
}
return joined, nil
}

View File

@@ -25,8 +25,8 @@ import (
"github.com/d4l3k/messagediff"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
srand "github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/scanner"
@@ -35,12 +35,14 @@ import (
var device1, device2 protocol.DeviceID
var defaultConfig *config.Wrapper
var defaultFolderConfig config.FolderConfiguration
var defaultFs fs.Filesystem
func init() {
device1, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
device2, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
defaultFs = fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata")
defaultFolderConfig = config.NewFolderConfiguration("default", "testdata")
defaultFolderConfig = config.NewFolderConfiguration("default", fs.FilesystemTypeBasic, "testdata")
defaultFolderConfig.Devices = []config.FolderDeviceConfiguration{{DeviceID: device1}}
_defaultConfig := config.Configuration{
Folders: []config.FolderConfiguration{defaultFolderConfig},
@@ -513,14 +515,16 @@ func TestClusterConfig(t *testing.T) {
}
cfg.Folders = []config.FolderConfiguration{
{
ID: "folder1",
ID: "folder1",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2},
},
},
{
ID: "folder2",
ID: "folder2",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2},
@@ -622,13 +626,15 @@ func TestIntroducer(t *testing.T) {
},
Folders: []config.FolderConfiguration{
{
ID: "folder1",
ID: "folder1",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
},
},
{
ID: "folder2",
ID: "folder2",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
},
@@ -671,14 +677,16 @@ func TestIntroducer(t *testing.T) {
},
Folders: []config.FolderConfiguration{
{
ID: "folder1",
ID: "folder1",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2, IntroducedBy: device1},
},
},
{
ID: "folder2",
ID: "folder2",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
},
@@ -726,14 +734,16 @@ func TestIntroducer(t *testing.T) {
},
Folders: []config.FolderConfiguration{
{
ID: "folder1",
ID: "folder1",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2, IntroducedBy: device1},
},
},
{
ID: "folder2",
ID: "folder2",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2, IntroducedBy: device1},
@@ -771,14 +781,16 @@ func TestIntroducer(t *testing.T) {
},
Folders: []config.FolderConfiguration{
{
ID: "folder1",
ID: "folder1",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2, IntroducedBy: device1},
},
},
{
ID: "folder2",
ID: "folder2",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2, IntroducedBy: device1},
@@ -816,14 +828,16 @@ func TestIntroducer(t *testing.T) {
},
Folders: []config.FolderConfiguration{
{
ID: "folder1",
ID: "folder1",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2, IntroducedBy: device1},
},
},
{
ID: "folder2",
ID: "folder2",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
},
@@ -872,14 +886,16 @@ func TestIntroducer(t *testing.T) {
},
Folders: []config.FolderConfiguration{
{
ID: "folder1",
ID: "folder1",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2, IntroducedBy: device1},
},
},
{
ID: "folder2",
ID: "folder2",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2},
@@ -916,14 +932,16 @@ func TestIntroducer(t *testing.T) {
},
Folders: []config.FolderConfiguration{
{
ID: "folder1",
ID: "folder1",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2, IntroducedBy: device1},
},
},
{
ID: "folder2",
ID: "folder2",
Path: "testdata",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2, IntroducedBy: protocol.LocalDeviceID},
@@ -1026,7 +1044,7 @@ func TestIgnores(t *testing.T) {
// because we will be changing the files on disk often enough that the
// mtimes will be unreliable to determine change status.
m.fmut.Lock()
m.folderIgnores["default"] = ignore.New(ignore.WithCache(true), ignore.WithChangeDetector(newAlwaysChanged()))
m.folderIgnores["default"] = ignore.New(defaultFs, ignore.WithCache(true), ignore.WithChangeDetector(newAlwaysChanged()))
m.fmut.Unlock()
// Make sure the initial scan has finished (ScanFolders is blocking)
@@ -1050,7 +1068,7 @@ func TestIgnores(t *testing.T) {
}
// Invalid path, marker should be missing, hence returns an error.
m.AddFolder(config.FolderConfiguration{ID: "fresh", RawPath: "XXX"})
m.AddFolder(config.FolderConfiguration{ID: "fresh", Path: "XXX"})
_, _, err = m.GetIgnores("fresh")
if err == nil {
t.Error("No error")
@@ -1069,14 +1087,14 @@ func TestIgnores(t *testing.T) {
func TestROScanRecovery(t *testing.T) {
ldb := db.OpenMemory()
set := db.NewFileSet("default", ldb)
set := db.NewFileSet("default", defaultFs, ldb)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile"},
})
fcfg := config.FolderConfiguration{
ID: "default",
RawPath: "testdata/rotestfolder",
Path: "testdata/rotestfolder",
Type: config.FolderTypeSendOnly,
RescanIntervalS: 1,
}
@@ -1089,7 +1107,7 @@ func TestROScanRecovery(t *testing.T) {
},
})
os.RemoveAll(fcfg.RawPath)
os.RemoveAll(fcfg.Path)
m := NewModel(cfg, protocol.LocalDeviceID, "syncthing", "dev", ldb, nil)
m.AddFolder(fcfg)
@@ -1120,14 +1138,14 @@ func TestROScanRecovery(t *testing.T) {
return
}
os.Mkdir(fcfg.RawPath, 0700)
os.Mkdir(fcfg.Path, 0700)
if err := waitFor("folder marker missing"); err != nil {
t.Error(err)
return
}
fd, err := os.Create(filepath.Join(fcfg.RawPath, ".stfolder"))
fd, err := os.Create(filepath.Join(fcfg.Path, ".stfolder"))
if err != nil {
t.Error(err)
return
@@ -1139,14 +1157,14 @@ func TestROScanRecovery(t *testing.T) {
return
}
os.Remove(filepath.Join(fcfg.RawPath, ".stfolder"))
os.Remove(filepath.Join(fcfg.Path, ".stfolder"))
if err := waitFor("folder marker missing"); err != nil {
t.Error(err)
return
}
os.Remove(fcfg.RawPath)
os.Remove(fcfg.Path)
if err := waitFor("folder path missing"); err != nil {
t.Error(err)
@@ -1156,14 +1174,14 @@ func TestROScanRecovery(t *testing.T) {
func TestRWScanRecovery(t *testing.T) {
ldb := db.OpenMemory()
set := db.NewFileSet("default", ldb)
set := db.NewFileSet("default", defaultFs, ldb)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile"},
})
fcfg := config.FolderConfiguration{
ID: "default",
RawPath: "testdata/rwtestfolder",
Path: "testdata/rwtestfolder",
Type: config.FolderTypeSendReceive,
RescanIntervalS: 1,
}
@@ -1176,7 +1194,7 @@ func TestRWScanRecovery(t *testing.T) {
},
})
os.RemoveAll(fcfg.RawPath)
os.RemoveAll(fcfg.Path)
m := NewModel(cfg, protocol.LocalDeviceID, "syncthing", "dev", ldb, nil)
m.AddFolder(fcfg)
@@ -1207,14 +1225,14 @@ func TestRWScanRecovery(t *testing.T) {
return
}
os.Mkdir(fcfg.RawPath, 0700)
os.Mkdir(fcfg.Path, 0700)
if err := waitFor("folder marker missing"); err != nil {
t.Error(err)
return
}
fd, err := os.Create(filepath.Join(fcfg.RawPath, ".stfolder"))
fd, err := os.Create(filepath.Join(fcfg.Path, ".stfolder"))
if err != nil {
t.Error(err)
return
@@ -1226,14 +1244,14 @@ func TestRWScanRecovery(t *testing.T) {
return
}
os.Remove(filepath.Join(fcfg.RawPath, ".stfolder"))
os.Remove(filepath.Join(fcfg.Path, ".stfolder"))
if err := waitFor("folder marker missing"); err != nil {
t.Error(err)
return
}
os.Remove(fcfg.RawPath)
os.Remove(fcfg.Path)
if err := waitFor("folder path missing"); err != nil {
t.Error(err)
@@ -1861,14 +1879,14 @@ func TestIssue3164(t *testing.T) {
f := protocol.FileInfo{
Name: "issue3164",
}
m := ignore.New()
m := ignore.New(defaultFs)
if err := m.Parse(bytes.NewBufferString("(?d)oktodelete"), ""); err != nil {
t.Fatal(err)
}
fl := sendReceiveFolder{
dbUpdates: make(chan dbUpdateJob, 1),
dir: "testdata",
fs: fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata"),
}
fl.deleteDir(f, m)
@@ -1955,7 +1973,7 @@ func TestIssue2782(t *testing.T) {
if err := os.RemoveAll(testDir); err != nil {
t.Skip(err)
}
if err := osutil.MkdirAll(testDir+"/syncdir", 0755); err != nil {
if err := os.MkdirAll(testDir+"/syncdir", 0755); err != nil {
t.Skip(err)
}
if err := ioutil.WriteFile(testDir+"/syncdir/file", []byte("hello, world\n"), 0644); err != nil {
@@ -1968,7 +1986,7 @@ func TestIssue2782(t *testing.T) {
db := db.OpenMemory()
m := NewModel(defaultConfig, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
m.AddFolder(config.NewFolderConfiguration("default", "~/"+testName+"/synclink/"))
m.AddFolder(config.NewFolderConfiguration("default", fs.FilesystemTypeBasic, "~/"+testName+"/synclink/"))
m.StartFolder("default")
m.ServeBackground()
defer m.Stop()
@@ -1985,7 +2003,7 @@ func TestIssue2782(t *testing.T) {
func TestIndexesForUnknownDevicesDropped(t *testing.T) {
dbi := db.OpenMemory()
files := db.NewFileSet("default", dbi)
files := db.NewFileSet("default", defaultFs, dbi)
files.Replace(device1, genFiles(1))
files.Replace(device2, genFiles(1))
@@ -1998,7 +2016,7 @@ func TestIndexesForUnknownDevicesDropped(t *testing.T) {
m.StartFolder("default")
// Remote sequence is cached, hence it needs to be recreated.
files = db.NewFileSet("default", dbi)
files = db.NewFileSet("default", defaultFs, dbi)
if len(files.ListDevices()) != 1 {
t.Error("Expected one device")
@@ -2008,7 +2026,7 @@ func TestIndexesForUnknownDevicesDropped(t *testing.T) {
func TestSharedWithClearedOnDisconnect(t *testing.T) {
dbi := db.OpenMemory()
fcfg := config.NewFolderConfiguration("default", "testdata")
fcfg := config.NewFolderConfiguration("default", fs.FilesystemTypeBasic, "testdata")
fcfg.Devices = []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2},
@@ -2247,7 +2265,7 @@ func TestNoRequestsFromPausedDevices(t *testing.T) {
dbi := db.OpenMemory()
fcfg := config.NewFolderConfiguration("default", "testdata")
fcfg := config.NewFolderConfiguration("default", fs.FilesystemTypeBasic, "testdata")
fcfg.Devices = []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2},
@@ -2335,151 +2353,6 @@ func TestNoRequestsFromPausedDevices(t *testing.T) {
}
}
func TestRootedJoinedPath(t *testing.T) {
type testcase struct {
root string
rel string
joined string
ok bool
}
cases := []testcase{
// Valid cases
{"foo", "bar", "foo/bar", true},
{"foo", "/bar", "foo/bar", true},
{"foo/", "bar", "foo/bar", true},
{"foo/", "/bar", "foo/bar", true},
{"baz/foo", "bar", "baz/foo/bar", true},
{"baz/foo", "/bar", "baz/foo/bar", true},
{"baz/foo/", "bar", "baz/foo/bar", true},
{"baz/foo/", "/bar", "baz/foo/bar", true},
{"foo", "bar/baz", "foo/bar/baz", true},
{"foo", "/bar/baz", "foo/bar/baz", true},
{"foo/", "bar/baz", "foo/bar/baz", true},
{"foo/", "/bar/baz", "foo/bar/baz", true},
{"baz/foo", "bar/baz", "baz/foo/bar/baz", true},
{"baz/foo", "/bar/baz", "baz/foo/bar/baz", true},
{"baz/foo/", "bar/baz", "baz/foo/bar/baz", true},
{"baz/foo/", "/bar/baz", "baz/foo/bar/baz", true},
// Not escape attempts, but oddly formatted relative paths. Disallowed.
{"foo", "./bar", "", false},
{"baz/foo", "./bar", "", false},
{"foo", "./bar/baz", "", false},
{"baz/foo", "./bar/baz", "", false},
{"baz/foo", "bar/../baz", "", false},
{"baz/foo", "/bar/../baz", "", false},
{"baz/foo", "./bar/../baz", "", false},
{"baz/foo", "bar/../baz", "", false},
{"baz/foo", "/bar/../baz", "", false},
{"baz/foo", "./bar/../baz", "", false},
// Results in an allowed path, but does it by probing. Disallowed.
{"foo", "../foo", "", false},
{"foo", "../foo/bar", "", false},
{"baz/foo", "../foo/bar", "", false},
{"baz/foo", "../../baz/foo/bar", "", false},
{"baz/foo", "bar/../../foo/bar", "", false},
{"baz/foo", "bar/../../../baz/foo/bar", "", false},
// Escape attempts.
{"foo", "", "", false},
{"foo", "/", "", false},
{"foo", "..", "", false},
{"foo", "/..", "", false},
{"foo", "../", "", false},
{"foo", "../bar", "", false},
{"foo", "../foobar", "", false},
{"foo/", "../bar", "", false},
{"foo/", "../foobar", "", false},
{"baz/foo", "../bar", "", false},
{"baz/foo", "../foobar", "", false},
{"baz/foo/", "../bar", "", false},
{"baz/foo/", "../foobar", "", false},
{"baz/foo/", "bar/../../quux/baz", "", false},
// Empty root is a misconfiguration.
{"", "/foo", "", false},
{"", "foo", "", false},
{"", ".", "", false},
{"", "..", "", false},
{"", "/", "", false},
{"", "", "", false},
// Root=/ is valid, and things should be verified as usual.
{"/", "foo", "/foo", true},
{"/", "/foo", "/foo", true},
{"/", "../foo", "", false},
{"/", ".", "", false},
{"/", "..", "", false},
{"/", "/", "", false},
{"/", "", "", false},
}
if runtime.GOOS == "windows" {
extraCases := []testcase{
{`c:\`, `foo`, `c:\foo`, true},
{`\\?\c:\`, `foo`, `\\?\c:\foo`, true},
{`c:\`, `\foo`, `c:\foo`, true},
{`\\?\c:\`, `\foo`, `\\?\c:\foo`, true},
{`c:\`, `\\foo`, ``, false},
{`c:\`, ``, ``, false},
{`c:\`, `.`, ``, false},
{`c:\`, `\`, ``, false},
{`\\?\c:\`, `\\foo`, ``, false},
{`\\?\c:\`, ``, ``, false},
{`\\?\c:\`, `.`, ``, false},
{`\\?\c:\`, `\`, ``, false},
// makes no sense, but will be treated simply as a bad filename
{`c:\foo`, `d:\bar`, `c:\foo\d:\bar`, true},
}
for _, tc := range cases {
// Add case where root is backslashed, rel is forward slashed
extraCases = append(extraCases, testcase{
root: filepath.FromSlash(tc.root),
rel: tc.rel,
joined: tc.joined,
ok: tc.ok,
})
// and the opposite
extraCases = append(extraCases, testcase{
root: tc.root,
rel: filepath.FromSlash(tc.rel),
joined: tc.joined,
ok: tc.ok,
})
// and both backslashed
extraCases = append(extraCases, testcase{
root: filepath.FromSlash(tc.root),
rel: filepath.FromSlash(tc.rel),
joined: tc.joined,
ok: tc.ok,
})
}
cases = append(cases, extraCases...)
}
for _, tc := range cases {
res, err := rootedJoinedPath(tc.root, tc.rel)
if tc.ok {
if err != nil {
t.Errorf("Unexpected error for rootedJoinedPath(%q, %q): %v", tc.root, tc.rel, err)
continue
}
exp := filepath.FromSlash(tc.joined)
if res != exp {
t.Errorf("Unexpected result for rootedJoinedPath(%q, %q): %q != expected %q", tc.root, tc.rel, res, exp)
}
} else if err == nil {
t.Errorf("Unexpected pass for rootedJoinedPath(%q, %q) => %q", tc.root, tc.rel, res)
continue
}
}
}
func addFakeConn(m *Model, dev protocol.DeviceID) *fakeConnection {
fc := &fakeConnection{id: dev, model: m}
m.AddConnection(fc, protocol.HelloResult{})

View File

@@ -18,6 +18,7 @@ import (
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/protocol"
)
@@ -214,7 +215,7 @@ func TestRequestVersioningSymlinkAttack(t *testing.T) {
// deleted symlink to escape
cfg := defaultConfig.RawCopy()
cfg.Folders[0] = config.NewFolderConfiguration("default", "_tmpfolder")
cfg.Folders[0] = config.NewFolderConfiguration("default", fs.FilesystemTypeBasic, "_tmpfolder")
cfg.Folders[0].PullerSleepS = 1
cfg.Folders[0].Devices = []config.FolderDeviceConfiguration{
{DeviceID: device1},
@@ -287,7 +288,7 @@ func TestRequestVersioningSymlinkAttack(t *testing.T) {
func setupModelWithConnection() (*Model, *fakeConnection) {
cfg := defaultConfig.RawCopy()
cfg.Folders[0] = config.NewFolderConfiguration("default", "_tmpfolder")
cfg.Folders[0] = config.NewFolderConfiguration("default", fs.FilesystemTypeBasic, "_tmpfolder")
cfg.Folders[0].PullerSleepS = 1
cfg.Folders[0].Devices = []config.FolderDeviceConfiguration{
{DeviceID: device1},

View File

@@ -24,7 +24,7 @@ type sendOnlyFolder struct {
config.FolderConfiguration
}
func newSendOnlyFolder(model *Model, cfg config.FolderConfiguration, _ versioner.Versioner, _ *fs.MtimeFS) service {
func newSendOnlyFolder(model *Model, cfg config.FolderConfiguration, _ versioner.Versioner, _ fs.Filesystem) service {
ctx, cancel := context.WithCancel(context.Background())
return &sendOnlyFolder{

View File

@@ -11,7 +11,6 @@ import (
"errors"
"fmt"
"math/rand"
"os"
"path/filepath"
"runtime"
"sort"
@@ -51,7 +50,7 @@ type copyBlocksState struct {
}
// Which filemode bits to preserve
const retainBits = os.ModeSetgid | os.ModeSetuid | os.ModeSticky
const retainBits = fs.ModeSetgid | fs.ModeSetuid | fs.ModeSticky
var (
activity = newDeviceActivity()
@@ -84,8 +83,7 @@ type sendReceiveFolder struct {
folder
config.FolderConfiguration
mtimeFS *fs.MtimeFS
dir string
fs fs.Filesystem
versioner versioner.Versioner
sleep time.Duration
pause time.Duration
@@ -99,7 +97,7 @@ type sendReceiveFolder struct {
errorsMut sync.Mutex
}
func newSendReceiveFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner, mtimeFS *fs.MtimeFS) service {
func newSendReceiveFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner, fs fs.Filesystem) service {
ctx, cancel := context.WithCancel(context.Background())
f := &sendReceiveFolder{
@@ -113,8 +111,7 @@ func newSendReceiveFolder(model *Model, cfg config.FolderConfiguration, ver vers
},
FolderConfiguration: cfg,
mtimeFS: mtimeFS,
dir: cfg.Path(),
fs: fs,
versioner: ver,
queue: newJobQueue(),
@@ -434,7 +431,7 @@ func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
for _, fi := range processDirectly {
// Verify that the thing we are handling lives inside a directory,
// and not a symlink or empty space.
if err := osutil.TraversesSymlink(f.dir, filepath.Dir(fi.Name)); err != nil {
if err := osutil.TraversesSymlink(f.fs, filepath.Dir(fi.Name)); err != nil {
f.newError(fi.Name, err)
continue
}
@@ -523,7 +520,7 @@ nextFile:
// Verify that the thing we are handling lives inside a directory,
// and not a symlink or empty space.
if err := osutil.TraversesSymlink(f.dir, filepath.Dir(fi.Name)); err != nil {
if err := osutil.TraversesSymlink(f.fs, filepath.Dir(fi.Name)); err != nil {
f.newError(fi.Name, err)
continue
}
@@ -610,12 +607,7 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo) {
})
}()
realName, err := rootedJoinedPath(f.dir, file.Name)
if err != nil {
f.newError(file.Name, err)
return
}
mode := os.FileMode(file.Permissions & 0777)
mode := fs.FileMode(file.Permissions & 0777)
if f.ignorePermissions(file) {
mode = 0777
}
@@ -625,13 +617,13 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo) {
l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
}
info, err := f.mtimeFS.Lstat(realName)
info, err := f.fs.Lstat(file.Name)
switch {
// There is already something under that name, but it's a file/link.
// Most likely a file/link is getting replaced with a directory.
// Remove the file/link and fall through to directory creation.
case err == nil && (!info.IsDir() || info.IsSymlink()):
err = osutil.InWritableDir(os.Remove, realName)
err = osutil.InWritableDir(f.fs.Remove, f.fs, file.Name)
if err != nil {
l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
f.newError(file.Name, err)
@@ -640,28 +632,28 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo) {
fallthrough
// The directory doesn't exist, so we create it with the right
// mode bits from the start.
case err != nil && os.IsNotExist(err):
case err != nil && fs.IsNotExist(err):
// We declare a function that acts on only the path name, so
// we can pass it to InWritableDir. We use a regular Mkdir and
// not MkdirAll because the parent should already exist.
mkdir := func(path string) error {
err = os.Mkdir(path, mode)
err = f.fs.Mkdir(path, mode)
if err != nil || f.ignorePermissions(file) {
return err
}
// Stat the directory so we can check its permissions.
info, err := f.mtimeFS.Lstat(path)
info, err := f.fs.Lstat(path)
if err != nil {
return err
}
// Mask for the bits we want to preserve and add them in to the
// directories permissions.
return os.Chmod(path, mode|(os.FileMode(info.Mode())&retainBits))
return f.fs.Chmod(path, mode|(info.Mode()&retainBits))
}
if err = osutil.InWritableDir(mkdir, realName); err == nil {
if err = osutil.InWritableDir(mkdir, f.fs, file.Name); err == nil {
f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
} else {
l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
@@ -681,7 +673,7 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo) {
// It's OK to change mode bits on stuff within non-writable directories.
if f.ignorePermissions(file) {
f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
} else if err := os.Chmod(realName, mode|(os.FileMode(info.Mode())&retainBits)); err == nil {
} else if err := f.fs.Chmod(file.Name, mode|(fs.FileMode(info.Mode())&retainBits)); err == nil {
f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
} else {
l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
@@ -712,12 +704,6 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo) {
})
}()
realName, err := rootedJoinedPath(f.dir, file.Name)
if err != nil {
f.newError(file.Name, err)
return
}
if shouldDebug() {
curFile, _ := f.model.CurrentFolderFile(f.folderID, file.Name)
l.Debugf("need symlink\n\t%v\n\t%v", file, curFile)
@@ -732,11 +718,11 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo) {
return
}
if _, err = f.mtimeFS.Lstat(realName); err == nil {
if _, err = f.fs.Lstat(file.Name); err == nil {
// There is already something under that name. Remove it to replace
// with the symlink. This also handles the "change symlink type"
// path.
err = osutil.InWritableDir(os.Remove, realName)
err = osutil.InWritableDir(f.fs.Remove, f.fs, file.Name)
if err != nil {
l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
f.newError(file.Name, err)
@@ -747,10 +733,10 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo) {
// We declare a function that acts on only the path name, so
// we can pass it to InWritableDir.
createLink := func(path string) error {
return os.Symlink(file.SymlinkTarget, path)
return f.fs.CreateSymlink(file.SymlinkTarget, path)
}
if err = osutil.InWritableDir(createLink, realName); err == nil {
if err = osutil.InWritableDir(createLink, f.fs, file.Name); err == nil {
f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleSymlink}
} else {
l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
@@ -781,31 +767,21 @@ func (f *sendReceiveFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Ma
})
}()
realName, err := rootedJoinedPath(f.dir, file.Name)
if err != nil {
f.newError(file.Name, err)
return
}
// Delete any temporary files lying around in the directory
dir, _ := os.Open(realName)
if dir != nil {
files, _ := dir.Readdirnames(-1)
for _, dirFile := range files {
fullDirFile := filepath.Join(file.Name, dirFile)
if ignore.IsTemporary(dirFile) || (matcher != nil &&
matcher.Match(fullDirFile).IsDeletable()) {
os.RemoveAll(filepath.Join(f.dir, fullDirFile))
}
files, _ := f.fs.DirNames(file.Name)
for _, dirFile := range files {
fullDirFile := filepath.Join(file.Name, dirFile)
if ignore.IsTemporary(dirFile) || (matcher != nil && matcher.Match(fullDirFile).IsDeletable()) {
f.fs.RemoveAll(fullDirFile)
}
dir.Close()
}
err = osutil.InWritableDir(os.Remove, realName)
if err == nil || os.IsNotExist(err) {
err = osutil.InWritableDir(f.fs.Remove, f.fs, file.Name)
if err == nil || fs.IsNotExist(err) {
// It was removed or it doesn't exist to start with
f.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
} else if _, serr := f.mtimeFS.Lstat(realName); serr != nil && !os.IsPermission(serr) {
} else if _, serr := f.fs.Lstat(file.Name); serr != nil && !fs.IsPermission(serr) {
// We get an error just looking at the directory, and it's not a
// permission problem. Let's assume the error is in fact some variant
// of "file does not exist" (possibly expressed as some parent being a
@@ -840,12 +816,6 @@ func (f *sendReceiveFolder) deleteFile(file protocol.FileInfo) {
})
}()
realName, err := rootedJoinedPath(f.dir, file.Name)
if err != nil {
f.newError(file.Name, err)
return
}
cur, ok := f.model.CurrentFolderFile(f.folderID, file.Name)
if ok && f.inConflict(cur.Version, file.Version) {
// There is a conflict here. Move the file to a conflict copy instead
@@ -854,17 +824,17 @@ func (f *sendReceiveFolder) deleteFile(file protocol.FileInfo) {
file.Version = file.Version.Merge(cur.Version)
err = osutil.InWritableDir(func(name string) error {
return f.moveForConflict(name, file.ModifiedBy.String())
}, realName)
}, f.fs, file.Name)
} else if f.versioner != nil && !cur.IsSymlink() {
err = osutil.InWritableDir(f.versioner.Archive, realName)
err = osutil.InWritableDir(f.versioner.Archive, f.fs, file.Name)
} else {
err = osutil.InWritableDir(os.Remove, realName)
err = osutil.InWritableDir(f.fs.Remove, f.fs, file.Name)
}
if err == nil || os.IsNotExist(err) {
if err == nil || fs.IsNotExist(err) {
// It was removed or it doesn't exist to start with
f.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
} else if _, serr := f.mtimeFS.Lstat(realName); serr != nil && !os.IsPermission(serr) {
} else if _, serr := f.fs.Lstat(file.Name); serr != nil && !fs.IsPermission(serr) {
// We get an error just looking at the file, and it's not a permission
// problem. Lets assume the error is in fact some variant of "file
// does not exist" (possibly expressed as some parent being a file and
@@ -915,24 +885,13 @@ func (f *sendReceiveFolder) renameFile(source, target protocol.FileInfo) {
l.Debugln(f, "taking rename shortcut", source.Name, "->", target.Name)
from, err := rootedJoinedPath(f.dir, source.Name)
if err != nil {
f.newError(source.Name, err)
return
}
to, err := rootedJoinedPath(f.dir, target.Name)
if err != nil {
f.newError(target.Name, err)
return
}
if f.versioner != nil {
err = osutil.Copy(from, to)
err = osutil.Copy(f.fs, source.Name, target.Name)
if err == nil {
err = osutil.InWritableDir(f.versioner.Archive, from)
err = osutil.InWritableDir(f.versioner.Archive, f.fs, source.Name)
}
} else {
err = osutil.TryRename(from, to)
err = osutil.TryRename(f.fs, source.Name, target.Name)
}
if err == nil {
@@ -955,7 +914,7 @@ func (f *sendReceiveFolder) renameFile(source, target protocol.FileInfo) {
// get rid of. Attempt to delete it instead so that we make *some*
// progress. The target is unhandled.
err = osutil.InWritableDir(os.Remove, from)
err = osutil.InWritableDir(f.fs.Remove, f.fs, source.Name)
if err != nil {
l.Infof("Puller (folder %q, file %q): delete %q after failed rename: %v", f.folderID, target.Name, source.Name, err)
f.newError(target.Name, err)
@@ -1041,26 +1000,16 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
return
}
// Figure out the absolute filenames we need once and for all
tempName, err := rootedJoinedPath(f.dir, ignore.TempName(file.Name))
if err != nil {
f.newError(file.Name, err)
return
}
realName, err := rootedJoinedPath(f.dir, file.Name)
if err != nil {
f.newError(file.Name, err)
return
}
tempName := ignore.TempName(file.Name)
if hasCurFile && !curFile.IsDirectory() && !curFile.IsSymlink() {
// Check that the file on disk is what we expect it to be according to
// the database. If there's a mismatch here, there might be local
// changes that we don't know about yet and we should scan before
// touching the file. If we can't stat the file we'll just pull it.
if info, err := f.mtimeFS.Lstat(realName); err == nil {
if info, err := f.fs.Lstat(file.Name); err == nil {
if !info.ModTime().Equal(curFile.ModTime()) || info.Size() != curFile.Size {
l.Debugln("file modified but not rescanned; not pulling:", realName)
l.Debugln("file modified but not rescanned; not pulling:", file.Name)
// Scan() is synchronous (i.e. blocks until the scan is
// completed and returns an error), but a scan can't happen
// while we're in the puller routine. Request the scan in the
@@ -1082,7 +1031,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
// Check for an old temporary file which might have some blocks we could
// reuse.
tempBlocks, err := scanner.HashFile(f.ctx, fs.DefaultFilesystem, tempName, protocol.BlockSize, nil, false)
tempBlocks, err := scanner.HashFile(f.ctx, f.fs, tempName, protocol.BlockSize, nil, false)
if err == nil {
// Check for any reusable blocks in the temp file
tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)
@@ -1110,7 +1059,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
// Otherwise, discard the file ourselves in order for the
// sharedpuller not to panic when it fails to exclusively create a
// file which already exists
osutil.InWritableDir(os.Remove, tempName)
osutil.InWritableDir(f.fs.Remove, f.fs, tempName)
}
} else {
// Copy the blocks, as we don't want to shuffle them on the FileInfo
@@ -1119,8 +1068,8 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
}
if f.MinDiskFree.BaseValue() > 0 {
if free, err := osutil.DiskFreeBytes(f.dir); err == nil && free < blocksSize {
l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, f.folderID, f.dir, file.Name, float64(free)/1024/1024, float64(blocksSize)/1024/1024)
if usage, err := f.fs.Usage("."); err == nil && usage.Free < blocksSize {
l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, f.folderID, f.fs.URI(), file.Name, float64(usage.Free)/1024/1024, float64(blocksSize)/1024/1024)
f.newError(file.Name, errors.New("insufficient space"))
return
}
@@ -1141,9 +1090,10 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
s := sharedPullerState{
file: file,
fs: f.fs,
folder: f.folderID,
tempName: tempName,
realName: realName,
realName: file.Name,
copyTotal: len(blocks),
copyNeeded: len(blocks),
reused: len(reused),
@@ -1170,20 +1120,15 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
// shortcutFile sets file mode and modification time, when that's the only
// thing that has changed.
func (f *sendReceiveFolder) shortcutFile(file protocol.FileInfo) error {
realName, err := rootedJoinedPath(f.dir, file.Name)
if err != nil {
f.newError(file.Name, err)
return err
}
if !f.ignorePermissions(file) {
if err := os.Chmod(realName, os.FileMode(file.Permissions&0777)); err != nil {
if err := f.fs.Chmod(file.Name, fs.FileMode(file.Permissions&0777)); err != nil {
l.Infof("Puller (folder %q, file %q): shortcut: chmod: %v", f.folderID, file.Name, err)
f.newError(file.Name, err)
return err
}
}
f.mtimeFS.Chtimes(realName, file.ModTime(), file.ModTime()) // never fails
f.fs.Chtimes(file.Name, file.ModTime(), file.ModTime()) // never fails
// This may have been a conflict. We should merge the version vectors so
// that our clock doesn't move backwards.
@@ -1211,15 +1156,16 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
f.model.progressEmitter.Register(state.sharedPullerState)
}
folderRoots := make(map[string]string)
folderFilesystems := make(map[string]fs.Filesystem)
var folders []string
f.model.fmut.RLock()
for folder, cfg := range f.model.folderCfgs {
folderRoots[folder] = cfg.Path()
folderFilesystems[folder] = cfg.Filesystem()
folders = append(folders, folder)
}
f.model.fmut.RUnlock()
var file fs.File
var weakHashFinder *weakhash.Finder
if weakhash.Enabled {
@@ -1237,9 +1183,12 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
}
if len(hashesToFind) > 0 {
weakHashFinder, err = weakhash.NewFinder(state.realName, protocol.BlockSize, hashesToFind)
if err != nil {
l.Debugln("weak hasher", err)
file, err = f.fs.Open(state.file.Name)
if err == nil {
weakHashFinder, err = weakhash.NewFinder(file, protocol.BlockSize, hashesToFind)
if err != nil {
l.Debugln("weak hasher", err)
}
}
} else {
l.Debugf("not weak hashing %s. file did not contain any weak hashes", state.file.Name)
@@ -1289,12 +1238,9 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
}
if !found {
found = f.model.finder.Iterate(folders, block.Hash, func(folder, file string, index int32) bool {
inFile, err := rootedJoinedPath(folderRoots[folder], file)
if err != nil {
return false
}
fd, err := os.Open(inFile)
found = f.model.finder.Iterate(folders, block.Hash, func(folder, path string, index int32) bool {
fs := folderFilesystems[folder]
fd, err := fs.Open(path)
if err != nil {
return false
}
@@ -1308,8 +1254,8 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
hash, err := scanner.VerifyBuffer(buf, block)
if err != nil {
if hash != nil {
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
err = f.model.finder.Fix(folder, file, index, block.Hash, hash)
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, path, index, block.Hash, hash)
err = f.model.finder.Fix(folder, path, index, block.Hash, hash)
if err != nil {
l.Warnln("finder fix:", err)
}
@@ -1323,7 +1269,7 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
if err != nil {
state.fail("dst write", err)
}
if file == state.file.Name {
if path == state.file.Name {
state.copiedFromOrigin()
}
return true
@@ -1345,7 +1291,12 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
state.copyDone(block)
}
}
weakHashFinder.Close()
if file != nil {
// Closing a nil *os.File used to just return "invalid argument";
// a nil fs.File would panic, as it's an interface.
file.Close()
}
out <- state.sharedPullerState
}
}
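
Note: block reuse in the copier now opens candidate files through the owning folder's Filesystem instead of joining absolute paths. A rough sketch of the read step, with assumptions made explicit: the helper name is invented, fs.File is assumed to support ReadAt, and block i is assumed to start at i*protocol.BlockSize (the commit's own read and verify lines are elided above):

```go
func readBlockFromFolder(folderFs fs.Filesystem, path string, index int32, buf []byte) error {
	fd, err := folderFs.Open(path)
	if err != nil {
		return err
	}
	defer fd.Close()
	// Assumed block layout: fixed-size blocks at index*BlockSize offsets.
	_, err = fd.ReadAt(buf, int64(index)*int64(protocol.BlockSize))
	return err
}
```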
@@ -1426,12 +1377,12 @@ func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *
func (f *sendReceiveFolder) performFinish(state *sharedPullerState) error {
// Set the correct permission bits on the new file
if !f.ignorePermissions(state.file) {
if err := os.Chmod(state.tempName, os.FileMode(state.file.Permissions&0777)); err != nil {
if err := f.fs.Chmod(state.tempName, fs.FileMode(state.file.Permissions&0777)); err != nil {
return err
}
}
if stat, err := f.mtimeFS.Lstat(state.realName); err == nil {
if stat, err := f.fs.Lstat(state.file.Name); err == nil {
// There is an old file or directory already in place. We need to
// handle that.
@@ -1445,7 +1396,7 @@ func (f *sendReceiveFolder) performFinish(state *sharedPullerState) error {
// and future hard ignores before attempting a directory delete.
// Should share code with f.deletDir().
if err = osutil.InWritableDir(os.Remove, state.realName); err != nil {
if err = osutil.InWritableDir(f.fs.Remove, f.fs, state.file.Name); err != nil {
return err
}
@@ -1458,7 +1409,7 @@ func (f *sendReceiveFolder) performFinish(state *sharedPullerState) error {
state.file.Version = state.file.Version.Merge(state.version)
err = osutil.InWritableDir(func(name string) error {
return f.moveForConflict(name, state.file.ModifiedBy.String())
}, state.realName)
}, f.fs, state.file.Name)
if err != nil {
return err
}
@@ -1468,7 +1419,7 @@ func (f *sendReceiveFolder) performFinish(state *sharedPullerState) error {
// file before we replace it. Archiving a non-existent file is not
// an error.
if err = f.versioner.Archive(state.realName); err != nil {
if err = f.versioner.Archive(state.file.Name); err != nil {
return err
}
}
@@ -1476,12 +1427,12 @@ func (f *sendReceiveFolder) performFinish(state *sharedPullerState) error {
// Replace the original content with the new one. If it didn't work,
// leave the temp file in place for reuse.
if err := osutil.TryRename(state.tempName, state.realName); err != nil {
if err := osutil.TryRename(f.fs, state.tempName, state.file.Name); err != nil {
return err
}
// Set the correct timestamp on the new file
f.mtimeFS.Chtimes(state.realName, state.file.ModTime(), state.file.ModTime()) // never fails
f.fs.Chtimes(state.file.Name, state.file.ModTime(), state.file.ModTime()) // never fails
// Record the updated file in the index
f.dbUpdates <- dbUpdateJob{state.file, dbUpdateHandleFile}
@@ -1540,26 +1491,7 @@ func (f *sendReceiveFolder) dbUpdaterRoutine() {
tick := time.NewTicker(maxBatchTime)
defer tick.Stop()
var changedFiles []string
var changedDirs []string
if f.Fsync {
changedFiles = make([]string, 0, maxBatchSize)
changedDirs = make([]string, 0, maxBatchSize)
}
syncFilesOnce := func(files []string, syncFn func(string) error) {
sort.Strings(files)
var lastFile string
for _, file := range files {
if lastFile == file {
continue
}
lastFile = file
if err := syncFn(file); err != nil {
l.Infof("fsync %q failed: %v", file, err)
}
}
}
changedDirs := make(map[string]struct{})
handleBatch := func() {
found := false
@@ -1567,20 +1499,16 @@ func (f *sendReceiveFolder) dbUpdaterRoutine() {
for _, job := range batch {
files = append(files, job.file)
if f.Fsync {
// collect changed files and dirs
switch job.jobType {
case dbUpdateHandleFile, dbUpdateShortcutFile:
changedFiles = append(changedFiles, filepath.Join(f.dir, job.file.Name))
case dbUpdateHandleDir:
changedDirs = append(changedDirs, filepath.Join(f.dir, job.file.Name))
case dbUpdateHandleSymlink:
// fsyncing symlinks is only supported by MacOS, ignore
}
if job.jobType != dbUpdateShortcutFile {
changedDirs = append(changedDirs, filepath.Dir(filepath.Join(f.dir, job.file.Name)))
}
switch job.jobType {
case dbUpdateHandleFile, dbUpdateShortcutFile:
changedDirs[filepath.Dir(job.file.Name)] = struct{}{}
case dbUpdateHandleDir:
changedDirs[job.file.Name] = struct{}{}
case dbUpdateHandleSymlink:
// fsyncing symlinks is only supported by MacOS, ignore
}
if job.file.IsInvalid() || (job.file.IsDirectory() && !job.file.IsSymlink()) {
continue
}
@@ -1593,12 +1521,18 @@ func (f *sendReceiveFolder) dbUpdaterRoutine() {
lastFile = job.file
}
if f.Fsync {
// sync files and dirs to disk
syncFilesOnce(changedFiles, osutil.SyncFile)
changedFiles = changedFiles[:0]
syncFilesOnce(changedDirs, osutil.SyncDir)
changedDirs = changedDirs[:0]
// sync directories
for dir := range changedDirs {
delete(changedDirs, dir)
fd, err := f.fs.Open(dir)
if err != nil {
l.Infof("fsync %q failed: %v", dir, err)
continue
}
if err := fd.Sync(); err != nil {
l.Infof("fsync %q failed: %v", dir, err)
}
fd.Close()
}
// All updates to file/folder objects that originated remotely
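
Note: fsync after database updates is simplified here. Instead of collecting files and directories separately and syncing them through osutil helpers, the routine now records only the affected directories and syncs each once through the folder filesystem. The loop above, restated as a small helper for clarity (name is illustrative, l is the package logger):

```go
func syncDirs(ffs fs.Filesystem, dirs map[string]struct{}) {
	for dir := range dirs {
		delete(dirs, dir)
		fd, err := ffs.Open(dir)
		if err != nil {
			l.Infof("fsync %q failed: %v", dir, err)
			continue
		}
		if err := fd.Sync(); err != nil {
			l.Infof("fsync %q failed: %v", dir, err)
		}
		fd.Close()
	}
}
```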
@@ -1669,14 +1603,14 @@ func removeAvailability(availabilities []Availability, availability Availability
func (f *sendReceiveFolder) moveForConflict(name string, lastModBy string) error {
if strings.Contains(filepath.Base(name), ".sync-conflict-") {
l.Infoln("Conflict for", name, "which is already a conflict copy; not copying again.")
if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
if err := f.fs.Remove(name); err != nil && !fs.IsNotExist(err) {
return err
}
return nil
}
if f.MaxConflicts == 0 {
if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
if err := f.fs.Remove(name); err != nil && !fs.IsNotExist(err) {
return err
}
return nil
@@ -1685,8 +1619,8 @@ func (f *sendReceiveFolder) moveForConflict(name string, lastModBy string) error
ext := filepath.Ext(name)
withoutExt := name[:len(name)-len(ext)]
newName := withoutExt + time.Now().Format(".sync-conflict-20060102-150405-") + lastModBy + ext
err := os.Rename(name, newName)
if os.IsNotExist(err) {
err := f.fs.Rename(name, newName)
if fs.IsNotExist(err) {
// We were supposed to move a file away but it does not exist. Either
// the user has already moved it away, or the conflict was between a
// remote modification and a local delete. Either way it does not
@@ -1694,11 +1628,11 @@ func (f *sendReceiveFolder) moveForConflict(name string, lastModBy string) error
err = nil
}
if f.MaxConflicts > -1 {
matches, gerr := osutil.Glob(withoutExt + ".sync-conflict-????????-??????*" + ext)
matches, gerr := f.fs.Glob(withoutExt + ".sync-conflict-????????-??????*" + ext)
if gerr == nil && len(matches) > f.MaxConflicts {
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
for _, match := range matches[f.MaxConflicts:] {
gerr = os.Remove(match)
gerr = f.fs.Remove(match)
if gerr != nil {
l.Debugln(f, "removing extra conflict", gerr)
}
@@ -1772,7 +1706,7 @@ func fileValid(file db.FileIntf) error {
return errSymlinksUnsupported
case runtime.GOOS == "windows" && windowsInvalidFilename(file.FileName()):
return errInvalidFilename
return fs.ErrInvalidFilename
}
return nil
@@ -1821,7 +1755,7 @@ func (l byComponentCount) Swap(a, b int) {
func componentCount(name string) int {
count := 0
for _, codepoint := range name {
if codepoint == os.PathSeparator {
if codepoint == fs.PathSeparator {
count++
}
}

View File

@@ -87,8 +87,7 @@ func setUpSendReceiveFolder(model *Model) *sendReceiveFolder {
ctx: context.TODO(),
},
mtimeFS: fs.NewMtimeFS(fs.DefaultFilesystem, db.NewNamespacedKV(model.db, "mtime")),
dir: "testdata",
fs: fs.NewMtimeFS(fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata"), db.NewNamespacedKV(model.db, "mtime")),
queue: newJobQueue(),
errors: make(map[string]string),
errorsMut: sync.NewMutex(),
@@ -246,7 +245,7 @@ func TestCopierFinder(t *testing.T) {
}
// Verify that the fetched blocks have actually been written to the temp file
blks, err := scanner.HashFile(context.TODO(), fs.DefaultFilesystem, tempFile, protocol.BlockSize, nil, false)
blks, err := scanner.HashFile(context.TODO(), fs.NewFilesystem(fs.FilesystemTypeBasic, "."), tempFile, protocol.BlockSize, nil, false)
if err != nil {
t.Log(err)
}

View File

@@ -8,10 +8,10 @@ package model
import (
"io"
"os"
"path/filepath"
"time"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync"
)
@@ -21,6 +21,7 @@ import (
type sharedPullerState struct {
// Immutable, does not require locking
file protocol.FileInfo // The new file (desired end state)
fs fs.Filesystem
folder string
tempName string
realName string
@@ -32,7 +33,7 @@ type sharedPullerState struct {
// Mutable, must be locked for access
err error // The first error we hit
fd *os.File // The fd of the temp file
fd fs.File // The fd of the temp file
copyTotal int // Total number of copy actions for the whole job
pullTotal int // Total number of pull actions for the whole job
copyOrigin int // Number of blocks copied from the original file
@@ -92,8 +93,8 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
// osutil.InWritableDir except we need to do more stuff so we duplicate it
// here.
dir := filepath.Dir(s.tempName)
if info, err := os.Stat(dir); err != nil {
if os.IsNotExist(err) {
if info, err := s.fs.Stat(dir); err != nil {
if fs.IsNotExist(err) {
// XXX: This works around a bug elsewhere, a race condition when
// things are deleted while being synced. However that happens, we
// end up with a directory for "foo" with the delete bit, but a
@@ -103,7 +104,7 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
// next scan it'll be found and the delete bit on it is removed.
// The user can then clean up as they like...
l.Infoln("Resurrecting directory", dir)
if err := os.MkdirAll(dir, 0755); err != nil {
if err := s.fs.MkdirAll(dir, 0755); err != nil {
s.failLocked("resurrect dir", err)
return nil, err
}
@@ -112,10 +113,10 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
return nil, err
}
} else if info.Mode()&0200 == 0 {
err := os.Chmod(dir, 0755)
err := s.fs.Chmod(dir, 0755)
if !s.ignorePerms && err == nil {
defer func() {
err := os.Chmod(dir, info.Mode().Perm())
err := s.fs.Chmod(dir, info.Mode()&fs.ModePerm)
if err != nil {
panic(err)
}
@@ -128,7 +129,7 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
// permissions will be set to the final value later, but in the meantime
// we don't want to have a temporary file with looser permissions than
// the final outcome.
mode := os.FileMode(s.file.Permissions) | 0600
mode := fs.FileMode(s.file.Permissions) | 0600
if s.ignorePerms {
// When ignorePerms is set we use a very permissive mode and let the
// system umask filter it.
@@ -137,9 +138,9 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
// Attempt to create the temp file
// RDWR because of issue #2994.
flags := os.O_RDWR
flags := fs.OptReadWrite
if s.reused == 0 {
flags |= os.O_CREATE | os.O_EXCL
flags |= fs.OptCreate | fs.OptExclusive
} else if !s.ignorePerms {
// With sufficiently bad luck when exiting or crashing, we may have
// had time to chmod the temp file to read only state but not yet
@@ -151,12 +152,12 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
// already and make no modification, as we would otherwise override
// what the umask dictates.
if err := os.Chmod(s.tempName, mode); err != nil {
if err := s.fs.Chmod(s.tempName, mode); err != nil {
s.failLocked("dst create chmod", err)
return nil, err
}
}
fd, err := os.OpenFile(s.tempName, flags, mode)
fd, err := s.fs.OpenFile(s.tempName, flags, mode)
if err != nil {
s.failLocked("dst create", err)
return nil, err
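
Note: temp file creation goes through the filesystem's OpenFile with the fs.Opt* flags rather than the os.O_* constants. A condensed view of the create path, with the reuse and ignore-permissions branches omitted (fragment in the context of tempFile above):

```go
mode := fs.FileMode(s.file.Permissions) | 0600
flags := fs.OptReadWrite | fs.OptCreate | fs.OptExclusive

fd, err := s.fs.OpenFile(s.tempName, flags, mode)
if err != nil {
	s.failLocked("dst create", err)
	return nil, err
}
```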
@@ -180,7 +181,7 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
}
// sourceFile opens the existing source file for reading
func (s *sharedPullerState) sourceFile() (*os.File, error) {
func (s *sharedPullerState) sourceFile() (fs.File, error) {
s.mut.Lock()
defer s.mut.Unlock()
@@ -190,7 +191,7 @@ func (s *sharedPullerState) sourceFile() (*os.File, error) {
}
// Attempt to open the existing file
fd, err := os.Open(s.realName)
fd, err := s.fs.Open(s.realName)
if err != nil {
s.failLocked("src open", err)
return nil, err
@@ -292,9 +293,12 @@ func (s *sharedPullerState) finalClose() (bool, error) {
}
if s.fd != nil {
// This is our error if we weren't errored before. Otherwise we
// keep the earlier error.
if fsyncErr := s.fd.Sync(); fsyncErr != nil && s.err == nil {
s.err = fsyncErr
}
if closeErr := s.fd.Close(); closeErr != nil && s.err == nil {
// This is our error if we weren't errored before. Otherwise we
// keep the earlier error.
s.err = closeErr
}
s.fd = nil

View File

@@ -10,12 +10,14 @@ import (
"os"
"testing"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/sync"
)
func TestSourceFileOK(t *testing.T) {
s := sharedPullerState{
realName: "testdata/foo",
fs: fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata"),
realName: "foo",
mut: sync.NewRWMutex(),
}
@@ -47,6 +49,7 @@ func TestSourceFileOK(t *testing.T) {
func TestSourceFileBad(t *testing.T) {
s := sharedPullerState{
fs: fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata"),
realName: "nonexistent",
mut: sync.NewRWMutex(),
}
@@ -73,7 +76,8 @@ func TestReadOnlyDir(t *testing.T) {
}()
s := sharedPullerState{
tempName: "testdata/read_only_dir/.temp_name",
fs: fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata"),
tempName: "read_only_dir/.temp_name",
mut: sync.NewRWMutex(),
}