all: Revert simultaneously walk fs and db on scan (fixes #4756) (#4757)

This reverts commit 6d3f9d5154.
Simon Frei authored on 2018-02-14 08:59:46 +01:00, committed by Jakob Borg
parent b57c9b6af5, commit 68c1b2dd47
9 changed files with 261 additions and 680 deletions


@@ -1395,21 +1395,14 @@ func (m *Model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo
 	return fs.GetGlobal(file)
 }
 
-type haveWalker struct {
-	fset *db.FileSet
+type cFiler struct {
+	m *Model
+	r string
 }
 
-func (h haveWalker) Walk(prefix string, ctx context.Context, out chan<- protocol.FileInfo) {
-	ctxChan := ctx.Done()
-	h.fset.WithPrefixedHave(protocol.LocalDeviceID, prefix, func(fi db.FileIntf) bool {
-		f := fi.(protocol.FileInfo)
-		select {
-		case out <- f:
-		case <-ctxChan:
-			return false
-		}
-		return true
-	})
+// Implements scanner.CurrentFiler
+func (cf cFiler) CurrentFile(file string) (protocol.FileInfo, bool) {
+	return cf.m.CurrentFolderFile(cf.r, file)
 }
 
 // Connection returns the current connection for device, and a boolean whether a connection was found.
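
The restored cFiler type satisfies the scanner's CurrentFiler interface: the walker asks it for the last known FileInfo of each path (delegating to Model.CurrentFolderFile) so it can compare on-disk metadata against what was previously scanned and only rehash files that changed. A rough, self-contained sketch of that lookup pattern follows; the trimmed FileInfo fields and the in-memory memFiler index are illustrative stand-ins, not Syncthing's actual types.

package main

import "fmt"

// FileInfo is a trimmed stand-in for protocol.FileInfo.
type FileInfo struct {
	Name      string
	ModifiedS int64
	Size      int64
}

// CurrentFiler mirrors the scanner.CurrentFiler shape restored by this
// revert: return the last known entry for a name, plus whether one exists.
type CurrentFiler interface {
	CurrentFile(name string) (FileInfo, bool)
}

// memFiler is a hypothetical in-memory index standing in for the database
// lookup that cFiler delegates to.
type memFiler map[string]FileInfo

func (m memFiler) CurrentFile(name string) (FileInfo, bool) {
	fi, ok := m[name]
	return fi, ok
}

// needsRescan shows the kind of decision a walker can make with the lookup:
// rehash only when on-disk metadata differs from the stored entry.
func needsRescan(cf CurrentFiler, name string, modified, size int64) bool {
	old, ok := cf.CurrentFile(name)
	return !ok || old.ModifiedS != modified || old.Size != size
}

func main() {
	idx := memFiler{"docs/readme.txt": {Name: "docs/readme.txt", ModifiedS: 100, Size: 42}}
	fmt.Println(needsRescan(idx, "docs/readme.txt", 100, 42)) // false: unchanged
	fmt.Println(needsRescan(idx, "docs/readme.txt", 200, 42)) // true: modified
}
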
@@ -1962,14 +1955,13 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
 	runner.setState(FolderScanning)
 
-	haveWalker := haveWalker{fset}
-	rchan := scanner.Walk(ctx, scanner.Config{
+	fchan := scanner.Walk(ctx, scanner.Config{
 		Folder:        folderCfg.ID,
 		Subs:          subDirs,
 		Matcher:       ignores,
 		BlockSize:     protocol.BlockSize,
 		TempLifetime:  time.Duration(m.cfg.Options().KeepTemporariesH) * time.Hour,
-		Have:          haveWalker,
+		CurrentFiler:  cFiler{m, folder},
 		Filesystem:    mtimefs,
 		IgnorePerms:   folderCfg.IgnorePerms,
 		AutoNormalize: folderCfg.AutoNormalize,
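
In the reverted configuration the scanner walked the filesystem and the database side by side, receiving the database contents through the Have field and emitting old/new pairs on rchan. With CurrentFiler restored, scanner.Walk only emits changed files on fchan, which the model drains as a plain channel. A minimal sketch of that producer/consumer shape, using illustrative names rather than Syncthing's real Walk signature:

package main

import (
	"context"
	"fmt"
)

// file is a trimmed stand-in for protocol.FileInfo.
type file struct{ Name string }

// walk sketches the channel-returning pattern used by scanner.Walk: the
// returned channel is closed when the walk finishes or the context is
// cancelled, so the caller can simply range over it.
func walk(ctx context.Context, names []string) <-chan file {
	out := make(chan file)
	go func() {
		defer close(out)
		for _, n := range names {
			select {
			case out <- file{Name: n}:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The consumer ranges over the channel, as internalScanFolderSubdirs
	// does with fchan below.
	for f := range walk(ctx, []string{"a", "b", "c"}) {
		fmt.Println("scanned:", f.Name)
	}
}
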
@@ -1986,17 +1978,6 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
 	batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles)
 	batchSizeBytes := 0
 	changes := 0
-	checkBatch := func() error {
-		if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
-			if err := runner.CheckHealth(); err != nil {
-				return err
-			}
-			m.updateLocalsFromScanning(folder, batch)
-			batch = batch[:0]
-			batchSizeBytes = 0
-		}
-		return nil
-	}
 
 	// Schedule a pull after scanning, but only if we actually detected any
 	// changes.
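
The checkBatch closure removed here, and the inline checks that replace it, implement the same batching rule: accumulate scan results until a file-count or byte-size limit is hit, verify folder health, flush with updateLocalsFromScanning, and reset. A small sketch of that pattern; the limits and the flush callback are assumed stand-ins for maxBatchSizeFiles/maxBatchSizeBytes and m.updateLocalsFromScanning.

package main

import "fmt"

// batcher sketches the flush-when-full pattern from the scan loop.
type batcher struct {
	maxItems int
	maxBytes int
	items    []string
	bytes    int
	flush    func([]string) error
}

// add flushes first if the batch is full, mirroring the order of
// operations in checkBatch, then appends the new item.
func (b *batcher) add(item string, size int) error {
	if len(b.items) == b.maxItems || b.bytes > b.maxBytes {
		if err := b.flush(b.items); err != nil {
			return err // e.g. a failed health check stops the scan
		}
		b.items = b.items[:0]
		b.bytes = 0
	}
	b.items = append(b.items, item)
	b.bytes += size
	return nil
}

func main() {
	b := &batcher{
		maxItems: 2,
		maxBytes: 1 << 20,
		flush: func(items []string) error {
			fmt.Println("flushing", len(items), "items")
			return nil
		},
	}
	for _, name := range []string{"a", "b", "c", "d", "e"} {
		if err := b.add(name, 100); err != nil {
			fmt.Println("stopping scan:", err)
			return
		}
	}
	// Flush whatever remains, as the code after the loop does.
	_ = b.flush(b.items)
}
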
@@ -2006,49 +1987,98 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
 		}
 	}()
 
-	var delDirStack []protocol.FileInfo
-	for r := range rchan {
-		if err := checkBatch(); err != nil {
-			l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), err)
-			return err
-		}
-
-		// Append deleted dirs from stack if the current file isn't a child,
-		// which means all children were already processed.
-		for len(delDirStack) != 0 && !strings.HasPrefix(r.New.Name, delDirStack[len(delDirStack)-1].Name+string(fs.PathSeparator)) {
-			lastDelDir := delDirStack[len(delDirStack)-1]
-			batch = append(batch, lastDelDir)
-			batchSizeBytes += lastDelDir.ProtoSize()
-			changes++
-			if err := checkBatch(); err != nil {
-				l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), err)
-				return err
-			}
-			delDirStack = delDirStack[:len(delDirStack)-1]
-		}
-
-		// Delay appending deleted dirs until all its children are processed
-		if r.Old.IsDirectory() && (r.New.Deleted || !r.New.IsDirectory()) {
-			delDirStack = append(delDirStack, r.New)
-			continue
-		}
-
-		l.Debugln("Appending", r)
-		batch = append(batch, r.New)
-		batchSizeBytes += r.New.ProtoSize()
-		changes++
-	}
-
-	// Append remaining deleted dirs.
-	for i := len(delDirStack) - 1; i >= 0; i-- {
-		if err := checkBatch(); err != nil {
-			l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), err)
-			return err
-		}
-		batch = append(batch, delDirStack[i])
-		batchSizeBytes += delDirStack[i].ProtoSize()
-		changes++
-	}
+	for f := range fchan {
+		if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
+			if err := runner.CheckHealth(); err != nil {
+				l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), err)
+				return err
+			}
+			m.updateLocalsFromScanning(folder, batch)
+			batch = batch[:0]
+			batchSizeBytes = 0
+		}
+
+		batch = append(batch, f)
+		batchSizeBytes += f.ProtoSize()
+		changes++
+	}
+
+	if err := runner.CheckHealth(); err != nil {
+		l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), err)
+		return err
+	} else if len(batch) > 0 {
+		m.updateLocalsFromScanning(folder, batch)
+	}
+
+	if len(subDirs) == 0 {
+		// If we have no specific subdirectories to traverse, set it to one
+		// empty prefix so we traverse the entire folder contents once.
+		subDirs = []string{""}
+	}
+
+	// Do a scan of the database for each prefix, to check for deleted and
+	// ignored files.
+	batch = batch[:0]
+	batchSizeBytes = 0
+	for _, sub := range subDirs {
+		var iterError error
+
+		fset.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
+			f := fi.(db.FileInfoTruncated)
+			if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
+				if err := runner.CheckHealth(); err != nil {
+					iterError = err
+					return false
+				}
+				m.updateLocalsFromScanning(folder, batch)
+				batch = batch[:0]
+				batchSizeBytes = 0
+			}
+
+			switch {
+			case !f.IsInvalid() && ignores.Match(f.Name).IsIgnored():
+				// File was valid at last pass but has been ignored. Set invalid bit.
+				l.Debugln("setting invalid bit on ignored", f)
+				nf := f.ConvertToInvalidFileInfo(m.id.Short())
+				batch = append(batch, nf)
+				batchSizeBytes += nf.ProtoSize()
+				changes++
+
+			case !f.IsInvalid() && !f.IsDeleted():
+				// The file is valid and not deleted. Lets check if it's
+				// still here.
+				if _, err := mtimefs.Lstat(f.Name); err != nil {
+					// We don't specifically verify that the error is
+					// fs.IsNotExist because there is a corner case when a
+					// directory is suddenly transformed into a file. When that
+					// happens, files that were in the directory (that is now a
+					// file) are deleted but will return a confusing error ("not a
+					// directory") when we try to Lstat() them.
+					nf := protocol.FileInfo{
+						Name:       f.Name,
+						Type:       f.Type,
+						Size:       0,
+						ModifiedS:  f.ModifiedS,
+						ModifiedNs: f.ModifiedNs,
+						ModifiedBy: m.id.Short(),
+						Deleted:    true,
+						Version:    f.Version.Update(m.shortID),
+					}
+					batch = append(batch, nf)
+					batchSizeBytes += nf.ProtoSize()
+					changes++
+				}
+			}
+			return true
+		})
+
+		if iterError != nil {
+			l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), iterError)
+			return iterError
+		}
+	}
+
 	if err := runner.CheckHealth(); err != nil {
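
Taken together, the restored code scans in two passes: the filesystem walk above reports new and changed files, then a per-prefix sweep of the database finds entries that have become ignored or have vanished from disk and records them as invalid or deleted. A rough sketch of the deletion sweep under simplified types; the slice-based database and the plain os.Lstat over a root path are assumptions for illustration, where Syncthing iterates with fset.WithPrefixedHaveTruncated over the folder's mtimefs instead.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// record is a trimmed stand-in for a db.FileInfoTruncated entry.
type record struct {
	Name    string
	Deleted bool
	Invalid bool
}

// sweepDeleted walks the stored entries and turns anything that is still
// marked present but no longer Lstat-able into a deletion record. Any
// Lstat error counts as gone, mirroring the comment in the diff about a
// directory turning into a file ("not a directory").
func sweepDeleted(entries []record, root string) []record {
	var deletions []record
	for _, r := range entries {
		if r.Deleted || r.Invalid {
			continue // already accounted for in the database
		}
		if _, err := os.Lstat(filepath.Join(root, r.Name)); err != nil {
			deletions = append(deletions, record{Name: r.Name, Deleted: true})
		}
	}
	return deletions
}

func main() {
	db := []record{{Name: "keep.txt"}, {Name: "gone.txt"}}
	for _, d := range sweepDeleted(db, "/tmp/example-folder") {
		fmt.Println("marking deleted:", d.Name)
	}
}
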