Cancel a running scan

Jakob Borg 2015-11-13 15:00:32 +01:00
parent 73285cadb6
commit 9fbdb6b305
3 changed files with 70 additions and 25 deletions

View File

@@ -1290,6 +1290,11 @@ nextSub:
     }
     subs = unifySubs
 
+    // The cancel channel is closed whenever we return (such as from an error),
+    // to signal the potentially still running walker to stop.
+    cancel := make(chan struct{})
+    defer close(cancel)
+
     w := &scanner.Walker{
         Folder: folderCfg.ID,
         Dir: folderCfg.Path(),
@@ -1305,6 +1310,7 @@ nextSub:
         Hashers: m.numHashers(folder),
         ShortID: m.shortID,
         ProgressTickIntervalS: folderCfg.ScanProgressIntervalS,
+        Cancel: cancel,
     }
 
     runner.setState(FolderScanning)
@@ -1714,17 +1720,17 @@ func (m *Model) BringToFront(folder, file string) {
 // CheckFolderHealth checks the folder for common errors and returns the
 // current folder error, or nil if the folder is healthy.
 func (m *Model) CheckFolderHealth(id string) error {
+    folder, ok := m.cfg.Folders()[id]
+    if !ok {
+        return errors.New("folder does not exist")
+    }
+
     if minFree := m.cfg.Options().MinHomeDiskFreePct; minFree > 0 {
         if free, err := osutil.DiskFreePercentage(m.cfg.ConfigPath()); err == nil && free < minFree {
             return errors.New("home disk has insufficient free space")
         }
     }
 
-    folder, ok := m.cfg.Folders()[id]
-    if !ok {
-        return errors.New("folder does not exist")
-    }
-
     fi, err := os.Stat(folder.Path())
     v, ok := m.CurrentLocalVersion(id)
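
The model side of the change follows the usual Go close-to-broadcast idiom: the scan creates a fresh cancel channel and closes it in a defer, so every goroutine selecting on that channel observes the close no matter how the scan function exits. Below is a minimal, self-contained sketch of that idiom, not Syncthing code; the names scan, worker and jobs are made up for illustration.

// Sketch of the close-to-cancel idiom: one deferred close() is observed by
// every goroutine that selects on the channel, so all workers stop when the
// caller returns, whether normally or on an error path.
package main

import (
    "fmt"
    "time"
)

// worker drains jobs until the channel is closed or cancellation is signalled.
func worker(id int, jobs <-chan int, cancel <-chan struct{}) {
    for {
        select {
        case j, ok := <-jobs:
            if !ok {
                return // no more work
            }
            fmt.Println("worker", id, "processed job", j)
        case <-cancel:
            return // caller gave up
        }
    }
}

// scan plays the role of the folder scan: it owns the cancel channel and
// guarantees, via defer, that it is closed on every return path.
func scan() error {
    cancel := make(chan struct{})
    defer close(cancel)

    jobs := make(chan int)
    for i := 0; i < 3; i++ {
        go worker(i, jobs, cancel)
    }

    for j := 0; j < 5; j++ {
        select {
        case jobs <- j:
        case <-time.After(time.Second):
            // An error return still closes cancel through the defer above.
            return fmt.Errorf("workers stalled")
        }
    }
    close(jobs)
    return nil
}

func main() {
    if err := scan(); err != nil {
        fmt.Println("scan failed:", err)
    }
    time.Sleep(50 * time.Millisecond) // let workers finish printing
}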

View File

@@ -19,13 +19,13 @@ import (
 // workers are used in parallel. The outbox will become closed when the inbox
 // is closed and all items handled.
-func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo, counter *int64, done chan struct{}) {
+func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo, counter *int64, done, cancel chan struct{}) {
     wg := sync.NewWaitGroup()
     wg.Add(workers)
 
     for i := 0; i < workers; i++ {
         go func() {
-            hashFiles(dir, blockSize, outbox, inbox, counter)
+            hashFiles(dir, blockSize, outbox, inbox, counter, cancel)
             wg.Done()
         }()
     }
@@ -59,19 +59,33 @@ func HashFile(path string, blockSize int, sizeHint int64, counter *int64) ([]pro
     return Blocks(fd, blockSize, sizeHint, counter)
 }
 
-func hashFiles(dir string, blockSize int, outbox, inbox chan protocol.FileInfo, counter *int64) {
-    for f := range inbox {
-        if f.IsDirectory() || f.IsDeleted() {
-            panic("Bug. Asked to hash a directory or a deleted file.")
-        }
-
-        blocks, err := HashFile(filepath.Join(dir, f.Name), blockSize, f.CachedSize, counter)
-        if err != nil {
-            l.Debugln("hash error:", f.Name, err)
-            continue
-        }
-
-        f.Blocks = blocks
-        outbox <- f
+func hashFiles(dir string, blockSize int, outbox, inbox chan protocol.FileInfo, counter *int64, cancel chan struct{}) {
+    for {
+        select {
+        case f, ok := <-inbox:
+            if !ok {
+                return
+            }
+
+            if f.IsDirectory() || f.IsDeleted() {
+                panic("Bug. Asked to hash a directory or a deleted file.")
+            }
+
+            blocks, err := HashFile(filepath.Join(dir, f.Name), blockSize, f.CachedSize, counter)
+            if err != nil {
+                l.Debugln("hash error:", f.Name, err)
+                continue
+            }
+
+            f.Blocks = blocks
+            select {
+            case outbox <- f:
+            case <-cancel:
+                return
+            }
+
+        case <-cancel:
+            return
+        }
     }
 }
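
The hasher change is more than plumbing the extra parameter through: the send into outbox is itself wrapped in a select on cancel, because a bare outbox <- f blocks forever, and leaks the worker goroutine, if the consumer has already gone away. A rough standalone illustration of that guarded send follows; hashWorker, result and the channel names are hypothetical, and the "hashing" is just a stand-in.

// Illustration of the guarded send: a worker that both receives and sends
// under select can always be released by closing the cancel channel, even
// if nobody is reading its results any more.
package main

import "fmt"

type result struct{ name string }

func hashWorker(inbox <-chan string, outbox chan<- result, cancel <-chan struct{}) {
    for {
        select {
        case name, ok := <-inbox:
            if !ok {
                return // inbox closed: normal shutdown
            }
            r := result{name: name} // stand-in for the real hashing work
            select {
            case outbox <- r: // consumer still listening
            case <-cancel: // consumer gone: drop the result and exit
                return
            }
        case <-cancel:
            return
        }
    }
}

func main() {
    inbox := make(chan string)
    outbox := make(chan result)
    cancel := make(chan struct{})
    done := make(chan struct{})

    go func() {
        hashWorker(inbox, outbox, cancel)
        close(done)
    }()

    inbox <- "a.txt"
    fmt.Println("got:", <-outbox) // normal round trip

    inbox <- "b.txt" // the worker will block trying to send this result...
    close(cancel)    // ...until cancellation lets it give up
    <-done
    fmt.Println("worker exited cleanly")
}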

View File

@@ -72,6 +72,8 @@ type Walker struct {
     // Optional progress tick interval which defines how often FolderScanProgress
     // events are emitted. Negative number means disabled.
     ProgressTickIntervalS int
+    // Signals cancel from the outside - when closed, we should stop walking.
+    Cancel chan struct{}
 }
 
 type TempNamer interface {
@@ -121,7 +123,7 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
     // We're not required to emit scan progress events, just kick off hashers,
     // and feed inputs directly from the walker.
     if w.ProgressTickIntervalS < 0 {
-        newParallelHasher(w.Dir, w.BlockSize, w.Hashers, finishedChan, toHashChan, nil, nil)
+        newParallelHasher(w.Dir, w.BlockSize, w.Hashers, finishedChan, toHashChan, nil, nil, w.Cancel)
         return finishedChan, nil
     }
@@ -149,7 +151,7 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
         realToHashChan := make(chan protocol.FileInfo)
         done := make(chan struct{})
 
-        newParallelHasher(w.Dir, w.BlockSize, w.Hashers, finishedChan, realToHashChan, &progress, done)
+        newParallelHasher(w.Dir, w.BlockSize, w.Hashers, finishedChan, realToHashChan, &progress, done, w.Cancel)
 
         // A routine which actually emits the FolderScanProgress events
         // every w.ProgressTicker ticks, until the hasher routines terminate.
@@ -168,13 +170,21 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
                         "current": current,
                         "total": total,
                     })
+                case <-w.Cancel:
+                    ticker.Stop()
+                    return
                 }
             }
         }()
 
+    loop:
         for _, file := range filesToHash {
             l.Debugln("real to hash:", file.Name)
-            realToHashChan <- file
+            select {
+            case realToHashChan <- file:
+            case <-w.Cancel:
+                break loop
+            }
         }
         close(realToHashChan)
     }()
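
The loop label added above is needed because of how break interacts with select: inside a select statement a bare break only leaves the select, so exiting the enclosing for loop on cancellation requires a labelled break. A tiny standalone example of the same construct (feed, items and the channel names are made up):

// Demonstrates why "break loop" rather than "break" is required when the
// loop body is a select: break alone would only terminate the select.
package main

import "fmt"

func feed(items []int, out chan<- int, cancel <-chan struct{}) {
loop:
    for _, v := range items {
        select {
        case out <- v:
        case <-cancel:
            break loop // a bare "break" would only exit the select here
        }
    }
    close(out)
}

func main() {
    out := make(chan int)
    cancel := make(chan struct{})
    close(cancel) // already cancelled: feed should stop almost immediately

    go feed([]int{1, 2, 3}, out, cancel)

    for v := range out {
        // May print a value or two that win the race against cancellation.
        fmt.Println("received:", v)
    }
    fmt.Println("feed finished")
}
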
@@ -329,7 +339,11 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
             l.Debugln("symlink changedb:", p, f)
-            dchan <- f
+            select {
+            case dchan <- f:
+            case <-w.Cancel:
+                return errors.New("cancelled")
+            }
             return skip
         }
@@ -363,7 +377,13 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
                 Modified: mtime.Unix(),
             }
             l.Debugln("dir:", p, f)
-            dchan <- f
+
+            select {
+            case dchan <- f:
+            case <-w.Cancel:
+                return errors.New("cancelled")
+            }
+
             return nil
         }
@@ -406,7 +426,12 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
                 CachedSize: info.Size(),
             }
             l.Debugln("to hash:", p, f)
-            fchan <- f
+
+            select {
+            case fchan <- f:
+            case <-w.Cancel:
+                return errors.New("cancelled")
+            }
         }
 
         return nil
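
Finally, walkAndHashFiles aborts the traversal by returning an error from its filepath.WalkFunc: filepath.Walk stops as soon as the callback returns a non-nil error (other than filepath.SkipDir) and hands that error back to the caller, which is how a closed Cancel channel ends the directory walk early. A small standalone sketch of that mechanism follows; it is unrelated to the Syncthing walker except in shape, walkUntilCancelled and errCancelled are invented names, and it merely polls the channel before each entry where the real diff selects on the channel send itself.

// Returning an error from the WalkFunc is what actually stops the directory
// traversal once the cancel channel is closed.
package main

import (
    "errors"
    "fmt"
    "os"
    "path/filepath"
)

var errCancelled = errors.New("cancelled")

func walkUntilCancelled(root string, cancel <-chan struct{}) error {
    return filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        select {
        case <-cancel:
            return errCancelled // aborts the whole walk
        default:
        }
        fmt.Println("visited:", p)
        return nil
    })
}

func main() {
    cancel := make(chan struct{})
    close(cancel) // cancel up front so the walk stops on the first entry

    if err := walkUntilCancelled(".", cancel); err == errCancelled {
        fmt.Println("walk aborted:", err)
    }
}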