lib/model: Optimize dbUpdaterRoutine (#5576)
commit bd37f6da17
parent 6940d79f5b
@@ -1607,42 +1607,15 @@ func (f *sendReceiveFolder) Jobs() ([]string, []string) {
 func (f *sendReceiveFolder) dbUpdaterRoutine(dbUpdateChan <-chan dbUpdateJob) {
 	const maxBatchTime = 2 * time.Second
 
-	batch := make([]dbUpdateJob, 0, maxBatchSizeFiles)
-	files := make([]protocol.FileInfo, 0, maxBatchSizeFiles)
+	batch := newFileInfoBatch(nil)
 	tick := time.NewTicker(maxBatchTime)
 	defer tick.Stop()
 
 	changedDirs := make(map[string]struct{})
+	found := false
+	var lastFile protocol.FileInfo
 
-	handleBatch := func() {
-		found := false
-		var lastFile protocol.FileInfo
-
-		for _, job := range batch {
-			files = append(files, job.file)
-
-			switch job.jobType {
-			case dbUpdateHandleFile, dbUpdateShortcutFile:
-				changedDirs[filepath.Dir(job.file.Name)] = struct{}{}
-			case dbUpdateHandleDir:
-				changedDirs[job.file.Name] = struct{}{}
-			case dbUpdateHandleSymlink, dbUpdateInvalidate:
-				// fsyncing symlinks is only supported by MacOS
-				// and invalidated files are db only changes -> no sync
-			}
-
-			if job.file.IsInvalid() || (job.file.IsDirectory() && !job.file.IsSymlink()) {
-				continue
-			}
-
-			if job.jobType&(dbUpdateHandleFile|dbUpdateDeleteFile) == 0 {
-				continue
-			}
-
-			found = true
-			lastFile = job.file
-		}
-
+	batch.flushFn = func(files []protocol.FileInfo) error {
 		// sync directories
 		for dir := range changedDirs {
 			delete(changedDirs, dir)
@@ -1663,13 +1636,12 @@ func (f *sendReceiveFolder) dbUpdaterRoutine(dbUpdateChan <-chan dbUpdateJob) {
 
 		if found {
 			f.model.receivedFile(f.folderID, lastFile)
+			found = false
 		}
 
-		batch = batch[:0]
-		files = files[:0]
+		return nil
 	}
 
-	batchSizeBytes := 0
 loop:
 	for {
 		select {
@@ -1678,26 +1650,35 @@ loop:
 				break loop
 			}
 
-			job.file.Sequence = 0
-			batch = append(batch, job)
-
-			batchSizeBytes += job.file.ProtoSize()
-			if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
-				handleBatch()
-				batchSizeBytes = 0
+			switch job.jobType {
+			case dbUpdateHandleFile, dbUpdateShortcutFile:
+				changedDirs[filepath.Dir(job.file.Name)] = struct{}{}
+			case dbUpdateHandleDir:
+				changedDirs[job.file.Name] = struct{}{}
+			case dbUpdateHandleSymlink, dbUpdateInvalidate:
+				// fsyncing symlinks is only supported by MacOS
+				// and invalidated files are db only changes -> no sync
 			}
 
+			// For some reason we seem to care about file deletions and
+			// content modification, but not about metadata and dirs/symlinks.
+			if !job.file.IsInvalid() && job.jobType&(dbUpdateHandleFile|dbUpdateDeleteFile) != 0 {
+				found = true
+				lastFile = job.file
+			}
+
+			job.file.Sequence = 0
+
+			batch.append(job.file)
+
+			batch.flushIfFull()
+
 		case <-tick.C:
-			if len(batch) > 0 {
-				handleBatch()
-				batchSizeBytes = 0
-			}
+			batch.flush()
 		}
 	}
 
-	if len(batch) > 0 {
-		handleBatch()
-	}
+	batch.flush()
 }
 
 // pullScannerRoutine aggregates paths to be scanned after pulling. The scan is
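
The new routine delegates all batch bookkeeping to a fileInfoBatch value created with newFileInfoBatch: append collects files, flushIfFull enforces the size limits that the hand-rolled batchSizeBytes counter used to track, and flush runs the flushFn callback (directory fsync plus receivedFile) and resets the batch. The helper's definition is outside this diff; below is only a minimal sketch of such a batcher, assuming limits in the spirit of maxBatchSizeFiles and maxBatchSizeBytes from the old code. The sketch's type name, constants, and error handling are illustrative and not Syncthing's actual implementation.

package model

import "github.com/syncthing/syncthing/lib/protocol"

// Illustrative limits; the real constants live elsewhere in lib/model.
const (
	sketchMaxBatchFiles = 1000
	sketchMaxBatchBytes = 1 << 20
)

// fileInfoBatchSketch accumulates FileInfos and hands them to flushFn when
// flushed, mirroring how the new dbUpdaterRoutine uses newFileInfoBatch,
// append, flushIfFull and flush.
type fileInfoBatchSketch struct {
	infos   []protocol.FileInfo
	size    int // accumulated ProtoSize, like the old batchSizeBytes counter
	flushFn func([]protocol.FileInfo) error
}

func newFileInfoBatchSketch(fn func([]protocol.FileInfo) error) *fileInfoBatchSketch {
	return &fileInfoBatchSketch{
		infos:   make([]protocol.FileInfo, 0, sketchMaxBatchFiles),
		flushFn: fn,
	}
}

func (b *fileInfoBatchSketch) append(f protocol.FileInfo) {
	b.infos = append(b.infos, f)
	b.size += f.ProtoSize()
}

// flushIfFull flushes only once either limit is reached, so the caller can
// invoke it unconditionally after every append.
func (b *fileInfoBatchSketch) flushIfFull() error {
	if len(b.infos) >= sketchMaxBatchFiles || b.size >= sketchMaxBatchBytes {
		return b.flush()
	}
	return nil
}

// flush runs the callback on the pending items and resets the batch. An
// empty batch is a no-op, which matches the ticker case in the new code
// calling batch.flush() without a len(batch) > 0 guard.
func (b *fileInfoBatchSketch) flush() error {
	if len(b.infos) == 0 {
		return nil
	}
	if err := b.flushFn(b.infos); err != nil {
		return err
	}
	b.infos = b.infos[:0]
	b.size = 0
	return nil
}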