lib/db: Keep folder meta data persistently in db (fixes #4400)

This keeps the data we need about sequence numbers and object counts
persistently in the database. The sizeTracker is expanded into a
metadataTracker that handles multiple folders, and the Counts struct is
made protobuf serializable. It gains a Sequence field to assist in
tracking that as well, and a collection of Counts becomes a CountsSet
(for serialization purposes).
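
As a rough sketch, the serializable shapes described above might look
like the following Go; the Sequence field, the CountsSet wrapper, and
the created timestamp (see the next paragraph) come from this message,
while the remaining field names are illustrative assumptions rather
than the actual schema.

    // Counts holds the object counts and sequence number tracked for one
    // device in one folder; made protobuf serializable in this commit.
    type Counts struct {
        Files       int32  // number of regular files
        Directories int32  // number of directories
        Deleted     int32  // number of deleted entries
        Bytes       int64  // total size of all files
        Sequence    int64  // highest sequence number seen for this device
        DeviceID    []byte // which device these counts belong to
    }

    // CountsSet wraps the per-device Counts so the whole collection can
    // be marshalled as a single database value, together with the created
    // timestamp used by the staleness check described below.
    type CountsSet struct {
        Counts  []Counts
        Created int64 // Unix nanoseconds at creation time
    }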

The initial database scan also serves as a consistency check of the
global entries. Strictly speaking that check shouldn't be necessary.
Nonetheless I added a created timestamp to the metadata and an interval
to compare it against: when the time since the metadata creation
exceeds that interval, we drop the metadata and rebuild it from scratch
like we used to, consistency checking in the process.

A new environment variable, STCHECKDBEVERY, can override this interval;
it can for example be set to zero to force the check immediately.
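
A minimal sketch of that staleness check, assuming a placeholder
default interval; only the STCHECKDBEVERY name and the
zero-forces-a-check behaviour come from the text above.

    import (
        "os"
        "time"
    )

    // maxDBAge is how long we trust the persisted metadata before
    // dropping it and rebuilding from a full, consistency-checking scan.
    // The default here is an assumption; STCHECKDBEVERY overrides it.
    var maxDBAge = 30 * 24 * time.Hour

    func init() {
        if v := os.Getenv("STCHECKDBEVERY"); v != "" {
            if d, err := time.ParseDuration(v); err == nil {
                maxDBAge = d // STCHECKDBEVERY=0 forces the check immediately
            }
        }
    }

    // metaStale reports whether metadata created at the given time is
    // old enough to be dropped and rebuilt.
    func metaStale(created time.Time) bool {
        return time.Since(created) >= maxDBAge
    }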

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4547
LGTM: imsodin
Jakob Borg
2017-12-14 09:51:17 +00:00
committed by Audrius Butkevicius
parent 8c91ced784
commit d1d967f0cf
14 changed files with 908 additions and 265 deletions
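
For orientation while reading the hunks below: the diff threads a
single metadataTracker through functions that previously took one or
two *sizeTracker arguments (localSize, globalSize), and keys the counts
by device instead. What follows is a sketch of the tracker surface
those call sites assume; the method names (addFile, removeFile) and the
globalDeviceID identifier appear in the diff, everything else here is
an assumption.

    import (
        "sync"

        "github.com/syncthing/syncthing/lib/protocol"
    )

    // globalDeviceID is a sentinel device ID under which the tracker
    // keeps the counts for the global (newest-version) set of files; its
    // actual value is not shown in the diff.
    var globalDeviceID protocol.DeviceID

    // metadataTracker keeps per-device Counts for a folder, replacing the
    // old localSize/globalSize sizeTracker pair.
    type metadataTracker struct {
        mut    sync.Mutex
        counts map[protocol.DeviceID]Counts
    }

    // addFile accounts for f under dev and bumps the tracked sequence
    // number when f carries a higher one; removeFile mirrors this.
    func (m *metadataTracker) addFile(dev protocol.DeviceID, f protocol.FileInfo) {
        m.mut.Lock()
        defer m.mut.Unlock()
        if m.counts == nil {
            m.counts = make(map[protocol.DeviceID]Counts)
        }
        c := m.counts[dev]
        switch {
        case f.IsDeleted():
            c.Deleted++
        case f.IsDirectory():
            c.Directories++
        default:
            c.Files++
            c.Bytes += f.Size
        }
        if f.Sequence > c.Sequence {
            c.Sequence = f.Sequence
        }
        m.counts[dev] = c
    }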


@@ -93,12 +93,11 @@ func (db *Instance) Location() string {
 	return db.location
 }
 
-func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) {
+func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, meta *metadataTracker) {
 	t := db.newReadWriteTransaction()
 	defer t.close()
 
 	var fk []byte
-	isLocalDevice := bytes.Equal(device, protocol.LocalDeviceID[:])
 	for _, f := range fs {
 		name := []byte(f.Name)
 		fk = db.deviceKeyInto(fk[:cap(fk)], folder, device, name)
@@ -116,15 +115,14 @@ func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, l
 			continue
 		}
 
-		if isLocalDevice {
-			if err == nil {
-				localSize.removeFile(ef)
-			}
-			localSize.addFile(f)
-		}
+		devID := protocol.DeviceIDFromBytes(device)
+		if err == nil {
+			meta.removeFile(devID, ef)
+		}
+		meta.addFile(devID, f)
 
 		t.insertFile(folder, device, f)
-		t.updateGlobal(folder, device, f, globalSize)
+		t.updateGlobal(folder, device, f, meta)
 
 		// Write out and reuse the batch every few records, to avoid the batch
 		// growing too large and thus allocating unnecessarily much memory.
@@ -465,7 +463,7 @@ func (db *Instance) dropFolder(folder []byte) {
 	dbi.Release()
 }
 
-func (db *Instance) dropDeviceFolder(device, folder []byte, globalSize *sizeTracker) {
+func (db *Instance) dropDeviceFolder(device, folder []byte, meta *metadataTracker) {
 	t := db.newReadWriteTransaction()
 	defer t.close()
 
@@ -475,13 +473,13 @@ func (db *Instance) dropDeviceFolder(device, folder []byte, globalSize *sizeTrac
 	for dbi.Next() {
 		key := dbi.Key()
 		name := db.deviceKeyName(key)
-		t.removeFromGlobal(folder, device, name, globalSize)
+		t.removeFromGlobal(folder, device, name, meta)
 		t.Delete(key)
 		t.checkFlush()
 	}
 }
 
-func (db *Instance) checkGlobals(folder []byte, globalSize *sizeTracker) {
+func (db *Instance) checkGlobals(folder []byte, meta *metadataTracker) {
 	t := db.newReadWriteTransaction()
 	defer t.close()
 
@@ -520,7 +518,7 @@ func (db *Instance) checkGlobals(folder []byte, globalSize *sizeTracker) {
 		if i == 0 {
 			if fi, ok := t.getFile(folder, version.Device, name); ok {
-				globalSize.addFile(fi)
+				meta.addFile(globalDeviceID, fi)
 			}
 		}
 	}
@@ -760,6 +758,13 @@ func (db *Instance) mtimesKey(folder []byte) []byte {
 	return prefix
 }
 
+func (db *Instance) folderMetaKey(folder []byte) []byte {
+	prefix := make([]byte, 5) // key type + 4 bytes folder idx number
+	prefix[0] = KeyTypeFolderMeta
+	binary.BigEndian.PutUint32(prefix[1:], db.folderIdx.ID(folder))
+	return prefix
+}
+
 // DropDeltaIndexIDs removes all index IDs from the database. This will
 // cause a full index transmission on the next connection.
 func (db *Instance) DropDeltaIndexIDs() {
@@ -770,6 +775,10 @@ func (db *Instance) dropMtimes(folder []byte) {
 	db.dropPrefix(db.mtimesKey(folder))
 }
 
+func (db *Instance) dropFolderMeta(folder []byte) {
+	db.dropPrefix(db.folderMetaKey(folder))
+}
+
 func (db *Instance) dropPrefix(prefix []byte) {
 	t := db.newReadWriteTransaction()
 	defer t.close()
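
Putting the two new helpers together, persisting the metadata
presumably comes down to marshalling the CountsSet under folderMetaKey.
A hedged sketch, assuming the Instance embeds the goleveldb handle (so
Get/Put exist) and that CountsSet has protobuf-generated
Marshal/Unmarshal; none of these helpers appear in the diff itself.

    // putFolderMeta and getFolderMeta are illustrative; only
    // folderMetaKey and KeyTypeFolderMeta come from the diff above.
    func (db *Instance) putFolderMeta(folder []byte, cs CountsSet) error {
        bs, err := cs.Marshal() // protobuf, per the commit message
        if err != nil {
            return err
        }
        return db.Put(db.folderMetaKey(folder), bs, nil)
    }

    func (db *Instance) getFolderMeta(folder []byte) (CountsSet, error) {
        var cs CountsSet
        bs, err := db.Get(db.folderMetaKey(folder), nil)
        if err != nil {
            return cs, err // includes leveldb.ErrNotFound on a fresh database
        }
        err = cs.Unmarshal(bs)
        return cs, err
    }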