lib/db: Do all update operations on a single item at once (#5441)

To do so, the BlockMap struct has been removed. The block map behaves like any
other prefixed part of the database, but had not been integrated in the recent
keyer refactor. The database is now only flushed when files are in a consistent state.
Simon Frei
2019-01-23 10:22:33 +01:00
committed by Jakob Borg
parent 6421693cce
commit 42bd42df5a
5 changed files with 152 additions and 260 deletions
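The pattern the commit moves to can be sketched under assumed names (updateFile
and its arguments are illustrative, not the lib/db API): every key touched by one
file update goes into a single leveldb batch, so the block map can never be
flushed without the matching file record.

package main

import (
	"encoding/binary"

	"github.com/syndtr/goleveldb/leveldb"
)

// updateFile is an illustrative sketch, not the committed code: the file
// record and all of its block-map entries are staged in one batch and
// written atomically, instead of being flushed piecemeal mid-update.
func updateFile(db *leveldb.DB, fileKey, fileData []byte, blockKeys [][]byte) error {
	batch := new(leveldb.Batch)
	batch.Put(fileKey, fileData)
	buf := make([]byte, 4)
	for i, bk := range blockKeys {
		binary.BigEndian.PutUint32(buf, uint32(i)) // value is the block's index in the file
		batch.Put(bk, buf)                         // the batch copies key and value, so buf reuse is safe
	}
	return db.Write(batch, nil) // one atomic flush per updated item
}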

lib/db/blockmap.go

@@ -11,125 +11,14 @@ import (
 	"fmt"
 
 	"github.com/syncthing/syncthing/lib/osutil"
 	"github.com/syncthing/syncthing/lib/protocol"
-	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
 var blockFinder *BlockFinder
 
-const maxBatchSize = 1000
-
-type BlockMap struct {
-	db     *Lowlevel
-	folder uint32
-}
-
-func NewBlockMap(db *Lowlevel, folder string) *BlockMap {
-	return &BlockMap{
-		db:     db,
-		folder: db.folderIdx.ID([]byte(folder)),
-	}
-}
-
-// Add files to the block map, ignoring any deleted or invalid files.
-func (m *BlockMap) Add(files []protocol.FileInfo) error {
-	batch := new(leveldb.Batch)
-	buf := make([]byte, 4)
-	var key []byte
-	for _, file := range files {
-		m.checkFlush(batch)
-
-		if file.IsDirectory() || file.IsDeleted() || file.IsInvalid() {
-			continue
-		}
-
-		for i, block := range file.Blocks {
-			binary.BigEndian.PutUint32(buf, uint32(i))
-			key = m.blockKeyInto(key, block.Hash, file.Name)
-			batch.Put(key, buf)
-		}
-	}
-
-	return m.db.Write(batch, nil)
-}
-
-// Update block map state, removing any deleted or invalid files.
-func (m *BlockMap) Update(files []protocol.FileInfo) error {
-	batch := new(leveldb.Batch)
-	buf := make([]byte, 4)
-	var key []byte
-	for _, file := range files {
-		m.checkFlush(batch)
-
-		switch {
-		case file.IsDirectory():
-		case file.IsDeleted() || file.IsInvalid():
-			for _, block := range file.Blocks {
-				key = m.blockKeyInto(key, block.Hash, file.Name)
-				batch.Delete(key)
-			}
-		default:
-			for i, block := range file.Blocks {
-				binary.BigEndian.PutUint32(buf, uint32(i))
-				key = m.blockKeyInto(key, block.Hash, file.Name)
-				batch.Put(key, buf)
-			}
-		}
-	}
-
-	return m.db.Write(batch, nil)
-}
-
-// Discard block map state, removing the given files
-func (m *BlockMap) Discard(files []protocol.FileInfo) error {
-	batch := new(leveldb.Batch)
-	var key []byte
-	for _, file := range files {
-		m.checkFlush(batch)
-		m.discard(file, key, batch)
-	}
-	return m.db.Write(batch, nil)
-}
-
-func (m *BlockMap) discard(file protocol.FileInfo, key []byte, batch *leveldb.Batch) {
-	for _, block := range file.Blocks {
-		key = m.blockKeyInto(key, block.Hash, file.Name)
-		batch.Delete(key)
-	}
-}
-
-func (m *BlockMap) checkFlush(batch *leveldb.Batch) error {
-	if batch.Len() > maxBatchSize {
-		if err := m.db.Write(batch, nil); err != nil {
-			return err
-		}
-		batch.Reset()
-	}
-	return nil
-}
-
-// Drop block map, removing all entries related to this block map from the db.
-func (m *BlockMap) Drop() error {
-	batch := new(leveldb.Batch)
-	iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:keyPrefixLen+keyFolderLen]), nil)
-	defer iter.Release()
-	for iter.Next() {
-		m.checkFlush(batch)
-		batch.Delete(iter.Key())
-	}
-	if iter.Error() != nil {
-		return iter.Error()
-	}
-	return m.db.Write(batch, nil)
-}
-
-func (m *BlockMap) blockKeyInto(o, hash []byte, file string) []byte {
-	return blockKeyInto(o, hash, m.folder, file)
-}
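The folder field in the removed struct is worth a note: db.folderIdx interns
folder names to fixed-width uint32 IDs so that every block key stays compact. A
minimal in-memory sketch of that idea (names are illustrative; the real index is
persisted in the database):

// Illustrative sketch of the folder-index idea behind db.folderIdx:
// folder names are interned to uint32 IDs. The real implementation
// persists the mapping; this one is in-memory only.
type folderIndex struct {
	ids  map[string]uint32
	next uint32
}

func newFolderIndex() *folderIndex {
	return &folderIndex{ids: make(map[string]uint32)}
}

func (f *folderIndex) ID(folder []byte) uint32 {
	id, ok := f.ids[string(folder)]
	if !ok {
		f.next++
		id = f.next
		f.ids[string(folder)] = id
	}
	return id
}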
 
 type BlockFinder struct {
-	db *Lowlevel
+	db *instance
 }
 
 func NewBlockFinder(db *Lowlevel) *BlockFinder {
@@ -137,11 +26,9 @@ func NewBlockFinder(db *Lowlevel) *BlockFinder {
 		return blockFinder
 	}
 
-	f := &BlockFinder{
-		db: db,
+	return &BlockFinder{
+		db: newInstance(db),
 	}
-
-	return f
 }
 
 func (f *BlockFinder) String() string {
@@ -154,52 +41,23 @@ func (f *BlockFinder) String() string {
 // reason. The iterator finally returns the result, whether or not a
 // satisfying block was eventually found.
 func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
+	t := f.db.newReadOnlyTransaction()
+	defer t.close()
+
 	var key []byte
 	for _, folder := range folders {
-		folderID := f.db.folderIdx.ID([]byte(folder))
-		key = blockKeyInto(key, hash, folderID, "")
-		iter := f.db.NewIterator(util.BytesPrefix(key), nil)
-		defer iter.Release()
+		key = f.db.keyer.GenerateBlockMapKey(key, []byte(folder), hash, nil)
+		iter := t.NewIterator(util.BytesPrefix(key), nil)
 
 		for iter.Next() && iter.Error() == nil {
-			file := blockKeyName(iter.Key())
+			file := string(f.db.keyer.NameFromBlockMapKey(iter.Key()))
 			index := int32(binary.BigEndian.Uint32(iter.Value()))
 			if iterFn(folder, osutil.NativeFilename(file), index) {
+				iter.Release()
 				return true
 			}
 		}
+		iter.Release()
 	}
 	return false
 }
-
-// blockKeyInto returns a byte slice encoding the following information:
-//	   keyTypeBlock (1 byte)
-//	   folder (4 bytes)
-//	   block hash (32 bytes)
-//	   file name (variable size)
-func blockKeyInto(o, hash []byte, folder uint32, file string) []byte {
-	reqLen := keyPrefixLen + keyFolderLen + keyHashLen + len(file)
-	if cap(o) < reqLen {
-		o = make([]byte, reqLen)
-	} else {
-		o = o[:reqLen]
-	}
-	o[0] = KeyTypeBlock
-	binary.BigEndian.PutUint32(o[keyPrefixLen:], folder)
-	copy(o[keyPrefixLen+keyFolderLen:], hash)
-	copy(o[keyPrefixLen+keyFolderLen+keyHashLen:], []byte(file))
-	return o
-}
-
-// blockKeyName returns the file name from the block key
-func blockKeyName(data []byte) string {
-	if len(data) < keyPrefixLen+keyFolderLen+keyHashLen+1 {
-		panic("Incorrect key length")
-	}
-	if data[0] != KeyTypeBlock {
-		panic("Incorrect key type")
-	}
-	file := string(data[keyPrefixLen+keyFolderLen+keyHashLen:])
-	return file
-}
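To make the removed key layout concrete, here is a sketch of decoding one of
these keys by hand, assuming the constants keyPrefixLen = 1, keyFolderLen = 4
and keyHashLen = 32 implied by the comment above (hash and folderID are assumed
to exist in the caller):

// Sketch: manually decoding the 1 + 4 + 32 + N byte block key described above.
key := blockKeyInto(nil, hash, folderID, "some/file")
keyType := key[0]                           // KeyTypeBlock
folder := binary.BigEndian.Uint32(key[1:5]) // 4-byte folder index
blockHash := key[5:37]                      // 32-byte block hash
name := string(key[37:])                    // "some/file"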