Merge pull request #2424 from calmh/dbinstance

We should pass around db.Instance instead of leveldb.DB
Audrius Butkevicius 2015-10-31 12:51:23 +00:00
commit b4bbd050c2
20 changed files with 145 additions and 241 deletions
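In short: opening, corruption recovery, and folder enumeration now live behind a db.Instance wrapper in lib/db, so callers no longer touch goleveldb directly. A minimal sketch of what a caller looks like after this change (the standalone main and the database path are illustrative only, not taken from the tree):

    package main

    import (
        "log"

        "github.com/syncthing/syncthing/lib/db"
    )

    func main() {
        // db.Open now handles leveldb corruption recovery internally;
        // the path here is a placeholder.
        ldb, err := db.Open("/path/to/index-db")
        if err != nil {
            log.Fatalln("Cannot open database:", err)
        }

        // The *db.Instance is what gets passed around: FileSets, the model,
        // statistics and namespaced KV stores all take it directly.
        fs := db.NewFileSet("default", ldb)
        _ = fs

        // Folder enumeration moved from db.ListFolders(ldb) to a method.
        for _, folder := range ldb.ListFolders() {
            log.Println("folder in database:", folder)
        }

        // Tests use an in-memory instance instead of goleveldb's MemStorage.
        _ = db.OpenMemory()
    }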

View File

@@ -9,6 +9,7 @@ package main
 import (
     "bytes"
     "crypto/tls"
+    "errors"
     "flag"
     "fmt"
     "io/ioutil"
@@ -42,9 +43,6 @@ import (
     "github.com/syncthing/syncthing/lib/tlsutil"
     "github.com/syncthing/syncthing/lib/upgrade"
-    "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/errors"
-    "github.com/syndtr/goleveldb/leveldb/opt"
     "github.com/thejerf/suture"
 )
@@ -371,7 +369,7 @@ func main() {
     if doUpgrade {
         // Use leveldb database locks to protect against concurrent upgrades
-        _, err = leveldb.OpenFile(locations[locDatabase], &opt.Options{OpenFilesCacheCapacity: 100})
+        _, err = db.Open(locations[locDatabase])
         if err != nil {
             l.Infoln("Attempting upgrade through running Syncthing...")
             err = upgradeViaRest()
@@ -617,21 +615,7 @@ func syncthingMain() {
     }
     dbFile := locations[locDatabase]
-    dbOpts := dbOpts(cfg)
-    ldb, err := leveldb.OpenFile(dbFile, dbOpts)
-    if leveldbIsCorrupted(err) {
-        ldb, err = leveldb.RecoverFile(dbFile, dbOpts)
-    }
-    if leveldbIsCorrupted(err) {
-        // The database is corrupted, and we've tried to recover it but it
-        // didn't work. At this point there isn't much to do beyond dropping
-        // the database and reindexing...
-        l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
-        if err := resetDB(); err != nil {
-            l.Fatalln("Remove database:", err)
-        }
-        ldb, err = leveldb.OpenFile(dbFile, dbOpts)
-    }
+    ldb, err := db.Open(dbFile)
     if err != nil {
         l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
     }
@@ -642,7 +626,7 @@ func syncthingMain() {
     // Remove database entries for folders that no longer exist in the config
     folders := cfg.Folders()
-    for _, folder := range db.ListFolders(ldb) {
+    for _, folder := range ldb.ListFolders() {
         if _, ok := folders[folder]; !ok {
             l.Infof("Cleaning data for dropped folder %q", folder)
             db.DropFolder(ldb, folder)
@@ -881,40 +865,6 @@ func loadConfig(cfgFile string) (*config.Wrapper, string, error) {
     return cfg, myName, nil
 }
-func dbOpts(cfg *config.Wrapper) *opt.Options {
-    // Calculate a suitable database block cache capacity.
-    // Default is 8 MiB.
-    blockCacheCapacity := 8 << 20
-    // Increase block cache up to this maximum:
-    const maxCapacity = 64 << 20
-    // ... which we reach when the box has this much RAM:
-    const maxAtRAM = 8 << 30
-    if v := cfg.Options().DatabaseBlockCacheMiB; v != 0 {
-        // Use the value from the config, if it's set.
-        blockCacheCapacity = v << 20
-    } else if bytes, err := memorySize(); err == nil {
-        // We start at the default of 8 MiB and use larger values for machines
-        // with more memory.
-        if bytes > maxAtRAM {
-            // Cap the cache at maxCapacity when we reach maxAtRam amount of memory
-            blockCacheCapacity = maxCapacity
-        } else if bytes > maxAtRAM/maxCapacity*int64(blockCacheCapacity) {
-            // Grow from the default to maxCapacity at maxAtRam amount of memory
-            blockCacheCapacity = int(bytes * maxCapacity / maxAtRAM)
-        }
-        l.Infoln("Database block cache capacity", blockCacheCapacity/1024, "KiB")
-    }
-    return &opt.Options{
-        OpenFilesCacheCapacity: 100,
-        BlockCacheCapacity:     blockCacheCapacity,
-        WriteBuffer:            4 << 20,
-    }
-}
 func startAuditing(mainSvc *suture.Supervisor) {
     auditFile := timestampedLoc(locAuditLog)
     fd, err := os.OpenFile(auditFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
@@ -1166,19 +1116,3 @@ func checkShortIDs(cfg *config.Wrapper) error {
     }
     return nil
 }
-// A "better" version of leveldb's errors.IsCorrupted.
-func leveldbIsCorrupted(err error) bool {
-    switch {
-    case err == nil:
-        return false
-    case errors.IsCorrupted(err):
-        return true
-    case strings.Contains(err.Error(), "corrupted"):
-        return true
-    }
-    return false
-}

View File

@@ -14,9 +14,6 @@ import (
     "github.com/syncthing/syncthing/lib/db"
     "github.com/syncthing/syncthing/lib/model"
     "github.com/syncthing/syncthing/lib/protocol"
-    "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/storage"
 )
 func TestFolderErrors(t *testing.T) {
@@ -38,7 +35,7 @@ func TestFolderErrors(t *testing.T) {
         }
     }
-    ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    ldb := db.OpenMemory()
     // Case 1 - new folder, directory and marker created

View File

@@ -56,7 +56,6 @@ func TestDefaultValues(t *testing.T) {
         ProgressUpdateIntervalS: 5,
         SymlinksEnabled: true,
         LimitBandwidthInLan: false,
-        DatabaseBlockCacheMiB: 0,
         MinHomeDiskFreePct: 1,
         URURL: "https://data.syncthing.net/newdata",
         URInitialDelayS: 1800,
@@ -180,7 +179,6 @@ func TestOverriddenValues(t *testing.T) {
         ProgressUpdateIntervalS: 10,
         SymlinksEnabled: false,
         LimitBandwidthInLan: true,
-        DatabaseBlockCacheMiB: 42,
         MinHomeDiskFreePct: 5.2,
         URURL: "https://localhost/newdata",
         URInitialDelayS: 800,

View File

@@ -37,7 +37,6 @@ type OptionsConfiguration struct {
     ProgressUpdateIntervalS int `xml:"progressUpdateIntervalS" json:"progressUpdateIntervalS" default:"5"`
     SymlinksEnabled bool `xml:"symlinksEnabled" json:"symlinksEnabled" default:"true"`
     LimitBandwidthInLan bool `xml:"limitBandwidthInLan" json:"limitBandwidthInLan" default:"false"`
-    DatabaseBlockCacheMiB int `xml:"databaseBlockCacheMiB" json:"databaseBlockCacheMiB" default:"0"`
     MinHomeDiskFreePct float64 `xml:"minHomeDiskFreePct" json:"minHomeDiskFreePct" default:"1"`
     ReleasesURL string `xml:"releasesURL" json:"releasesURL" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=30"`
     AlwaysLocalNets []string `xml:"alwaysLocalNet" json:"alwaysLocalNets"`

View File

@@ -29,11 +29,11 @@ var blockFinder *BlockFinder
 const maxBatchSize = 256 << 10
 type BlockMap struct {
-    db *leveldb.DB
+    db *Instance
     folder string
 }
-func NewBlockMap(db *leveldb.DB, folder string) *BlockMap {
+func NewBlockMap(db *Instance, folder string) *BlockMap {
     return &BlockMap{
         db: db,
         folder: folder,
@@ -146,10 +146,10 @@ func (m *BlockMap) blockKeyInto(o, hash []byte, file string) []byte {
 }
 type BlockFinder struct {
-    db *leveldb.DB
+    db *Instance
 }
-func NewBlockFinder(db *leveldb.DB) *BlockFinder {
+func NewBlockFinder(db *Instance) *BlockFinder {
     if blockFinder != nil {
         return blockFinder
     }

View File

@@ -10,9 +10,6 @@ import (
     "testing"
     "github.com/syncthing/syncthing/lib/protocol"
-    "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/storage"
 )
 func genBlocks(n int) []protocol.BlockInfo {
@@ -50,17 +47,14 @@ func init() {
     }
 }
-func setup() (*leveldb.DB, *BlockFinder) {
+func setup() (*Instance, *BlockFinder) {
     // Setup
-    db, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        panic(err)
-    }
+    db := OpenMemory()
     return db, NewBlockFinder(db)
 }
-func dbEmpty(db *leveldb.DB) bool {
+func dbEmpty(db *Instance) bool {
     iter := db.NewIterator(nil, nil)
     defer iter.Release()
     if iter.Next() {

View File

@@ -8,27 +8,64 @@ package db
 import (
     "bytes"
+    "os"
     "sort"
+    "strings"
     "github.com/syncthing/syncthing/lib/protocol"
     "github.com/syndtr/goleveldb/leveldb"
+    "github.com/syndtr/goleveldb/leveldb/errors"
     "github.com/syndtr/goleveldb/leveldb/iterator"
+    "github.com/syndtr/goleveldb/leveldb/opt"
+    "github.com/syndtr/goleveldb/leveldb/storage"
     "github.com/syndtr/goleveldb/leveldb/util"
 )
 type deletionHandler func(t readWriteTransaction, folder, device, name []byte, dbi iterator.Iterator) int64
-type dbInstance struct {
+type Instance struct {
     *leveldb.DB
 }
-func newDBInstance(db *leveldb.DB) *dbInstance {
-    return &dbInstance{
+func Open(file string) (*Instance, error) {
+    opts := &opt.Options{
+        OpenFilesCacheCapacity: 100,
+        WriteBuffer: 4 << 20,
+    }
+    db, err := leveldb.OpenFile(file, opts)
+    if leveldbIsCorrupted(err) {
+        db, err = leveldb.RecoverFile(file, opts)
+    }
+    if leveldbIsCorrupted(err) {
+        // The database is corrupted, and we've tried to recover it but it
+        // didn't work. At this point there isn't much to do beyond dropping
+        // the database and reindexing...
+        l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
+        if err := os.RemoveAll(file); err != nil {
+            return nil, err
+        }
+        db, err = leveldb.OpenFile(file, opts)
+    }
+    if err != nil {
+        return nil, err
+    }
+    return newDBInstance(db), nil
+}
+func OpenMemory() *Instance {
+    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    return newDBInstance(db)
+}
+func newDBInstance(db *leveldb.DB) *Instance {
+    return &Instance{
         DB: db,
     }
 }
-func (db *dbInstance) genericReplace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker, deleteFn deletionHandler) int64 {
+func (db *Instance) genericReplace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker, deleteFn deletionHandler) int64 {
     sort.Sort(fileList(fs)) // sort list on name, same as in the database
     start := db.deviceKey(folder, device, nil) // before all folder/device files
@@ -126,7 +163,7 @@ func (db *dbInstance) genericReplace(folder, device []byte, fs []protocol.FileIn
     return maxLocalVer
 }
-func (db *dbInstance) replace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) int64 {
+func (db *Instance) replace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) int64 {
     // TODO: Return the remaining maxLocalVer?
     return db.genericReplace(folder, device, fs, localSize, globalSize, func(t readWriteTransaction, folder, device, name []byte, dbi iterator.Iterator) int64 {
         // Database has a file that we are missing. Remove it.
@@ -137,7 +174,7 @@ func (db *dbInstance) replace(folder, device []byte, fs []protocol.FileInfo, loc
     })
 }
-func (db *dbInstance) updateFiles(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) int64 {
+func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) int64 {
     t := db.newReadWriteTransaction()
     defer t.close()
@@ -195,7 +232,7 @@ func (db *dbInstance) updateFiles(folder, device []byte, fs []protocol.FileInfo,
     return maxLocalVer
 }
-func (db *dbInstance) withHave(folder, device []byte, truncate bool, fn Iterator) {
+func (db *Instance) withHave(folder, device []byte, truncate bool, fn Iterator) {
     start := db.deviceKey(folder, device, nil) // before all folder/device files
     limit := db.deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
@@ -216,7 +253,7 @@ func (db *dbInstance) withHave(folder, device []byte, truncate bool, fn Iterator
     }
 }
-func (db *dbInstance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
+func (db *Instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
     start := db.deviceKey(folder, nil, nil) // before all folder/device files
     limit := db.deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
@@ -249,11 +286,11 @@ func (db *dbInstance) withAllFolderTruncated(folder []byte, fn func(device []byt
     }
 }
-func (db *dbInstance) getFile(folder, device, file []byte) (protocol.FileInfo, bool) {
+func (db *Instance) getFile(folder, device, file []byte) (protocol.FileInfo, bool) {
     return getFile(db, db.deviceKey(folder, device, file))
 }
-func (db *dbInstance) getGlobal(folder, file []byte, truncate bool) (FileIntf, bool) {
+func (db *Instance) getGlobal(folder, file []byte, truncate bool) (FileIntf, bool) {
     k := db.globalKey(folder, file)
     t := db.newReadOnlyTransaction()
@@ -290,7 +327,7 @@ func (db *dbInstance) getGlobal(folder, file []byte, truncate bool) (FileIntf, b
     return fi, true
 }
-func (db *dbInstance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) {
+func (db *Instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) {
     t := db.newReadOnlyTransaction()
     defer t.close()
@@ -333,7 +370,7 @@ func (db *dbInstance) withGlobal(folder, prefix []byte, truncate bool, fn Iterat
     }
 }
-func (db *dbInstance) availability(folder, file []byte) []protocol.DeviceID {
+func (db *Instance) availability(folder, file []byte) []protocol.DeviceID {
     k := db.globalKey(folder, file)
     bs, err := db.Get(k, nil)
     if err == leveldb.ErrNotFound {
@@ -361,7 +398,7 @@ func (db *dbInstance) availability(folder, file []byte) []protocol.DeviceID {
     return devices
 }
-func (db *dbInstance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
+func (db *Instance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
     start := db.globalKey(folder, nil)
     limit := db.globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
@@ -452,7 +489,7 @@ nextFile:
     }
 }
-func (db *dbInstance) listFolders() []string {
+func (db *Instance) ListFolders() []string {
     t := db.newReadOnlyTransaction()
     defer t.close()
@@ -476,7 +513,7 @@ func (db *dbInstance) listFolders() []string {
     return folders
 }
-func (db *dbInstance) dropFolder(folder []byte) {
+func (db *Instance) dropFolder(folder []byte) {
     t := db.newReadOnlyTransaction()
     defer t.close()
@@ -501,7 +538,7 @@ func (db *dbInstance) dropFolder(folder []byte) {
     dbi.Release()
 }
-func (db *dbInstance) checkGlobals(folder []byte, globalSize *sizeTracker) {
+func (db *Instance) checkGlobals(folder []byte, globalSize *sizeTracker) {
     t := db.newReadWriteTransaction()
     defer t.close()
@@ -560,11 +597,11 @@ func (db *dbInstance) checkGlobals(folder []byte, globalSize *sizeTracker) {
 // folder (64 bytes)
 // device (32 bytes)
 // name (variable size)
-func (db *dbInstance) deviceKey(folder, device, file []byte) []byte {
+func (db *Instance) deviceKey(folder, device, file []byte) []byte {
     return db.deviceKeyInto(nil, folder, device, file)
 }
-func (db *dbInstance) deviceKeyInto(k []byte, folder, device, file []byte) []byte {
+func (db *Instance) deviceKeyInto(k []byte, folder, device, file []byte) []byte {
     reqLen := 1 + 64 + 32 + len(file)
     if len(k) < reqLen {
         k = make([]byte, reqLen)
@@ -579,11 +616,11 @@ func (db *dbInstance) deviceKeyInto(k []byte, folder, device, file []byte) []byt
     return k[:reqLen]
 }
-func (db *dbInstance) deviceKeyName(key []byte) []byte {
+func (db *Instance) deviceKeyName(key []byte) []byte {
     return key[1+64+32:]
 }
-func (db *dbInstance) deviceKeyFolder(key []byte) []byte {
+func (db *Instance) deviceKeyFolder(key []byte) []byte {
     folder := key[1 : 1+64]
     izero := bytes.IndexByte(folder, 0)
     if izero < 0 {
@@ -592,7 +629,7 @@ func (db *dbInstance) deviceKeyFolder(key []byte) []byte {
     return folder[:izero]
 }
-func (db *dbInstance) deviceKeyDevice(key []byte) []byte {
+func (db *Instance) deviceKeyDevice(key []byte) []byte {
     return key[1+64 : 1+64+32]
 }
@@ -600,7 +637,7 @@ func (db *dbInstance) deviceKeyDevice(key []byte) []byte {
 // keyTypeGlobal (1 byte)
 // folder (64 bytes)
 // name (variable size)
-func (db *dbInstance) globalKey(folder, file []byte) []byte {
+func (db *Instance) globalKey(folder, file []byte) []byte {
     k := make([]byte, 1+64+len(file))
     k[0] = KeyTypeGlobal
     if len(folder) > 64 {
@@ -611,11 +648,11 @@ func (db *dbInstance) globalKey(folder, file []byte) []byte {
     return k
 }
-func (db *dbInstance) globalKeyName(key []byte) []byte {
+func (db *Instance) globalKeyName(key []byte) []byte {
     return key[1+64:]
 }
-func (db *dbInstance) globalKeyFolder(key []byte) []byte {
+func (db *Instance) globalKeyFolder(key []byte) []byte {
     folder := key[1 : 1+64]
     izero := bytes.IndexByte(folder, 0)
     if izero < 0 {
@@ -635,3 +672,19 @@ func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
     err := tf.UnmarshalXDR(bs)
     return tf, err
 }
+// A "better" version of leveldb's errors.IsCorrupted.
+func leveldbIsCorrupted(err error) bool {
+    switch {
+    case err == nil:
+        return false
+    case errors.IsCorrupted(err):
+        return true
+    case strings.Contains(err.Error(), "corrupted"):
+        return true
+    }
+    return false
+}
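The corruption handling that used to live in cmd/syncthing/main.go is now folded into db.Open above, and OpenMemory gives tests a throwaway instance. A hypothetical test sketch (not part of this commit) showing the new pattern, using only the iterator API visible elsewhere in this diff:

    package db

    import "testing"

    func TestOpenMemorySketch(t *testing.T) {
        ldb := OpenMemory()

        // The embedded *leveldb.DB is still exposed, so raw iteration keeps
        // working; a fresh in-memory instance should contain no keys.
        iter := ldb.NewIterator(nil, nil)
        defer iter.Release()
        if iter.Next() {
            t.Fatal("expected a fresh in-memory database to be empty")
        }
    }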

View File

@@ -16,7 +16,7 @@ func TestDeviceKey(t *testing.T) {
     dev := []byte("device67890123456789012345678901")
     name := []byte("name")
-    db := &dbInstance{}
+    db := &Instance{}
     key := db.deviceKey(fld, dev, name)
@@ -38,7 +38,7 @@ func TestGlobalKey(t *testing.T) {
     fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
     name := []byte("name")
-    db := &dbInstance{}
+    db := &Instance{}
     key := db.globalKey(fld, name)

View File

@@ -16,10 +16,10 @@ import (
 // A readOnlyTransaction represents a database snapshot.
 type readOnlyTransaction struct {
     *leveldb.Snapshot
-    db *dbInstance
+    db *Instance
 }
-func (db *dbInstance) newReadOnlyTransaction() readOnlyTransaction {
+func (db *Instance) newReadOnlyTransaction() readOnlyTransaction {
     snap, err := db.GetSnapshot()
     if err != nil {
         panic(err)
@@ -46,7 +46,7 @@ type readWriteTransaction struct {
     *leveldb.Batch
 }
-func (db *dbInstance) newReadWriteTransaction() readWriteTransaction {
+func (db *Instance) newReadWriteTransaction() readWriteTransaction {
     t := db.newReadOnlyTransaction()
     return readWriteTransaction{
         readOnlyTransaction: t,

View File

@@ -17,13 +17,13 @@ import (
 // NamespacedKV is a simple key-value store using a specific namespace within
 // a leveldb.
 type NamespacedKV struct {
-    db *leveldb.DB
+    db *Instance
     prefix []byte
 }
 // NewNamespacedKV returns a new NamespacedKV that lives in the namespace
 // specified by the prefix.
-func NewNamespacedKV(db *leveldb.DB, prefix string) *NamespacedKV {
+func NewNamespacedKV(db *Instance, prefix string) *NamespacedKV {
     return &NamespacedKV{
         db: db,
         prefix: []byte(prefix),

View File

@@ -9,16 +9,10 @@ package db
 import (
     "testing"
     "time"
-    "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/storage"
 )
 func TestNamespacedInt(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := OpenMemory()
     n1 := NewNamespacedKV(ldb, "foo")
     n2 := NewNamespacedKV(ldb, "bar")
@@ -53,10 +47,7 @@ func TestNamespacedInt(t *testing.T) {
 }
 func TestNamespacedTime(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := OpenMemory()
     n1 := NewNamespacedKV(ldb, "foo")
@@ -73,10 +64,7 @@ func TestNamespacedTime(t *testing.T) {
 }
 func TestNamespacedString(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := OpenMemory()
     n1 := NewNamespacedKV(ldb, "foo")
@@ -92,10 +80,7 @@ func TestNamespacedString(t *testing.T) {
 }
 func TestNamespacedReset(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := OpenMemory()
     n1 := NewNamespacedKV(ldb, "foo")

View File

@@ -18,14 +18,13 @@ import (
     "github.com/syncthing/syncthing/lib/osutil"
     "github.com/syncthing/syncthing/lib/protocol"
     "github.com/syncthing/syncthing/lib/sync"
-    "github.com/syndtr/goleveldb/leveldb"
 )
 type FileSet struct {
     localVersion map[protocol.DeviceID]int64
     mutex sync.Mutex
     folder string
-    db *dbInstance
+    db *Instance
     blockmap *BlockMap
     localSize sizeTracker
     globalSize sizeTracker
@@ -93,11 +92,11 @@ func (s *sizeTracker) Size() (files, deleted int, bytes int64) {
     return s.files, s.deleted, s.bytes
 }
-func NewFileSet(folder string, db *leveldb.DB) *FileSet {
+func NewFileSet(folder string, db *Instance) *FileSet {
     var s = FileSet{
         localVersion: make(map[protocol.DeviceID]int64),
         folder: folder,
-        db: newDBInstance(db),
+        db: db,
         blockmap: NewBlockMap(db, folder),
         mutex: sync.NewMutex(),
     }
@@ -239,17 +238,10 @@ func (s *FileSet) GlobalSize() (files, deleted int, bytes int64) {
     return s.globalSize.Size()
 }
-// ListFolders returns the folder IDs seen in the database.
-func ListFolders(db *leveldb.DB) []string {
-    i := newDBInstance(db)
-    return i.listFolders()
-}
 // DropFolder clears out all information related to the given folder from the
 // database.
-func DropFolder(db *leveldb.DB, folder string) {
-    i := newDBInstance(db)
-    i.dropFolder([]byte(folder))
+func DropFolder(db *Instance, folder string) {
+    db.dropFolder([]byte(folder))
     bm := &BlockMap{
         db: db,
         folder: folder,

View File

@@ -15,8 +15,6 @@ import (
     "github.com/syncthing/syncthing/lib/db"
     "github.com/syncthing/syncthing/lib/protocol"
-    "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/storage"
 )
 var remoteDevice0, remoteDevice1 protocol.DeviceID
@@ -96,11 +94,7 @@ func (l fileList) String() string {
 }
 func TestGlobalSet(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     m := db.NewFileSet("test", ldb)
@@ -303,10 +297,7 @@ func TestGlobalSet(t *testing.T) {
 }
 func TestNeedWithInvalid(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     s := db.NewFileSet("test", ldb)
@@ -343,10 +334,7 @@ func TestNeedWithInvalid(t *testing.T) {
 }
 func TestUpdateToInvalid(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     s := db.NewFileSet("test", ldb)
@@ -378,10 +366,7 @@ func TestUpdateToInvalid(t *testing.T) {
 }
 func TestInvalidAvailability(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     s := db.NewFileSet("test", ldb)
@@ -419,10 +404,7 @@ func TestInvalidAvailability(t *testing.T) {
 }
 func TestGlobalReset(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     m := db.NewFileSet("test", ldb)
@@ -460,10 +442,7 @@ func TestGlobalReset(t *testing.T) {
 }
 func TestNeed(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     m := db.NewFileSet("test", ldb)
@@ -501,10 +480,7 @@ func TestNeed(t *testing.T) {
 }
 func TestLocalVersion(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     m := db.NewFileSet("test", ldb)
@@ -534,10 +510,7 @@ func TestLocalVersion(t *testing.T) {
 }
 func TestListDropFolder(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     s0 := db.NewFileSet("test0", ldb)
     local1 := []protocol.FileInfo{
@@ -558,7 +531,7 @@ func TestListDropFolder(t *testing.T) {
     // Check that we have both folders and their data is in the global list
     expectedFolderList := []string{"test0", "test1"}
-    if actualFolderList := db.ListFolders(ldb); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
+    if actualFolderList := ldb.ListFolders(); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
         t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
     }
     if l := len(globalList(s0)); l != 3 {
@@ -573,7 +546,7 @@ func TestListDropFolder(t *testing.T) {
     db.DropFolder(ldb, "test1")
     expectedFolderList = []string{"test0"}
-    if actualFolderList := db.ListFolders(ldb); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
+    if actualFolderList := ldb.ListFolders(); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
         t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
     }
     if l := len(globalList(s0)); l != 3 {
@@ -585,10 +558,7 @@ func TestListDropFolder(t *testing.T) {
 }
 func TestGlobalNeedWithInvalid(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     s := db.NewFileSet("test1", ldb)
@@ -625,10 +595,7 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
 }
 func TestLongPath(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := db.OpenMemory()
     s := db.NewFileSet("test", ldb)

View File

@@ -9,8 +9,6 @@ package db
 import (
     "fmt"
     "time"
-    "github.com/syndtr/goleveldb/leveldb"
 )
 // This type encapsulates a repository of mtimes for platforms where file mtimes
@@ -25,7 +23,7 @@ type VirtualMtimeRepo struct {
     ns *NamespacedKV
 }
-func NewVirtualMtimeRepo(ldb *leveldb.DB, folder string) *VirtualMtimeRepo {
+func NewVirtualMtimeRepo(ldb *Instance, folder string) *VirtualMtimeRepo {
     prefix := string(KeyTypeVirtualMtime) + folder
     return &VirtualMtimeRepo{

View File

@@ -9,16 +9,10 @@ package db
 import (
     "testing"
     "time"
-    "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/storage"
 )
 func TestVirtualMtimeRepo(t *testing.T) {
-    ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ldb := OpenMemory()
     // A few repos so we can ensure they don't pollute each other
     repo1 := NewVirtualMtimeRepo(ldb, "folder1")

View File

@@ -33,7 +33,6 @@ import (
     "github.com/syncthing/syncthing/lib/symlinks"
     "github.com/syncthing/syncthing/lib/sync"
     "github.com/syncthing/syncthing/lib/versioner"
-    "github.com/syndtr/goleveldb/leveldb"
     "github.com/thejerf/suture"
 )
@@ -64,7 +63,7 @@ type Model struct {
     *suture.Supervisor
     cfg *config.Wrapper
-    db *leveldb.DB
+    db *db.Instance
     finder *db.BlockFinder
     progressEmitter *ProgressEmitter
     id protocol.DeviceID
@@ -99,7 +98,7 @@ var (
 // NewModel creates and starts a new model. The model starts in read-only mode,
 // where it sends index information to connected peers and responds to requests
 // for file data without altering the local folder in any way.
-func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName, clientVersion string, ldb *leveldb.DB, protectedFiles []string) *Model {
+func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName, clientVersion string, ldb *db.Instance, protectedFiles []string) *Model {
     m := &Model{
         Supervisor: suture.New("model", suture.Spec{
             Log: func(line string) {
Log: func(line string) { Log: func(line string) {

View File

@@ -22,8 +22,6 @@ import (
     "github.com/syncthing/syncthing/lib/config"
     "github.com/syncthing/syncthing/lib/db"
     "github.com/syncthing/syncthing/lib/protocol"
-    "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/storage"
 )
 var device1, device2 protocol.DeviceID
@@ -90,7 +88,7 @@ func init() {
 }
 func TestRequest(t *testing.T) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
@@ -167,7 +165,7 @@ func BenchmarkIndex_100(b *testing.B) {
 }
 func benchmarkIndex(b *testing.B, nfiles int) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     m.StartFolderRO("default")
@@ -196,7 +194,7 @@ func BenchmarkIndexUpdate_10000_1(b *testing.B) {
 }
 func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     m.StartFolderRO("default")
@@ -261,7 +259,7 @@ func (FakeConnection) Statistics() protocol.Statistics {
 }
 func BenchmarkRequest(b *testing.B) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     m.ServeBackground()
@@ -317,7 +315,7 @@ func TestDeviceRename(t *testing.T) {
     }
     cfg := config.Wrap("tmpconfig.xml", rawCfg)
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     fc := FakeConnection{
@@ -391,7 +389,7 @@ func TestClusterConfig(t *testing.T) {
         },
     }
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(config.Wrap("/tmp/test", cfg), protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(cfg.Folders[0])
@@ -463,7 +461,7 @@ func TestIgnores(t *testing.T) {
     ioutil.WriteFile("testdata/.stfolder", nil, 0644)
     ioutil.WriteFile("testdata/.stignore", []byte(".*\nquux\n"), 0644)
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     m.StartFolderRO("default")
@@ -538,7 +536,7 @@ func TestIgnores(t *testing.T) {
 }
 func TestRefuseUnknownBits(t *testing.T) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     m.ServeBackground()
@@ -576,7 +574,7 @@ func TestRefuseUnknownBits(t *testing.T) {
 }
 func TestROScanRecovery(t *testing.T) {
-    ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    ldb := db.OpenMemory()
     set := db.NewFileSet("default", ldb)
     set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
         {Name: "dummyfile"},
@@ -660,7 +658,7 @@ func TestROScanRecovery(t *testing.T) {
 }
 func TestRWScanRecovery(t *testing.T) {
-    ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    ldb := db.OpenMemory()
     set := db.NewFileSet("default", ldb)
     set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
         {Name: "dummyfile"},
@@ -744,7 +742,7 @@ func TestRWScanRecovery(t *testing.T) {
 }
 func TestGlobalDirectoryTree(t *testing.T) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     m.ServeBackground()
@@ -994,7 +992,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
 }
 func TestGlobalDirectorySelfFixing(t *testing.T) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     m.ServeBackground()
@@ -1168,7 +1166,7 @@ func BenchmarkTree_100_10(b *testing.B) {
 }
 func benchmarkTree(b *testing.B, n1, n2 int) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     m.ServeBackground()
@@ -1186,7 +1184,7 @@ func benchmarkTree(b *testing.B, n1, n2 int) {
 }
 func TestIgnoreDelete(t *testing.T) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     // This folder should ignore external deletes

View File

@@ -12,12 +12,10 @@ import (
     "testing"
     "time"
+    "github.com/syncthing/syncthing/lib/db"
     "github.com/syncthing/syncthing/lib/protocol"
     "github.com/syncthing/syncthing/lib/scanner"
     "github.com/syncthing/syncthing/lib/sync"
-    "github.com/syndtr/goleveldb/leveldb"
-    "github.com/syndtr/goleveldb/leveldb/storage"
 )
 func init() {
@@ -69,7 +67,7 @@ func TestHandleFile(t *testing.T) {
     requiredFile := existingFile
     requiredFile.Blocks = blocks[1:]
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     // Update index
@@ -125,7 +123,7 @@ func TestHandleFileWithTemp(t *testing.T) {
     requiredFile := existingFile
     requiredFile.Blocks = blocks[1:]
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     // Update index
@@ -187,7 +185,7 @@ func TestCopierFinder(t *testing.T) {
     requiredFile.Blocks = blocks[1:]
     requiredFile.Name = "file2"
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
     // Update index
@@ -264,7 +262,7 @@ func TestCopierCleanup(t *testing.T) {
         return true
     }
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
@@ -313,7 +311,7 @@ func TestCopierCleanup(t *testing.T) {
 // Make sure that the copier routine hashes the content when asked, and pulls
 // if it fails to find the block.
 func TestLastResortPulling(t *testing.T) {
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
@@ -387,7 +385,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
     }
     defer os.Remove("testdata/" + defTempNamer.TempName("filex"))
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)
@@ -480,7 +478,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
     }
     defer os.Remove("testdata/" + defTempNamer.TempName("filex"))
-    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+    db := db.OpenMemory()
     m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
     m.AddFolder(defaultFolderConfig)

View File

@@ -10,7 +10,6 @@ import (
     "time"
     "github.com/syncthing/syncthing/lib/db"
-    "github.com/syndtr/goleveldb/leveldb"
 )
 type DeviceStatistics struct {
@@ -22,7+21,7 @@ type DeviceStatisticsReference struct {
     device string
 }
-func NewDeviceStatisticsReference(ldb *leveldb.DB, device string) *DeviceStatisticsReference {
+func NewDeviceStatisticsReference(ldb *db.Instance, device string) *DeviceStatisticsReference {
     prefix := string(db.KeyTypeDeviceStatistic) + device
     return &DeviceStatisticsReference{
         ns: db.NewNamespacedKV(ldb, prefix),

View File

@@ -10,7 +10,6 @@ import (
     "time"
     "github.com/syncthing/syncthing/lib/db"
-    "github.com/syndtr/goleveldb/leveldb"
 )
 type FolderStatistics struct {
@@ -28,7 +27,7 @@ type LastFile struct {
     Deleted bool `json:"deleted"`
 }
-func NewFolderStatisticsReference(ldb *leveldb.DB, folder string) *FolderStatisticsReference {
+func NewFolderStatisticsReference(ldb *db.Instance, folder string) *FolderStatisticsReference {
     prefix := string(db.KeyTypeFolderStatistic) + folder
     return &FolderStatisticsReference{
         ns: db.NewNamespacedKV(ldb, prefix),