Rename Repository -> Folder, Node -> Device (fixes #739)

This commit is contained in:
Audrius Butkevicius
2014-09-28 12:00:38 +01:00
parent 9d816694ba
commit 6c352dca74
61 changed files with 2118 additions and 2118 deletions

View File

@@ -25,24 +25,24 @@ var l = logger.DefaultLogger
type Configuration struct {
Location string `xml:"-" json:"-"`
Version int `xml:"version,attr" default:"3"`
Repositories []RepositoryConfiguration `xml:"repository"`
Nodes []NodeConfiguration `xml:"node"`
Folders []FolderConfiguration `xml:"folder"`
Devices []DeviceConfiguration `xml:"device"`
GUI GUIConfiguration `xml:"gui"`
Options OptionsConfiguration `xml:"options"`
XMLName xml.Name `xml:"configuration" json:"-"`
}
type RepositoryConfiguration struct {
type FolderConfiguration struct {
ID string `xml:"id,attr"`
Directory string `xml:"directory,attr"`
Nodes []RepositoryNodeConfiguration `xml:"node"`
Devices []FolderDeviceConfiguration `xml:"device"`
ReadOnly bool `xml:"ro,attr"`
RescanIntervalS int `xml:"rescanIntervalS,attr" default:"60"`
IgnorePerms bool `xml:"ignorePerms,attr"`
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
Versioning VersioningConfiguration `xml:"versioning"`
nodeIDs []protocol.NodeID
deviceIDs []protocol.DeviceID
}
type VersioningConfiguration struct {
@@ -86,17 +86,17 @@ func (c *VersioningConfiguration) UnmarshalXML(d *xml.Decoder, start xml.StartEl
return nil
}
func (r *RepositoryConfiguration) NodeIDs() []protocol.NodeID {
if r.nodeIDs == nil {
for _, n := range r.Nodes {
r.nodeIDs = append(r.nodeIDs, n.NodeID)
func (r *FolderConfiguration) DeviceIDs() []protocol.DeviceID {
if r.deviceIDs == nil {
for _, n := range r.Devices {
r.deviceIDs = append(r.deviceIDs, n.DeviceID)
}
}
return r.nodeIDs
return r.deviceIDs
}
type NodeConfiguration struct {
NodeID protocol.NodeID `xml:"id,attr"`
type DeviceConfiguration struct {
DeviceID protocol.DeviceID `xml:"id,attr"`
Name string `xml:"name,attr,omitempty"`
Addresses []string `xml:"address,omitempty"`
Compression bool `xml:"compression,attr"`
@@ -104,8 +104,8 @@ type NodeConfiguration struct {
Introducer bool `xml:"introducer,attr"`
}
type RepositoryNodeConfiguration struct {
NodeID protocol.NodeID `xml:"id,attr"`
type FolderDeviceConfiguration struct {
DeviceID protocol.DeviceID `xml:"id,attr"`
Deprecated_Name string `xml:"name,attr,omitempty" json:"-"`
Deprecated_Addresses []string `xml:"address,omitempty" json:"-"`
@@ -145,35 +145,35 @@ type GUIConfiguration struct {
APIKey string `xml:"apikey,omitempty"`
}
func (cfg *Configuration) NodeMap() map[protocol.NodeID]NodeConfiguration {
m := make(map[protocol.NodeID]NodeConfiguration, len(cfg.Nodes))
for _, n := range cfg.Nodes {
m[n.NodeID] = n
func (cfg *Configuration) DeviceMap() map[protocol.DeviceID]DeviceConfiguration {
m := make(map[protocol.DeviceID]DeviceConfiguration, len(cfg.Devices))
for _, n := range cfg.Devices {
m[n.DeviceID] = n
}
return m
}
func (cfg *Configuration) GetNodeConfiguration(nodeID protocol.NodeID) *NodeConfiguration {
for i, node := range cfg.Nodes {
if node.NodeID == nodeID {
return &cfg.Nodes[i]
func (cfg *Configuration) GetDeviceConfiguration(deviceID protocol.DeviceID) *DeviceConfiguration {
for i, device := range cfg.Devices {
if device.DeviceID == deviceID {
return &cfg.Devices[i]
}
}
return nil
}
func (cfg *Configuration) GetRepoConfiguration(repoID string) *RepositoryConfiguration {
for i, repo := range cfg.Repositories {
if repo.ID == repoID {
return &cfg.Repositories[i]
func (cfg *Configuration) GetFolderConfiguration(folderID string) *FolderConfiguration {
for i, folder := range cfg.Folders {
if folder.ID == folderID {
return &cfg.Folders[i]
}
}
return nil
}
func (cfg *Configuration) RepoMap() map[string]RepositoryConfiguration {
m := make(map[string]RepositoryConfiguration, len(cfg.Repositories))
for _, r := range cfg.Repositories {
func (cfg *Configuration) FolderMap() map[string]FolderConfiguration {
m := make(map[string]FolderConfiguration, len(cfg.Folders))
for _, r := range cfg.Folders {
m[r.ID] = r
}
return m
@@ -290,44 +290,44 @@ func uniqueStrings(ss []string) []string {
return us
}
func (cfg *Configuration) prepare(myID protocol.NodeID) {
func (cfg *Configuration) prepare(myID protocol.DeviceID) {
fillNilSlices(&cfg.Options)
cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
// Initialize an empty slice for repositories if the config has none
if cfg.Repositories == nil {
cfg.Repositories = []RepositoryConfiguration{}
// Initialize an empty slice for folders if the config has none
if cfg.Folders == nil {
cfg.Folders = []FolderConfiguration{}
}
// Check for missing, bad or duplicate repository IDs
var seenRepos = map[string]*RepositoryConfiguration{}
// Check for missing, bad or duplicate folder IDs
var seenFolders = map[string]*FolderConfiguration{}
var uniqueCounter int
for i := range cfg.Repositories {
repo := &cfg.Repositories[i]
for i := range cfg.Folders {
folder := &cfg.Folders[i]
if len(repo.Directory) == 0 {
repo.Invalid = "no directory configured"
if len(folder.Directory) == 0 {
folder.Invalid = "no directory configured"
continue
}
if repo.ID == "" {
repo.ID = "default"
if folder.ID == "" {
folder.ID = "default"
}
if seen, ok := seenRepos[repo.ID]; ok {
l.Warnf("Multiple repositories with ID %q; disabling", repo.ID)
if seen, ok := seenFolders[folder.ID]; ok {
l.Warnf("Multiple folders with ID %q; disabling", folder.ID)
seen.Invalid = "duplicate repository ID"
if seen.ID == repo.ID {
seen.Invalid = "duplicate folder ID"
if seen.ID == folder.ID {
uniqueCounter++
seen.ID = fmt.Sprintf("%s~%d", repo.ID, uniqueCounter)
seen.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
}
repo.Invalid = "duplicate repository ID"
folder.Invalid = "duplicate folder ID"
uniqueCounter++
repo.ID = fmt.Sprintf("%s~%d", repo.ID, uniqueCounter)
folder.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
} else {
seenRepos[repo.ID] = repo
seenFolders[folder.ID] = folder
}
}
@@ -362,42 +362,42 @@ func (cfg *Configuration) prepare(myID protocol.NodeID) {
}
}
// Build a list of available nodes
existingNodes := make(map[protocol.NodeID]bool)
existingNodes[myID] = true
for _, node := range cfg.Nodes {
existingNodes[node.NodeID] = true
// Build a list of available devices
existingDevices := make(map[protocol.DeviceID]bool)
existingDevices[myID] = true
for _, device := range cfg.Devices {
existingDevices[device.DeviceID] = true
}
// Ensure this node is present in all relevant places
me := cfg.GetNodeConfiguration(myID)
// Ensure this device is present in all relevant places
me := cfg.GetDeviceConfiguration(myID)
if me == nil {
myName, _ := os.Hostname()
cfg.Nodes = append(cfg.Nodes, NodeConfiguration{
NodeID: myID,
cfg.Devices = append(cfg.Devices, DeviceConfiguration{
DeviceID: myID,
Name: myName,
})
}
sort.Sort(NodeConfigurationList(cfg.Nodes))
// Ensure that any loose nodes are not present in the wrong places
// Ensure that there are no duplicate nodes
for i := range cfg.Repositories {
cfg.Repositories[i].Nodes = ensureNodePresent(cfg.Repositories[i].Nodes, myID)
cfg.Repositories[i].Nodes = ensureExistingNodes(cfg.Repositories[i].Nodes, existingNodes)
cfg.Repositories[i].Nodes = ensureNoDuplicates(cfg.Repositories[i].Nodes)
sort.Sort(RepositoryNodeConfigurationList(cfg.Repositories[i].Nodes))
sort.Sort(DeviceConfigurationList(cfg.Devices))
// Ensure that any loose devices are not present in the wrong places
// Ensure that there are no duplicate devices
for i := range cfg.Folders {
cfg.Folders[i].Devices = ensureDevicePresent(cfg.Folders[i].Devices, myID)
cfg.Folders[i].Devices = ensureExistingDevices(cfg.Folders[i].Devices, existingDevices)
cfg.Folders[i].Devices = ensureNoDuplicates(cfg.Folders[i].Devices)
sort.Sort(FolderDeviceConfigurationList(cfg.Folders[i].Devices))
}
// An empty address list is equivalent to a single "dynamic" entry
for i := range cfg.Nodes {
n := &cfg.Nodes[i]
for i := range cfg.Devices {
n := &cfg.Devices[i]
if len(n.Addresses) == 0 || len(n.Addresses) == 1 && n.Addresses[0] == "" {
n.Addresses = []string{"dynamic"}
}
}
}
func New(location string, myID protocol.NodeID) Configuration {
func New(location string, myID protocol.DeviceID) Configuration {
var cfg Configuration
cfg.Location = location
@@ -411,7 +411,7 @@ func New(location string, myID protocol.NodeID) Configuration {
return cfg
}
func Load(location string, myID protocol.NodeID) (Configuration, error) {
func Load(location string, myID protocol.DeviceID) (Configuration, error) {
var cfg Configuration
cfg.Location = location
@@ -435,24 +435,24 @@ func Load(location string, myID protocol.NodeID) (Configuration, error) {
// ChangeRequiresRestart returns true if updating the configuration requires a
// complete restart.
func ChangeRequiresRestart(from, to Configuration) bool {
// Adding, removing or changing repos requires restart
if len(from.Repositories) != len(to.Repositories) {
// Adding, removing or changing folders requires restart
if len(from.Folders) != len(to.Folders) {
return true
}
fromRepos := from.RepoMap()
toRepos := to.RepoMap()
for id := range fromRepos {
if !reflect.DeepEqual(fromRepos[id], toRepos[id]) {
fromFolders := from.FolderMap()
toFolders := to.FolderMap()
for id := range fromFolders {
if !reflect.DeepEqual(fromFolders[id], toFolders[id]) {
return true
}
}
// Removing a node requires a restart. Adding one does not. Changing
// Removing a device requires a restart. Adding one does not. Changing
// address or name does not.
fromNodes := from.NodeMap()
toNodes := to.NodeMap()
for nodeID := range fromNodes {
if _, ok := toNodes[nodeID]; !ok {
fromDevices := from.DeviceMap()
toDevices := to.DeviceMap()
for deviceID := range fromDevices {
if _, ok := toDevices[deviceID]; !ok {
return true
}
}
@@ -466,22 +466,22 @@ func ChangeRequiresRestart(from, to Configuration) bool {
}
func convertV3V4(cfg *Configuration) {
// In previous versions, rescan interval was common for each repository.
// In previous versions, rescan interval was common for each folder.
// From now on, it can be set independently. We have to make sure that after the upgrade
// the individual rescan interval will be defined for every existing repository.
for i := range cfg.Repositories {
cfg.Repositories[i].RescanIntervalS = cfg.Options.Deprecated_RescanIntervalS
// the individual rescan interval will be defined for every existing folder.
for i := range cfg.Folders {
cfg.Folders[i].RescanIntervalS = cfg.Options.Deprecated_RescanIntervalS
}
cfg.Options.Deprecated_RescanIntervalS = 0
// In previous versions, repositories held full node configurations.
// Since that's the only place where node configs were in V1, we still have
// In previous versions, folders held full device configurations.
// Since that's the only place where device configs were in V1, we still have
// to define the deprecated fields to be able to upgrade from V1 to V4.
for i, repo := range cfg.Repositories {
for i, folder := range cfg.Folders {
for j := range repo.Nodes {
rncfg := cfg.Repositories[i].Nodes[j]
for j := range folder.Devices {
rncfg := cfg.Folders[i].Devices[j]
rncfg.Deprecated_Name = ""
rncfg.Deprecated_Addresses = nil
}
@@ -492,10 +492,10 @@ func convertV3V4(cfg *Configuration) {
func convertV2V3(cfg *Configuration) {
// In previous versions, compression was always on. When upgrading, enable
// compression on all existing nodes. New nodes will get compression on by
// compression on all existing devices. New devices will get compression on by
// default by the GUI.
for i := range cfg.Nodes {
cfg.Nodes[i].Compression = true
for i := range cfg.Devices {
cfg.Devices[i].Compression = true
}
// The global discovery format and port number changed in v0.9. Having the
@@ -508,31 +508,31 @@ func convertV2V3(cfg *Configuration) {
}
func convertV1V2(cfg *Configuration) {
// Collect the list of nodes.
// Replace node configs inside repositories with only a reference to the node ID.
// Set all repositories to read only if the global read only flag is set.
var nodes = map[string]RepositoryNodeConfiguration{}
for i, repo := range cfg.Repositories {
cfg.Repositories[i].ReadOnly = cfg.Options.Deprecated_ReadOnly
for j, node := range repo.Nodes {
id := node.NodeID.String()
if _, ok := nodes[id]; !ok {
nodes[id] = node
// Collect the list of devices.
// Replace device configs inside folders with only a reference to the device ID.
// Set all folders to read only if the global read only flag is set.
var devices = map[string]FolderDeviceConfiguration{}
for i, folder := range cfg.Folders {
cfg.Folders[i].ReadOnly = cfg.Options.Deprecated_ReadOnly
for j, device := range folder.Devices {
id := device.DeviceID.String()
if _, ok := devices[id]; !ok {
devices[id] = device
}
cfg.Repositories[i].Nodes[j] = RepositoryNodeConfiguration{NodeID: node.NodeID}
cfg.Folders[i].Devices[j] = FolderDeviceConfiguration{DeviceID: device.DeviceID}
}
}
cfg.Options.Deprecated_ReadOnly = false
// Set and sort the list of nodes.
for _, node := range nodes {
cfg.Nodes = append(cfg.Nodes, NodeConfiguration{
NodeID: node.NodeID,
Name: node.Deprecated_Name,
Addresses: node.Deprecated_Addresses,
// Set and sort the list of devices.
for _, device := range devices {
cfg.Devices = append(cfg.Devices, DeviceConfiguration{
DeviceID: device.DeviceID,
Name: device.Deprecated_Name,
Addresses: device.Deprecated_Addresses,
})
}
sort.Sort(NodeConfigurationList(cfg.Nodes))
sort.Sort(DeviceConfigurationList(cfg.Devices))
// GUI
cfg.GUI.Address = cfg.Options.Deprecated_GUIAddress
@@ -543,73 +543,73 @@ func convertV1V2(cfg *Configuration) {
cfg.Version = 2
}
type NodeConfigurationList []NodeConfiguration
type DeviceConfigurationList []DeviceConfiguration
func (l NodeConfigurationList) Less(a, b int) bool {
return l[a].NodeID.Compare(l[b].NodeID) == -1
func (l DeviceConfigurationList) Less(a, b int) bool {
return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
func (l NodeConfigurationList) Swap(a, b int) {
func (l DeviceConfigurationList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l NodeConfigurationList) Len() int {
func (l DeviceConfigurationList) Len() int {
return len(l)
}
type RepositoryNodeConfigurationList []RepositoryNodeConfiguration
type FolderDeviceConfigurationList []FolderDeviceConfiguration
func (l RepositoryNodeConfigurationList) Less(a, b int) bool {
return l[a].NodeID.Compare(l[b].NodeID) == -1
func (l FolderDeviceConfigurationList) Less(a, b int) bool {
return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
func (l RepositoryNodeConfigurationList) Swap(a, b int) {
func (l FolderDeviceConfigurationList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l RepositoryNodeConfigurationList) Len() int {
func (l FolderDeviceConfigurationList) Len() int {
return len(l)
}
func ensureNodePresent(nodes []RepositoryNodeConfiguration, myID protocol.NodeID) []RepositoryNodeConfiguration {
for _, node := range nodes {
if node.NodeID.Equals(myID) {
return nodes
func ensureDevicePresent(devices []FolderDeviceConfiguration, myID protocol.DeviceID) []FolderDeviceConfiguration {
for _, device := range devices {
if device.DeviceID.Equals(myID) {
return devices
}
}
nodes = append(nodes, RepositoryNodeConfiguration{
NodeID: myID,
devices = append(devices, FolderDeviceConfiguration{
DeviceID: myID,
})
return nodes
return devices
}
func ensureExistingNodes(nodes []RepositoryNodeConfiguration, existingNodes map[protocol.NodeID]bool) []RepositoryNodeConfiguration {
count := len(nodes)
func ensureExistingDevices(devices []FolderDeviceConfiguration, existingDevices map[protocol.DeviceID]bool) []FolderDeviceConfiguration {
count := len(devices)
i := 0
loop:
for i < count {
if _, ok := existingNodes[nodes[i].NodeID]; !ok {
nodes[i] = nodes[count-1]
if _, ok := existingDevices[devices[i].DeviceID]; !ok {
devices[i] = devices[count-1]
count--
continue loop
}
i++
}
return nodes[0:count]
return devices[0:count]
}
func ensureNoDuplicates(nodes []RepositoryNodeConfiguration) []RepositoryNodeConfiguration {
count := len(nodes)
func ensureNoDuplicates(devices []FolderDeviceConfiguration) []FolderDeviceConfiguration {
count := len(devices)
i := 0
seenNodes := make(map[protocol.NodeID]bool)
seenDevices := make(map[protocol.DeviceID]bool)
loop:
for i < count {
id := nodes[i].NodeID
if _, ok := seenNodes[id]; ok {
nodes[i] = nodes[count-1]
id := devices[i].DeviceID
if _, ok := seenDevices[id]; ok {
devices[i] = devices[count-1]
count--
continue loop
}
seenNodes[id] = true
seenDevices[id] = true
i++
}
return nodes[0:count]
return devices[0:count]
}

View File

@@ -12,13 +12,13 @@ import (
"github.com/syncthing/syncthing/internal/protocol"
)
var node1, node2, node3, node4 protocol.NodeID
var device1, device2, device3, device4 protocol.DeviceID
func init() {
node1, _ = protocol.NodeIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
node2, _ = protocol.NodeIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
node3, _ = protocol.NodeIDFromString("LGFPDIT-7SKNNJL-VJZA4FC-7QNCRKA-CE753K7-2BW5QDK-2FOZ7FR-FEP57QJ")
node4, _ = protocol.NodeIDFromString("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")
device1, _ = protocol.DeviceIDFromString("AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ")
device2, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
device3, _ = protocol.DeviceIDFromString("LGFPDIT-7SKNNJL-VJZA4FC-7QNCRKA-CE753K7-2BW5QDK-2FOZ7FR-FEP57QJ")
device4, _ = protocol.DeviceIDFromString("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")
}
func TestDefaultValues(t *testing.T) {
@@ -39,69 +39,69 @@ func TestDefaultValues(t *testing.T) {
RestartOnWakeup: true,
}
cfg := New("test", node1)
cfg := New("test", device1)
if !reflect.DeepEqual(cfg.Options, expected) {
t.Errorf("Default config differs;\n E: %#v\n A: %#v", expected, cfg.Options)
}
}
func TestNodeConfig(t *testing.T) {
func TestDeviceConfig(t *testing.T) {
for i, ver := range []string{"v1", "v2", "v3", "v4"} {
cfg, err := Load("testdata/"+ver+".xml", node1)
cfg, err := Load("testdata/"+ver+".xml", device1)
if err != nil {
t.Error(err)
}
expectedRepos := []RepositoryConfiguration{
expectedFolders := []FolderConfiguration{
{
ID: "test",
Directory: "~/Sync",
Nodes: []RepositoryNodeConfiguration{{NodeID: node1}, {NodeID: node4}},
Devices: []FolderDeviceConfiguration{{DeviceID: device1}, {DeviceID: device4}},
ReadOnly: true,
RescanIntervalS: 600,
},
}
expectedNodes := []NodeConfiguration{
expectedDevices := []DeviceConfiguration{
{
NodeID: node1,
Name: "node one",
DeviceID: device1,
Name: "device one",
Addresses: []string{"a"},
Compression: true,
},
{
NodeID: node4,
Name: "node two",
DeviceID: device4,
Name: "device two",
Addresses: []string{"b"},
Compression: true,
},
}
expectedNodeIDs := []protocol.NodeID{node1, node4}
expectedDeviceIDs := []protocol.DeviceID{device1, device4}
if cfg.Version != 4 {
t.Errorf("%d: Incorrect version %d != 3", i, cfg.Version)
}
if !reflect.DeepEqual(cfg.Repositories, expectedRepos) {
t.Errorf("%d: Incorrect Repositories\n A: %#v\n E: %#v", i, cfg.Repositories, expectedRepos)
if !reflect.DeepEqual(cfg.Folders, expectedFolders) {
t.Errorf("%d: Incorrect Folders\n A: %#v\n E: %#v", i, cfg.Folders, expectedFolders)
}
if !reflect.DeepEqual(cfg.Nodes, expectedNodes) {
t.Errorf("%d: Incorrect Nodes\n A: %#v\n E: %#v", i, cfg.Nodes, expectedNodes)
if !reflect.DeepEqual(cfg.Devices, expectedDevices) {
t.Errorf("%d: Incorrect Devices\n A: %#v\n E: %#v", i, cfg.Devices, expectedDevices)
}
if !reflect.DeepEqual(cfg.Repositories[0].NodeIDs(), expectedNodeIDs) {
t.Errorf("%d: Incorrect NodeIDs\n A: %#v\n E: %#v", i, cfg.Repositories[0].NodeIDs(), expectedNodeIDs)
if !reflect.DeepEqual(cfg.Folders[0].DeviceIDs(), expectedDeviceIDs) {
t.Errorf("%d: Incorrect DeviceIDs\n A: %#v\n E: %#v", i, cfg.Folders[0].DeviceIDs(), expectedDeviceIDs)
}
if len(cfg.NodeMap()) != len(expectedNodes) {
t.Errorf("Unexpected number of NodeMap() entries")
if len(cfg.DeviceMap()) != len(expectedDevices) {
t.Errorf("Unexpected number of DeviceMap() entries")
}
if len(cfg.RepoMap()) != len(expectedRepos) {
t.Errorf("Unexpected number of RepoMap() entries")
if len(cfg.FolderMap()) != len(expectedFolders) {
t.Errorf("Unexpected number of FolderMap() entries")
}
}
}
func TestNoListenAddress(t *testing.T) {
cfg, err := Load("testdata/nolistenaddress.xml", node1)
cfg, err := Load("testdata/nolistenaddress.xml", device1)
if err != nil {
t.Error(err)
}
@@ -130,7 +130,7 @@ func TestOverriddenValues(t *testing.T) {
RestartOnWakeup: false,
}
cfg, err := Load("testdata/overridenvalues.xml", node1)
cfg, err := Load("testdata/overridenvalues.xml", device1)
if err != nil {
t.Error(err)
}
@@ -140,80 +140,80 @@ func TestOverriddenValues(t *testing.T) {
}
}
func TestNodeAddressesDynamic(t *testing.T) {
func TestDeviceAddressesDynamic(t *testing.T) {
name, _ := os.Hostname()
expected := []NodeConfiguration{
expected := []DeviceConfiguration{
{
NodeID: node1,
DeviceID: device1,
Addresses: []string{"dynamic"},
Compression: true,
},
{
NodeID: node2,
DeviceID: device2,
Addresses: []string{"dynamic"},
Compression: true,
},
{
NodeID: node3,
DeviceID: device3,
Addresses: []string{"dynamic"},
Compression: true,
},
{
NodeID: node4,
DeviceID: device4,
Name: name, // Set when auto created
Addresses: []string{"dynamic"},
},
}
cfg, err := Load("testdata/nodeaddressesdynamic.xml", node4)
cfg, err := Load("testdata/deviceaddressesdynamic.xml", device4)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(cfg.Nodes, expected) {
t.Errorf("Nodes differ;\n E: %#v\n A: %#v", expected, cfg.Nodes)
if !reflect.DeepEqual(cfg.Devices, expected) {
t.Errorf("Devices differ;\n E: %#v\n A: %#v", expected, cfg.Devices)
}
}
func TestNodeAddressesStatic(t *testing.T) {
func TestDeviceAddressesStatic(t *testing.T) {
name, _ := os.Hostname()
expected := []NodeConfiguration{
expected := []DeviceConfiguration{
{
NodeID: node1,
DeviceID: device1,
Addresses: []string{"192.0.2.1", "192.0.2.2"},
},
{
NodeID: node2,
DeviceID: device2,
Addresses: []string{"192.0.2.3:6070", "[2001:db8::42]:4242"},
},
{
NodeID: node3,
DeviceID: device3,
Addresses: []string{"[2001:db8::44]:4444", "192.0.2.4:6090"},
},
{
NodeID: node4,
DeviceID: device4,
Name: name, // Set when auto created
Addresses: []string{"dynamic"},
},
}
cfg, err := Load("testdata/nodeaddressesstatic.xml", node4)
cfg, err := Load("testdata/deviceaddressesstatic.xml", device4)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(cfg.Nodes, expected) {
t.Errorf("Nodes differ;\n E: %#v\n A: %#v", expected, cfg.Nodes)
if !reflect.DeepEqual(cfg.Devices, expected) {
t.Errorf("Devices differ;\n E: %#v\n A: %#v", expected, cfg.Devices)
}
}
func TestVersioningConfig(t *testing.T) {
cfg, err := Load("testdata/versioningconfig.xml", node4)
cfg, err := Load("testdata/versioningconfig.xml", device4)
if err != nil {
t.Error(err)
}
vc := cfg.Repositories[0].Versioning
vc := cfg.Folders[0].Versioning
if vc.Type != "simple" {
t.Errorf(`vc.Type %q != "simple"`, vc.Type)
}
@@ -239,7 +239,7 @@ func TestNewSaveLoad(t *testing.T) {
return err == nil
}
cfg := New(path, node1)
cfg := New(path, device1)
// To make the equality pass later
cfg.XMLName.Local = "configuration"
@@ -256,7 +256,7 @@ func TestNewSaveLoad(t *testing.T) {
t.Error(path, "does not exist")
}
cfg2, err := Load(path, node1)
cfg2, err := Load(path, device1)
if err != nil {
t.Error(err)
}
@@ -268,7 +268,7 @@ func TestNewSaveLoad(t *testing.T) {
cfg.GUI.User = "test"
cfg.Save()
cfg2, err = Load(path, node1)
cfg2, err = Load(path, device1)
if err != nil {
t.Error(err)
}
@@ -283,13 +283,13 @@ func TestNewSaveLoad(t *testing.T) {
func TestPrepare(t *testing.T) {
var cfg Configuration
if cfg.Repositories != nil || cfg.Nodes != nil || cfg.Options.ListenAddress != nil {
if cfg.Folders != nil || cfg.Devices != nil || cfg.Options.ListenAddress != nil {
t.Error("Expected nil")
}
cfg.prepare(node1)
cfg.prepare(device1)
if cfg.Repositories == nil || cfg.Nodes == nil || cfg.Options.ListenAddress == nil {
if cfg.Folders == nil || cfg.Devices == nil || cfg.Options.ListenAddress == nil {
t.Error("Unexpected nil")
}
}

View File

@@ -20,7 +20,7 @@ import (
)
type Discoverer struct {
myID protocol.NodeID
myID protocol.DeviceID
listenAddrs []string
localBcastIntv time.Duration
globalBcastIntv time.Duration
@@ -28,7 +28,7 @@ type Discoverer struct {
cacheLifetime time.Duration
broadcastBeacon beacon.Interface
multicastBeacon beacon.Interface
registry map[protocol.NodeID][]cacheEntry
registry map[protocol.DeviceID][]cacheEntry
registryLock sync.RWMutex
extServer string
extPort uint16
@@ -49,7 +49,7 @@ var (
ErrIncorrectMagic = errors.New("incorrect magic number")
)
func NewDiscoverer(id protocol.NodeID, addresses []string) *Discoverer {
func NewDiscoverer(id protocol.DeviceID, addresses []string) *Discoverer {
return &Discoverer{
myID: id,
listenAddrs: addresses,
@@ -57,7 +57,7 @@ func NewDiscoverer(id protocol.NodeID, addresses []string) *Discoverer {
globalBcastIntv: 1800 * time.Second,
errorRetryIntv: 60 * time.Second,
cacheLifetime: 5 * time.Minute,
registry: make(map[protocol.NodeID][]cacheEntry),
registry: make(map[protocol.DeviceID][]cacheEntry),
}
}
@@ -120,9 +120,9 @@ func (d *Discoverer) ExtAnnounceOK() bool {
return d.extAnnounceOK
}
func (d *Discoverer) Lookup(node protocol.NodeID) []string {
func (d *Discoverer) Lookup(device protocol.DeviceID) []string {
d.registryLock.Lock()
cached := d.filterCached(d.registry[node])
cached := d.filterCached(d.registry[device])
d.registryLock.Unlock()
if len(cached) > 0 {
@@ -132,7 +132,7 @@ func (d *Discoverer) Lookup(node protocol.NodeID) []string {
}
return addrs
} else if len(d.extServer) != 0 {
addrs := d.externalLookup(node)
addrs := d.externalLookup(device)
cached = make([]cacheEntry, len(addrs))
for i := range addrs {
cached[i] = cacheEntry{
@@ -142,32 +142,32 @@ func (d *Discoverer) Lookup(node protocol.NodeID) []string {
}
d.registryLock.Lock()
d.registry[node] = cached
d.registry[device] = cached
d.registryLock.Unlock()
}
return nil
}
func (d *Discoverer) Hint(node string, addrs []string) {
func (d *Discoverer) Hint(device string, addrs []string) {
resAddrs := resolveAddrs(addrs)
var id protocol.NodeID
id.UnmarshalText([]byte(node))
d.registerNode(nil, Node{
var id protocol.DeviceID
id.UnmarshalText([]byte(device))
d.registerDevice(nil, Device{
Addresses: resAddrs,
ID: id[:],
})
}
func (d *Discoverer) All() map[protocol.NodeID][]cacheEntry {
func (d *Discoverer) All() map[protocol.DeviceID][]cacheEntry {
d.registryLock.RLock()
nodes := make(map[protocol.NodeID][]cacheEntry, len(d.registry))
for node, addrs := range d.registry {
devices := make(map[protocol.DeviceID][]cacheEntry, len(d.registry))
for device, addrs := range d.registry {
addrsCopy := make([]cacheEntry, len(addrs))
copy(addrsCopy, addrs)
nodes[node] = addrsCopy
devices[device] = addrsCopy
}
d.registryLock.RUnlock()
return nodes
return devices
}
func (d *Discoverer) announcementPkt() []byte {
@@ -190,7 +190,7 @@ func (d *Discoverer) announcementPkt() []byte {
}
var pkt = Announce{
Magic: AnnouncementMagic,
This: Node{d.myID[:], addrs},
This: Device{d.myID[:], addrs},
}
return pkt.MarshalXDR()
}
@@ -200,7 +200,7 @@ func (d *Discoverer) sendLocalAnnouncements() {
var pkt = Announce{
Magic: AnnouncementMagic,
This: Node{d.myID[:], addrs},
This: Device{d.myID[:], addrs},
}
msg := pkt.MarshalXDR()
@@ -240,7 +240,7 @@ func (d *Discoverer) sendExternalAnnouncements() {
if d.extPort != 0 {
var pkt = Announce{
Magic: AnnouncementMagic,
This: Node{d.myID[:], []Address{{Port: d.extPort}}},
This: Device{d.myID[:], []Address{{Port: d.extPort}}},
}
buf = pkt.MarshalXDR()
} else {
@@ -264,7 +264,7 @@ func (d *Discoverer) sendExternalAnnouncements() {
}
ok = false
} else {
// Verify that the announce server responds positively for our node ID
// Verify that the announce server responds positively for our device ID
time.Sleep(1 * time.Second)
res := d.externalLookup(d.myID)
@@ -321,12 +321,12 @@ func (d *Discoverer) recvAnnouncements(b beacon.Interface) {
continue
}
var newNode bool
var newDevice bool
if bytes.Compare(pkt.This.ID, d.myID[:]) != 0 {
newNode = d.registerNode(addr, pkt.This)
newDevice = d.registerDevice(addr, pkt.This)
}
if newNode {
if newDevice {
select {
case d.forcedBcastTick <- time.Now():
}
@@ -334,9 +334,9 @@ func (d *Discoverer) recvAnnouncements(b beacon.Interface) {
}
}
func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
var id protocol.NodeID
copy(id[:], node.ID)
func (d *Discoverer) registerDevice(addr net.Addr, device Device) bool {
var id protocol.DeviceID
copy(id[:], device.ID)
d.registryLock.RLock()
current := d.filterCached(d.registry[id])
@@ -344,23 +344,23 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
orig := current
for _, a := range node.Addresses {
var nodeAddr string
for _, a := range device.Addresses {
var deviceAddr string
if len(a.IP) > 0 {
nodeAddr = net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
deviceAddr = net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
} else if addr != nil {
ua := addr.(*net.UDPAddr)
ua.Port = int(a.Port)
nodeAddr = ua.String()
deviceAddr = ua.String()
}
for i := range current {
if current[i].addr == nodeAddr {
if current[i].addr == deviceAddr {
current[i].seen = time.Now()
goto done
}
}
current = append(current, cacheEntry{
addr: nodeAddr,
addr: deviceAddr,
seen: time.Now(),
})
done:
@@ -379,8 +379,8 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
for i := range current {
addrs[i] = current[i].addr
}
events.Default.Log(events.NodeDiscovered, map[string]interface{}{
"node": id.String(),
events.Default.Log(events.DeviceDiscovered, map[string]interface{}{
"device": id.String(),
"addrs": addrs,
})
}
@@ -388,7 +388,7 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
return len(current) > len(orig)
}
func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
func (d *Discoverer) externalLookup(device protocol.DeviceID) []string {
extIP, err := net.ResolveUDPAddr("udp", d.extServer)
if err != nil {
if debug {
@@ -414,7 +414,7 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
return nil
}
buf := Query{QueryMagic, node[:]}.MarshalXDR()
buf := Query{QueryMagic, device[:]}.MarshalXDR()
_, err = conn.Write(buf)
if err != nil {
if debug {
@@ -427,7 +427,7 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
n, err := conn.Read(buf)
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
// Expected if the server doesn't know about requested node ID
// Expected if the server doesn't know about requested device ID
return nil
}
if debug {
@@ -451,8 +451,8 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
var addrs []string
for _, a := range pkt.This.Addresses {
nodeAddr := net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
addrs = append(addrs, nodeAddr)
deviceAddr := net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
addrs = append(addrs, deviceAddr)
}
return addrs
}

View File

@@ -2,5 +2,5 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Package discover implements the node discovery protocol.
// Package discover implements the device discovery protocol.
package discover

View File

@@ -11,16 +11,16 @@ const (
type Query struct {
Magic uint32
NodeID []byte // max:32
DeviceID []byte // max:32
}
type Announce struct {
Magic uint32
This Node
Extra []Node // max:16
This Device
Extra []Device // max:16
}
type Node struct {
type Device struct {
ID []byte // max:32
Addresses []Address // max:16
}

View File

@@ -20,17 +20,17 @@ Query Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Magic |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Node ID |
| Length of Device ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Node ID (variable length) \
\ Device ID (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Query {
unsigned int Magic;
opaque NodeID<32>;
opaque DeviceID<32>;
}
*/
@@ -53,10 +53,10 @@ func (o Query) AppendXDR(bs []byte) []byte {
func (o Query) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint32(o.Magic)
if len(o.NodeID) > 32 {
if len(o.DeviceID) > 32 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteBytes(o.NodeID)
xw.WriteBytes(o.DeviceID)
return xw.Tot(), xw.Error()
}
@@ -73,7 +73,7 @@ func (o *Query) UnmarshalXDR(bs []byte) error {
func (o *Query) decodeXDR(xr *xdr.Reader) error {
o.Magic = xr.ReadUint32()
o.NodeID = xr.ReadBytesMax(32)
o.DeviceID = xr.ReadBytesMax(32)
return xr.Error()
}
@@ -86,20 +86,20 @@ Announce Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Magic |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Node |
| Device |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Extra |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Node Structures \
\ Zero or more Device Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Announce {
unsigned int Magic;
Node This;
Node Extra<16>;
Device This;
Device Extra<16>;
}
*/
@@ -157,7 +157,7 @@ func (o *Announce) decodeXDR(xr *xdr.Reader) error {
if _ExtraSize > 16 {
return xdr.ErrElementSizeExceeded
}
o.Extra = make([]Node, _ExtraSize)
o.Extra = make([]Device, _ExtraSize)
for i := range o.Extra {
(&o.Extra[i]).decodeXDR(xr)
}
@@ -166,7 +166,7 @@ func (o *Announce) decodeXDR(xr *xdr.Reader) error {
/*
Node Structure:
Device Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -185,30 +185,30 @@ Node Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Node {
struct Device {
opaque ID<32>;
Address Addresses<16>;
}
*/
func (o Node) EncodeXDR(w io.Writer) (int, error) {
func (o Device) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o Node) MarshalXDR() []byte {
func (o Device) MarshalXDR() []byte {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o Node) AppendXDR(bs []byte) []byte {
func (o Device) AppendXDR(bs []byte) []byte {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
o.encodeXDR(xw)
return []byte(aw)
}
func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.ID) > 32 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
@@ -226,18 +226,18 @@ func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.Tot(), xw.Error()
}
func (o *Node) DecodeXDR(r io.Reader) error {
func (o *Device) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *Node) UnmarshalXDR(bs []byte) error {
func (o *Device) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *Node) decodeXDR(xr *xdr.Reader) error {
func (o *Device) decodeXDR(xr *xdr.Reader) error {
o.ID = xr.ReadBytesMax(32)
_AddressesSize := int(xr.ReadUint32())
if _AddressesSize > 16 {

View File

@@ -17,15 +17,15 @@ const (
Ping = 1 << iota
Starting
StartupComplete
NodeDiscovered
NodeConnected
NodeDisconnected
NodeRejected
DeviceDiscovered
DeviceConnected
DeviceDisconnected
DeviceRejected
LocalIndexUpdated
RemoteIndexUpdated
ItemStarted
StateChanged
RepoRejected
FolderRejected
ConfigSaved
AllEvents = ^EventType(0)
@@ -39,14 +39,14 @@ func (t EventType) String() string {
return "Starting"
case StartupComplete:
return "StartupComplete"
case NodeDiscovered:
return "NodeDiscovered"
case NodeConnected:
return "NodeConnected"
case NodeDisconnected:
return "NodeDisconnected"
case NodeRejected:
return "NodeRejected"
case DeviceDiscovered:
return "DeviceDiscovered"
case DeviceConnected:
return "DeviceConnected"
case DeviceDisconnected:
return "DeviceDisconnected"
case DeviceRejected:
return "DeviceRejected"
case LocalIndexUpdated:
return "LocalIndexUpdated"
case RemoteIndexUpdated:
@@ -55,8 +55,8 @@ func (t EventType) String() string {
return "ItemStarted"
case StateChanged:
return "StateChanged"
case RepoRejected:
return "RepoRejected"
case FolderRejected:
return "FolderRejected"
case ConfigSaved:
return "ConfigSaved"
default:

View File

@@ -41,7 +41,7 @@ func TestTimeout(t *testing.T) {
func TestEventBeforeSubscribe(t *testing.T) {
l := events.NewLogger()
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
s := l.Subscribe(0)
_, err := s.Poll(timeout)
@@ -54,14 +54,14 @@ func TestEventAfterSubscribe(t *testing.T) {
l := events.NewLogger()
s := l.Subscribe(events.AllEvents)
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
ev, err := s.Poll(timeout)
if err != nil {
t.Fatal("Unexpected error:", err)
}
if ev.Type != events.NodeConnected {
if ev.Type != events.DeviceConnected {
t.Error("Incorrect event type", ev.Type)
}
switch v := ev.Data.(type) {
@@ -77,8 +77,8 @@ func TestEventAfterSubscribe(t *testing.T) {
func TestEventAfterSubscribeIgnoreMask(t *testing.T) {
l := events.NewLogger()
s := l.Subscribe(events.NodeDisconnected)
l.Log(events.NodeConnected, "foo")
s := l.Subscribe(events.DeviceDisconnected)
l.Log(events.DeviceConnected, "foo")
_, err := s.Poll(timeout)
if err != events.ErrTimeout {
@@ -93,7 +93,7 @@ func TestBufferOverflow(t *testing.T) {
t0 := time.Now()
for i := 0; i < events.BufferSize*2; i++ {
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
}
if time.Since(t0) > timeout {
t.Fatalf("Logging took too long")
@@ -104,7 +104,7 @@ func TestUnsubscribe(t *testing.T) {
l := events.NewLogger()
s := l.Subscribe(events.AllEvents)
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
_, err := s.Poll(timeout)
if err != nil {
@@ -112,7 +112,7 @@ func TestUnsubscribe(t *testing.T) {
}
l.Unsubscribe(s)
l.Log(events.NodeConnected, "foo")
l.Log(events.DeviceConnected, "foo")
_, err = s.Poll(timeout)
if err != events.ErrClosed {
@@ -124,8 +124,8 @@ func TestIDs(t *testing.T) {
l := events.NewLogger()
s := l.Subscribe(events.AllEvents)
l.Log(events.NodeConnected, "foo")
l.Log(events.NodeConnected, "bar")
l.Log(events.DeviceConnected, "foo")
l.Log(events.DeviceConnected, "bar")
ev, err := s.Poll(timeout)
if err != nil {
@@ -156,7 +156,7 @@ func TestBufferedSub(t *testing.T) {
go func() {
for i := 0; i < 10*events.BufferSize; i++ {
l.Log(events.NodeConnected, fmt.Sprintf("event-%d", i))
l.Log(events.DeviceConnected, fmt.Sprintf("event-%d", i))
if i%30 == 0 {
// Give the buffer routine time to pick up the events
time.Sleep(20 * time.Millisecond)

View File

@@ -35,13 +35,13 @@ func clock(v uint64) uint64 {
}
const (
keyTypeNode = iota
keyTypeDevice = iota
keyTypeGlobal
)
type fileVersion struct {
version uint64
node []byte
device []byte
}
type versionList struct {
@@ -73,47 +73,47 @@ type dbWriter interface {
/*
keyTypeNode (1 byte)
repository (64 bytes)
node (32 bytes)
keyTypeDevice (1 byte)
folder (64 bytes)
device (32 bytes)
name (variable size)
|
scanner.File
keyTypeGlobal (1 byte)
repository (64 bytes)
folder (64 bytes)
name (variable size)
|
[]fileVersion (sorted)
*/
func nodeKey(repo, node, file []byte) []byte {
func deviceKey(folder, device, file []byte) []byte {
k := make([]byte, 1+64+32+len(file))
k[0] = keyTypeNode
copy(k[1:], []byte(repo))
copy(k[1+64:], node[:])
k[0] = keyTypeDevice
copy(k[1:], []byte(folder))
copy(k[1+64:], device[:])
copy(k[1+64+32:], []byte(file))
return k
}
func globalKey(repo, file []byte) []byte {
func globalKey(folder, file []byte) []byte {
k := make([]byte, 1+64+len(file))
k[0] = keyTypeGlobal
copy(k[1:], []byte(repo))
copy(k[1:], []byte(folder))
copy(k[1+64:], []byte(file))
return k
}
func nodeKeyName(key []byte) []byte {
func deviceKeyName(key []byte) []byte {
return key[1+64+32:]
}
func nodeKeyRepo(key []byte) []byte {
repo := key[1 : 1+64]
izero := bytes.IndexByte(repo, 0)
return repo[:izero]
func deviceKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
return folder[:izero]
}
func nodeKeyNode(key []byte) []byte {
func deviceKeyDevice(key []byte) []byte {
return key[1+64 : 1+64+32]
}
@@ -121,23 +121,23 @@ func globalKeyName(key []byte) []byte {
return key[1+64:]
}
func globalKeyRepo(key []byte) []byte {
repo := key[1 : 1+64]
izero := bytes.IndexByte(repo, 0)
return repo[:izero]
func globalKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
return folder[:izero]
}
type deletionHandler func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64
type deletionHandler func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64
type fileIterator func(f protocol.FileIntf) bool
func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 {
func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 {
runtime.GC()
sort.Sort(fileList(fs)) // sort list on name, same as on disk
start := nodeKey(repo, node, nil) // before all repo/node files
limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
start := deviceKey(folder, device, nil) // before all folder/device files
limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
batch := new(leveldb.Batch)
snap, err := db.GetSnapshot()
@@ -171,42 +171,42 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
}
if moreDb {
oldName = nodeKeyName(dbi.Key())
oldName = deviceKeyName(dbi.Key())
}
cmp := bytes.Compare(newName, oldName)
if debug {
l.Debugf("generic replace; repo=%q node=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", repo, protocol.NodeIDFromBytes(node), moreFs, moreDb, cmp, newName, oldName)
l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
}
switch {
case moreFs && (!moreDb || cmp == -1):
// Disk is missing this file. Insert it.
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
if lv := ldbInsert(batch, folder, device, newName, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, newName)
ldbRemoveFromGlobal(snap, batch, folder, device, newName)
} else {
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
ldbUpdateGlobal(snap, batch, folder, device, newName, fs[fsi].Version)
}
fsi++
case moreFs && moreDb && cmp == 0:
// File exists on both sides - compare versions. We might get an
// update with the same version and different flags if a node has
// update with the same version and different flags if a device has
// marked a file as invalid, so handle that too.
var ef protocol.FileInfoTruncated
ef.UnmarshalXDR(dbi.Value())
if fs[fsi].Version > ef.Version || fs[fsi].Version != ef.Version {
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
if lv := ldbInsert(batch, folder, device, newName, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, newName)
ldbRemoveFromGlobal(snap, batch, folder, device, newName)
} else {
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
ldbUpdateGlobal(snap, batch, folder, device, newName, fs[fsi].Version)
}
}
// Iterate both sides.
@@ -215,7 +215,7 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
case moreDb && (!moreFs || cmp == 1):
if deleteFn != nil {
if lv := deleteFn(snap, batch, repo, node, oldName, dbi); lv > maxLocalVer {
if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer {
maxLocalVer = lv
}
}
@@ -231,21 +231,21 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
return maxLocalVer
}
func ldbReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
func ldbReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
// TODO: Return the remaining maxLocalVer?
return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64 {
// Disk has files that we are missing. Remove it.
if debug {
l.Debugf("delete; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
l.Debugf("delete; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
}
ldbRemoveFromGlobal(db, batch, repo, node, name)
ldbRemoveFromGlobal(db, batch, folder, device, name)
batch.Delete(dbi.Key())
return 0
})
}
func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
func ldbReplaceWithDelete(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64 {
var tf protocol.FileInfoTruncated
err := tf.UnmarshalXDR(dbi.Value())
if err != nil {
@@ -253,7 +253,7 @@ func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileI
}
if !tf.IsDeleted() {
if debug {
l.Debugf("mark deleted; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
l.Debugf("mark deleted; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
}
ts := clock(tf.LocalVersion)
f := protocol.FileInfo{
@@ -264,14 +264,14 @@ func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileI
Modified: tf.Modified,
}
batch.Put(dbi.Key(), f.MarshalXDR())
ldbUpdateGlobal(db, batch, repo, node, nodeKeyName(dbi.Key()), f.Version)
ldbUpdateGlobal(db, batch, folder, device, deviceKeyName(dbi.Key()), f.Version)
return ts
}
return 0
})
}
func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) uint64 {
runtime.GC()
batch := new(leveldb.Batch)
@@ -284,16 +284,16 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
var maxLocalVer uint64
for _, f := range fs {
name := []byte(f.Name)
fk := nodeKey(repo, node, name)
fk := deviceKey(folder, device, name)
bs, err := snap.Get(fk, nil)
if err == leveldb.ErrNotFound {
if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
if lv := ldbInsert(batch, folder, device, name, f); lv > maxLocalVer {
maxLocalVer = lv
}
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, name)
ldbRemoveFromGlobal(snap, batch, folder, device, name)
} else {
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
ldbUpdateGlobal(snap, batch, folder, device, name, f.Version)
}
continue
}
@@ -306,13 +306,13 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
// Flags might change without the version being bumped when we set the
// invalid flag on an existing file.
if ef.Version != f.Version || ef.Flags != f.Flags {
if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
if lv := ldbInsert(batch, folder, device, name, f); lv > maxLocalVer {
maxLocalVer = lv
}
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, name)
ldbRemoveFromGlobal(snap, batch, folder, device, name)
} else {
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
ldbUpdateGlobal(snap, batch, folder, device, name, f.Version)
}
}
}
@@ -325,29 +325,29 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
return maxLocalVer
}
func ldbInsert(batch dbWriter, repo, node, name []byte, file protocol.FileInfo) uint64 {
func ldbInsert(batch dbWriter, folder, device, name []byte, file protocol.FileInfo) uint64 {
if debug {
l.Debugf("insert; repo=%q node=%v %v", repo, protocol.NodeIDFromBytes(node), file)
l.Debugf("insert; folder=%q device=%v %v", folder, protocol.DeviceIDFromBytes(device), file)
}
if file.LocalVersion == 0 {
file.LocalVersion = clock(0)
}
nk := nodeKey(repo, node, name)
nk := deviceKey(folder, device, name)
batch.Put(nk, file.MarshalXDR())
return file.LocalVersion
}
// ldbUpdateGlobal adds this node+version to the version list for the given
// file. If the node is already present in the list, the version is updated.
// ldbUpdateGlobal adds this device+version to the version list for the given
// file. If the device is already present in the list, the version is updated.
// If the file does not have an entry in the global list, it is created.
func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, version uint64) bool {
func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, version uint64) bool {
if debug {
l.Debugf("update global; repo=%q node=%v file=%q version=%d", repo, protocol.NodeIDFromBytes(node), file, version)
l.Debugf("update global; folder=%q device=%v file=%q version=%d", folder, protocol.DeviceIDFromBytes(device), file, version)
}
gk := globalKey(repo, file)
gk := globalKey(folder, file)
svl, err := db.Get(gk, nil)
if err != nil && err != leveldb.ErrNotFound {
panic(err)
@@ -355,7 +355,7 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, versi
var fl versionList
nv := fileVersion{
node: node,
device: device,
version: version,
}
if svl != nil {
@@ -365,7 +365,7 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, versi
}
for i := range fl.versions {
if bytes.Compare(fl.versions[i].node, node) == 0 {
if bytes.Compare(fl.versions[i].device, device) == 0 {
if fl.versions[i].version == version {
// No need to do anything
return false
@@ -394,15 +394,15 @@ done:
return true
}
// ldbRemoveFromGlobal removes the node from the global version list for the
// ldbRemoveFromGlobal removes the device from the global version list for the
// given file. If the version list is empty after this, the file entry is
// removed entirely.
func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
func ldbRemoveFromGlobal(db dbReader, batch dbWriter, folder, device, file []byte) {
if debug {
l.Debugf("remove from global; repo=%q node=%v file=%q", repo, protocol.NodeIDFromBytes(node), file)
l.Debugf("remove from global; folder=%q device=%v file=%q", folder, protocol.DeviceIDFromBytes(device), file)
}
gk := globalKey(repo, file)
gk := globalKey(folder, file)
svl, err := db.Get(gk, nil)
if err != nil {
// We might be called to "remove" a global version that doesn't exist
@@ -417,7 +417,7 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
}
for i := range fl.versions {
if bytes.Compare(fl.versions[i].node, node) == 0 {
if bytes.Compare(fl.versions[i].device, device) == 0 {
fl.versions = append(fl.versions[:i], fl.versions[i+1:]...)
break
}
@@ -430,9 +430,9 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
}
}
func ldbWithHave(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
start := nodeKey(repo, node, nil) // before all repo/node files
limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
func ldbWithHave(db *leveldb.DB, folder, device []byte, truncate bool, fn fileIterator) {
start := deviceKey(folder, device, nil) // before all folder/device files
limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@@ -452,11 +452,11 @@ func ldbWithHave(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterat
}
}
func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfoTruncated) bool) {
func ldbWithAllFolderTruncated(db *leveldb.DB, folder []byte, fn func(device []byte, f protocol.FileInfoTruncated) bool) {
runtime.GC()
start := nodeKey(repo, nil, nil) // before all repo/node files
limit := nodeKey(repo, protocol.LocalNodeID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
start := deviceKey(folder, nil, nil) // before all folder/device files
limit := deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@@ -466,20 +466,20 @@ func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f
defer dbi.Release()
for dbi.Next() {
node := nodeKeyNode(dbi.Key())
device := deviceKeyDevice(dbi.Key())
var f protocol.FileInfoTruncated
err := f.UnmarshalXDR(dbi.Value())
if err != nil {
panic(err)
}
if cont := fn(node, f); !cont {
if cont := fn(device, f); !cont {
return
}
}
}
func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo {
nk := nodeKey(repo, node, file)
func ldbGet(db *leveldb.DB, folder, device, file []byte) protocol.FileInfo {
nk := deviceKey(folder, device, file)
bs, err := db.Get(nk, nil)
if err == leveldb.ErrNotFound {
return protocol.FileInfo{}
@@ -496,8 +496,8 @@ func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo {
return f
}
func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
k := globalKey(repo, file)
func ldbGetGlobal(db *leveldb.DB, folder, file []byte) protocol.FileInfo {
k := globalKey(folder, file)
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@@ -522,7 +522,7 @@ func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
panic("no versions?")
}
k = nodeKey(repo, vl.versions[0].node, file)
k = deviceKey(folder, vl.versions[0].device, file)
bs, err = snap.Get(k, nil)
if err != nil {
panic(err)
@@ -536,11 +536,11 @@ func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
return f
}
func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator) {
func ldbWithGlobal(db *leveldb.DB, folder []byte, truncate bool, fn fileIterator) {
runtime.GC()
start := globalKey(repo, nil)
limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
start := globalKey(folder, nil)
limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@@ -559,7 +559,7 @@ func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator)
l.Debugln(dbi.Key())
panic("no versions?")
}
fk := nodeKey(repo, vl.versions[0].node, globalKeyName(dbi.Key()))
fk := deviceKey(folder, vl.versions[0].device, globalKeyName(dbi.Key()))
bs, err := snap.Get(fk, nil)
if err != nil {
panic(err)
@@ -576,8 +576,8 @@ func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator)
}
}
func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
k := globalKey(repo, file)
func ldbAvailability(db *leveldb.DB, folder, file []byte) []protocol.DeviceID {
k := globalKey(folder, file)
bs, err := db.Get(k, nil)
if err == leveldb.ErrNotFound {
return nil
@@ -592,23 +592,23 @@ func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
panic(err)
}
var nodes []protocol.NodeID
var devices []protocol.DeviceID
for _, v := range vl.versions {
if v.version != vl.versions[0].version {
break
}
n := protocol.NodeIDFromBytes(v.node)
nodes = append(nodes, n)
n := protocol.DeviceIDFromBytes(v.device)
devices = append(devices, n)
}
return nodes
return devices
}
func ldbWithNeed(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
func ldbWithNeed(db *leveldb.DB, folder, device []byte, truncate bool, fn fileIterator) {
runtime.GC()
start := globalKey(repo, nil)
limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
start := globalKey(folder, nil)
limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
@@ -633,7 +633,7 @@ outer:
need := false // If we have a lower version of the file
var haveVersion uint64
for _, v := range vl.versions {
if bytes.Compare(v.node, node) == 0 {
if bytes.Compare(v.device, device) == 0 {
have = true
haveVersion = v.version
need = v.version < vl.versions[0].version
@@ -650,7 +650,7 @@ outer:
// We haven't found a valid copy of the file with the needed version.
continue outer
}
fk := nodeKey(repo, vl.versions[i].node, name)
fk := deviceKey(folder, vl.versions[i].device, name)
bs, err := snap.Get(fk, nil)
if err != nil {
panic(err)
@@ -672,7 +672,7 @@ outer:
}
if debug {
l.Debugf("need repo=%q node=%v name=%q need=%v have=%v haveV=%d globalV=%d", repo, protocol.NodeIDFromBytes(node), name, need, have, haveVersion, vl.versions[0].version)
l.Debugf("need folder=%q device=%v name=%q need=%v have=%v haveV=%d globalV=%d", folder, protocol.DeviceIDFromBytes(device), name, need, have, haveVersion, vl.versions[0].version)
}
if cont := fn(gf); !cont {
@@ -686,7 +686,7 @@ outer:
}
}
func ldbListRepos(db *leveldb.DB) []string {
func ldbListFolders(db *leveldb.DB) []string {
runtime.GC()
start := []byte{keyTypeGlobal}
@@ -699,24 +699,24 @@ func ldbListRepos(db *leveldb.DB) []string {
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
repoExists := make(map[string]bool)
folderExists := make(map[string]bool)
for dbi.Next() {
repo := string(globalKeyRepo(dbi.Key()))
if !repoExists[repo] {
repoExists[repo] = true
folder := string(globalKeyFolder(dbi.Key()))
if !folderExists[folder] {
folderExists[folder] = true
}
}
repos := make([]string, 0, len(repoExists))
for k := range repoExists {
repos = append(repos, k)
folders := make([]string, 0, len(folderExists))
for k := range folderExists {
folders = append(folders, k)
}
sort.Strings(repos)
return repos
sort.Strings(folders)
return folders
}
func ldbDropRepo(db *leveldb.DB, repo []byte) {
func ldbDropFolder(db *leveldb.DB, folder []byte) {
runtime.GC()
snap, err := db.GetSnapshot()
@@ -725,25 +725,25 @@ func ldbDropRepo(db *leveldb.DB, repo []byte) {
}
defer snap.Release()
// Remove all items related to the given repo from the node->file bucket
start := []byte{keyTypeNode}
limit := []byte{keyTypeNode + 1}
// Remove all items related to the given folder from the device->file bucket
start := []byte{keyTypeDevice}
limit := []byte{keyTypeDevice + 1}
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
for dbi.Next() {
itemRepo := nodeKeyRepo(dbi.Key())
if bytes.Compare(repo, itemRepo) == 0 {
itemFolder := deviceKeyFolder(dbi.Key())
if bytes.Compare(folder, itemFolder) == 0 {
db.Delete(dbi.Key(), nil)
}
}
dbi.Release()
// Remove all items related to the given repo from the global bucket
// Remove all items related to the given folder from the global bucket
start = []byte{keyTypeGlobal}
limit = []byte{keyTypeGlobal + 1}
dbi = snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
for dbi.Next() {
itemRepo := globalKeyRepo(dbi.Key())
if bytes.Compare(repo, itemRepo) == 0 {
itemFolder := globalKeyFolder(dbi.Key())
if bytes.Compare(folder, itemFolder) == 0 {
db.Delete(dbi.Key(), nil)
}
}

View File

@@ -22,17 +22,17 @@ fileVersion Structure:
+ version (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of node |
| Length of device |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ node (variable length) \
\ device (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct fileVersion {
unsigned hyper version;
opaque node<>;
opaque device<>;
}
*/
@@ -55,7 +55,7 @@ func (o fileVersion) AppendXDR(bs []byte) []byte {
func (o fileVersion) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint64(o.version)
xw.WriteBytes(o.node)
xw.WriteBytes(o.device)
return xw.Tot(), xw.Error()
}
@@ -72,7 +72,7 @@ func (o *fileVersion) UnmarshalXDR(bs []byte) error {
func (o *fileVersion) decodeXDR(xr *xdr.Reader) error {
o.version = xr.ReadUint64()
o.node = xr.ReadBytes()
o.device = xr.ReadBytes()
return xr.Error()
}

View File

@@ -27,147 +27,147 @@ type fileRecord struct {
type bitset uint64
type Set struct {
localVersion map[protocol.NodeID]uint64
localVersion map[protocol.DeviceID]uint64
mutex sync.Mutex
repo string
folder string
db *leveldb.DB
}
func NewSet(repo string, db *leveldb.DB) *Set {
func NewSet(folder string, db *leveldb.DB) *Set {
var s = Set{
localVersion: make(map[protocol.NodeID]uint64),
repo: repo,
localVersion: make(map[protocol.DeviceID]uint64),
folder: folder,
db: db,
}
var nodeID protocol.NodeID
ldbWithAllRepoTruncated(db, []byte(repo), func(node []byte, f protocol.FileInfoTruncated) bool {
copy(nodeID[:], node)
if f.LocalVersion > s.localVersion[nodeID] {
s.localVersion[nodeID] = f.LocalVersion
var deviceID protocol.DeviceID
ldbWithAllFolderTruncated(db, []byte(folder), func(device []byte, f protocol.FileInfoTruncated) bool {
copy(deviceID[:], device)
if f.LocalVersion > s.localVersion[deviceID] {
s.localVersion[deviceID] = f.LocalVersion
}
lamport.Default.Tick(f.Version)
return true
})
if debug {
l.Debugf("loaded localVersion for %q: %#v", repo, s.localVersion)
l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion)
}
clock(s.localVersion[protocol.LocalNodeID])
clock(s.localVersion[protocol.LocalDeviceID])
return &s
}
func (s *Set) Replace(node protocol.NodeID, fs []protocol.FileInfo) {
func (s *Set) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Replace(%v, [%d])", s.repo, node, len(fs))
l.Debugf("%s Replace(%v, [%d])", s.folder, device, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
s.localVersion[node] = ldbReplace(s.db, []byte(s.repo), node[:], fs)
s.localVersion[device] = ldbReplace(s.db, []byte(s.folder), device[:], fs)
if len(fs) == 0 {
// Reset the local version if all files were removed.
s.localVersion[node] = 0
s.localVersion[device] = 0
}
}
func (s *Set) ReplaceWithDelete(node protocol.NodeID, fs []protocol.FileInfo) {
func (s *Set) ReplaceWithDelete(device protocol.DeviceID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.repo, node, len(fs))
l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.folder, device, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
if lv := ldbReplaceWithDelete(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {
s.localVersion[node] = lv
if lv := ldbReplaceWithDelete(s.db, []byte(s.folder), device[:], fs); lv > s.localVersion[device] {
s.localVersion[device] = lv
}
}
func (s *Set) Update(node protocol.NodeID, fs []protocol.FileInfo) {
func (s *Set) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Update(%v, [%d])", s.repo, node, len(fs))
l.Debugf("%s Update(%v, [%d])", s.folder, device, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
if lv := ldbUpdate(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {
s.localVersion[node] = lv
if lv := ldbUpdate(s.db, []byte(s.folder), device[:], fs); lv > s.localVersion[device] {
s.localVersion[device] = lv
}
}
func (s *Set) WithNeed(node protocol.NodeID, fn fileIterator) {
func (s *Set) WithNeed(device protocol.DeviceID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeed(%v)", s.repo, node)
l.Debugf("%s WithNeed(%v)", s.folder, device)
}
ldbWithNeed(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
ldbWithNeed(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
}
func (s *Set) WithNeedTruncated(node protocol.NodeID, fn fileIterator) {
func (s *Set) WithNeedTruncated(device protocol.DeviceID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeedTruncated(%v)", s.repo, node)
l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
}
ldbWithNeed(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
ldbWithNeed(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
}
func (s *Set) WithHave(node protocol.NodeID, fn fileIterator) {
func (s *Set) WithHave(device protocol.DeviceID, fn fileIterator) {
if debug {
l.Debugf("%s WithHave(%v)", s.repo, node)
l.Debugf("%s WithHave(%v)", s.folder, device)
}
ldbWithHave(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
ldbWithHave(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
}
func (s *Set) WithHaveTruncated(node protocol.NodeID, fn fileIterator) {
func (s *Set) WithHaveTruncated(device protocol.DeviceID, fn fileIterator) {
if debug {
l.Debugf("%s WithHaveTruncated(%v)", s.repo, node)
l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
}
ldbWithHave(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
ldbWithHave(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
}
func (s *Set) WithGlobal(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobal()", s.repo)
l.Debugf("%s WithGlobal()", s.folder)
}
ldbWithGlobal(s.db, []byte(s.repo), false, nativeFileIterator(fn))
ldbWithGlobal(s.db, []byte(s.folder), false, nativeFileIterator(fn))
}
func (s *Set) WithGlobalTruncated(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobalTruncated()", s.repo)
l.Debugf("%s WithGlobalTruncated()", s.folder)
}
ldbWithGlobal(s.db, []byte(s.repo), true, nativeFileIterator(fn))
ldbWithGlobal(s.db, []byte(s.folder), true, nativeFileIterator(fn))
}
func (s *Set) Get(node protocol.NodeID, file string) protocol.FileInfo {
f := ldbGet(s.db, []byte(s.repo), node[:], []byte(normalizedFilename(file)))
func (s *Set) Get(device protocol.DeviceID, file string) protocol.FileInfo {
f := ldbGet(s.db, []byte(s.folder), device[:], []byte(normalizedFilename(file)))
f.Name = nativeFilename(f.Name)
return f
}
func (s *Set) GetGlobal(file string) protocol.FileInfo {
f := ldbGetGlobal(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
f := ldbGetGlobal(s.db, []byte(s.folder), []byte(normalizedFilename(file)))
f.Name = nativeFilename(f.Name)
return f
}
func (s *Set) Availability(file string) []protocol.NodeID {
return ldbAvailability(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
func (s *Set) Availability(file string) []protocol.DeviceID {
return ldbAvailability(s.db, []byte(s.folder), []byte(normalizedFilename(file)))
}
func (s *Set) LocalVersion(node protocol.NodeID) uint64 {
func (s *Set) LocalVersion(device protocol.DeviceID) uint64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.localVersion[node]
return s.localVersion[device]
}
// ListRepos returns the repository IDs seen in the database.
func ListRepos(db *leveldb.DB) []string {
return ldbListRepos(db)
// ListFolders returns the folder IDs seen in the database.
func ListFolders(db *leveldb.DB) []string {
return ldbListFolders(db)
}
// DropRepo clears out all information related to the given repo from the
// DropFolder clears out all information related to the given folder from the
// database.
func DropRepo(db *leveldb.DB, repo string) {
ldbDropRepo(db, []byte(repo))
func DropFolder(db *leveldb.DB, folder string) {
ldbDropFolder(db, []byte(folder))
}
func normalizeFilenames(fs []protocol.FileInfo) {

View File

@@ -18,11 +18,11 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
var remoteNode0, remoteNode1 protocol.NodeID
var remoteDevice0, remoteDevice1 protocol.DeviceID
func init() {
remoteNode0, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
remoteNode1, _ = protocol.NodeIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
remoteDevice0, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
remoteDevice1, _ = protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
}
func genBlocks(n int) []protocol.BlockInfo {
@@ -48,7 +48,7 @@ func globalList(s *files.Set) []protocol.FileInfo {
return fs
}
func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
func haveList(s *files.Set, n protocol.DeviceID) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithHave(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
@@ -58,7 +58,7 @@ func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
return fs
}
func needList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
func needList(s *files.Set, n protocol.DeviceID) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithNeed(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
@@ -158,10 +158,10 @@ func TestGlobalSet(t *testing.T) {
local0[3],
}
m.ReplaceWithDelete(protocol.LocalNodeID, local0)
m.ReplaceWithDelete(protocol.LocalNodeID, local1)
m.Replace(remoteNode0, remote0)
m.Update(remoteNode0, remote1)
m.ReplaceWithDelete(protocol.LocalDeviceID, local0)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
m.Replace(remoteDevice0, remote0)
m.Update(remoteDevice0, remote1)
g := fileList(globalList(m))
sort.Sort(g)
@@ -170,40 +170,40 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
}
h := fileList(haveList(m, protocol.LocalNodeID))
h := fileList(haveList(m, protocol.LocalDeviceID))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(localTot) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, localTot)
}
h = fileList(haveList(m, remoteNode0))
h = fileList(haveList(m, remoteDevice0))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, remoteTot)
}
n := fileList(needList(m, protocol.LocalNodeID))
n := fileList(needList(m, protocol.LocalDeviceID))
sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedLocalNeed)
}
n = fileList(needList(m, remoteNode0))
n = fileList(needList(m, remoteDevice0))
sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
}
f := m.Get(protocol.LocalNodeID, "b")
f := m.Get(protocol.LocalDeviceID, "b")
if fmt.Sprint(f) != fmt.Sprint(localTot[1]) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1])
}
f = m.Get(remoteNode0, "b")
f = m.Get(remoteDevice0, "b")
if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
}
@@ -213,7 +213,7 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, remote1[0])
}
f = m.Get(protocol.LocalNodeID, "zz")
f = m.Get(protocol.LocalDeviceID, "zz")
if f.Name != "" {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
}
@@ -223,18 +223,18 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
}
av := []protocol.NodeID{protocol.LocalNodeID, remoteNode0}
av := []protocol.DeviceID{protocol.LocalDeviceID, remoteDevice0}
a := m.Availability("a")
if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
}
a = m.Availability("b")
if len(a) != 1 || a[0] != remoteNode0 {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteNode0)
if len(a) != 1 || a[0] != remoteDevice0 {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteDevice0)
}
a = m.Availability("d")
if len(a) != 1 || a[0] != protocol.LocalNodeID {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalNodeID)
if len(a) != 1 || a[0] != protocol.LocalDeviceID {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalDeviceID)
}
}
@@ -268,11 +268,11 @@ func TestNeedWithInvalid(t *testing.T) {
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
s.Replace(remoteNode0, remote0Have)
s.Replace(remoteNode1, remote1Have)
s.ReplaceWithDelete(protocol.LocalDeviceID, localHave)
s.Replace(remoteDevice0, remote0Have)
s.Replace(remoteDevice1, remote1Have)
need := fileList(needList(s, protocol.LocalNodeID))
need := fileList(needList(s, protocol.LocalDeviceID))
sort.Sort(need)
if fmt.Sprint(need) != fmt.Sprint(expectedNeed) {
@@ -297,9 +297,9 @@ func TestUpdateToInvalid(t *testing.T) {
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
s.ReplaceWithDelete(protocol.LocalDeviceID, localHave)
have := fileList(haveList(s, protocol.LocalNodeID))
have := fileList(haveList(s, protocol.LocalDeviceID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
@@ -307,9 +307,9 @@ func TestUpdateToInvalid(t *testing.T) {
}
localHave[1] = protocol.FileInfo{Name: "b", Version: 1001, Flags: protocol.FlagInvalid}
s.Update(protocol.LocalNodeID, localHave[1:2])
s.Update(protocol.LocalDeviceID, localHave[1:2])
have = fileList(haveList(s, protocol.LocalNodeID))
have = fileList(haveList(s, protocol.LocalDeviceID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
@@ -340,18 +340,18 @@ func TestInvalidAvailability(t *testing.T) {
protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}
s.Replace(remoteNode0, remote0Have)
s.Replace(remoteNode1, remote1Have)
s.Replace(remoteDevice0, remote0Have)
s.Replace(remoteDevice1, remote1Have)
if av := s.Availability("both"); len(av) != 2 {
t.Error("Incorrect availability for 'both':", av)
}
if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteNode0 {
if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteDevice0 {
t.Error("Incorrect availability for 'r0only':", av)
}
if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteNode1 {
if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteDevice1 {
t.Error("Incorrect availability for 'r1only':", av)
}
@@ -376,22 +376,22 @@ func TestLocalDeleted(t *testing.T) {
protocol.FileInfo{Name: "z", Version: 1000, Flags: protocol.FlagDirectory},
}
m.ReplaceWithDelete(protocol.LocalNodeID, local1)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
// [1] removed
local1[2],
local1[3],
local1[4],
})
m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
local1[2],
// [3] removed
local1[4],
})
m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
local1[2],
// [4] removed
@@ -413,7 +413,7 @@ func TestLocalDeleted(t *testing.T) {
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal1)
}
m.ReplaceWithDelete(protocol.LocalNodeID, []protocol.FileInfo{
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
// [2] removed
})
@@ -449,7 +449,7 @@ func Benchmark10kReplace(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m := files.NewSet("test", db)
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
}
}
@@ -465,14 +465,14 @@ func Benchmark10kUpdateChg(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -481,7 +481,7 @@ func Benchmark10kUpdateChg(b *testing.B) {
local[j].Version++
}
b.StartTimer()
m.Update(protocol.LocalNodeID, local)
m.Update(protocol.LocalDeviceID, local)
}
}
@@ -496,18 +496,18 @@ func Benchmark10kUpdateSme(b *testing.B) {
b.Fatal(err)
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Update(protocol.LocalNodeID, local)
m.Update(protocol.LocalDeviceID, local)
}
}
@@ -523,7 +523,7 @@ func Benchmark10kNeed2k(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 8000; i++ {
@@ -533,11 +533,11 @@ func Benchmark10kNeed2k(b *testing.B) {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
fs := needList(m, protocol.LocalNodeID)
fs := needList(m, protocol.LocalDeviceID)
if l := len(fs); l != 2000 {
b.Errorf("wrong length %d != 2k", l)
}
@@ -556,7 +556,7 @@ func Benchmark10kHaveFullList(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
@@ -566,11 +566,11 @@ func Benchmark10kHaveFullList(b *testing.B) {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
fs := haveList(m, protocol.LocalNodeID)
fs := haveList(m, protocol.LocalDeviceID)
if l := len(fs); l != 10000 {
b.Errorf("wrong length %d != 10k", l)
}
@@ -589,7 +589,7 @@ func Benchmark10kGlobal(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode0, remote)
m.Replace(remoteDevice0, remote)
var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
@@ -599,7 +599,7 @@ func Benchmark10kGlobal(b *testing.B) {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -632,7 +632,7 @@ func TestGlobalReset(t *testing.T) {
protocol.FileInfo{Name: "e", Version: 1000},
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
g := globalList(m)
sort.Sort(fileList(g))
@@ -640,8 +640,8 @@ func TestGlobalReset(t *testing.T) {
t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
}
m.Replace(remoteNode0, remote)
m.Replace(remoteNode0, nil)
m.Replace(remoteDevice0, remote)
m.Replace(remoteDevice0, nil)
g = globalList(m)
sort.Sort(fileList(g))
@@ -679,10 +679,10 @@ func TestNeed(t *testing.T) {
protocol.FileInfo{Name: "e", Version: 1000},
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.Replace(remoteNode0, remote)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.Replace(remoteDevice0, remote)
need := needList(m, protocol.LocalNodeID)
need := needList(m, protocol.LocalDeviceID)
sort.Sort(fileList(need))
sort.Sort(fileList(shouldNeed))
@@ -715,23 +715,23 @@ func TestLocalVersion(t *testing.T) {
protocol.FileInfo{Name: "e", Version: 1000},
}
m.ReplaceWithDelete(protocol.LocalNodeID, local1)
c0 := m.LocalVersion(protocol.LocalNodeID)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
c0 := m.LocalVersion(protocol.LocalDeviceID)
m.ReplaceWithDelete(protocol.LocalNodeID, local2)
c1 := m.LocalVersion(protocol.LocalNodeID)
m.ReplaceWithDelete(protocol.LocalDeviceID, local2)
c1 := m.LocalVersion(protocol.LocalDeviceID)
if !(c1 > c0) {
t.Fatal("Local version number should have incremented")
}
m.ReplaceWithDelete(protocol.LocalNodeID, local2)
c2 := m.LocalVersion(protocol.LocalNodeID)
m.ReplaceWithDelete(protocol.LocalDeviceID, local2)
c2 := m.LocalVersion(protocol.LocalDeviceID)
if c2 != c1 {
t.Fatal("Local version number should be unchanged")
}
}
func TestListDropRepo(t *testing.T) {
func TestListDropFolder(t *testing.T) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
@@ -743,7 +743,7 @@ func TestListDropRepo(t *testing.T) {
protocol.FileInfo{Name: "b", Version: 1000},
protocol.FileInfo{Name: "c", Version: 1000},
}
s0.Replace(protocol.LocalNodeID, local1)
s0.Replace(protocol.LocalDeviceID, local1)
s1 := files.NewSet("test1", db)
local2 := []protocol.FileInfo{
@@ -751,13 +751,13 @@ func TestListDropRepo(t *testing.T) {
protocol.FileInfo{Name: "e", Version: 1002},
protocol.FileInfo{Name: "f", Version: 1002},
}
s1.Replace(remoteNode0, local2)
s1.Replace(remoteDevice0, local2)
// Check that we have both repos and their data is in the global list
// Check that we have both folders and their data is in the global list
expectedRepoList := []string{"test0", "test1"}
if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
expectedFolderList := []string{"test0", "test1"}
if actualFolderList := files.ListFolders(db); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
}
if l := len(globalList(s0)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s0", l)
@@ -768,11 +768,11 @@ func TestListDropRepo(t *testing.T) {
// Drop one of them and check that it's gone.
files.DropRepo(db, "test1")
files.DropFolder(db, "test1")
expectedRepoList = []string{"test0"}
if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
expectedFolderList = []string{"test0"}
if actualFolderList := files.ListFolders(db); !reflect.DeepEqual(actualFolderList, expectedFolderList) {
t.Fatalf("FolderList mismatch\nE: %v\nA: %v", expectedFolderList, actualFolderList)
}
if l := len(globalList(s0)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s0", l)
@@ -795,14 +795,14 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
protocol.FileInfo{Name: "b", Version: 1002, Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
}
s.Replace(remoteNode0, rem0)
s.Replace(remoteDevice0, rem0)
rem1 := fileList{
protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "c", Version: 1002, Flags: protocol.FlagInvalid},
}
s.Replace(remoteNode1, rem1)
s.Replace(remoteDevice1, rem1)
total := fileList{
// There's a valid copy of each file, so it should be merged
@@ -811,7 +811,7 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
}
need := fileList(needList(s, protocol.LocalNodeID))
need := fileList(needList(s, protocol.LocalDeviceID))
if fmt.Sprint(need) != fmt.Sprint(total) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, total)
}
@@ -840,7 +840,7 @@ func TestLongPath(t *testing.T) {
protocol.FileInfo{Name: string(name), Version: 1000},
}
s.ReplaceWithDelete(protocol.LocalNodeID, local)
s.ReplaceWithDelete(protocol.LocalDeviceID, local)
gf := globalList(s)
if l := len(gf); l != 1 {
@@ -877,8 +877,8 @@ func TestStressGlobalVersion(t *testing.T) {
m := files.NewSet("test", db)
done := make(chan struct{})
go stressWriter(m, remoteNode0, set1, nil, done)
go stressWriter(m, protocol.LocalNodeID, set2, nil, done)
go stressWriter(m, remoteDevice0, set1, nil, done)
go stressWriter(m, protocol.LocalDeviceID, set2, nil, done)
t0 := time.Now()
for time.Since(t0) < dur {
@@ -891,7 +891,7 @@ func TestStressGlobalVersion(t *testing.T) {
close(done)
}
func stressWriter(s *files.Set, id protocol.NodeID, set1, set2 []protocol.FileInfo, done chan struct{}) {
func stressWriter(s *files.Set, id protocol.DeviceID, set1, set2 []protocol.FileInfo, done chan struct{}) {
one := true
i := 0
for {

View File

@@ -0,0 +1,51 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"sync"
"github.com/syncthing/syncthing/internal/protocol"
)
// deviceActivity tracks the number of outstanding requests per device and can
// answer which device is least busy. It is safe for use from multiple
// goroutines.
type deviceActivity struct {
act map[protocol.DeviceID]int
mut sync.Mutex
}
func newDeviceActivity() *deviceActivity {
return &deviceActivity{
act: make(map[protocol.DeviceID]int),
}
}
func (m deviceActivity) leastBusy(availability []protocol.DeviceID) protocol.DeviceID {
m.mut.Lock()
var low int = 2<<30 - 1
var selected protocol.DeviceID
for _, device := range availability {
if usage := m.act[device]; usage < low {
low = usage
selected = device
}
}
m.mut.Unlock()
return selected
}
func (m deviceActivity) using(device protocol.DeviceID) {
m.mut.Lock()
defer m.mut.Unlock()
m.act[device]++
}
func (m deviceActivity) done(device protocol.DeviceID) {
m.mut.Lock()
defer m.mut.Unlock()
m.act[device]--
}

View File

@@ -0,0 +1,56 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"testing"
"github.com/syncthing/syncthing/internal/protocol"
)
func TestDeviceActivity(t *testing.T) {
n0 := protocol.DeviceID{1, 2, 3, 4}
n1 := protocol.DeviceID{5, 6, 7, 8}
n2 := protocol.DeviceID{9, 10, 11, 12}
devices := []protocol.DeviceID{n0, n1, n2}
na := newDeviceActivity()
if lb := na.leastBusy(devices); lb != n0 {
t.Errorf("Least busy device should be n0 (%v) not %v", n0, lb)
}
if lb := na.leastBusy(devices); lb != n0 {
t.Errorf("Least busy device should still be n0 (%v) not %v", n0, lb)
}
na.using(na.leastBusy(devices))
if lb := na.leastBusy(devices); lb != n1 {
t.Errorf("Least busy device should be n1 (%v) not %v", n1, lb)
}
na.using(na.leastBusy(devices))
if lb := na.leastBusy(devices); lb != n2 {
t.Errorf("Least busy device should be n2 (%v) not %v", n2, lb)
}
na.using(na.leastBusy(devices))
if lb := na.leastBusy(devices); lb != n0 {
t.Errorf("Least busy device should be n0 (%v) not %v", n0, lb)
}
na.done(n1)
if lb := na.leastBusy(devices); lb != n1 {
t.Errorf("Least busy device should be n1 (%v) not %v", n1, lb)
}
na.done(n2)
if lb := na.leastBusy(devices); lb != n1 {
t.Errorf("Least busy device should still be n1 (%v) not %v", n1, lb)
}
na.done(n0)
if lb := na.leastBusy(devices); lb != n0 {
t.Errorf("Least busy device should be n0 (%v) not %v", n0, lb)
}
}

View File

@@ -2,5 +2,5 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Package model implements repository abstraction and file pulling mechanisms
// Package model implements folder abstraction and file pulling mechanisms
package model

File diff suppressed because it is too large Load Diff

View File

@@ -17,11 +17,11 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
var node1, node2 protocol.NodeID
var device1, device2 protocol.DeviceID
func init() {
node1, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
node2, _ = protocol.NodeIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
device1, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
device2, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
}
var testDataExpected = map[string]protocol.FileInfo{
@@ -57,11 +57,11 @@ func init() {
func TestRequest(t *testing.T) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", &config.Configuration{}, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", &config.Configuration{}, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
bs, err := m.Request(node1, "default", "foo", 0, 6)
bs, err := m.Request(device1, "default", "foo", 0, 6)
if err != nil {
t.Fatal(err)
}
@@ -69,7 +69,7 @@ func TestRequest(t *testing.T) {
t.Errorf("Incorrect data from request: %q", string(bs))
}
bs, err = m.Request(node1, "default", "../walk.go", 0, 6)
bs, err = m.Request(device1, "default", "../walk.go", 0, 6)
if err == nil {
t.Error("Unexpected nil error on insecure file read")
}
@@ -94,76 +94,76 @@ func genFiles(n int) []protocol.FileInfo {
func BenchmarkIndex10000(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(10000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Index(node1, "default", files)
m.Index(device1, "default", files)
}
}
func BenchmarkIndex00100(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Index(node1, "default", files)
m.Index(device1, "default", files)
}
}
func BenchmarkIndexUpdate10000f10000(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(10000)
m.Index(node1, "default", files)
m.Index(device1, "default", files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate(node1, "default", files)
m.IndexUpdate(device1, "default", files)
}
}
func BenchmarkIndexUpdate10000f00100(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(10000)
m.Index(node1, "default", files)
m.Index(device1, "default", files)
ufiles := genFiles(100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate(node1, "default", ufiles)
m.IndexUpdate(device1, "default", ufiles)
}
}
func BenchmarkIndexUpdate10000f00001(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
files := genFiles(10000)
m.Index(node1, "default", files)
m.Index(device1, "default", files)
ufiles := genFiles(1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate(node1, "default", ufiles)
m.IndexUpdate(device1, "default", ufiles)
}
}
type FakeConnection struct {
id protocol.NodeID
id protocol.DeviceID
requestData []byte
}
@@ -171,7 +171,7 @@ func (FakeConnection) Close() error {
return nil
}
func (f FakeConnection) ID() protocol.NodeID {
func (f FakeConnection) ID() protocol.DeviceID {
return f.id
}
@@ -191,7 +191,7 @@ func (FakeConnection) IndexUpdate(string, []protocol.FileInfo) error {
return nil
}
func (f FakeConnection) Request(repo, name string, offset int64, size int) ([]byte, error) {
func (f FakeConnection) Request(folder, name string, offset int64, size int) ([]byte, error) {
return f.requestData, nil
}
@@ -207,9 +207,9 @@ func (FakeConnection) Statistics() protocol.Statistics {
func BenchmarkRequest(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m.ScanRepo("default")
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
m.ScanFolder("default")
const n = 1000
files := make([]protocol.FileInfo, n)
@@ -223,15 +223,15 @@ func BenchmarkRequest(b *testing.B) {
}
fc := FakeConnection{
id: node1,
id: device1,
requestData: []byte("some data to return"),
}
m.AddConnection(fc, fc)
m.Index(node1, "default", files)
m.Index(device1, "default", files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
data, err := m.requestGlobal(node1, "default", files[i%n].Name, 0, 32, nil)
data, err := m.requestGlobal(device1, "default", files[i%n].Name, 0, 32, nil)
if err != nil {
b.Error(err)
}
@@ -241,28 +241,28 @@ func BenchmarkRequest(b *testing.B) {
}
}
func TestNodeRename(t *testing.T) {
func TestDeviceRename(t *testing.T) {
ccm := protocol.ClusterConfigMessage{
ClientName: "syncthing",
ClientVersion: "v0.9.4",
}
cfg := config.New("/tmp/test", node1)
cfg.Nodes = []config.NodeConfiguration{
cfg := config.New("/tmp/test", device1)
cfg.Devices = []config.DeviceConfiguration{
{
NodeID: node1,
DeviceID: device1,
},
}
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", &cfg, "node", "syncthing", "dev", db)
if cfg.Nodes[0].Name != "" {
t.Errorf("Node already has a name")
m := NewModel("/tmp", &cfg, "device", "syncthing", "dev", db)
if cfg.Devices[0].Name != "" {
t.Errorf("Device already has a name")
}
m.ClusterConfig(node1, ccm)
if cfg.Nodes[0].Name != "" {
t.Errorf("Node already has a name")
m.ClusterConfig(device1, ccm)
if cfg.Devices[0].Name != "" {
t.Errorf("Device already has a name")
}
ccm.Options = []protocol.Option{
@@ -271,96 +271,96 @@ func TestNodeRename(t *testing.T) {
Value: "tester",
},
}
m.ClusterConfig(node1, ccm)
if cfg.Nodes[0].Name != "tester" {
t.Errorf("Node did not get a name")
m.ClusterConfig(device1, ccm)
if cfg.Devices[0].Name != "tester" {
t.Errorf("Device did not get a name")
}
ccm.Options[0].Value = "tester2"
m.ClusterConfig(node1, ccm)
if cfg.Nodes[0].Name != "tester" {
t.Errorf("Node name got overwritten")
m.ClusterConfig(device1, ccm)
if cfg.Devices[0].Name != "tester" {
t.Errorf("Device name got overwritten")
}
}
func TestClusterConfig(t *testing.T) {
cfg := config.New("/tmp/test", node1)
cfg.Nodes = []config.NodeConfiguration{
cfg := config.New("/tmp/test", device1)
cfg.Devices = []config.DeviceConfiguration{
{
NodeID: node1,
DeviceID: device1,
Introducer: true,
},
{
NodeID: node2,
DeviceID: device2,
},
}
cfg.Repositories = []config.RepositoryConfiguration{
cfg.Folders = []config.FolderConfiguration{
{
ID: "repo1",
Nodes: []config.RepositoryNodeConfiguration{
{NodeID: node1},
{NodeID: node2},
ID: "folder1",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2},
},
},
{
ID: "repo2",
Nodes: []config.RepositoryNodeConfiguration{
{NodeID: node1},
{NodeID: node2},
ID: "folder2",
Devices: []config.FolderDeviceConfiguration{
{DeviceID: device1},
{DeviceID: device2},
},
},
}
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", &cfg, "node", "syncthing", "dev", db)
m.AddRepo(cfg.Repositories[0])
m.AddRepo(cfg.Repositories[1])
m := NewModel("/tmp", &cfg, "device", "syncthing", "dev", db)
m.AddFolder(cfg.Folders[0])
m.AddFolder(cfg.Folders[1])
cm := m.clusterConfig(node2)
cm := m.clusterConfig(device2)
if l := len(cm.Repositories); l != 2 {
t.Fatalf("Incorrect number of repos %d != 2", l)
if l := len(cm.Folders); l != 2 {
t.Fatalf("Incorrect number of folders %d != 2", l)
}
r := cm.Repositories[0]
if r.ID != "repo1" {
t.Errorf("Incorrect repo %q != repo1", r.ID)
r := cm.Folders[0]
if r.ID != "folder1" {
t.Errorf("Incorrect folder %q != folder1", r.ID)
}
if l := len(r.Nodes); l != 2 {
t.Errorf("Incorrect number of nodes %d != 2", l)
if l := len(r.Devices); l != 2 {
t.Errorf("Incorrect number of devices %d != 2", l)
}
if id := r.Nodes[0].ID; bytes.Compare(id, node1[:]) != 0 {
t.Errorf("Incorrect node ID %x != %x", id, node1)
if id := r.Devices[0].ID; bytes.Compare(id, device1[:]) != 0 {
t.Errorf("Incorrect device ID %x != %x", id, device1)
}
if r.Nodes[0].Flags&protocol.FlagIntroducer == 0 {
t.Error("Node1 should be flagged as Introducer")
if r.Devices[0].Flags&protocol.FlagIntroducer == 0 {
t.Error("Device1 should be flagged as Introducer")
}
if id := r.Nodes[1].ID; bytes.Compare(id, node2[:]) != 0 {
t.Errorf("Incorrect node ID %x != %x", id, node2)
if id := r.Devices[1].ID; bytes.Compare(id, device2[:]) != 0 {
t.Errorf("Incorrect device ID %x != %x", id, device2)
}
if r.Nodes[1].Flags&protocol.FlagIntroducer != 0 {
t.Error("Node2 should not be flagged as Introducer")
if r.Devices[1].Flags&protocol.FlagIntroducer != 0 {
t.Error("Device2 should not be flagged as Introducer")
}
r = cm.Repositories[1]
if r.ID != "repo2" {
t.Errorf("Incorrect repo %q != repo2", r.ID)
r = cm.Folders[1]
if r.ID != "folder2" {
t.Errorf("Incorrect folder %q != folder2", r.ID)
}
if l := len(r.Nodes); l != 2 {
t.Errorf("Incorrect number of nodes %d != 2", l)
if l := len(r.Devices); l != 2 {
t.Errorf("Incorrect number of devices %d != 2", l)
}
if id := r.Nodes[0].ID; bytes.Compare(id, node1[:]) != 0 {
t.Errorf("Incorrect node ID %x != %x", id, node1)
if id := r.Devices[0].ID; bytes.Compare(id, device1[:]) != 0 {
t.Errorf("Incorrect device ID %x != %x", id, device1)
}
if r.Nodes[0].Flags&protocol.FlagIntroducer == 0 {
t.Error("Node1 should be flagged as Introducer")
if r.Devices[0].Flags&protocol.FlagIntroducer == 0 {
t.Error("Device1 should be flagged as Introducer")
}
if id := r.Nodes[1].ID; bytes.Compare(id, node2[:]) != 0 {
t.Errorf("Incorrect node ID %x != %x", id, node2)
if id := r.Devices[1].ID; bytes.Compare(id, device2[:]) != 0 {
t.Errorf("Incorrect device ID %x != %x", id, device2)
}
if r.Nodes[1].Flags&protocol.FlagIntroducer != 0 {
t.Error("Node2 should not be flagged as Introducer")
if r.Devices[1].Flags&protocol.FlagIntroducer != 0 {
t.Error("Device2 should not be flagged as Introducer")
}
}
@@ -379,8 +379,8 @@ func TestIgnores(t *testing.T) {
}
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
m := NewModel("/tmp", nil, "device", "syncthing", "dev", db)
m.AddFolder(config.FolderConfiguration{ID: "default", Directory: "testdata"})
expected := []string{
".*",
@@ -440,7 +440,7 @@ func TestIgnores(t *testing.T) {
t.Error("No error")
}
m.AddRepo(config.RepositoryConfiguration{ID: "fresh", Directory: "XXX"})
m.AddFolder(config.FolderConfiguration{ID: "fresh", Directory: "XXX"})
ignores, err = m.GetIgnores("fresh")
if err != nil {
t.Error(err)

View File

@@ -1,51 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"sync"
"github.com/syncthing/syncthing/internal/protocol"
)
// nodeActivity tracks the number of outstanding requests per node and can
// answer which node is least busy. It is safe for use from multiple
// goroutines.
type nodeActivity struct {
act map[protocol.NodeID]int
mut sync.Mutex
}
func newNodeActivity() *nodeActivity {
return &nodeActivity{
act: make(map[protocol.NodeID]int),
}
}
func (m nodeActivity) leastBusy(availability []protocol.NodeID) protocol.NodeID {
m.mut.Lock()
var low int = 2<<30 - 1
var selected protocol.NodeID
for _, node := range availability {
if usage := m.act[node]; usage < low {
low = usage
selected = node
}
}
m.mut.Unlock()
return selected
}
func (m nodeActivity) using(node protocol.NodeID) {
m.mut.Lock()
defer m.mut.Unlock()
m.act[node]++
}
func (m nodeActivity) done(node protocol.NodeID) {
m.mut.Lock()
defer m.mut.Unlock()
m.act[node]--
}

View File

@@ -1,56 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"testing"
"github.com/syncthing/syncthing/internal/protocol"
)
func TestNodeActivity(t *testing.T) {
n0 := protocol.NodeID{1, 2, 3, 4}
n1 := protocol.NodeID{5, 6, 7, 8}
n2 := protocol.NodeID{9, 10, 11, 12}
nodes := []protocol.NodeID{n0, n1, n2}
na := newNodeActivity()
if lb := na.leastBusy(nodes); lb != n0 {
t.Errorf("Least busy node should be n0 (%v) not %v", n0, lb)
}
if lb := na.leastBusy(nodes); lb != n0 {
t.Errorf("Least busy node should still be n0 (%v) not %v", n0, lb)
}
na.using(na.leastBusy(nodes))
if lb := na.leastBusy(nodes); lb != n1 {
t.Errorf("Least busy node should be n1 (%v) not %v", n1, lb)
}
na.using(na.leastBusy(nodes))
if lb := na.leastBusy(nodes); lb != n2 {
t.Errorf("Least busy node should be n2 (%v) not %v", n2, lb)
}
na.using(na.leastBusy(nodes))
if lb := na.leastBusy(nodes); lb != n0 {
t.Errorf("Least busy node should be n0 (%v) not %v", n0, lb)
}
na.done(n1)
if lb := na.leastBusy(nodes); lb != n1 {
t.Errorf("Least busy node should be n1 (%v) not %v", n1, lb)
}
na.done(n2)
if lb := na.leastBusy(nodes); lb != n1 {
t.Errorf("Least busy node should still be n1 (%v) not %v", n1, lb)
}
na.done(n0)
if lb := na.leastBusy(nodes); lb != n0 {
t.Errorf("Least busy node should be n0 (%v) not %v", n0, lb)
}
}

View File

@@ -23,9 +23,9 @@ import (
// TODO: Stop on errors
const (
copiersPerRepo = 1
pullersPerRepo = 16
finishersPerRepo = 2
copiersPerFolder = 1
pullersPerFolder = 16
finishersPerFolder = 2
pauseIntv = 60 * time.Second
nextPullIntv = 10 * time.Second
checkPullIntv = 1 * time.Second
@@ -46,12 +46,12 @@ type copyBlocksState struct {
}
var (
activity = newNodeActivity()
errNoNode = errors.New("no available source node")
activity = newDeviceActivity()
errNoDevice = errors.New("no available source device")
)
type Puller struct {
repo string
folder string
dir string
scanIntv time.Duration
model *Model
@@ -75,8 +75,8 @@ func (p *Puller) Serve() {
defer func() {
pullTimer.Stop()
scanTimer.Stop()
// TODO: Should there be an actual RepoStopped state?
p.model.setState(p.repo, RepoIdle)
// TODO: Should there be an actual FolderStopped state?
p.model.setState(p.folder, FolderIdle)
}()
var prevVer uint64
@@ -94,10 +94,10 @@ loop:
// Index(), so that we immediately start a pull when new index
// information is available. Before that though, I'd like to build a
// repeatable benchmark of how long it takes to sync a change from
// node A to node B, so we have something to work against.
// device A to device B, so we have something to work against.
case <-pullTimer.C:
// RemoteLocalVersion() is a fast call, doesn't touch the database.
curVer := p.model.RemoteLocalVersion(p.repo)
curVer := p.model.RemoteLocalVersion(p.folder)
if curVer == prevVer {
pullTimer.Reset(checkPullIntv)
continue
@@ -106,11 +106,11 @@ loop:
if debug {
l.Debugln(p, "pulling", prevVer, curVer)
}
p.model.setState(p.repo, RepoSyncing)
p.model.setState(p.folder, FolderSyncing)
tries := 0
for {
tries++
changed := p.pullerIteration(copiersPerRepo, pullersPerRepo, finishersPerRepo)
changed := p.pullerIteration(copiersPerFolder, pullersPerFolder, finishersPerFolder)
if debug {
l.Debugln(p, "changed", changed)
}
@@ -120,8 +120,8 @@ loop:
// sync. Remember the local version number and
// schedule a resync a little bit into the future.
if lv := p.model.RemoteLocalVersion(p.repo); lv < curVer {
// There's a corner case where the node we needed
if lv := p.model.RemoteLocalVersion(p.folder); lv < curVer {
// There's a corner case where the device we needed
// files from disconnected during the puller
// iteration. The files will have been removed from
// the index, so we've concluded that we don't need
@@ -142,12 +142,12 @@ loop:
// we're not making it. Probably there are write
// errors preventing us. Flag this with a warning and
// wait a bit longer before retrying.
l.Warnf("Repo %q isn't making progress - check logs for possible root cause. Pausing puller for %v.", p.repo, pauseIntv)
l.Warnf("Folder %q isn't making progress - check logs for possible root cause. Pausing puller for %v.", p.folder, pauseIntv)
pullTimer.Reset(pauseIntv)
break
}
}
p.model.setState(p.repo, RepoIdle)
p.model.setState(p.folder, FolderIdle)
// The reason for running the scanner from within the puller is that
// this is the easiest way to make sure we are not doing both at the
@@ -156,12 +156,12 @@ loop:
if debug {
l.Debugln(p, "rescan")
}
p.model.setState(p.repo, RepoScanning)
if err := p.model.ScanRepo(p.repo); err != nil {
invalidateRepo(p.model.cfg, p.repo, err)
p.model.setState(p.folder, FolderScanning)
if err := p.model.ScanFolder(p.folder); err != nil {
invalidateFolder(p.model.cfg, p.folder, err)
break loop
}
p.model.setState(p.repo, RepoIdle)
p.model.setState(p.folder, FolderIdle)
scanTimer.Reset(p.scanIntv)
}
}
@@ -172,13 +172,13 @@ func (p *Puller) Stop() {
}
func (p *Puller) String() string {
return fmt.Sprintf("puller/%s@%p", p.repo, p)
return fmt.Sprintf("puller/%s@%p", p.folder, p)
}
// pullerIteration runs a single puller iteration for the given repo and
// pullerIteration runs a single puller iteration for the given folder and
// returns the number items that should have been synced (even those that
// might have failed). One puller iteration handles all files currently
// flagged as needed in the repo. The specified number of copier, puller and
// flagged as needed in the folder. The specified number of copier, puller and
// finisher routines are used. It's seldom efficient to use more than one
// copier routine, while multiple pullers are essential and multiple finishers
// may be useful (they are primarily CPU bound due to hashing).
@@ -218,7 +218,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
}
p.model.rmut.RLock()
files := p.model.repoFiles[p.repo]
files := p.model.folderFiles[p.folder]
p.model.rmut.RUnlock()
// !!!
@@ -228,7 +228,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
// !!!
changed := 0
files.WithNeed(protocol.LocalNodeID, func(intf protocol.FileIntf) bool {
files.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
// Needed items are delivered sorted lexicographically. This isn't
// really optimal from a performance point of view - it would be
@@ -240,7 +240,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
file := intf.(protocol.FileInfo)
events.Default.Log(events.ItemStarted, map[string]string{
"repo": p.repo,
"folder": p.folder,
"item": file.Name,
})
@@ -290,7 +290,7 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
mode := os.FileMode(file.Flags & 0777)
if debug {
curFile := p.model.CurrentRepoFile(p.repo, file.Name)
curFile := p.model.CurrentFolderFile(p.folder, file.Name)
l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
}
@@ -307,19 +307,19 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
}
if err = osutil.InWritableDir(mkdir, realName); err == nil {
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
} else {
l.Infof("Puller (repo %q, file %q): %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): %v", p.folder, file.Name, err)
}
return
}
// Weird error when stat()'ing the dir. Probably won't work to do
// anything else with it if we can't even stat() it.
l.Infof("Puller (repo %q, file %q): %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): %v", p.folder, file.Name, err)
return
} else if !info.IsDir() {
l.Infof("Puller (repo %q, file %q): should be dir, but is not", p.repo, file.Name)
l.Infof("Puller (folder %q, file %q): should be dir, but is not", p.folder, file.Name)
return
}
@@ -328,9 +328,9 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
// It's OK to change mode bits on stuff within non-writable directories.
if err := os.Chmod(realName, mode); err == nil {
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
} else {
l.Infof("Puller (repo %q, file %q): %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): %v", p.folder, file.Name, err)
}
}
@@ -339,7 +339,7 @@ func (p *Puller) deleteDir(file protocol.FileInfo) {
realName := filepath.Join(p.dir, file.Name)
err := osutil.InWritableDir(os.Remove, realName)
if err == nil || os.IsNotExist(err) {
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
}
}
@@ -355,16 +355,16 @@ func (p *Puller) deleteFile(file protocol.FileInfo) {
}
if err != nil {
l.Infof("Puller (repo %q, file %q): delete: %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): delete: %v", p.folder, file.Name, err)
} else {
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
}
}
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, pullChan chan<- pullBlockState) {
curFile := p.model.CurrentRepoFile(p.repo, file.Name)
curFile := p.model.CurrentFolderFile(p.folder, file.Name)
copyBlocks, pullBlocks := scanner.BlockDiff(curFile.Blocks, file.Blocks)
if len(copyBlocks) == len(curFile.Blocks) && len(pullBlocks) == 0 {
@@ -384,7 +384,7 @@ func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksSt
s := sharedPullerState{
file: file,
repo: p.repo,
folder: p.folder,
tempName: tempName,
realName: realName,
pullNeeded: len(pullBlocks),
@@ -422,18 +422,18 @@ func (p *Puller) shortcutFile(file protocol.FileInfo) {
realName := filepath.Join(p.dir, file.Name)
err := os.Chmod(realName, os.FileMode(file.Flags&0777))
if err != nil {
l.Infof("Puller (repo %q, file %q): shortcut: %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
return
}
t := time.Unix(file.Modified, 0)
err = os.Chtimes(realName, t, t)
if err != nil {
l.Infof("Puller (repo %q, file %q): shortcut: %v", p.repo, file.Name, err)
l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
return
}
p.model.updateLocal(p.repo, file)
p.model.updateLocal(p.folder, file)
}
// copierRoutine reads pullerStates until the in channel closes and performs
@@ -487,13 +487,13 @@ nextBlock:
continue nextBlock
}
// Select the least busy node to pull the block frop.model. If we found no
// feasible node at all, fail the block (and in the long run, the
// Select the least busy device to pull the block frop.model. If we found no
// feasible device at all, fail the block (and in the long run, the
// file).
potentialNodes := p.model.availability(p.repo, state.file.Name)
selected := activity.leastBusy(potentialNodes)
if selected == (protocol.NodeID{}) {
state.earlyClose("pull", errNoNode)
potentialDevices := p.model.availability(p.folder, state.file.Name)
selected := activity.leastBusy(potentialDevices)
if selected == (protocol.DeviceID{}) {
state.earlyClose("pull", errNoDevice)
continue nextBlock
}
@@ -505,10 +505,10 @@ nextBlock:
continue nextBlock
}
// Fetch the block, while marking the selected node as in use so that
// leastBusy can select another node when someone else asks.
// Fetch the block, while marking the selected device as in use so that
// leastBusy can select another device when someone else asks.
activity.using(selected)
buf, err := p.model.requestGlobal(selected, p.repo, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
buf, err := p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
activity.done(selected)
if err != nil {
state.earlyClose("pull", err)
@@ -589,7 +589,7 @@ func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
}
// Record the updated file in the index
p.model.updateLocal(p.repo, state.file)
p.model.updateLocal(p.folder, state.file)
}
}
}
@@ -609,11 +609,11 @@ func (p *Puller) clean() {
})
}
func invalidateRepo(cfg *config.Configuration, repoID string, err error) {
for i := range cfg.Repositories {
repo := &cfg.Repositories[i]
if repo.ID == repoID {
repo.Invalid = err.Error()
func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
for i := range cfg.Folders {
folder := &cfg.Folders[i]
if folder.ID == folderID {
folder.Invalid = err.Error()
return
}
}

View File

@@ -17,7 +17,7 @@ import (
type sharedPullerState struct {
// Immutable, does not require locking
file protocol.FileInfo
repo string
folder string
tempName string
realName string
@@ -113,7 +113,7 @@ func (s *sharedPullerState) earlyCloseLocked(context string, err error) {
return
}
l.Infof("Puller (repo %q, file %q): %s: %v", s.repo, s.file.Name, context, err)
l.Infof("Puller (folder %q, file %q): %s: %v", s.folder, s.file.Name, context, err)
s.err = err
if s.fd != nil {
s.fd.Close()
@@ -133,7 +133,7 @@ func (s *sharedPullerState) copyDone() {
s.mut.Lock()
s.copyNeeded--
if debug {
l.Debugln("sharedPullerState", s.repo, s.file.Name, "copyNeeded ->", s.pullNeeded)
l.Debugln("sharedPullerState", s.folder, s.file.Name, "copyNeeded ->", s.pullNeeded)
}
s.mut.Unlock()
}
@@ -142,7 +142,7 @@ func (s *sharedPullerState) pullDone() {
s.mut.Lock()
s.pullNeeded--
if debug {
l.Debugln("sharedPullerState", s.repo, s.file.Name, "pullNeeded ->", s.pullNeeded)
l.Debugln("sharedPullerState", s.folder, s.file.Name, "pullNeeded ->", s.pullNeeded)
}
s.mut.Unlock()
}

0
internal/model/testdata/.stignore vendored Normal file → Executable file
View File

View File

@@ -11,7 +11,7 @@ import (
type TestModel struct {
data []byte
repo string
folder string
name string
offset int64
size int
@@ -24,25 +24,25 @@ func newTestModel() *TestModel {
}
}
func (t *TestModel) Index(nodeID NodeID, repo string, files []FileInfo) {
func (t *TestModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
}
func (t *TestModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
func (t *TestModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
}
func (t *TestModel) Request(nodeID NodeID, repo, name string, offset int64, size int) ([]byte, error) {
t.repo = repo
func (t *TestModel) Request(deviceID DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
t.folder = folder
t.name = name
t.offset = offset
t.size = size
return t.data, nil
}
func (t *TestModel) Close(nodeID NodeID, err error) {
func (t *TestModel) Close(deviceID DeviceID, err error) {
close(t.closedCh)
}
func (t *TestModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
func (t *TestModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
}
func (t *TestModel) isClosed() bool {

View File

@@ -16,36 +16,36 @@ import (
"github.com/syncthing/syncthing/internal/luhn"
)
type NodeID [32]byte
type DeviceID [32]byte
var LocalNodeID = NodeID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
var LocalDeviceID = DeviceID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
// NewNodeID generates a new node ID from the raw bytes of a certificate
func NewNodeID(rawCert []byte) NodeID {
var n NodeID
// NewDeviceID generates a new device ID from the raw bytes of a certificate
func NewDeviceID(rawCert []byte) DeviceID {
var n DeviceID
hf := sha256.New()
hf.Write(rawCert)
hf.Sum(n[:0])
return n
}
func NodeIDFromString(s string) (NodeID, error) {
var n NodeID
func DeviceIDFromString(s string) (DeviceID, error) {
var n DeviceID
err := n.UnmarshalText([]byte(s))
return n, err
}
func NodeIDFromBytes(bs []byte) NodeID {
var n NodeID
func DeviceIDFromBytes(bs []byte) DeviceID {
var n DeviceID
if len(bs) != len(n) {
panic("incorrect length of byte slice representing node ID")
panic("incorrect length of byte slice representing device ID")
}
copy(n[:], bs)
return n
}
// String returns the canonical string representation of the node ID
func (n NodeID) String() string {
// String returns the canonical string representation of the device ID
func (n DeviceID) String() string {
id := base32.StdEncoding.EncodeToString(n[:])
id = strings.Trim(id, "=")
id, err := luhnify(id)
@@ -57,23 +57,23 @@ func (n NodeID) String() string {
return id
}
func (n NodeID) GoString() string {
func (n DeviceID) GoString() string {
return n.String()
}
func (n NodeID) Compare(other NodeID) int {
func (n DeviceID) Compare(other DeviceID) int {
return bytes.Compare(n[:], other[:])
}
func (n NodeID) Equals(other NodeID) bool {
func (n DeviceID) Equals(other DeviceID) bool {
return bytes.Compare(n[:], other[:]) == 0
}
func (n *NodeID) MarshalText() ([]byte, error) {
func (n *DeviceID) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}
func (n *NodeID) UnmarshalText(bs []byte) error {
func (n *DeviceID) UnmarshalText(bs []byte) error {
id := string(bs)
id = strings.Trim(id, "=")
id = strings.ToUpper(id)
@@ -98,7 +98,7 @@ func (n *NodeID) UnmarshalText(bs []byte) error {
copy(n[:], dec)
return nil
default:
return errors.New("node ID invalid: incorrect length")
return errors.New("device ID invalid: incorrect length")
}
}

View File

@@ -20,14 +20,14 @@ var formatCases = []string{
"p561017mzjnu2yiqgdreydm2mgtimgl3bxnpq6w5bmt88z4tjxzwicq2",
}
func TestFormatNodeID(t *testing.T) {
func TestFormatDeviceID(t *testing.T) {
for i, tc := range formatCases {
var id NodeID
var id DeviceID
err := id.UnmarshalText([]byte(tc))
if err != nil {
t.Errorf("#%d UnmarshalText(%q); %v", i, tc, err)
} else if f := id.String(); f != formatted {
t.Errorf("#%d FormatNodeID(%q)\n\t%q !=\n\t%q", i, tc, f, formatted)
t.Errorf("#%d FormatDeviceID(%q)\n\t%q !=\n\t%q", i, tc, f, formatted)
}
}
}
@@ -46,20 +46,20 @@ var validateCases = []struct {
{"p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicqCCCC", false},
}
func TestValidateNodeID(t *testing.T) {
func TestValidateDeviceID(t *testing.T) {
for _, tc := range validateCases {
var id NodeID
var id DeviceID
err := id.UnmarshalText([]byte(tc.s))
if (err == nil && !tc.ok) || (err != nil && tc.ok) {
t.Errorf("ValidateNodeID(%q); %v != %v", tc.s, err, tc.ok)
t.Errorf("ValidateDeviceID(%q); %v != %v", tc.s, err, tc.ok)
}
}
}
func TestMarshallingNodeID(t *testing.T) {
n0 := NodeID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
n1 := NodeID{}
n2 := NodeID{}
func TestMarshallingDeviceID(t *testing.T) {
n0 := DeviceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
n1 := DeviceID{}
n2 := DeviceID{}
bs, _ := n0.MarshalText()
n1.UnmarshalText(bs)

View File

@@ -7,7 +7,7 @@ package protocol
import "fmt"
type IndexMessage struct {
Repository string // max:64
Folder string // max:64
Files []FileInfo
}
@@ -90,7 +90,7 @@ func (b BlockInfo) String() string {
}
type RequestMessage struct {
Repository string // max:64
Folder string // max:64
Name string // max:8192
Offset uint64
Size uint32
@@ -103,7 +103,7 @@ type ResponseMessage struct {
type ClusterConfigMessage struct {
ClientName string // max:64
ClientVersion string // max:64
Repositories []Repository // max:64
Folders []Folder // max:64
Options []Option // max:64
}
@@ -116,12 +116,12 @@ func (o *ClusterConfigMessage) GetOption(key string) string {
return ""
}
type Repository struct {
type Folder struct {
ID string // max:64
Nodes []Node // max:64
Devices []Device // max:64
}
type Node struct {
type Device struct {
ID []byte // max:32
Flags uint32
MaxLocalVersion uint64

View File

@@ -18,10 +18,10 @@ IndexMessage Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Repository |
| Length of Folder |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Repository (variable length) \
\ Folder (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Files |
@@ -33,7 +33,7 @@ IndexMessage Structure:
struct IndexMessage {
string Repository<64>;
string Folder<64>;
FileInfo Files<>;
}
@@ -56,10 +56,10 @@ func (o IndexMessage) AppendXDR(bs []byte) []byte {
}
func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.Repository) > 64 {
if len(o.Folder) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.Repository)
xw.WriteString(o.Folder)
xw.WriteUint32(uint32(len(o.Files)))
for i := range o.Files {
_, err := o.Files[i].encodeXDR(xw)
@@ -82,7 +82,7 @@ func (o *IndexMessage) UnmarshalXDR(bs []byte) error {
}
func (o *IndexMessage) decodeXDR(xr *xdr.Reader) error {
o.Repository = xr.ReadStringMax(64)
o.Folder = xr.ReadStringMax(64)
_FilesSize := int(xr.ReadUint32())
o.Files = make([]FileInfo, _FilesSize)
for i := range o.Files {
@@ -362,10 +362,10 @@ RequestMessage Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Repository |
| Length of Folder |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Repository (variable length) \
\ Folder (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Name |
@@ -383,7 +383,7 @@ RequestMessage Structure:
struct RequestMessage {
string Repository<64>;
string Folder<64>;
string Name<8192>;
unsigned hyper Offset;
unsigned int Size;
@@ -408,10 +408,10 @@ func (o RequestMessage) AppendXDR(bs []byte) []byte {
}
func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.Repository) > 64 {
if len(o.Folder) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.Repository)
xw.WriteString(o.Folder)
if len(o.Name) > 8192 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
@@ -433,7 +433,7 @@ func (o *RequestMessage) UnmarshalXDR(bs []byte) error {
}
func (o *RequestMessage) decodeXDR(xr *xdr.Reader) error {
o.Repository = xr.ReadStringMax(64)
o.Folder = xr.ReadStringMax(64)
o.Name = xr.ReadStringMax(8192)
o.Offset = xr.ReadUint64()
o.Size = xr.ReadUint32()
@@ -517,10 +517,10 @@ ClusterConfigMessage Structure:
\ Client Version (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Repositories |
| Number of Folders |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Repository Structures \
\ Zero or more Folder Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Options |
@@ -534,7 +534,7 @@ ClusterConfigMessage Structure:
struct ClusterConfigMessage {
string ClientName<64>;
string ClientVersion<64>;
Repository Repositories<64>;
Folder Folders<64>;
Option Options<64>;
}
@@ -565,12 +565,12 @@ func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.ClientVersion)
if len(o.Repositories) > 64 {
if len(o.Folders) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteUint32(uint32(len(o.Repositories)))
for i := range o.Repositories {
_, err := o.Repositories[i].encodeXDR(xw)
xw.WriteUint32(uint32(len(o.Folders)))
for i := range o.Folders {
_, err := o.Folders[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -602,13 +602,13 @@ func (o *ClusterConfigMessage) UnmarshalXDR(bs []byte) error {
func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
o.ClientName = xr.ReadStringMax(64)
o.ClientVersion = xr.ReadStringMax(64)
_RepositoriesSize := int(xr.ReadUint32())
if _RepositoriesSize > 64 {
_FoldersSize := int(xr.ReadUint32())
if _FoldersSize > 64 {
return xdr.ErrElementSizeExceeded
}
o.Repositories = make([]Repository, _RepositoriesSize)
for i := range o.Repositories {
(&o.Repositories[i]).decodeXDR(xr)
o.Folders = make([]Folder, _FoldersSize)
for i := range o.Folders {
(&o.Folders[i]).decodeXDR(xr)
}
_OptionsSize := int(xr.ReadUint32())
if _OptionsSize > 64 {
@@ -623,7 +623,7 @@ func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
/*
Repository Structure:
Folder Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -634,48 +634,48 @@ Repository Structure:
\ ID (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Nodes |
| Number of Devices |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Node Structures \
\ Zero or more Device Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Repository {
struct Folder {
string ID<64>;
Node Nodes<64>;
Device Devices<64>;
}
*/
func (o Repository) EncodeXDR(w io.Writer) (int, error) {
func (o Folder) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o Repository) MarshalXDR() []byte {
func (o Folder) MarshalXDR() []byte {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o Repository) AppendXDR(bs []byte) []byte {
func (o Folder) AppendXDR(bs []byte) []byte {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
o.encodeXDR(xw)
return []byte(aw)
}
func (o Repository) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Folder) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.ID) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.ID)
if len(o.Nodes) > 64 {
if len(o.Devices) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteUint32(uint32(len(o.Nodes)))
for i := range o.Nodes {
_, err := o.Nodes[i].encodeXDR(xw)
xw.WriteUint32(uint32(len(o.Devices)))
for i := range o.Devices {
_, err := o.Devices[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -683,33 +683,33 @@ func (o Repository) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.Tot(), xw.Error()
}
func (o *Repository) DecodeXDR(r io.Reader) error {
func (o *Folder) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *Repository) UnmarshalXDR(bs []byte) error {
func (o *Folder) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *Repository) decodeXDR(xr *xdr.Reader) error {
func (o *Folder) decodeXDR(xr *xdr.Reader) error {
o.ID = xr.ReadStringMax(64)
_NodesSize := int(xr.ReadUint32())
if _NodesSize > 64 {
_DevicesSize := int(xr.ReadUint32())
if _DevicesSize > 64 {
return xdr.ErrElementSizeExceeded
}
o.Nodes = make([]Node, _NodesSize)
for i := range o.Nodes {
(&o.Nodes[i]).decodeXDR(xr)
o.Devices = make([]Device, _DevicesSize)
for i := range o.Devices {
(&o.Devices[i]).decodeXDR(xr)
}
return xr.Error()
}
/*
Node Structure:
Device Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -728,7 +728,7 @@ Node Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Node {
struct Device {
opaque ID<32>;
unsigned int Flags;
unsigned hyper MaxLocalVersion;
@@ -736,23 +736,23 @@ struct Node {
*/
func (o Node) EncodeXDR(w io.Writer) (int, error) {
func (o Device) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o Node) MarshalXDR() []byte {
func (o Device) MarshalXDR() []byte {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o Node) AppendXDR(bs []byte) []byte {
func (o Device) AppendXDR(bs []byte) []byte {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
o.encodeXDR(xw)
return []byte(aw)
}
func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.ID) > 32 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
@@ -762,18 +762,18 @@ func (o Node) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.Tot(), xw.Error()
}
func (o *Node) DecodeXDR(r io.Reader) error {
func (o *Device) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *Node) UnmarshalXDR(bs []byte) error {
func (o *Device) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *Node) decodeXDR(xr *xdr.Reader) error {
func (o *Device) decodeXDR(xr *xdr.Reader) error {
o.ID = xr.ReadBytesMax(32)
o.Flags = xr.ReadUint32()
o.MaxLocalVersion = xr.ReadUint64()

View File

@@ -14,29 +14,29 @@ type nativeModel struct {
next Model
}
func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.next.Index(nodeID, repo, files)
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.next.IndexUpdate(nodeID, repo, files)
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
name = norm.NFD.String(name)
return m.next.Request(nodeID, repo, name, offset, size)
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
m.next.ClusterConfig(nodeID, config)
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(nodeID NodeID, err error) {
m.next.Close(nodeID, err)
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}

View File

@@ -12,22 +12,22 @@ type nativeModel struct {
next Model
}
func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
m.next.Index(nodeID, repo, files)
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
m.next.IndexUpdate(nodeID, repo, files)
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
return m.next.Request(nodeID, repo, name, offset, size)
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
m.next.ClusterConfig(nodeID, config)
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(nodeID NodeID, err error) {
m.next.Close(nodeID, err)
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}

View File

@@ -26,7 +26,7 @@ type nativeModel struct {
next Model
}
func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
@@ -39,10 +39,10 @@ func (m nativeModel) Index(nodeID NodeID, repo string, files []FileInfo) {
}
files[i].Name = filepath.FromSlash(f.Name)
}
m.next.Index(nodeID, repo, files)
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
@@ -55,18 +55,18 @@ func (m nativeModel) IndexUpdate(nodeID NodeID, repo string, files []FileInfo) {
}
files[i].Name = filepath.FromSlash(files[i].Name)
}
m.next.IndexUpdate(nodeID, repo, files)
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error) {
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
name = filepath.FromSlash(name)
return m.next.Request(nodeID, repo, name, offset, size)
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(nodeID NodeID, config ClusterConfigMessage) {
m.next.ClusterConfig(nodeID, config)
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(nodeID NodeID, err error) {
m.next.Close(nodeID, err)
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}

View File

@@ -57,30 +57,30 @@ var (
)
type Model interface {
// An index was received from the peer node
Index(nodeID NodeID, repo string, files []FileInfo)
// An index update was received from the peer node
IndexUpdate(nodeID NodeID, repo string, files []FileInfo)
// A request was made by the peer node
Request(nodeID NodeID, repo string, name string, offset int64, size int) ([]byte, error)
// An index was received from the peer device
Index(deviceID DeviceID, folder string, files []FileInfo)
// An index update was received from the peer device
IndexUpdate(deviceID DeviceID, folder string, files []FileInfo)
// A request was made by the peer device
Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error)
// A cluster configuration message was received
ClusterConfig(nodeID NodeID, config ClusterConfigMessage)
// The peer node closed the connection
Close(nodeID NodeID, err error)
ClusterConfig(deviceID DeviceID, config ClusterConfigMessage)
// The peer device closed the connection
Close(deviceID DeviceID, err error)
}
type Connection interface {
ID() NodeID
ID() DeviceID
Name() string
Index(repo string, files []FileInfo) error
IndexUpdate(repo string, files []FileInfo) error
Request(repo string, name string, offset int64, size int) ([]byte, error)
Index(folder string, files []FileInfo) error
IndexUpdate(folder string, files []FileInfo) error
Request(folder string, name string, offset int64, size int) ([]byte, error)
ClusterConfig(config ClusterConfigMessage)
Statistics() Statistics
}
type rawConnection struct {
id NodeID
id DeviceID
name string
receiver Model
state int
@@ -123,7 +123,7 @@ const (
pingIdleTime = 60 * time.Second
)
func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver Model, name string, compress bool) Connection {
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress bool) Connection {
cr := &countingReader{Reader: reader}
cw := &countingWriter{Writer: writer}
@@ -132,7 +132,7 @@ func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver M
compThres = 128 // compress messages that are 128 bytes long or larger
}
c := rawConnection{
id: nodeID,
id: deviceID,
name: name,
receiver: nativeModel{receiver},
state: stateInitial,
@@ -152,7 +152,7 @@ func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver M
return wireFormatConnection{&c}
}
func (c *rawConnection) ID() NodeID {
func (c *rawConnection) ID() DeviceID {
return c.id
}
@@ -160,34 +160,34 @@ func (c *rawConnection) Name() string {
return c.name
}
// Index writes the list of file information to the connected peer node
func (c *rawConnection) Index(repo string, idx []FileInfo) error {
// Index writes the list of file information to the connected peer device
func (c *rawConnection) Index(folder string, idx []FileInfo) error {
select {
case <-c.closed:
return ErrClosed
default:
}
c.idxMut.Lock()
c.send(-1, messageTypeIndex, IndexMessage{repo, idx})
c.send(-1, messageTypeIndex, IndexMessage{folder, idx})
c.idxMut.Unlock()
return nil
}
// IndexUpdate writes the list of file information to the connected peer node as an update
func (c *rawConnection) IndexUpdate(repo string, idx []FileInfo) error {
// IndexUpdate writes the list of file information to the connected peer device as an update
func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo) error {
select {
case <-c.closed:
return ErrClosed
default:
}
c.idxMut.Lock()
c.send(-1, messageTypeIndexUpdate, IndexMessage{repo, idx})
c.send(-1, messageTypeIndexUpdate, IndexMessage{folder, idx})
c.idxMut.Unlock()
return nil
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
func (c *rawConnection) Request(folder string, name string, offset int64, size int) ([]byte, error) {
var id int
select {
case id = <-c.nextID:
@@ -203,7 +203,7 @@ func (c *rawConnection) Request(repo string, name string, offset int64, size int
c.awaiting[id] = rc
c.awaitingMut.Unlock()
ok := c.send(id, messageTypeRequest, RequestMessage{repo, name, uint64(offset), uint32(size)})
ok := c.send(id, messageTypeRequest, RequestMessage{folder, name, uint64(offset), uint32(size)})
if !ok {
return nil, ErrClosed
}
@@ -399,20 +399,20 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
func (c *rawConnection) handleIndex(im IndexMessage) {
if debug {
l.Debugf("Index(%v, %v, %d files)", c.id, im.Repository, len(im.Files))
l.Debugf("Index(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
}
c.receiver.Index(c.id, im.Repository, im.Files)
c.receiver.Index(c.id, im.Folder, im.Files)
}
func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
if debug {
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Repository, len(im.Files))
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
}
c.receiver.IndexUpdate(c.id, im.Repository, im.Files)
c.receiver.IndexUpdate(c.id, im.Folder, im.Files)
}
func (c *rawConnection) handleRequest(msgID int, req RequestMessage) {
data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))
data, _ := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), int(req.Size))
c.send(msgID, messageTypeResponse, ResponseMessage{data})
}

View File

@@ -21,8 +21,8 @@ import (
)
var (
c0ID = NewNodeID([]byte{1})
c1ID = NewNodeID([]byte{2})
c0ID = NewDeviceID([]byte{1})
c1ID = NewDeviceID([]byte{2})
)
func TestHeaderFunctions(t *testing.T) {
@@ -140,8 +140,8 @@ func TestPingErr(t *testing.T) {
// if string(d) != "response data" {
// t.Fatalf("Incorrect response data %q", string(d))
// }
// if m0.repo != "default" {
// t.Fatalf("Incorrect repo %q", m0.repo)
// if m0.folder != "default" {
// t.Fatalf("Incorrect folder %q", m0.folder)
// }
// if m0.name != "tn" {
// t.Fatalf("Incorrect name %q", m0.name)
@@ -240,13 +240,13 @@ func TestClose(t *testing.T) {
func TestElementSizeExceededNested(t *testing.T) {
m := ClusterConfigMessage{
Repositories: []Repository{
Folders: []Folder{
{ID: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon"},
},
}
_, err := m.EncodeXDR(ioutil.Discard)
if err == nil {
t.Errorf("ID length %d > max 64, but no error", len(m.Repositories[0].ID))
t.Errorf("ID length %d > max 64, but no error", len(m.Folders[0].ID))
}
}

View File

@@ -14,7 +14,7 @@ type wireFormatConnection struct {
next Connection
}
func (c wireFormatConnection) ID() NodeID {
func (c wireFormatConnection) ID() DeviceID {
return c.next.ID()
}
@@ -22,7 +22,7 @@ func (c wireFormatConnection) Name() string {
return c.next.Name()
}
func (c wireFormatConnection) Index(repo string, fs []FileInfo) error {
func (c wireFormatConnection) Index(folder string, fs []FileInfo) error {
var myFs = make([]FileInfo, len(fs))
copy(myFs, fs)
@@ -30,10 +30,10 @@ func (c wireFormatConnection) Index(repo string, fs []FileInfo) error {
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.next.Index(repo, myFs)
return c.next.Index(folder, myFs)
}
func (c wireFormatConnection) IndexUpdate(repo string, fs []FileInfo) error {
func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo) error {
var myFs = make([]FileInfo, len(fs))
copy(myFs, fs)
@@ -41,12 +41,12 @@ func (c wireFormatConnection) IndexUpdate(repo string, fs []FileInfo) error {
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.next.IndexUpdate(repo, myFs)
return c.next.IndexUpdate(folder, myFs)
}
func (c wireFormatConnection) Request(repo, name string, offset int64, size int) ([]byte, error) {
func (c wireFormatConnection) Request(folder, name string, offset int64, size int) ([]byte, error) {
name = norm.NFC.String(filepath.ToSlash(name))
return c.next.Request(repo, name, offset, size)
return c.next.Request(folder, name, offset, size)
}
func (c wireFormatConnection) ClusterConfig(config ClusterConfigMessage) {

View File

@@ -48,7 +48,7 @@ type CurrentFiler interface {
CurrentFile(name string) protocol.FileInfo
}
// Walk returns the list of files found in the local repository by scanning the
// Walk returns the list of files found in the local folder by scanning the
// file system. Files are blockwise hashed.
func (w *Walker) Walk() (chan protocol.FileInfo, error) {
if debug {

102
internal/stats/device.go Executable file
View File

@@ -0,0 +1,102 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package stats
import (
"time"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syndtr/goleveldb/leveldb"
)
const (
deviceStatisticTypeLastSeen = iota
)
var deviceStatisticsTypes = []byte{
deviceStatisticTypeLastSeen,
}
type DeviceStatistics struct {
LastSeen time.Time
}
type DeviceStatisticsReference struct {
db *leveldb.DB
device protocol.DeviceID
}
func NewDeviceStatisticsReference(db *leveldb.DB, device protocol.DeviceID) *DeviceStatisticsReference {
return &DeviceStatisticsReference{
db: db,
device: device,
}
}
func (s *DeviceStatisticsReference) key(stat byte) []byte {
k := make([]byte, 1+1+32)
k[0] = keyTypeDeviceStatistic
k[1] = stat
copy(k[1+1:], s.device[:])
return k
}
func (s *DeviceStatisticsReference) GetLastSeen() time.Time {
value, err := s.db.Get(s.key(deviceStatisticTypeLastSeen), nil)
if err != nil {
if err != leveldb.ErrNotFound {
l.Warnln("DeviceStatisticsReference: Failed loading last seen value for", s.device, ":", err)
}
return time.Unix(0, 0)
}
rtime := time.Time{}
err = rtime.UnmarshalBinary(value)
if err != nil {
l.Warnln("DeviceStatisticsReference: Failed parsing last seen value for", s.device, ":", err)
return time.Unix(0, 0)
}
if debug {
l.Debugln("stats.DeviceStatisticsReference.GetLastSeen:", s.device, rtime)
}
return rtime
}
func (s *DeviceStatisticsReference) WasSeen() {
if debug {
l.Debugln("stats.DeviceStatisticsReference.WasSeen:", s.device)
}
value, err := time.Now().MarshalBinary()
if err != nil {
l.Warnln("DeviceStatisticsReference: Failed serializing last seen value for", s.device, ":", err)
return
}
err = s.db.Put(s.key(deviceStatisticTypeLastSeen), value, nil)
if err != nil {
l.Warnln("Failed serializing last seen value for", s.device, ":", err)
}
}
// Never called, maybe because it's worth while to keep the data
// or maybe because we have no easy way of knowing that a device has been removed.
func (s *DeviceStatisticsReference) Delete() error {
for _, stype := range deviceStatisticsTypes {
err := s.db.Delete(s.key(stype), nil)
if debug && err == nil {
l.Debugln("stats.DeviceStatisticsReference.Delete:", s.device, stype)
}
if err != nil && err != leveldb.ErrNotFound {
return err
}
}
return nil
}
func (s *DeviceStatisticsReference) GetStatistics() DeviceStatistics {
return DeviceStatistics{
LastSeen: s.GetLastSeen(),
}
}

View File

@@ -6,5 +6,5 @@ package stats
// Same key space as files/leveldb.go keyType* constants
const (
keyTypeNodeStatistic = iota + 30
keyTypeDeviceStatistic = iota + 30
)

View File

@@ -1,102 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package stats
import (
"time"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syndtr/goleveldb/leveldb"
)
const (
nodeStatisticTypeLastSeen = iota
)
var nodeStatisticsTypes = []byte{
nodeStatisticTypeLastSeen,
}
type NodeStatistics struct {
LastSeen time.Time
}
type NodeStatisticsReference struct {
db *leveldb.DB
node protocol.NodeID
}
func NewNodeStatisticsReference(db *leveldb.DB, node protocol.NodeID) *NodeStatisticsReference {
return &NodeStatisticsReference{
db: db,
node: node,
}
}
func (s *NodeStatisticsReference) key(stat byte) []byte {
k := make([]byte, 1+1+32)
k[0] = keyTypeNodeStatistic
k[1] = stat
copy(k[1+1:], s.node[:])
return k
}
func (s *NodeStatisticsReference) GetLastSeen() time.Time {
value, err := s.db.Get(s.key(nodeStatisticTypeLastSeen), nil)
if err != nil {
if err != leveldb.ErrNotFound {
l.Warnln("NodeStatisticsReference: Failed loading last seen value for", s.node, ":", err)
}
return time.Unix(0, 0)
}
rtime := time.Time{}
err = rtime.UnmarshalBinary(value)
if err != nil {
l.Warnln("NodeStatisticsReference: Failed parsing last seen value for", s.node, ":", err)
return time.Unix(0, 0)
}
if debug {
l.Debugln("stats.NodeStatisticsReference.GetLastSeen:", s.node, rtime)
}
return rtime
}
func (s *NodeStatisticsReference) WasSeen() {
if debug {
l.Debugln("stats.NodeStatisticsReference.WasSeen:", s.node)
}
value, err := time.Now().MarshalBinary()
if err != nil {
l.Warnln("NodeStatisticsReference: Failed serializing last seen value for", s.node, ":", err)
return
}
err = s.db.Put(s.key(nodeStatisticTypeLastSeen), value, nil)
if err != nil {
l.Warnln("Failed serializing last seen value for", s.node, ":", err)
}
}
// Never called, maybe because it's worth while to keep the data
// or maybe because we have no easy way of knowing that a node has been removed.
func (s *NodeStatisticsReference) Delete() error {
for _, stype := range nodeStatisticsTypes {
err := s.db.Delete(s.key(stype), nil)
if debug && err == nil {
l.Debugln("stats.NodeStatisticsReference.Delete:", s.node, stype)
}
if err != nil && err != leveldb.ErrNotFound {
return err
}
}
return nil
}
func (s *NodeStatisticsReference) GetStatistics() NodeStatistics {
return NodeStatistics{
LastSeen: s.GetLastSeen(),
}
}

View File

@@ -21,11 +21,11 @@ func init() {
// The type holds our configuration
type Simple struct {
keep int
repoPath string
folderPath string
}
// The constructor function takes a map of parameters and creates the type.
func NewSimple(repoID, repoPath string, params map[string]string) Versioner {
func NewSimple(folderID, folderPath string, params map[string]string) Versioner {
keep, err := strconv.Atoi(params["keep"])
if err != nil {
keep = 5 // A reasonable default
@@ -33,7 +33,7 @@ func NewSimple(repoID, repoPath string, params map[string]string) Versioner {
s := Simple{
keep: keep,
repoPath: repoPath,
folderPath: folderPath,
}
if debug {
@@ -57,7 +57,7 @@ func (v Simple) Archive(filePath string) error {
}
}
versionsDir := filepath.Join(v.repoPath, ".stversions")
versionsDir := filepath.Join(v.folderPath, ".stversions")
_, err = os.Stat(versionsDir)
if err != nil {
if os.IsNotExist(err) {
@@ -76,12 +76,12 @@ func (v Simple) Archive(filePath string) error {
}
file := filepath.Base(filePath)
inRepoPath, err := filepath.Rel(v.repoPath, filepath.Dir(filePath))
inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
if err != nil {
return err
}
dir := filepath.Join(versionsDir, inRepoPath)
dir := filepath.Join(versionsDir, inFolderPath)
err = os.MkdirAll(dir, 0755)
if err != nil && !os.IsExist(err) {
return err

View File

@@ -30,7 +30,7 @@ type Interval struct {
type Staggered struct {
versionsPath string
cleanInterval int64
repoPath string
folderPath string
interval [4]Interval
mutex *sync.Mutex
}
@@ -83,7 +83,7 @@ func (v Staggered) renameOld() {
}
// The constructor function takes a map of parameters and creates the type.
func NewStaggered(repoID, repoPath string, params map[string]string) Versioner {
func NewStaggered(folderID, folderPath string, params map[string]string) Versioner {
maxAge, err := strconv.ParseInt(params["maxAge"], 10, 0)
if err != nil {
maxAge = 31536000 // Default: ~1 year
@@ -93,13 +93,13 @@ func NewStaggered(repoID, repoPath string, params map[string]string) Versioner {
cleanInterval = 3600 // Default: clean once per hour
}
// Use custom path if set, otherwise .stversions in repoPath
// Use custom path if set, otherwise .stversions in folderPath
var versionsDir string
if params["versionsPath"] == "" {
if debug {
l.Debugln("using default dir .stversions")
}
versionsDir = filepath.Join(repoPath, ".stversions")
versionsDir = filepath.Join(folderPath, ".stversions")
} else {
if debug {
l.Debugln("using dir", params["versionsPath"])
@@ -111,7 +111,7 @@ func NewStaggered(repoID, repoPath string, params map[string]string) Versioner {
s := Staggered{
versionsPath: versionsDir,
cleanInterval: cleanInterval,
repoPath: repoPath,
folderPath: folderPath,
interval: [4]Interval{
Interval{30, 3600}, // first hour -> 30 sec between versions
Interval{3600, 86400}, // next day -> 1 h between versions
@@ -320,12 +320,12 @@ func (v Staggered) Archive(filePath string) error {
}
file := filepath.Base(filePath)
inRepoPath, err := filepath.Rel(v.repoPath, filepath.Dir(filePath))
inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
if err != nil {
return err
}
dir := filepath.Join(v.versionsPath, inRepoPath)
dir := filepath.Join(v.versionsPath, inFolderPath)
err = os.MkdirAll(dir, 0755)
if err != nil && !os.IsExist(err) {
return err

View File

@@ -10,4 +10,4 @@ type Versioner interface {
Archive(filePath string) error
}
var Factories = map[string]func(repoID string, repoDir string, params map[string]string) Versioner{}
var Factories = map[string]func(folderID string, folderDir string, params map[string]string) Versioner{}