New Cluster Configuration message replaces Options (fixes #63)

Jakob Borg
2014-04-13 15:28:26 +02:00
parent 41c228cb56
commit 5064f846fc
17 changed files with 767 additions and 209 deletions
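
For orientation before the per-file diffs: the old code advertised cluster membership as a hash inside a free-form connection options map (the connOpts removed below), while the new code exchanges a typed ClusterConfigMessage that names the client and spells out the full repository and node layout, which each side then compares explicitly. A minimal sketch of constructing such a message, using only fields that appear in this diff; the repository and node IDs and the version string are hypothetical, and the import path is as it stood at this commit:

package main

import (
	"fmt"

	"github.com/calmh/syncthing/protocol"
)

func main() {
	// Build the kind of message this commit introduces. Field names come from
	// the diff below; the concrete IDs and version string are made up.
	cm := protocol.ClusterConfigMessage{
		ClientName:    "syncthing",
		ClientVersion: "v0.8.x",
		Repositories: []protocol.Repository{
			{
				ID: "default",
				Nodes: []protocol.Node{
					{ID: "NODE-A", Flags: protocol.FlagShareTrusted},
					{ID: "NODE-B", Flags: protocol.FlagShareTrusted},
				},
			},
		},
	}
	fmt.Printf("%#v\n", cm)
}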

View File

@@ -1,9 +1,7 @@
package main
import (
"crypto/sha256"
"encoding/xml"
"fmt"
"io"
"reflect"
"sort"
@@ -231,15 +229,6 @@ func (l NodeConfigurationList) Len() int {
return len(l)
}
func clusterHash(nodes []NodeConfiguration) string {
sort.Sort(NodeConfigurationList(nodes))
h := sha256.New()
for _, n := range nodes {
h.Write([]byte(n.NodeID))
}
return fmt.Sprintf("%x", h.Sum(nil))
}
func cleanNodeList(nodes []NodeConfiguration, myID string) []NodeConfiguration {
var myIDExists bool
for _, node := range nodes {

View File

@@ -219,15 +219,9 @@ func main() {
m.ScanRepos()
m.SaveIndexes(confDir)
connOpts := map[string]string{
"clientId": "syncthing",
"clientVersion": Version,
"clusterHash": clusterHash(cfg.Repositories[0].Nodes),
}
// Routine to connect out to configured nodes
disc := discovery()
go listenConnect(myID, disc, m, tlsCfg, connOpts)
go listenConnect(myID, disc, m, tlsCfg)
for _, repo := range cfg.Repositories {
// Routine to pull blocks from other nodes to synchronize the local
@@ -325,7 +319,7 @@ func saveConfig() {
saveConfigCh <- struct{}{}
}
func listenConnect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Config, connOpts map[string]string) {
func listenConnect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Config) {
var conns = make(chan *tls.Conn)
// Listen
@@ -438,7 +432,7 @@ next:
if rateBucket != nil {
wr = &limitedWriter{conn, rateBucket}
}
protoConn := protocol.NewConnection(remoteID, conn, wr, m, connOpts)
protoConn := protocol.NewConnection(remoteID, conn, wr, m)
m.AddConnection(conn, protoConn)
continue next
}

View File

@@ -31,6 +31,7 @@ type Model struct {
protoConn map[string]protocol.Connection
rawConn map[string]io.Closer
nodeVer map[string]string
pmut sync.RWMutex // protects protoConn, rawConn and nodeVer
sup suppressor
@@ -56,6 +57,7 @@ func NewModel(maxChangeBw int) *Model {
cm: cid.NewMap(),
protoConn: make(map[string]protocol.Connection),
rawConn: make(map[string]io.Closer),
nodeVer: make(map[string]string),
sup: suppressor{threshold: int64(maxChangeBw)},
}
@@ -87,7 +89,6 @@ func (m *Model) StartRepoRO(repo string) {
type ConnectionInfo struct {
protocol.Statistics
Address string
ClientID string
ClientVersion string
Completion int
}
@@ -105,8 +106,7 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
for node, conn := range m.protoConn {
ci := ConnectionInfo{
Statistics: conn.Statistics(),
ClientID: conn.Option("clientId"),
ClientVersion: conn.Option("clientVersion"),
ClientVersion: m.nodeVer[node],
}
if nc, ok := m.rawConn[node].(remoteAddrer); ok {
ci.Address = nc.RemoteAddr().String()
@@ -245,15 +245,37 @@ func (m *Model) IndexUpdate(nodeID string, repo string, fs []protocol.FileInfo)
m.rmut.RUnlock()
}
func (m *Model) ClusterConfig(nodeID string, config protocol.ClusterConfigMessage) {
compErr := compareClusterConfig(m.clusterConfig(nodeID), config)
if debugNet {
dlog.Printf("ClusterConfig: %s: %#v", nodeID, config)
dlog.Printf(" ... compare: %s: %v", nodeID, compErr)
}
if compErr != nil {
warnf("%s: %v", nodeID, compErr)
m.Close(nodeID, compErr)
}
m.pmut.Lock()
if config.ClientName == "syncthing" {
m.nodeVer[nodeID] = config.ClientVersion
} else {
m.nodeVer[nodeID] = config.ClientName + " " + config.ClientVersion
}
m.pmut.Unlock()
}
// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node string, err error) {
if debugNet {
dlog.Printf("%s: %v", node, err)
}
if err == protocol.ErrClusterHash {
warnf("Connection to %s closed due to mismatched cluster hash. Ensure that the configured cluster members are identical on both nodes.", node)
} else if err != io.EOF {
if err != io.EOF {
warnf("Connection to %s closed: %v", node, err)
} else if _, ok := err.(ClusterConfigMismatch); ok {
warnf("Connection to %s closed: %v", node, err)
}
@@ -272,6 +294,7 @@ func (m *Model) Close(node string, err error) {
}
delete(m.protoConn, node)
delete(m.rawConn, node)
delete(m.nodeVer, node)
m.pmut.Unlock()
}
@@ -386,6 +409,9 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection)
m.rawConn[nodeID] = rawConn
m.pmut.Unlock()
cm := m.clusterConfig(nodeID)
protoConn.ClusterConfig(cm)
go func() {
m.rmut.RLock()
repos := m.nodeRepos[nodeID]
@@ -596,46 +622,28 @@ func (m *Model) loadIndex(repo string, dir string) []protocol.FileInfo {
return im.Files
}
func fileFromFileInfo(f protocol.FileInfo) scanner.File {
var blocks = make([]scanner.Block, len(f.Blocks))
var offset int64
for i, b := range f.Blocks {
blocks[i] = scanner.Block{
Offset: offset,
Size: b.Size,
Hash: b.Hash,
}
offset += int64(b.Size)
// clusterConfig returns a ClusterConfigMessage that is correct for the given peer node
func (m *Model) clusterConfig(node string) protocol.ClusterConfigMessage {
cm := protocol.ClusterConfigMessage{
ClientName: "syncthing",
ClientVersion: Version,
}
return scanner.File{
// Name is with native separator and normalization
Name: filepath.FromSlash(f.Name),
Size: offset,
Flags: f.Flags &^ protocol.FlagInvalid,
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
Suppressed: f.Flags&protocol.FlagInvalid != 0,
}
}
func fileInfoFromFile(f scanner.File) protocol.FileInfo {
var blocks = make([]protocol.BlockInfo, len(f.Blocks))
for i, b := range f.Blocks {
blocks[i] = protocol.BlockInfo{
Size: b.Size,
Hash: b.Hash,
m.rmut.Lock()
for _, repo := range m.nodeRepos[node] {
cr := protocol.Repository{
ID: repo,
}
for _, node := range m.repoNodes[repo] {
// TODO: Set read only bit when relevant
cr.Nodes = append(cr.Nodes, protocol.Node{
ID: node,
Flags: protocol.FlagShareTrusted,
})
}
cm.Repositories = append(cm.Repositories, cr)
}
pf := protocol.FileInfo{
Name: filepath.ToSlash(f.Name),
Flags: f.Flags,
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
}
if f.Suppressed {
pf.Flags |= protocol.FlagInvalid
}
return pf
m.rmut.Unlock()
return cm
}

View File

@@ -170,6 +170,8 @@ func (f FakeConnection) Request(repo, name string, offset int64, size int) ([]by
return f.requestData, nil
}
func (FakeConnection) ClusterConfig(protocol.ClusterConfigMessage) {}
func (FakeConnection) Ping() bool {
return true
}

View File

@@ -3,7 +3,11 @@ package main
import (
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
func MetricPrefix(n int64) string {
@@ -41,3 +45,99 @@ func Rename(from, to string) error {
}
return os.Rename(from, to)
}
func fileFromFileInfo(f protocol.FileInfo) scanner.File {
var blocks = make([]scanner.Block, len(f.Blocks))
var offset int64
for i, b := range f.Blocks {
blocks[i] = scanner.Block{
Offset: offset,
Size: b.Size,
Hash: b.Hash,
}
offset += int64(b.Size)
}
return scanner.File{
// Name is with native separator and normalization
Name: filepath.FromSlash(f.Name),
Size: offset,
Flags: f.Flags &^ protocol.FlagInvalid,
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
Suppressed: f.Flags&protocol.FlagInvalid != 0,
}
}
func fileInfoFromFile(f scanner.File) protocol.FileInfo {
var blocks = make([]protocol.BlockInfo, len(f.Blocks))
for i, b := range f.Blocks {
blocks[i] = protocol.BlockInfo{
Size: b.Size,
Hash: b.Hash,
}
}
pf := protocol.FileInfo{
Name: filepath.ToSlash(f.Name),
Flags: f.Flags,
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
}
if f.Suppressed {
pf.Flags |= protocol.FlagInvalid
}
return pf
}
func cmMap(cm protocol.ClusterConfigMessage) map[string]map[string]uint32 {
m := make(map[string]map[string]uint32)
for _, repo := range cm.Repositories {
m[repo.ID] = make(map[string]uint32)
for _, node := range repo.Nodes {
m[repo.ID][node.ID] = node.Flags
}
}
return m
}
type ClusterConfigMismatch error
// compareClusterConfig returns nil for two equivalent configurations,
// otherwise a descriptive error
func compareClusterConfig(local, remote protocol.ClusterConfigMessage) error {
lm := cmMap(local)
rm := cmMap(remote)
for repo, lnodes := range lm {
_ = lnodes
if rnodes, ok := rm[repo]; ok {
for node, lflags := range lnodes {
if rflags, ok := rnodes[node]; ok {
if lflags&protocol.FlagShareBits != rflags&protocol.FlagShareBits {
return ClusterConfigMismatch(fmt.Errorf("remote has different sharing flags for node %q in repository %q", node, repo))
}
} else {
return ClusterConfigMismatch(fmt.Errorf("remote is missing node %q in repository %q", node, repo))
}
}
} else {
return ClusterConfigMismatch(fmt.Errorf("remote is missing repository %q", repo))
}
}
for repo, rnodes := range rm {
if lnodes, ok := lm[repo]; ok {
for node := range rnodes {
if _, ok := lnodes[node]; !ok {
return ClusterConfigMismatch(fmt.Errorf("remote has extra node %q in repository %q", node, repo))
}
}
} else {
return ClusterConfigMismatch(fmt.Errorf("remote has extra repository %q", repo))
}
}
return nil
}
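
A short usage sketch of compareClusterConfig as it is wired up in Model.ClusterConfig above: the local node builds its own view of the cluster, compares it with the peer's message, and drops the connection on mismatch. This example is illustrative only and assumes it sits in the same package as the function above, so compareClusterConfig, warnf and the protocol import are already available; the repository and node IDs are made up.

// exampleCompare is not part of this commit; it only illustrates the error
// reported when the remote side does not announce a shared repository.
func exampleCompare() {
	local := protocol.ClusterConfigMessage{
		Repositories: []protocol.Repository{
			{ID: "default", Nodes: []protocol.Node{{ID: "a", Flags: protocol.FlagShareTrusted}}},
		},
	}
	// The remote announces no repositories at all.
	remote := protocol.ClusterConfigMessage{ClientName: "syncthing", ClientVersion: "v0.8.x"}

	if err := compareClusterConfig(local, remote); err != nil {
		warnf("%v", err) // remote is missing repository "default"
	}
}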

cmd/syncthing/util_test.go Normal file
View File

@@ -0,0 +1,183 @@
package main
import (
"testing"
"github.com/calmh/syncthing/protocol"
)
var testcases = []struct {
local, remote protocol.ClusterConfigMessage
err string
}{
{
local: protocol.ClusterConfigMessage{},
remote: protocol.ClusterConfigMessage{},
err: "",
},
{
local: protocol.ClusterConfigMessage{ClientName: "a", ClientVersion: "b"},
remote: protocol.ClusterConfigMessage{ClientName: "c", ClientVersion: "d"},
err: "",
},
{
local: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{ID: "foo"},
},
},
remote: protocol.ClusterConfigMessage{ClientName: "c", ClientVersion: "d"},
err: `remote is missing repository "foo"`,
},
{
local: protocol.ClusterConfigMessage{ClientName: "c", ClientVersion: "d"},
remote: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{ID: "foo"},
},
},
err: `remote has extra repository "foo"`,
},
{
local: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{ID: "foo"},
{ID: "bar"},
},
},
remote: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{ID: "foo"},
{ID: "bar"},
},
},
err: "",
},
{
local: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{ID: "quux"},
{ID: "foo"},
{ID: "bar"},
},
},
remote: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{ID: "bar"},
{ID: "quux"},
},
},
err: `remote is missing repository "foo"`,
},
{
local: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{ID: "quux"},
{ID: "bar"},
},
},
remote: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{ID: "bar"},
{ID: "foo"},
{ID: "quux"},
},
},
err: `remote has extra repository "foo"`,
},
{
local: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{
ID: "foo",
Nodes: []protocol.Node{
{ID: "a"},
},
},
{ID: "bar"},
},
},
remote: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{ID: "foo"},
{ID: "bar"},
},
},
err: `remote is missing node "a" in repository "foo"`,
},
{
local: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{
ID: "foo",
Nodes: []protocol.Node{
{ID: "a"},
},
},
{ID: "bar"},
},
},
remote: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{
ID: "foo",
Nodes: []protocol.Node{
{ID: "a"},
{ID: "b"},
},
},
{ID: "bar"},
},
},
err: `remote has extra node "b" in repository "foo"`,
},
{
local: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{
ID: "foo",
Nodes: []protocol.Node{
{
ID: "a",
Flags: protocol.FlagShareReadOnly,
},
},
},
{ID: "bar"},
},
},
remote: protocol.ClusterConfigMessage{
Repositories: []protocol.Repository{
{
ID: "foo",
Nodes: []protocol.Node{
{
ID: "a",
Flags: protocol.FlagShareTrusted,
},
},
},
{ID: "bar"},
},
},
err: `remote has different sharing flags for node "a" in repository "foo"`,
},
}
func TestCompareClusterConfig(t *testing.T) {
for i, tc := range testcases {
err := compareClusterConfig(tc.local, tc.remote)
switch {
case tc.err == "" && err != nil:
t.Errorf("#%d: unexpected error: %v", i, err)
case tc.err != "" && err == nil:
t.Errorf("#%d: unexpected nil error", i)
case tc.err != "" && err != nil && tc.err != err.Error():
t.Errorf("#%d: incorrect error: %q != %q", i, err, tc.err)
}
}
}
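
Assuming a standard Go toolchain of the era, the new table-driven test above can be exercised on its own from the repository root:

go test -run TestCompareClusterConfig ./cmd/syncthing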