cmd/syncthing, lib/events, lib/sync: Add timeout to REST event API, remove Ping (fixes #3933)

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3941
Antony Male
2017-01-31 12:04:29 +00:00
committed by Jakob Borg
parent 20f8b4fd57
commit ac510b26e2
11 changed files with 207 additions and 36 deletions
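In summary: BufferedSubscription.Since gains a timeout parameter and now returns whatever is buffered (possibly nothing) once the timeout expires, which lets long-polling REST callers unblock without the periodic Ping event removed here. A minimal sketch of such a caller, assuming only the new Since signature from this commit; the handler and every other name are illustrative, and net/http, encoding/json, time, and lib/events imports are assumed:

	// Hypothetical long-poll handler; only the Since signature is from this commit.
	func serveEvents(w http.ResponseWriter, bs events.BufferedSubscription, since int) {
		// Block until events newer than `since` arrive, or give up after a
		// minute and return an empty list. No Ping events are needed to unblock.
		evs := bs.Since(since, nil, time.Minute)
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(evs)
	}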

lib/events/events.go

@@ -10,7 +10,6 @@ package events
 import (
 	"errors"
 	"runtime"
-	stdsync "sync"
 	"time"

 	"github.com/syncthing/syncthing/lib/sync"
@@ -19,8 +18,7 @@ import (
 type EventType int

 const (
-	Ping EventType = 1 << iota
-	Starting
+	Starting EventType = 1 << iota
 	StartupComplete
 	DeviceDiscovered
 	DeviceConnected
@@ -55,8 +53,6 @@ var runningTests = false

 func (t EventType) String() string {
 	switch t {
-	case Ping:
-		return "Ping"
 	case Starting:
 		return "Starting"
 	case StartupComplete:
@@ -279,11 +275,11 @@ type bufferedSubscription struct {
 	next int
 	cur  int // Current SubscriptionID
 	mut  sync.Mutex
-	cond *stdsync.Cond
+	cond *sync.TimeoutCond
 }

 type BufferedSubscription interface {
-	Since(id int, into []Event) []Event
+	Since(id int, into []Event, timeout time.Duration) []Event
 }

 func NewBufferedSubscription(s *Subscription, size int) BufferedSubscription {
@@ -292,7 +288,7 @@ func NewBufferedSubscription(s *Subscription, size int) BufferedSubscription {
 		buf: make([]Event, size),
 		mut: sync.NewMutex(),
 	}
-	bs.cond = stdsync.NewCond(bs.mut)
+	bs.cond = sync.NewTimeoutCond(bs.mut)
 	go bs.pollingLoop()
 	return bs
 }
@@ -319,12 +315,21 @@ func (s *bufferedSubscription) pollingLoop() {
 	}
 }

-func (s *bufferedSubscription) Since(id int, into []Event) []Event {
+func (s *bufferedSubscription) Since(id int, into []Event, timeout time.Duration) []Event {
 	s.mut.Lock()
 	defer s.mut.Unlock()

-	for id >= s.cur {
-		s.cond.Wait()
+	// Check once first before generating the TimeoutCondWaiter
+	if id >= s.cur {
+		waiter := s.cond.SetupWait(timeout)
+		defer waiter.Stop()
+
+		for id >= s.cur {
+			if eventsAvailable := waiter.Wait(); !eventsAvailable {
+				// Timed out
+				return into
+			}
+		}
 	}

 	for i := s.next; i < len(s.buf); i++ {

lib/events/events_test.go

@@ -219,7 +219,7 @@ func TestBufferedSub(t *testing.T) {
 	recv := 0

 	for recv < 10*BufferSize {
-		evs := bs.Since(recv, nil)
+		evs := bs.Since(recv, nil, time.Minute)
 		for _, ev := range evs {
 			if ev.GlobalID != recv+1 {
 				t.Fatalf("Incorrect ID; %d != %d", ev.GlobalID, recv+1)
@@ -252,7 +252,7 @@ func BenchmarkBufferedSub(b *testing.B) {
 		recv := 0
 		var evs []Event
 		for i := 0; i < b.N; {
-			evs = bs.Since(recv, evs[:0])
+			evs = bs.Since(recv, evs[:0], time.Minute)
 			for _, ev := range evs {
 				if ev.GlobalID != recv+1 {
 					done <- fmt.Errorf("skipped event %v %v", ev.GlobalID, recv)
@@ -299,7 +299,7 @@ func TestSinceUsesSubscriptionId(t *testing.T) {
 	// delivered to the buffered subscription when we get here.
 	t0 := time.Now()
 	for time.Since(t0) < time.Second {
-		events := bs.Since(0, nil)
+		events := bs.Since(0, nil, time.Minute)
 		if len(events) == 2 {
 			break
 		}
@@ -308,7 +308,7 @@ func TestSinceUsesSubscriptionId(t *testing.T) {
 		}
 	}

-	events := bs.Since(1, nil)
+	events := bs.Since(1, nil, time.Minute)
 	if len(events) != 1 {
 		t.Fatal("Incorrect number of events:", len(events))
 	}
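The updated tests all pass a generous time.Minute, so they exercise only the event-delivery path, never the timeout. For illustration, a sketch of what a test of the timeout path itself could look like; it is not part of this commit, and it assumes the package's Default logger, AllEvents mask, and BufferSize constant used elsewhere in this file:

	func TestSinceTimesOut(t *testing.T) {
		s := Default.Subscribe(AllEvents)
		defer Default.Unsubscribe(s)
		bs := NewBufferedSubscription(s, BufferSize)

		// Nothing is logged here, so Since can only return via the timeout.
		t0 := time.Now()
		evs := bs.Since(0, nil, 50*time.Millisecond)
		if time.Since(t0) < 50*time.Millisecond {
			t.Error("Since returned before the timeout expired")
		}
		if len(evs) != 0 {
			t.Error("expected no events, got", len(evs))
		}
	}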

lib/sync/sync.go

@@ -227,3 +227,70 @@ func goid() int {
 	}
 	return id
 }
+
+// TimeoutCond is a variant on Cond. It has roughly the same semantics regarding 'L' - it must be held
+// both when broadcasting and when calling TimeoutCondWaiter.Wait().
+//
+// Call Broadcast() to broadcast to all waiters on the TimeoutCond. Call SetupWait to create a
+// TimeoutCondWaiter configured with the given timeout, which can then be used to listen for
+// broadcasts.
+type TimeoutCond struct {
+	L  sync.Locker
+	ch chan struct{}
+}
+
+// TimeoutCondWaiter is a type allowing a consumer to wait on a TimeoutCond with a timeout. Wait() may be called multiple times,
+// and will return true every time that the TimeoutCond is broadcast to. Once the configured timeout
+// expires, Wait() will return false.
+//
+// Call Stop() to release resources once this TimeoutCondWaiter is no longer needed.
+type TimeoutCondWaiter struct {
+	c     *TimeoutCond
+	timer *time.Timer
+}
+
+func NewTimeoutCond(l sync.Locker) *TimeoutCond {
+	return &TimeoutCond{
+		L: l,
+	}
+}
+
+func (c *TimeoutCond) Broadcast() {
+	// ch.L must be locked when calling this function
+	if c.ch != nil {
+		close(c.ch)
+		c.ch = nil
+	}
+}
+
+func (c *TimeoutCond) SetupWait(timeout time.Duration) *TimeoutCondWaiter {
+	timer := time.NewTimer(timeout)
+	return &TimeoutCondWaiter{
+		c:     c,
+		timer: timer,
+	}
+}
+
+func (w *TimeoutCondWaiter) Wait() bool {
+	// ch.L must be locked when calling this function
+
+	// Ensure that the channel exists, since we're going to be waiting on it
+	if w.c.ch == nil {
+		w.c.ch = make(chan struct{})
+	}
+
+	ch := w.c.ch
+	w.c.L.Unlock()
+	defer w.c.L.Lock()
+
+	select {
+	case <-w.timer.C:
+		return false
+	case <-ch:
+		return true
+	}
+}
+
+func (w *TimeoutCondWaiter) Stop() {
+	w.timer.Stop()
+}
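The comments above spell out the locking contract; put together, a consumer follows the same pattern as the rewritten Since: lock L, create a waiter, re-check the condition after every successful Wait, and Stop the waiter when done. A minimal sketch under those assumptions, using only the API added above; waitFor and the ready callback are illustrative names, and the lib/sync package is assumed imported as sync:

	// waitFor blocks until ready() reports true or the timeout expires,
	// returning false in the timed-out case. ready must be guarded by c.L.
	func waitFor(c *sync.TimeoutCond, ready func() bool, timeout time.Duration) bool {
		c.L.Lock()
		defer c.L.Unlock()

		waiter := c.SetupWait(timeout)
		defer waiter.Stop() // releases the underlying timer

		for !ready() {
			// Wait returns true when Broadcast fires, false once the timeout expires.
			if !waiter.Wait() {
				return false
			}
		}
		return true
	}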

lib/sync/sync_test.go

@@ -226,3 +226,100 @@ func TestWaitGroup(t *testing.T) {
 	debug = false
 	l.SetDebug("sync", false)
 }
+
+func TestTimeoutCond(t *testing.T) {
+	// WARNING this test relies heavily on threads not being stalled at particular points.
+	// As such, it's pretty unstable on the build server. It has been left in as it still
+	// exercises the deadlock detector, and one of the two things it tests is still functional.
+	// See the comments in runLocks.
+
+	const (
+		// Low values to avoid being intrusive in continuous testing. Can be
+		// increased significantly for stress testing.
+		iterations = 100
+		routines   = 10
+
+		timeMult = 2
+	)
+
+	c := NewTimeoutCond(NewMutex())
+
+	// Start a routine to periodically broadcast on the cond.
+	go func() {
+		d := time.Duration(routines) * timeMult * time.Millisecond / 2
+		t.Log("Broadcasting every", d)
+		for i := 0; i < iterations; i++ {
+			time.Sleep(d)
+
+			c.L.Lock()
+			c.Broadcast()
+			c.L.Unlock()
+		}
+	}()
+
+	// Start several routines that wait on it with different timeouts.
+	var results [routines][2]int
+	var wg sync.WaitGroup
+	for i := 0; i < routines; i++ {
+		i := i
+		wg.Add(1)
+		go func() {
+			d := time.Duration(i) * timeMult * time.Millisecond
+			t.Logf("Routine %d waits for %v\n", i, d)
+			succ, fail := runLocks(t, iterations, c, d)
+			results[i][0] = succ
+			results[i][1] = fail
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+
+	// Print a table of routine number: successes, failures.
+	for i, v := range results {
+		t.Logf("%4d: %4d %4d\n", i, v[0], v[1])
+	}
+}
+
+func runLocks(t *testing.T, iterations int, c *TimeoutCond, d time.Duration) (succ, fail int) {
+	for i := 0; i < iterations; i++ {
+		c.L.Lock()
+
+		// The thread may be stalled, so we can't test the 'succeeded late' case reliably.
+		// Therefore make sure that we start t0 before starting the timeout, and only test
+		// the 'failed early' case.
+		t0 := time.Now()
+		w := c.SetupWait(d)
+
+		res := w.Wait()
+		waited := time.Since(t0)
+
+		// Allow 20% slide in either direction, and five milliseconds of
+		// scheduling delay... In tweaking these it was clear that things
+		// worked like they should, so if this becomes a spurious failure
+		// kind of thing feel free to remove or give significantly more
+		// slack.
+		if !res && waited < d*8/10 {
+			t.Errorf("Wait failed early, %v < %v", waited, d)
+		}
+		if res && waited > d*11/10+5*time.Millisecond {
+			// Ideally this would be t.Errorf
+			t.Logf("WARNING: Wait succeeded late, %v > %v. This is probably a thread scheduling issue", waited, d)
+		}
+
+		w.Stop()
+
+		if res {
+			succ++
+		} else {
+			fail++
+		}
+		c.L.Unlock()
+	}
+	return
+}