vendor: Update everything
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4620

vendor/github.com/sasha-s/go-deadlock/deadlock.go (generated, vendored, 40 lines changed)

@@ -1,6 +1,7 @@
 package deadlock
 
 import (
+    "bufio"
     "bytes"
     "fmt"
     "io"
@@ -22,13 +23,14 @@ var Opts = struct {
     // Waiting for a lock for longer than DeadlockTimeout is considered a deadlock.
     // Ignored is DeadlockTimeout <= 0.
     DeadlockTimeout time.Duration
-    // OnPotentialDeadlock is called each time a potential deadlock is deetcted -- either based on
+    // OnPotentialDeadlock is called each time a potential deadlock is detected -- either based on
     // lock order or on lock wait time.
     OnPotentialDeadlock func()
     // Will keep MaxMapSize lock pairs (happens before // happens after) in the map.
     // The map resets once the threshold is reached.
     MaxMapSize int
     // Will print to deadlock info to log buffer.
+    mu sync.Mutex // Protects the LogBuf.
     LogBuf io.Writer
 }{
     DeadlockTimeout: time.Second * 30,
@@ -49,7 +51,7 @@ type Mutex struct {
 // If the lock is already in use, the calling goroutine
 // blocks until the mutex is available.
 //
-// Unless deadlock detection is disabled, logs potential deadlocks to stderr,
+// Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf,
 // calling Opts.OnPotentialDeadlock on each occasion.
 func (m *Mutex) Lock() {
     lock(m.mu.Lock, m)
@@ -81,7 +83,7 @@ type RWMutex struct {
 // a blocked Lock call excludes new readers from acquiring
 // the lock.
 //
-// Unless deadlock detection is disabled, logs potential deadlocks to stderr,
+// Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf,
 // calling Opts.OnPotentialDeadlock on each occasion.
 func (m *RWMutex) Lock() {
     lock(m.mu.Lock, m)
@@ -102,7 +104,7 @@ func (m *RWMutex) Unlock() {
 
 // RLock locks the mutex for reading.
 //
-// Unless deadlock detection is disabled, logs potential deadlocks to stderr,
+// Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf,
 // calling Opts.OnPotentialDeadlock on each occasion.
 func (m *RWMutex) RLock() {
     lock(m.mu.RLock, m)
@@ -160,8 +162,9 @@ func lock(lockFn func(), ptr interface{}) {
             prev, ok := lo.cur[ptr]
             if !ok {
                 lo.mu.Unlock()
-                break // Nobody seems to be holding a lock, try again.
+                break // Nobody seems to be holding the lock, try again.
             }
+            Opts.mu.Lock()
             fmt.Fprintln(Opts.LogBuf, header)
             fmt.Fprintln(Opts.LogBuf, "Previous place where the lock was grabbed")
             fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %p\n", prev.gid, ptr)
@@ -182,7 +185,12 @@ func lock(lockFn func(), ptr interface{}) {
             lo.other(ptr)
             fmt.Fprintln(Opts.LogBuf, "All current goroutines:")
             Opts.LogBuf.Write(stacks)
+            fmt.Fprintln(Opts.LogBuf)
+            if buf, ok := Opts.LogBuf.(*bufio.Writer); ok {
+                buf.Flush()
+            }
             lo.mu.Unlock()
+            Opts.mu.Unlock()
             Opts.OnPotentialDeadlock()
             <-ch
             PostLock(4, ptr)
@@ -243,12 +251,28 @@ func (l *lockOrder) PreLock(skip int, p interface{}) {
     l.mu.Lock()
     for b, bs := range l.cur {
         if b == p {
+            if bs.gid == gid {
+                Opts.mu.Lock()
+                fmt.Fprintln(Opts.LogBuf, header, "Duplicate locking, saw callers this locks in one goroutine:")
+                fmt.Fprintf(Opts.LogBuf, "current goroutine %d lock %v \n", gid, b)
+                fmt.Fprintln(Opts.LogBuf, "all callers to this lock in the goroutine")
+                printStack(Opts.LogBuf, bs.stack)
+                printStack(Opts.LogBuf, stack)
+                l.other(p)
+                fmt.Fprintln(Opts.LogBuf)
+                if buf, ok := Opts.LogBuf.(*bufio.Writer); ok {
+                    buf.Flush()
+                }
+                Opts.mu.Unlock()
+                Opts.OnPotentialDeadlock()
+            }
             continue
         }
         if bs.gid != gid { // We want locks taken in the same goroutine only.
             continue
         }
         if s, ok := l.order[beforeAfter{p, b}]; ok {
+            Opts.mu.Lock()
             fmt.Fprintln(Opts.LogBuf, header, "Inconsistent locking. saw this ordering in one goroutine:")
             fmt.Fprintln(Opts.LogBuf, "happened before")
             printStack(Opts.LogBuf, s.before)
@@ -259,6 +283,11 @@ func (l *lockOrder) PreLock(skip int, p interface{}) {
             fmt.Fprintln(Opts.LogBuf, "happend after")
             printStack(Opts.LogBuf, stack)
             l.other(p)
+            fmt.Fprintln(Opts.LogBuf)
+            if buf, ok := Opts.LogBuf.(*bufio.Writer); ok {
+                buf.Flush()
+            }
+            Opts.mu.Unlock()
             Opts.OnPotentialDeadlock()
         }
         l.order[beforeAfter{b, p}] = ss{bs.stack, stack}
@@ -266,7 +295,6 @@ func (l *lockOrder) PreLock(skip int, p interface{}) {
             l.order = map[beforeAfter]ss{}
         }
     }
     l.cur[p] = stackGID{stack, gid}
     l.mu.Unlock()
 }
-
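For context, not part of this commit: the vendored package is a drop-in replacement for sync whose mutexes report potential deadlocks, and the fields touched above live on deadlock.Opts. A minimal sketch under that assumption; only the identifiers shown in the diff are taken from it, the surrounding program and values are illustrative.

package main

import (
	"bufio"
	"os"
	"time"

	"github.com/sasha-s/go-deadlock"
)

func main() {
	// Reports are written to Opts.LogBuf (serialized via the new Opts.mu);
	// a *bufio.Writer is flushed after each report, per the added code.
	deadlock.Opts.LogBuf = bufio.NewWriter(os.Stderr)
	// Waiting longer than this for a lock counts as a potential deadlock.
	deadlock.Opts.DeadlockTimeout = 30 * time.Second
	// Called on every potential deadlock; overridden here so the sketch
	// keeps running instead of taking the package's default action.
	deadlock.Opts.OnPotentialDeadlock = func() {}

	var mu deadlock.RWMutex // same Lock/Unlock/RLock API as sync.RWMutex
	mu.Lock()
	mu.Unlock()
	mu.RLock()
	mu.RUnlock()
}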
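Also illustrative, and assumed rather than taken from the commit: the kind of sequence the PreLock hunks above report. Locking the same two mutexes in opposite orders records a happens-before pair on the first pass and triggers the "Inconsistent locking" report on the inverted pass.

package main

import (
	"github.com/sasha-s/go-deadlock"
)

func main() {
	// Override the callback so this sketch continues past the report,
	// which is written to deadlock.Opts.LogBuf.
	deadlock.Opts.OnPotentialDeadlock = func() {}

	var a, b deadlock.Mutex

	// First pass: a is held while b is taken, recording "a before b".
	a.Lock()
	b.Lock()
	b.Unlock()
	a.Unlock()

	// Inverted pass: b is held while a is taken; PreLock finds the
	// recorded a-before-b pair and prints the inconsistent-locking report.
	b.Lock()
	a.Lock()
	a.Unlock()
	b.Unlock()
}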