vendor: Mega update all dependencies

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4080
This commit is contained in:
Jakob Borg
2017-04-05 14:34:41 +00:00
parent 49c1527724
commit a1bcc15458
1354 changed files with 55066 additions and 797850 deletions

122
vendor/github.com/cznic/lldb/2pc.go generated vendored
View File

@@ -73,6 +73,7 @@ const (
wpt00Header = iota
wpt00WriteData
wpt00Checkpoint
wpt00Empty
)
const (
@@ -96,13 +97,41 @@ const (
// [1]: http://godoc.org/github.com/cznic/exp/dbm
type ACIDFiler0 struct {
*RollbackFiler
bwal *bufio.Writer
data []acidWrite
newEpoch bool
peakBitFilerPages int // track maximum transaction memory
peakWal int64 // tracks WAL maximum used size
testHook bool // keeps WAL untruncated (once)
wal *os.File
bwal *bufio.Writer
data []acidWrite
newEpoch bool
peakWal int64 // tracks WAL maximum used size
testHook bool // keeps WAL untruncated (once)
wal *os.File
walOptions walOptions
}
// walOptions holds the tunable WAL properties collected from the
// WALOption functions passed to NewACIDFiler.
type walOptions struct {
	headroom int64 // Minimum WAL size; when non-zero the WAL is truncated to this size instead of zero.
}

// WALOption amends WAL properties. A WALOption mutates the walOptions it
// is handed; a non-nil error aborts NewACIDFiler.
type WALOption func(*walOptions) error
// MinWAL sets the minimum size a WAL file will have. The "extra" allocated
// file space serves as a headroom: commits that fit into the headroom
// should not fail due to 'not enough space on the volume' errors.
//
// Before taking effect, min is clamped to be non-negative and then rounded
// up to the next multiple of the Allocator atom size (16 bytes).
//
// Note: Setting minimum WAL size may render the DB non-recoverable when a
// crash occurs and the DB is opened in an earlier version of LLDB that does
// not support minimum WAL sizes.
func MinWAL(min int64) WALOption {
	if min < 0 {
		min = 0
	}
	if rem := min % 16; rem != 0 {
		min += 16 - rem
	}
	return func(o *walOptions) error {
		o.headroom = min
		return nil
	}
}
// NewACIDFiler0 returns a newly created ACIDFiler0 with WAL in wal.
@@ -111,17 +140,24 @@ type ACIDFiler0 struct {
// granted and no recovery procedure is taken.
//
// If the WAL is of non zero size then it is checked for having a
// commited/fully finished transaction not yet been reflected in db. If such
// committed/fully finished transaction not yet been reflected in db. If such
// transaction exists it's committed to db. If the recovery process finishes
// successfully, the WAL is truncated to zero size and fsync'ed prior to return
// from NewACIDFiler0.
func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
// successfully, the WAL is truncated to the minimum WAL size and fsync'ed
// prior to return from NewACIDFiler0.
//
// opts allow amending WAL properties.
func NewACIDFiler(db Filer, wal *os.File, opts ...WALOption) (r *ACIDFiler0, err error) {
fi, err := wal.Stat()
if err != nil {
return
}
r = &ACIDFiler0{wal: wal}
for _, o := range opts {
if err := o(&r.walOptions); err != nil {
return nil, err
}
}
if fi.Size() != 0 {
if err = r.recoverDb(db); err != nil {
@@ -149,15 +185,23 @@ func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
return
}
wfi, err := r.wal.Stat()
if err == nil {
r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
var wfi os.FileInfo
if wfi, err = r.wal.Stat(); err != nil {
return
}
r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
// Phase 1 commit complete
for _, v := range r.data {
if _, err := db.WriteAt(v.b, v.off); err != nil {
n := len(v.b)
if m := v.off + int64(n); m > sz {
if n -= int(m - sz); n <= 0 {
continue
}
}
if _, err = db.WriteAt(v.b[:n], v.off); err != nil {
return err
}
}
@@ -173,12 +217,8 @@ func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
// Phase 2 commit complete
if !r.testHook {
if err = r.wal.Truncate(0); err != nil {
return
}
if _, err = r.wal.Seek(0, 0); err != nil {
return
if err := r.emptyWAL(); err != nil {
return err
}
}
@@ -196,6 +236,33 @@ func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
return r, nil
}
// emptyWAL resets the WAL to its logically empty state: the file is
// truncated to the configured headroom (zero when no minimum WAL size was
// set) and the file offset is rewound to the start.
//
// When a non-zero headroom is configured the truncated WAL is not
// zero-sized, so a single wpt00Empty packet is written and flushed at the
// start of the file to mark it as empty; recoverDb recognizes this packet
// and skips recovery. The offset is rewound again afterwards so the next
// transaction writes from the beginning of the WAL.
func (a *ACIDFiler0) emptyWAL() error {
	if err := a.wal.Truncate(a.walOptions.headroom); err != nil {
		return err
	}
	if _, err := a.wal.Seek(0, 0); err != nil {
		return err
	}
	if a.walOptions.headroom != 0 {
		// Non-zero sized WAL: mark it as empty so a later recovery does
		// not mistake the headroom for a pending transaction.
		a.bwal.Reset(a.wal)
		if err := (*acidWriter0)(a).writePacket([]interface{}{wpt00Empty}); err != nil {
			return err
		}
		if err := a.bwal.Flush(); err != nil {
			return err
		}
		if _, err := a.wal.Seek(0, 0); err != nil {
			return err
		}
	}
	return nil
}
// PeakWALSize reports the maximum size WAL has ever used.
func (a ACIDFiler0) PeakWALSize() int64 {
return a.peakWal
@@ -235,6 +302,14 @@ func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
return
}
if items[0] == int64(wpt00Empty) {
if len(items) != 1 {
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
}
return nil
}
if len(items) != 3 || items[0] != int64(wpt00Header) || items[1] != int64(walTypeACIDFiler0) {
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
}
@@ -280,7 +355,8 @@ func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
}
for {
k, v, err := enum.current()
var k, v []byte
k, v, err = enum.current()
if err != nil {
if fileutil.IsEOF(err) {
break
@@ -312,7 +388,7 @@ func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
// Recovery complete
if err = a.wal.Truncate(0); err != nil {
if err := a.emptyWAL(); err != nil {
return err
}

View File

@@ -37,6 +37,10 @@ Packet definitions
This packet must be present only once - as the last packet of
a WAL file.
{wpt00Empty int}
The WAL size is of non-zero size due to configured headroom,
but empty otherwise.
*/
package lldb

View File

@@ -638,7 +638,7 @@ retry:
}
if e.enum.index == e.enum.p.len() && e.enum.serial == e.enum.t.serial {
if err := e.enum.next(); err != nil {
if err = e.enum.next(); err != nil {
e.err = err
return nil, nil, e.err
}
@@ -1037,7 +1037,7 @@ func (p btreeIndexPage) underflow(a btreeStore, root, iroot, parent int64, ph *i
return nil, err
}
if lc := btreeIndexPage(left).len(); lc > kIndex {
if lc := left.len(); lc > kIndex {
ppp := buffer.Get(maxBuf)
defer buffer.Put(ppp)
pp := *ppp
@@ -1049,10 +1049,10 @@ func (p btreeIndexPage) underflow(a btreeStore, root, iroot, parent int64, ph *i
p = p.setLen(pc + 1)
di, si, sz := 1+1*14, 1+0*14, (2*pc+1)*7
copy(p[di:di+sz], p[si:])
p.setChild(0, btreeIndexPage(left).child(lc))
p.setChild(0, left.child(lc))
p.setDataPage(0, btreeIndexPage(pp).dataPage(parentIndex-1))
*index++
btreeIndexPage(pp).setDataPage(parentIndex-1, btreeIndexPage(left).dataPage(lc-1))
btreeIndexPage(pp).setDataPage(parentIndex-1, left.dataPage(lc-1))
left = left.setLen(lc - 1)
if err = a.Realloc(parent, pp); err != nil {
return nil, err
@@ -1479,8 +1479,9 @@ func (p btreeDataPage) split(a btreeStore, root, ph, parent int64, parentIndex,
} else {
nr := newBTreeIndexPage(ph)
nr = nr.insert3(0, rh, rh)
nrh, err := a.Alloc(nr)
if err != nil {
var nrh int64
if nrh, err = a.Alloc(nr); err != nil {
return nil, err
}
@@ -1866,7 +1867,7 @@ func (root btree) String(a btreeStore) string {
}
}
f(int64(iroot), "")
f(iroot, "")
return strings.Join(s, "\n")
}
@@ -2074,8 +2075,9 @@ func (root btree) extract(a btreeStore, dst []byte, c func(a, b []byte) int, key
if ok {
if btreePage(p).isIndex() {
dph := btreeIndexPage(p).dataPage(index)
dp, err := a.Get(dst, dph)
if err != nil {
var dp []byte
if dp, err = a.Get(dst, dph); err != nil {
return nil, err
}
@@ -2088,7 +2090,6 @@ func (root btree) extract(a btreeStore, dst []byte, c func(a, b []byte) int, key
}
if btreeIndexPage(p).len() < kIndex && ph != iroot {
var err error
if p, err = btreeIndexPage(p).underflow(a, int64(root), iroot, parent, &ph, parentIndex, &index); err != nil {
return nil, err
}
@@ -2325,8 +2326,9 @@ func (root btree) clear2(a btreeStore, ph int64) (err error) {
case true:
ip := btreeIndexPage(p)
for i := 0; i <= ip.len(); i++ {
root.clear2(a, ip.child(i))
if err = root.clear2(a, ip.child(i)); err != nil {
return err
}
}
case false:
dp := btreeDataPage(p)

View File

@@ -45,7 +45,7 @@ type ErrPERM struct {
// Error implements the built in error type.
func (e *ErrPERM) Error() string {
return fmt.Sprintf("%s: Operation not permitted", string(e.Src))
return fmt.Sprintf("%s: Operation not permitted", e.Src)
}
// ErrTag represents an ErrILSEQ kind.

View File

@@ -287,16 +287,16 @@ Note: No Allocator method returns io.EOF.
type Allocator struct {
f Filer
flt flt
Compress bool // enables content compression
cache cache
m map[int64]*node
lru lst
mu sync.Mutex
expHit int64
expMiss int64
cacheSz int
hit uint16
miss uint16
mu sync.Mutex
Compress bool // enables content compression
}
// NewAllocator returns a new Allocator. To open an existing file, pass its
@@ -338,7 +338,7 @@ func NewAllocator(f Filer, opts *Options) (a *Allocator, err error) {
}
if _, err = f.WriteAt(b[:], 0); err != nil {
a.f.Rollback()
_ = a.f.Rollback()
return
}
@@ -704,7 +704,7 @@ reloc:
atoms := n2atoms(dlen)
switch atoms {
case 1:
switch tag := first[15]; tag {
switch tag = first[15]; tag {
default:
return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
case tagNotCompressed:
@@ -725,7 +725,7 @@ reloc:
return
}
switch tag := cc[0]; tag {
switch tag = cc[0]; tag {
default:
return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
case tagNotCompressed:
@@ -760,7 +760,7 @@ reloc:
return
}
switch tag := cc[0]; tag {
switch tag = cc[0]; tag {
default:
return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
case tagNotCompressed:
@@ -866,8 +866,8 @@ retry:
}
fh, fa := handle+needAtoms, atoms-needAtoms
sz, err := a.f.Size()
if err != nil {
var sz int64
if sz, err = a.f.Size(); err != nil {
return err
}
@@ -1147,7 +1147,7 @@ func (a *Allocator) makeUsedBlock(dst []byte, b []byte) (w []byte, rqAtoms int,
n2 := len(dst)
if rqAtoms2 := n2atoms(n2); rqAtoms2 < rqAtoms { // compression saved at least a single atom
w, n, rqAtoms, cc = dst, n2, rqAtoms2, tagCompressed
w, rqAtoms, cc = dst, rqAtoms2, tagCompressed
}
}
return
@@ -1206,7 +1206,7 @@ func (a *Allocator) verifyUnused(h, totalAtoms int64, tag byte, log func(error)
}
if atoms < 2 {
err = &ErrILSEQ{Type: ErrLongFreeBlkTooShort, Off: off, Arg: int64(atoms)}
err = &ErrILSEQ{Type: ErrLongFreeBlkTooShort, Off: off, Arg: atoms}
break
}

View File

@@ -7,15 +7,7 @@
package lldb
import (
"fmt"
"github.com/cznic/mathutil"
)
func doubleTrouble(first, second error) error {
return fmt.Errorf("%q. Additionally, while attempting to recover (rollback): %q", first, second)
}
import "github.com/cznic/mathutil"
// A Filer is a []byte-like model of a file or similar entity. It may
// optionally implement support for structural transaction safety. In contrast

2
vendor/github.com/cznic/lldb/gb.go generated vendored
View File

@@ -182,7 +182,7 @@ func EncodeScalars(scalars ...interface{}) (b []byte, err error) {
n := len(x)
if n <= 17 {
b = append(b, byte(gbBytes00+n))
b = append(b, []byte(x)...)
b = append(b, x...)
break
}

View File

@@ -13,12 +13,6 @@ import (
"github.com/cznic/internal/file"
)
const (
pgBits = 16
pgSize = 1 << pgBits
pgMask = pgSize - 1
)
var _ Filer = &MemFiler{}
// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and

View File

@@ -336,19 +336,18 @@ func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
// RollbackFiler is safe for concurrent use by multiple goroutines.
type RollbackFiler struct {
mu sync.RWMutex
inCallback bool
inCallbackMu sync.RWMutex
bitFiler *bitFiler
checkpoint func(int64) error
closed bool
f Filer
parent Filer
tlevel int // transaction nesting level, 0 == not in transaction
writerAt io.WriterAt
// afterRollback, if not nil, is called after performing Rollback
// without errors.
afterRollback func() error
tlevel int // transaction nesting level, 0 == not in transaction
closed bool
inCallback bool
}
// NewRollbackFiler returns a RollbackFiler wrapping f.