vendor: Mega update all dependencies

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4080
Jakob Borg
2017-04-05 14:34:41 +00:00
parent 49c1527724
commit a1bcc15458
1354 changed files with 55066 additions and 797850 deletions

vendor/github.com/cznic/b/btree.go (generated, vendored, 29 changed lines)

@@ -826,13 +826,7 @@ func (e *Enumerator) Next() (k interface{} /*K*/, v interface{} /*V*/, err error
}
if e.ver != e.t.ver {
f, hit := e.t.Seek(e.k)
if !e.hit && hit {
if err = f.next(); err != nil {
return
}
}
f, _ := e.t.Seek(e.k)
*e = *f
f.Close()
}
@@ -849,7 +843,7 @@ func (e *Enumerator) Next() (k interface{} /*K*/, v interface{} /*V*/, err error
i := e.q.d[e.i]
k, v = i.k, i.v
e.k, e.hit = k, false
e.k, e.hit = k, true
e.next()
return
}
@@ -880,13 +874,7 @@ func (e *Enumerator) Prev() (k interface{} /*K*/, v interface{} /*V*/, err error
}
if e.ver != e.t.ver {
f, hit := e.t.Seek(e.k)
if !e.hit && hit {
if err = f.prev(); err != nil {
return
}
}
f, _ := e.t.Seek(e.k)
*e = *f
f.Close()
}
@@ -895,15 +883,22 @@ func (e *Enumerator) Prev() (k interface{} /*K*/, v interface{} /*V*/, err error
return
}
if !e.hit {
// move to previous because Seek overshoots if there's no hit
if err = e.prev(); err != nil {
return
}
}
if e.i >= e.q.c {
if err = e.next(); err != nil {
if err = e.prev(); err != nil {
return
}
}
i := e.q.d[e.i]
k, v = i.k, i.v
e.k, e.hit = k, false
e.k, e.hit = k, true
e.prev()
return
}

vendor/github.com/cznic/bufs/LICENSE (generated, vendored, 27 changed lines)

@@ -1,27 +0,0 @@
Copyright (c) 2014 The bufs Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/cznic/bufs/bufs.go (generated, vendored, 391 changed lines)

@@ -1,391 +0,0 @@
// Copyright 2014 The bufs Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bufs implements a simple buffer cache.
//
// The intended use scheme is like:
//
// type Foo struct {
// buffers bufs.Buffers
// ...
// }
//
// // Bar can call Qux, but not the other way around (in this example).
// const maxFooDepth = 2
//
// func NewFoo() *Foo {
// return &Foo{buffers: bufs.New(maxFooDepth), ...}
// }
//
// func (f *Foo) Bar(n int) {
// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O
// defer f.buffers.Free()
// ...
// f.Qux(whatever)
// }
//
// func (f *Foo) Qux(n int) {
// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O
// defer f.buffers.Free()
// ...
// }
//
// The whole idea behind 'bufs' is that when calling e.g. Foo.Bar N times, then
// normally, without using 'bufs', there will be 2*N (in this example) []byte
// buffers allocated. While using 'bufs', only 2 buffers (in this example)
// will ever be created. For large N it can be a substantial difference.
//
// It's not a good idea to use Buffers to cache too big buffers. The cost of
// having a cached buffer is that the buffer is naturally not eligible for
// garbage collection. Of course, that holds only while the Foo instance is
// reachable, in the above example.
//
// The buffer count limit is intentionally "hard" (read panicking), although
// configurable in New(). The rationale is to prevent recursive calls, using
// Alloc, to cause excessive, "static" memory consumption. Tune the limit
// carefully or do not use Buffers from within [mutually] recursive functions
// where the nesting depth is not realistically bounded to some rather small
// number.
//
// Buffers cannot guarantee improvements to you program performance. There may
// be a gain in case where they fit well. Firm grasp on what your code is
// actually doing, when and in what order is essential to proper use of
// Buffers. It's _highly_ recommended to first do profiling and memory
// profiling before even thinking about using 'bufs'. The real world example,
// and cause for this package, was a first correct, yet no optimizations done
// version of a program; producing few MB of useful data while allocating 20+GB
// of memory. Of course the garbage collector properly kicked in, yet the
// memory abuse caused ~80+% of run time to be spent memory management. The
// program _was_ expected to be slow in its still development phase, but the
// bottleneck was guessed to be in I/O. Actually the hard disk was waiting for
// the billions bytes being allocated and zeroed. Garbage collect on low
// memory, rinse and repeat.
//
// In the provided tests, TestFoo and TestFooBufs do the same simulated work,
// except the later uses Buffers while the former does not. Suggested test runs
// which show the differences:
//
// $ go test -bench . -benchmem
//
// or
//
// $ go test -c
// $ ./bufs.test -test.v -test.run Foo -test.memprofile mem.out -test.memprofilerate 1
// $ go tool pprof bufs.test mem.out --alloc_space --nodefraction 0.0001 --edgefraction 0 -web
// $ # Note: Foo vs FooBufs allocated memory is in hundreds of MBs vs 8 kB.
//
// or
//
// $ make demo # same as all of the above
//
//
// NOTE: Alloc/Free calls must be properly nested in the same way as in for
// example BeginTransaction/EndTransaction pairs. If your code can panic then
// the pairing should be enforced by deferred calls.
//
// NOTE: Buffers objects do not allocate any space until requested by Alloc,
// the mechanism works on demand only.
//
// FAQ: Why the 'bufs' package name?
//
// Package name 'bufs' was intentionally chosen instead of the perhaps more
// conventional 'buf'. There are already too many 'buf' named things in the
// code out there and that'll be a source of a lot of trouble. It's a bit
// similar situation as in the case of package "strings" (not "string").
package bufs
import (
"errors"
"sort"
"sync"
)
// Buffers type represents a buffer ([]byte) cache.
//
// NOTE: Do not modify Buffers directly, use only its methods. Do not create
// additional values (copies) of Buffers, that'll break its functionality. Use
// a pointer instead to refer to a single instance from different
// places/scopes.
type Buffers [][]byte
// New returns a newly created instance of Buffers with a maximum capacity of n
// buffers.
//
// NOTE: 'bufs.New(n)' is the same as 'make(bufs.Buffers, n)'.
func New(n int) Buffers {
return make(Buffers, n)
}
// Alloc will return a buffer such that len(r) == n. It will firstly try to
// find an existing and unused buffer of big enough size. Only when there is no
// such, then one of the buffer slots is reallocated to a bigger size.
//
// It's okay to use append with buffers returned by Alloc. But it can cause
// allocation in that case and will again be producing load for the garbage
// collector. The best use of Alloc is for I/O buffers where the needed size of
// the buffer is figured out at some point of the code path in a 'final size'
// sense. Another real world example are compression/decompression buffers.
//
// NOTE: The buffer returned by Alloc _is not_ zeroed. That's okay for e.g.
// passing a buffer to io.Reader. If you need a zeroed buffer use Calloc.
//
// NOTE: Buffers returned from Alloc _must not_ be exposed/returned to your
// clients. Those buffers are intended to be used strictly internally, within
// the methods of some "object".
//
// NOTE: Alloc will panic if there are no buffers (buffer slots) left.
func (p *Buffers) Alloc(n int) (r []byte) {
b := *p
if len(b) == 0 {
panic(errors.New("Buffers.Alloc: out of buffers"))
}
biggest, best, biggestI, bestI := -1, -1, -1, -1
for i, v := range b {
//ln := len(v)
// The above was correct, buts it's just confusing. It worked
// because not the buffers, but slices of them are returned in
// the 'if best >= n' code path.
ln := cap(v)
if ln >= biggest {
biggest, biggestI = ln, i
}
if ln >= n && (bestI < 0 || best > ln) {
best, bestI = ln, i
if ln == n {
break
}
}
}
last := len(b) - 1
if best >= n {
r = b[bestI]
b[last], b[bestI] = b[bestI], b[last]
*p = b[:last]
return r[:n]
}
r = make([]byte, n, overCommit(n))
b[biggestI] = r
b[last], b[biggestI] = b[biggestI], b[last]
*p = b[:last]
return
}
// Calloc will acquire a buffer using Alloc and then clears it to zeros. The
// zeroing goes up to n, not cap(r).
func (p *Buffers) Calloc(n int) (r []byte) {
r = p.Alloc(n)
for i := range r {
r[i] = 0
}
return
}
// Free makes the lastly allocated by Alloc buffer free (available) again for
// Alloc.
//
// NOTE: Improper Free invocations, like in the sequence {New, Alloc, Free,
// Free}, will panic.
func (p *Buffers) Free() {
b := *p
b = b[:len(b)+1]
*p = b
}
// Stats reports memory consumed by Buffers, without accounting for some
// (smallish) additional overhead.
func (p *Buffers) Stats() (bytes int) {
b := *p
b = b[:cap(b)]
for _, v := range b {
bytes += cap(v)
}
return
}
// Cache caches buffers ([]byte). A zero value of Cache is ready for use.
//
// NOTE: Do not modify a Cache directly, use only its methods. Do not create
// additional values (copies) of a Cache, that'll break its functionality. Use
// a pointer instead to refer to a single instance from different
// places/scopes.
type Cache [][]byte
// Get returns a buffer ([]byte) of length n. If no such buffer is cached then
// a biggest cached buffer is resized to have length n and returned. If there
// are no cached items at all, Get returns a newly allocated buffer.
//
// In other words the cache policy is:
//
// - If the cache is empty, the buffer must be newly created and returned.
// Cache remains empty.
//
// - If a buffer of sufficient size is found in the cache, remove it from the
// cache and return it.
//
// - Otherwise the cache is non empty, but no cached buffer is big enough.
// Enlarge the biggest cached buffer, remove it from the cache and return it.
// This provide cached buffers size adjustment based on demand.
//
// In short, if the cache is not empty, Get guarantees to make it always one
// item less. This rules prevent uncontrolled cache grow in some scenarios.
// The older policy was not preventing that. Another advantage is better cached
// buffers sizes "auto tuning", although not in every possible use case.
//
// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's
// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer
// use Cget.
func (c *Cache) Get(n int) []byte {
r, _ := c.get(n)
return r
}
func (c *Cache) get(n int) (r []byte, isZeroed bool) {
s := *c
lens := len(s)
if lens == 0 {
r, isZeroed = make([]byte, n, overCommit(n)), true
return
}
i := sort.Search(lens, func(x int) bool { return len(s[x]) >= n })
if i == lens {
i--
s[i] = make([]byte, n, overCommit(n))
}
r = s[i][:n]
copy(s[i:], s[i+1:])
s[lens-1] = nil
s = s[:lens-1]
*c = s
return r, false
}
// Cget will acquire a buffer using Get and then clears it to zeros. The
// zeroing goes up to n, not cap(r).
func (c *Cache) Cget(n int) (r []byte) {
r, ok := c.get(n)
if ok {
return
}
for i := range r {
r[i] = 0
}
return
}
// Put caches b for possible later reuse (via Get). No other references to b's
// backing array may exist. Otherwise a big mess is sooner or later inevitable.
func (c *Cache) Put(b []byte) {
b = b[:cap(b)]
lenb := len(b)
if lenb == 0 {
return
}
s := *c
lens := len(s)
i := sort.Search(lens, func(x int) bool { return len(s[x]) >= lenb })
s = append(s, nil)
copy(s[i+1:], s[i:])
s[i] = b
*c = s
return
}
// Stats reports memory consumed by a Cache, without accounting for some
// (smallish) additional overhead. 'n' is the number of cached buffers, bytes
// is their combined capacity.
func (c Cache) Stats() (n, bytes int) {
n = len(c)
for _, v := range c {
bytes += cap(v)
}
return
}
// CCache is a Cache which is safe for concurrent use by multiple goroutines.
type CCache struct {
c Cache
mu sync.Mutex
}
// Get returns a buffer ([]byte) of length n. If no such buffer is cached then
// a biggest cached buffer is resized to have length n and returned. If there
// are no cached items at all, Get returns a newly allocated buffer.
//
// In other words the cache policy is:
//
// - If the cache is empty, the buffer must be newly created and returned.
// Cache remains empty.
//
// - If a buffer of sufficient size is found in the cache, remove it from the
// cache and return it.
//
// - Otherwise the cache is non empty, but no cached buffer is big enough.
// Enlarge the biggest cached buffer, remove it from the cache and return it.
// This provide cached buffers size adjustment based on demand.
//
// In short, if the cache is not empty, Get guarantees to make it always one
// item less. This rules prevent uncontrolled cache grow in some scenarios.
// The older policy was not preventing that. Another advantage is better cached
// buffers sizes "auto tuning", although not in every possible use case.
//
// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's
// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer
// use Cget.
func (c *CCache) Get(n int) []byte {
c.mu.Lock()
r, _ := c.c.get(n)
c.mu.Unlock()
return r
}
// Cget will acquire a buffer using Get and then clears it to zeros. The
// zeroing goes up to n, not cap(r).
func (c *CCache) Cget(n int) (r []byte) {
c.mu.Lock()
r = c.c.Cget(n)
c.mu.Unlock()
return
}
// Put caches b for possible later reuse (via Get). No other references to b's
// backing array may exist. Otherwise a big mess is sooner or later inevitable.
func (c *CCache) Put(b []byte) {
c.mu.Lock()
c.c.Put(b)
c.mu.Unlock()
}
// Stats reports memory consumed by a Cache, without accounting for some
// (smallish) additional overhead. 'n' is the number of cached buffers, bytes
// is their combined capacity.
func (c *CCache) Stats() (n, bytes int) {
c.mu.Lock()
n, bytes = c.c.Stats()
c.mu.Unlock()
return
}
// GCache is a ready to use global instance of a CCache.
var GCache CCache
func overCommit(n int) int {
switch {
case n < 8:
return 8
case n < 1e5:
return 2 * n
case n < 1e6:
return 3 * n / 2
default:
return n
}
}

View File

@@ -9,6 +9,8 @@ import (
"os"
)
const hasPunchHole = false
// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Not supported on ARM.
func PunchHole(f *os.File, off, len int64) error {

View File

@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !arm
package fileutil
import (
@@ -9,6 +11,8 @@ import (
"os"
)
const hasPunchHole = false
// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Not supported on OSX.
func PunchHole(f *os.File, off, len int64) error {

View File

@@ -11,6 +11,8 @@ import (
"os"
)
const hasPunchHole = false
// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Unimplemented on FreeBSD.
func PunchHole(f *os.File, off, len int64) error {

View File

@@ -15,6 +15,8 @@ import (
"syscall"
)
const hasPunchHole = true
func n(s []byte) byte {
for i, c := range s {
if c < '0' || c > '9' {

View File

@@ -11,6 +11,8 @@ import (
"os"
)
const hasPunchHole = false
// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Similar to FreeBSD, this is
// unimplemented.

View File

@@ -9,6 +9,8 @@ import (
"os"
)
const hasPunchHole = false
// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Similar to FreeBSD, this is
// unimplemented.

View File

@@ -9,6 +9,8 @@ import (
"os"
)
const hasPunchHole = false
// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Unimplemented on Plan 9.
func PunchHole(f *os.File, off, len int64) error {

View File

@@ -11,6 +11,8 @@ import (
"os"
)
const hasPunchHole = false
// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Not supported on Solaris.
func PunchHole(f *os.File, off, len int64) error {

View File

@@ -12,6 +12,8 @@ import (
"unsafe"
)
const hasPunchHole = true
// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Not supported on Windows.
func PunchHole(f *os.File, off, len int64) error {

View File

@@ -34,6 +34,7 @@ package buffer
import (
"github.com/cznic/internal/slice"
"io"
)
// CGet returns a pointer to a byte slice of len size. The pointed to byte
@@ -53,3 +54,93 @@ func Get(size int) *[]byte { return slice.Bytes.Get(size).(*[]byte) }
//
// Put is safe for concurrent use by multiple goroutines.
func Put(p *[]byte) { slice.Bytes.Put(p) }
// Bytes is similar to bytes.Buffer but may generate less garbage when properly
// Closed. Zero value is ready to use.
type Bytes struct {
p *[]byte
}
// Bytes return the content of b. The result is R/O.
func (b *Bytes) Bytes() []byte {
if b.p != nil {
return *b.p
}
return nil
}
// Close will recycle the underlying storage, if any. After Close, b is again
// the zero value.
func (b *Bytes) Close() error {
if b.p != nil {
Put(b.p)
b.p = nil
}
return nil
}
// Len returns the size of content in b.
func (b *Bytes) Len() int {
if b.p != nil {
return len(*b.p)
}
return 0
}
// Reset discard the content of Bytes while keeping the internal storage, if any.
func (b *Bytes) Reset() {
if b.p != nil {
*b.p = (*b.p)[:0]
}
}
// Write writes p into b and returns (len(p), nil).
func (b *Bytes) Write(p []byte) (int, error) {
n := b.Len()
b.grow(n + len(p))
copy((*b.p)[n:], p)
return len(p), nil
}
// WriteByte writes p into b and returns nil.
func (b *Bytes) WriteByte(p byte) error {
n := b.Len()
b.grow(n + 1)
(*b.p)[n] = p
return nil
}
// WriteTo writes b's content to w and returns the number of bytes written to w
// and an error, if any.
func (b *Bytes) WriteTo(w io.Writer) (int64, error) {
n, err := w.Write(b.Bytes())
return int64(n), err
}
// WriteString writes s to b and returns (len(s), nil).
func (b *Bytes) WriteString(s string) (int, error) {
n := b.Len()
b.grow(n + len(s))
copy((*b.p)[n:], s)
return len(s), nil
}
func (b *Bytes) grow(n int) {
if b.p != nil {
if n <= cap(*b.p) {
*b.p = (*b.p)[:n]
return
}
np := Get(2 * n)
*np = (*np)[:n]
copy(*np, *b.p)
Put(b.p)
b.p = np
return
}
b.p = Get(n)
}
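
For reference, a minimal sketch of the newly added Bytes type in use, assuming the import path github.com/cznic/internal/buffer (the file header for this hunk was not captured above); the values are illustrative:

package main

import (
	"fmt"

	"github.com/cznic/internal/buffer" // assumed import path for the vendored buffer package
)

func main() {
	var b buffer.Bytes // the zero value is ready to use
	b.WriteString("hello, ")
	b.Write([]byte("world"))
	fmt.Printf("%s (%d bytes)\n", b.Bytes(), b.Len()) // hello, world (12 bytes)
	b.Reset() // drop the content, keep the backing storage
	b.Close() // recycle the backing storage through Put
}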

vendor/github.com/cznic/lldb/2pc.go (generated, vendored, 122 changed lines)

@@ -73,6 +73,7 @@ const (
wpt00Header = iota
wpt00WriteData
wpt00Checkpoint
wpt00Empty
)
const (
@@ -96,13 +97,41 @@ const (
// [1]: http://godoc.org/github.com/cznic/exp/dbm
type ACIDFiler0 struct {
*RollbackFiler
bwal *bufio.Writer
data []acidWrite
newEpoch bool
peakBitFilerPages int // track maximum transaction memory
peakWal int64 // tracks WAL maximum used size
testHook bool // keeps WAL untruncated (once)
wal *os.File
bwal *bufio.Writer
data []acidWrite
newEpoch bool
peakWal int64 // tracks WAL maximum used size
testHook bool // keeps WAL untruncated (once)
wal *os.File
walOptions walOptions
}
type walOptions struct {
headroom int64 // Minimum WAL size.
}
// WALOption amends WAL properties.
type WALOption func(*walOptions) error
// MinWAL sets the minimum size a WAL file will have. The "extra" allocated
// file space serves as a headroom. Commits that fit into the headroom should
// not fail due to 'not enough space on the volume' errors.
//
// The min parameter is first rounded up to a non-negative multiple of the size
// of the Allocator atom.
//
// Note: Setting minimum WAL size may render the DB non-recoverable when a
// crash occurs and the DB is opened in an earlier version of LLDB that does
// not support minimum WAL sizes.
func MinWAL(min int64) WALOption {
min = mathutil.MaxInt64(0, min)
if r := min % 16; r != 0 {
min += 16 - r
}
return func(o *walOptions) error {
o.headroom = min
return nil
}
}
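
A minimal sketch of passing the new option to NewACIDFiler, mirroring the filer chain used by ql/file.go later in this commit; the 1 MiB headroom is illustrative, and passing *os.File where an lldb.OSFile is expected is assumed to work as in those call sites:

package example

import (
	"os"

	"github.com/cznic/lldb"
)

// openACID wraps an already opened database file and its WAL file in an
// ACIDFiler0 that keeps at least 1 MiB of WAL headroom.
func openACID(dbFile, wal *os.File) (*lldb.ACIDFiler0, error) {
	filer := lldb.Filer(lldb.NewOSFiler(dbFile))
	filer = lldb.NewInnerFiler(filer, 16) // same 16-byte offset as the ql call sites
	return lldb.NewACIDFiler(filer, wal, lldb.MinWAL(1<<20))
}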
// NewACIDFiler0 returns a newly created ACIDFiler0 with WAL in wal.
@@ -111,17 +140,24 @@ type ACIDFiler0 struct {
// granted and no recovery procedure is taken.
//
// If the WAL is of non zero size then it is checked for having a
// commited/fully finished transaction not yet been reflected in db. If such
// committed/fully finished transaction not yet been reflected in db. If such
// transaction exists it's committed to db. If the recovery process finishes
// successfully, the WAL is truncated to zero size and fsync'ed prior to return
// from NewACIDFiler0.
func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
// successfully, the WAL is truncated to the minimum WAL size and fsync'ed
// prior to return from NewACIDFiler0.
//
// The opts arguments allow amending WAL properties.
func NewACIDFiler(db Filer, wal *os.File, opts ...WALOption) (r *ACIDFiler0, err error) {
fi, err := wal.Stat()
if err != nil {
return
}
r = &ACIDFiler0{wal: wal}
for _, o := range opts {
if err := o(&r.walOptions); err != nil {
return nil, err
}
}
if fi.Size() != 0 {
if err = r.recoverDb(db); err != nil {
@@ -149,15 +185,23 @@ func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
return
}
wfi, err := r.wal.Stat()
if err == nil {
r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
var wfi os.FileInfo
if wfi, err = r.wal.Stat(); err != nil {
return
}
r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
// Phase 1 commit complete
for _, v := range r.data {
if _, err := db.WriteAt(v.b, v.off); err != nil {
n := len(v.b)
if m := v.off + int64(n); m > sz {
if n -= int(m - sz); n <= 0 {
continue
}
}
if _, err = db.WriteAt(v.b[:n], v.off); err != nil {
return err
}
}
@@ -173,12 +217,8 @@ func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
// Phase 2 commit complete
if !r.testHook {
if err = r.wal.Truncate(0); err != nil {
return
}
if _, err = r.wal.Seek(0, 0); err != nil {
return
if err := r.emptyWAL(); err != nil {
return err
}
}
@@ -196,6 +236,33 @@ func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
return r, nil
}
func (a *ACIDFiler0) emptyWAL() error {
if err := a.wal.Truncate(a.walOptions.headroom); err != nil {
return err
}
if _, err := a.wal.Seek(0, 0); err != nil {
return err
}
if a.walOptions.headroom != 0 {
a.bwal.Reset(a.wal)
if err := (*acidWriter0)(a).writePacket([]interface{}{wpt00Empty}); err != nil {
return err
}
if err := a.bwal.Flush(); err != nil {
return err
}
if _, err := a.wal.Seek(0, 0); err != nil {
return err
}
}
return nil
}
// PeakWALSize reports the maximum size WAL has ever used.
func (a ACIDFiler0) PeakWALSize() int64 {
return a.peakWal
@@ -235,6 +302,14 @@ func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
return
}
if items[0] == int64(wpt00Empty) {
if len(items) != 1 {
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
}
return nil
}
if len(items) != 3 || items[0] != int64(wpt00Header) || items[1] != int64(walTypeACIDFiler0) {
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
}
@@ -280,7 +355,8 @@ func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
}
for {
k, v, err := enum.current()
var k, v []byte
k, v, err = enum.current()
if err != nil {
if fileutil.IsEOF(err) {
break
@@ -312,7 +388,7 @@ func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
// Recovery complete
if err = a.wal.Truncate(0); err != nil {
if err := a.emptyWAL(); err != nil {
return err
}

View File

@@ -37,6 +37,10 @@ Packet definitions
This packet must be present only once - as the last packet of
a WAL file.
{wpt00Empty int}
The WAL file is of non-zero size due to the configured headroom,
but is otherwise empty.
*/
package lldb

View File

@@ -638,7 +638,7 @@ retry:
}
if e.enum.index == e.enum.p.len() && e.enum.serial == e.enum.t.serial {
if err := e.enum.next(); err != nil {
if err = e.enum.next(); err != nil {
e.err = err
return nil, nil, e.err
}
@@ -1037,7 +1037,7 @@ func (p btreeIndexPage) underflow(a btreeStore, root, iroot, parent int64, ph *i
return nil, err
}
if lc := btreeIndexPage(left).len(); lc > kIndex {
if lc := left.len(); lc > kIndex {
ppp := buffer.Get(maxBuf)
defer buffer.Put(ppp)
pp := *ppp
@@ -1049,10 +1049,10 @@ func (p btreeIndexPage) underflow(a btreeStore, root, iroot, parent int64, ph *i
p = p.setLen(pc + 1)
di, si, sz := 1+1*14, 1+0*14, (2*pc+1)*7
copy(p[di:di+sz], p[si:])
p.setChild(0, btreeIndexPage(left).child(lc))
p.setChild(0, left.child(lc))
p.setDataPage(0, btreeIndexPage(pp).dataPage(parentIndex-1))
*index++
btreeIndexPage(pp).setDataPage(parentIndex-1, btreeIndexPage(left).dataPage(lc-1))
btreeIndexPage(pp).setDataPage(parentIndex-1, left.dataPage(lc-1))
left = left.setLen(lc - 1)
if err = a.Realloc(parent, pp); err != nil {
return nil, err
@@ -1479,8 +1479,9 @@ func (p btreeDataPage) split(a btreeStore, root, ph, parent int64, parentIndex,
} else {
nr := newBTreeIndexPage(ph)
nr = nr.insert3(0, rh, rh)
nrh, err := a.Alloc(nr)
if err != nil {
var nrh int64
if nrh, err = a.Alloc(nr); err != nil {
return nil, err
}
@@ -1866,7 +1867,7 @@ func (root btree) String(a btreeStore) string {
}
}
f(int64(iroot), "")
f(iroot, "")
return strings.Join(s, "\n")
}
@@ -2074,8 +2075,9 @@ func (root btree) extract(a btreeStore, dst []byte, c func(a, b []byte) int, key
if ok {
if btreePage(p).isIndex() {
dph := btreeIndexPage(p).dataPage(index)
dp, err := a.Get(dst, dph)
if err != nil {
var dp []byte
if dp, err = a.Get(dst, dph); err != nil {
return nil, err
}
@@ -2088,7 +2090,6 @@ func (root btree) extract(a btreeStore, dst []byte, c func(a, b []byte) int, key
}
if btreeIndexPage(p).len() < kIndex && ph != iroot {
var err error
if p, err = btreeIndexPage(p).underflow(a, int64(root), iroot, parent, &ph, parentIndex, &index); err != nil {
return nil, err
}
@@ -2325,8 +2326,9 @@ func (root btree) clear2(a btreeStore, ph int64) (err error) {
case true:
ip := btreeIndexPage(p)
for i := 0; i <= ip.len(); i++ {
root.clear2(a, ip.child(i))
if err = root.clear2(a, ip.child(i)); err != nil {
return err
}
}
case false:
dp := btreeDataPage(p)

View File

@@ -45,7 +45,7 @@ type ErrPERM struct {
// Error implements the built in error type.
func (e *ErrPERM) Error() string {
return fmt.Sprintf("%s: Operation not permitted", string(e.Src))
return fmt.Sprintf("%s: Operation not permitted", e.Src)
}
// ErrTag represents an ErrILSEQ kind.

View File

@@ -287,16 +287,16 @@ Note: No Allocator method returns io.EOF.
type Allocator struct {
f Filer
flt flt
Compress bool // enables content compression
cache cache
m map[int64]*node
lru lst
mu sync.Mutex
expHit int64
expMiss int64
cacheSz int
hit uint16
miss uint16
mu sync.Mutex
Compress bool // enables content compression
}
// NewAllocator returns a new Allocator. To open an existing file, pass its
@@ -338,7 +338,7 @@ func NewAllocator(f Filer, opts *Options) (a *Allocator, err error) {
}
if _, err = f.WriteAt(b[:], 0); err != nil {
a.f.Rollback()
_ = a.f.Rollback()
return
}
@@ -704,7 +704,7 @@ reloc:
atoms := n2atoms(dlen)
switch atoms {
case 1:
switch tag := first[15]; tag {
switch tag = first[15]; tag {
default:
return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
case tagNotCompressed:
@@ -725,7 +725,7 @@ reloc:
return
}
switch tag := cc[0]; tag {
switch tag = cc[0]; tag {
default:
return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
case tagNotCompressed:
@@ -760,7 +760,7 @@ reloc:
return
}
switch tag := cc[0]; tag {
switch tag = cc[0]; tag {
default:
return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
case tagNotCompressed:
@@ -866,8 +866,8 @@ retry:
}
fh, fa := handle+needAtoms, atoms-needAtoms
sz, err := a.f.Size()
if err != nil {
var sz int64
if sz, err = a.f.Size(); err != nil {
return err
}
@@ -1147,7 +1147,7 @@ func (a *Allocator) makeUsedBlock(dst []byte, b []byte) (w []byte, rqAtoms int,
n2 := len(dst)
if rqAtoms2 := n2atoms(n2); rqAtoms2 < rqAtoms { // compression saved at least a single atom
w, n, rqAtoms, cc = dst, n2, rqAtoms2, tagCompressed
w, rqAtoms, cc = dst, rqAtoms2, tagCompressed
}
}
return
@@ -1206,7 +1206,7 @@ func (a *Allocator) verifyUnused(h, totalAtoms int64, tag byte, log func(error)
}
if atoms < 2 {
err = &ErrILSEQ{Type: ErrLongFreeBlkTooShort, Off: off, Arg: int64(atoms)}
err = &ErrILSEQ{Type: ErrLongFreeBlkTooShort, Off: off, Arg: atoms}
break
}

View File

@@ -7,15 +7,7 @@
package lldb
import (
"fmt"
"github.com/cznic/mathutil"
)
func doubleTrouble(first, second error) error {
return fmt.Errorf("%q. Additionally, while attempting to recover (rollback): %q", first, second)
}
import "github.com/cznic/mathutil"
// A Filer is a []byte-like model of a file or similar entity. It may
// optionally implement support for structural transaction safety. In contrast

vendor/github.com/cznic/lldb/gb.go (generated, vendored, 2 changed lines)

@@ -182,7 +182,7 @@ func EncodeScalars(scalars ...interface{}) (b []byte, err error) {
n := len(x)
if n <= 17 {
b = append(b, byte(gbBytes00+n))
b = append(b, []byte(x)...)
b = append(b, x...)
break
}

View File

@@ -13,12 +13,6 @@ import (
"github.com/cznic/internal/file"
)
const (
pgBits = 16
pgSize = 1 << pgBits
pgMask = pgSize - 1
)
var _ Filer = &MemFiler{}
// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and

View File

@@ -336,19 +336,18 @@ func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
// RollbackFiler is safe for concurrent use by multiple goroutines.
type RollbackFiler struct {
mu sync.RWMutex
inCallback bool
inCallbackMu sync.RWMutex
bitFiler *bitFiler
checkpoint func(int64) error
closed bool
f Filer
parent Filer
tlevel int // transaction nesting level, 0 == not in transaction
writerAt io.WriterAt
// afterRollback, if not nil, is called after performing Rollback
// without errros.
afterRollback func() error
tlevel int // transaction nesting level, 0 == not in transaction
closed bool
inCallback bool
}
// NewRollbackFiler returns a RollbackFiler wrapping f.

View File

@@ -149,7 +149,7 @@ func BitLenUintptr(n uintptr) int {
// PopCountByte returns population count of n (number of bits set in n).
func PopCountByte(n byte) int {
return int(popcnt[byte(n)])
return int(popcnt[n])
}
// PopCountUint16 returns population count of n (number of bits set in n).

View File

@@ -5,7 +5,9 @@
// Package mathutil provides utilities supplementing the standard 'math' and
// 'math/rand' packages.
//
// Compatibility issues
// Release history and compatibility issues
//
// 2016-10-10: New functions QuadPolyDiscriminant and QuadPolyFactors.
//
// 2013-12-13: The following functions have been REMOVED
//
@@ -89,7 +91,7 @@ func GCDUint16(a, b uint16) uint16 {
return a
}
// GCD returns the greatest common divisor of a and b.
// GCDUint32 returns the greatest common divisor of a and b.
func GCDUint32(a, b uint32) uint32 {
for b != 0 {
a, b = b, a%b
@@ -97,7 +99,7 @@ func GCDUint32(a, b uint32) uint32 {
return a
}
// GCD64 returns the greatest common divisor of a and b.
// GCDUint64 returns the greatest common divisor of a and b.
func GCDUint64(a, b uint64) uint64 {
for b != 0 {
a, b = b, a%b
@@ -257,7 +259,7 @@ func ModPowByte(b, e, m byte) byte {
return byte(r)
}
// ModPowByte computes (b^e)%m. It panics for m == 0 || b == e == 0.
// ModPowUint16 computes (b^e)%m. It panics for m == 0 || b == e == 0.
func ModPowUint16(b, e, m uint16) uint16 {
if b == 0 && e == 0 {
panic(0)
@@ -382,7 +384,7 @@ func MulUint128_64(a, b uint64) (hi, lo uint64) {
mid2 := ahi * blo
c1, lo := AddUint128_64(lo, mid1<<w)
c2, lo := AddUint128_64(lo, mid2<<w)
_, hi = AddUint128_64(ahi*bhi, mid1>>w+mid2>>w+uint64(c1+c2))
_, hi = AddUint128_64(ahi*bhi, mid1>>w+mid2>>w+c1+c2)
return
}

View File

@@ -8,14 +8,14 @@ import (
"sort"
)
// Generate the first permutation of data.
// PermutationFirst generates the first permutation of data.
func PermutationFirst(data sort.Interface) {
sort.Sort(data)
}
// Generate the next permutation of data if possible and return true.
// Return false if there is no more permutation left.
// Based on the algorithm described here:
// PermutationNext generates the next permutation of data if possible and
// return true. Return false if there is no more permutation left. Based on
// the algorithm described here:
// http://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
func PermutationNext(data sort.Interface) bool {
var k, l int
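
A short, illustrative sketch of the permutation API documented above (the data values are arbitrary):

package main

import (
	"fmt"
	"sort"

	"github.com/cznic/mathutil"
)

func main() {
	data := sort.IntSlice{3, 1, 2}
	mathutil.PermutationFirst(data) // sorts data into the first permutation: [1 2 3]
	for {
		fmt.Println([]int(data)) // prints the 3! = 6 permutations in lexicographic order
		if !mathutil.PermutationNext(data) {
			break // no permutations left
		}
	}
}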

vendor/github.com/cznic/mathutil/poly.go (generated, vendored, new file, 111 changed lines)

@@ -0,0 +1,111 @@
// Copyright (c) 2016 The mathutil Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mathutil
import (
"fmt"
)
func abs(n int) uint64 {
if n >= 0 {
return uint64(n)
}
return uint64(-n)
}
// QuadPolyDiscriminant returns the discriminant of a quadratic polynomial in
// one variable of the form a*x^2+b*x+c with integer coefficients a, b, c, or
// an error on overflow.
//
// ds is the square of the discriminant. If |ds| is a square number, d is set
// to sqrt(|ds|), otherwise d is < 0.
func QuadPolyDiscriminant(a, b, c int) (ds, d int, _ error) {
if 2*BitLenUint64(abs(b)) > IntBits-1 ||
2+BitLenUint64(abs(a))+BitLenUint64(abs(c)) > IntBits-1 {
return 0, 0, fmt.Errorf("overflow")
}
ds = b*b - 4*a*c
s := ds
if s < 0 {
s = -s
}
d64 := SqrtUint64(uint64(s))
if d64*d64 != uint64(s) {
return ds, -1, nil
}
return ds, int(d64), nil
}
// PolyFactor describes an irreducible factor of a polynomial in one variable
// with integer coefficients P, Q of the form P*x+Q.
type PolyFactor struct {
P, Q int
}
// QuadPolyFactors returns the content and the irreducible factors of the
// primitive part of a quadratic polynomial in one variable with integer
// coefficients a, b, c of the form a*x^2+b*x+c in integers, or an error on
// overflow.
//
// If the factorization in integers does not exist, the return value is (nil,
// nil).
//
// See also:
// https://en.wikipedia.org/wiki/Factorization_of_polynomials#Primitive_part.E2.80.93content_factorization
func QuadPolyFactors(a, b, c int) (content int, primitivePart []PolyFactor, _ error) {
content = int(GCDUint64(abs(a), GCDUint64(abs(b), abs(c))))
switch {
case content == 0:
content = 1
case content > 0:
if a < 0 || a == 0 && b < 0 {
content = -content
}
}
a /= content
b /= content
c /= content
if a == 0 {
if b == 0 {
return content, []PolyFactor{{0, c}}, nil
}
if b < 0 && c < 0 {
b = -b
c = -c
}
if b < 0 {
b = -b
c = -c
}
return content, []PolyFactor{{b, c}}, nil
}
ds, d, err := QuadPolyDiscriminant(a, b, c)
if err != nil {
return 0, nil, err
}
if ds < 0 || d < 0 {
return 0, nil, nil
}
x1num := -b + d
x1denom := 2 * a
gcd := int(GCDUint64(abs(x1num), abs(x1denom)))
x1num /= gcd
x1denom /= gcd
x2num := -b - d
x2denom := 2 * a
gcd = int(GCDUint64(abs(x2num), abs(x2denom)))
x2num /= gcd
x2denom /= gcd
return content, []PolyFactor{{x1denom, -x1num}, {x2denom, -x2num}}, nil
}
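
An illustrative, hand-checked use of the new functions: 2x^2+7x+3 factors as (2x+1)(x+3).

package main

import (
	"fmt"

	"github.com/cznic/mathutil"
)

func main() {
	// Factor 2x^2+7x+3 over the integers.
	content, factors, err := mathutil.QuadPolyFactors(2, 7, 3)
	if err != nil {
		panic(err) // only possible on coefficient overflow
	}
	// content == 1, factors == [{2 1} {1 3}], i.e. (2x+1)(x+3)
	fmt.Println(content, factors)
}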

View File

@@ -126,11 +126,11 @@ func builtinAvg(arg []interface{}, ctx map[interface{}]interface{}) (v interface
case complex64:
return complex64(complex128(x) / complex(float64(data.n), 0)), nil
case complex128:
return complex64(complex128(x) / complex(float64(data.n), 0)), nil
return complex64(x / complex(float64(data.n), 0)), nil
case float32:
return float32(float64(x) / float64(data.n)), nil
case float64:
return float64(x) / float64(data.n), nil
return x / float64(data.n), nil
case int8:
return int8(int64(x) / int64(data.n)), nil
case int16:
@@ -138,7 +138,7 @@ func builtinAvg(arg []interface{}, ctx map[interface{}]interface{}) (v interface
case int32:
return int32(int64(x) / int64(data.n)), nil
case int64:
return int64(int64(x) / int64(data.n)), nil
return x / int64(data.n), nil
case uint8:
return uint8(uint64(x) / data.n), nil
case uint16:
@@ -146,7 +146,7 @@ func builtinAvg(arg []interface{}, ctx map[interface{}]interface{}) (v interface
case uint32:
return uint32(uint64(x) / data.n), nil
case uint64:
return uint64(uint64(x) / data.n), nil
return x / data.n, nil
}
}
@@ -216,9 +216,9 @@ func builtinComplex(arg []interface{}, _ map[interface{}]interface{}) (v interfa
case idealUint:
return idealComplex(complex(float64(re), float64(im.(idealUint)))), nil
case float32:
return complex(float32(re), im.(float32)), nil
return complex(re, im.(float32)), nil
case float64:
return complex(float64(re), im.(float64)), nil
return complex(re, im.(float64)), nil
case int8:
return complex(float64(re), float64(im.(int8))), nil
case int16:

vendor/github.com/cznic/ql/coerce.go (generated, vendored, 26 changed lines)

@@ -93,13 +93,13 @@ func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
//case idealUint:
//case bool:
case complex64:
return complex64(complex(float32(x), 0))
return complex(float32(x), 0)
case complex128:
return complex128(complex(float64(x), 0))
return complex(float64(x), 0)
case float32:
return float32(float64(x))
case float64:
return float64(float64(x))
return float64(x)
//case int8:
//case int16:
//case int32:
@@ -130,9 +130,9 @@ func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
}
//case bool:
case complex64:
return complex64(complex(float32(x), 0))
return complex(float32(x), 0)
case complex128:
return complex128(complex(float64(x), 0))
return complex(float64(x), 0)
case float32:
return float32(int64(x))
case float64:
@@ -150,7 +150,7 @@ func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
return int32(int64(x))
}
case int64:
return int64(int64(x))
return int64(x)
//case string:
case uint8:
if x >= 0 && x <= math.MaxUint8 {
@@ -190,9 +190,9 @@ func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
return idealUint(int64(x))
//case bool:
case complex64:
return complex64(complex(float32(x), 0))
return complex(float32(x), 0)
case complex128:
return complex128(complex(float64(x), 0))
return complex(float64(x), 0)
case float32:
return float32(int64(x))
case float64:
@@ -204,7 +204,7 @@ func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
case int32:
return int32(int64(x))
case int64:
return int64(int64(x))
return int64(x)
//case string:
case uint8:
return uint8(int64(x))
@@ -237,9 +237,9 @@ func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
return idealUint(uint64(x))
//case bool:
case complex64:
return complex64(complex(float32(x), 0))
return complex(float32(x), 0)
case complex128:
return complex128(complex(float64(x), 0))
return complex(float64(x), 0)
case float32:
return float32(uint64(x))
case float64:
@@ -258,7 +258,7 @@ func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
}
case int64:
if x <= math.MaxInt64 {
return int64(int64(x))
return int64(x)
}
//case string:
case uint8:
@@ -274,7 +274,7 @@ func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
return uint32(int64(x))
}
case uint64:
return uint64(uint64(x))
return uint64(x)
case *big.Int:
return big.NewInt(0).SetUint64(uint64(x))
case *big.Rat:

vendor/github.com/cznic/ql/doc.go (generated, vendored, 34 changed lines)

@@ -14,8 +14,13 @@
//
// Change list
//
// 2017-01-10: Release v1.1.0 fixes some bugs and adds a configurable WAL
// headroom.
//
// https://github.com/cznic/ql/issues/140
//
// 2016-07-29: Release v1.0.6 enables alternatively using = instead of == for
// equality oparation.
// equality operation.
//
// https://github.com/cznic/ql/issues/131
//
@@ -279,18 +284,21 @@
//
// The following keywords are reserved and may not be used as identifiers.
//
// ADD COLUMN false int32 ORDER uint16
// ALTER complex128 float int64 OUTER uint32
// AND complex64 float32 int8 RIGHT uint64
// AS CREATE float64 INTO SELECT uint8
// ASC DEFAULT FROM JOIN SET UNIQUE
// BETWEEN DELETE GROUP LEFT string UPDATE
// bigint DESC IF LIMIT TABLE VALUES
// bigrat DISTINCT IN LIKE time WHERE
// blob DROP INDEX NOT true
// bool duration INSERT NULL OR
// BY EXISTS int OFFSET TRUNCATE
// byte EXPLAIN int16 ON uint
// ADD complex128 FROM LEFT string
// ALTER complex64 FULL LIKE TABLE
// AND CREATE GROUP LIMIT time
// AS DEFAULT IF NOT TRANSACTION
// ASC DELETE IN NULL true
// BEGIN DESC INDEX OFFSET TRUNCATE
// BETWEEN DISTINCT INSERT ON uint
// bigint DROP int OR uint16
// bigrat duration int16 ORDER uint32
// blob EXISTS int32 OUTER uint64
// bool EXPLAIN int64 RIGHT uint8
// BY false int8 ROLLBACK UNIQUE
// byte float INTO rune UPDATE
// COLUMN float32 IS SELECT VALUES
// COMMIT float64 JOIN SET WHERE
//
// Keywords are not case sensitive.
//

vendor/github.com/cznic/ql/driver.go (generated, vendored, 50 changed lines)

@@ -14,8 +14,10 @@ import (
"fmt"
"io"
"math/big"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
@@ -144,23 +146,51 @@ func (d *sqlDriver) lock() func() {
// efficient re-use.
//
// The returned connection is only used by one goroutine at a time.
//
// The name argument supports the following URL parameters:
//
// headroom Size of the WAL headroom. See https://github.com/cznic/ql/issues/140.
func (d *sqlDriver) Open(name string) (driver.Conn, error) {
if d != fileDriver && d != memDriver {
switch {
case d == fileDriver:
if !strings.Contains(name, "://") && !strings.HasPrefix(name, "file") {
name = "file://" + name
}
case d == memDriver:
if !strings.Contains(name, "://") && !strings.HasPrefix(name, "memory") {
name = "memory://" + name
}
default:
return nil, fmt.Errorf("open: unexpected/unsupported instance of driver.Driver: %p", d)
}
switch {
case d == fileDriver && strings.HasPrefix(name, "file://"):
name = name[len("file://"):]
case d == fileDriver && strings.HasPrefix(name, "memory://"):
d = memDriver
name = name[len("memory://"):]
name = filepath.ToSlash(name) // Ensure / separated URLs on Windows
uri, err := url.Parse(name)
if err != nil {
return nil, err
}
name = filepath.Clean(name)
if name == "" || name == "." || name == string(os.PathSeparator) {
switch uri.Scheme {
case "file":
// ok
case "memory":
d = memDriver
default:
return nil, fmt.Errorf("open: unexpected/unsupported scheme: %s", uri.Scheme)
}
name = filepath.Clean(filepath.Join(uri.Host, uri.Path))
if d == fileDriver && (name == "" || name == "." || name == string(os.PathSeparator)) {
return nil, fmt.Errorf("invalid DB name %q", name)
}
var headroom int64
if a := uri.Query()["headroom"]; len(a) != 0 {
if headroom, err = strconv.ParseInt(a[0], 10, 64); err != nil {
return nil, err
}
}
defer d.lock()()
db := d.dbs[name]
if db == nil {
@@ -170,7 +200,7 @@ func (d *sqlDriver) Open(name string) (driver.Conn, error) {
case true:
db0, err = OpenMem()
default:
db0, err = OpenFile(name, &Options{CanCreate: true})
db0, err = OpenFile(name, &Options{CanCreate: true, Headroom: headroom})
}
if err != nil {
return nil, err
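
A sketch of passing the new headroom URL parameter described above through database/sql. The registration import path github.com/cznic/ql/driver and the driver name "ql" are assumptions about the upstream package, not shown in this diff:

package main

import (
	"database/sql"
	"log"

	_ "github.com/cznic/ql/driver" // assumed: registers the "ql" driver
)

func main() {
	// Reserve roughly 1 MiB of WAL headroom for the file-backed database.
	db, err := sql.Open("ql", "file://test.db?headroom=1048576")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}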

vendor/github.com/cznic/ql/etc.go (generated, vendored, 78 changed lines)

@@ -145,7 +145,7 @@ func intExpr(x interface{}) (i int64, err error) {
return 0, invNegLO(x)
}
return int64(x), nil
return x, nil
case uint8:
return int64(x), nil
case uint16:
@@ -210,7 +210,7 @@ func limOffExpr(x interface{}) (i uint64, err error) {
case uint32:
return uint64(x), nil
case uint64:
return uint64(x), nil
return x, nil
default:
return 0, fmt.Errorf("non-integer used in LIMIT or OFFSET: %v (value of type %T)", x, x)
}
@@ -318,10 +318,10 @@ func indexExpr(s *string, x interface{}) (i uint64, err error) {
return uint64(x), nil
case uint64:
if s != nil && x >= uint64(len(*s)) {
return 0, invBoundX(*s, uint64(x))
return 0, invBoundX(*s, x)
}
return uint64(x), nil
return x, nil
default:
return 0, fmt.Errorf("non-integer string index %v (value of type %T)", x, x)
}
@@ -429,10 +429,10 @@ func sliceExpr(s *string, x interface{}, mod int) (i uint64, err error) {
return uint64(x), nil
case uint64:
if s != nil && x >= uint64(len(*s)+mod) {
return 0, invSliceBoundX(*s, uint64(x))
return 0, invSliceBoundX(*s, x)
}
return uint64(x), nil
return x, nil
default:
return 0, fmt.Errorf("invalid slice index %s (type %T)", x, x)
}
@@ -529,7 +529,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
//case idealRune:
//case idealUint:
case bool:
return bool(x), nil
return x, nil
//case complex64:
//case complex128:
//case float32:
@@ -561,7 +561,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
return complex(float32(x), 0), nil
//case bool:
case complex64:
return complex64(x), nil
return x, nil
case complex128:
return complex64(x), nil
//case float32:
@@ -595,7 +595,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case complex64:
return complex128(x), nil
case complex128:
return complex128(x), nil
return x, nil
//case float32:
//case float64:
//case int8:
@@ -626,7 +626,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
//case complex64:
//case complex128:
case float32:
return float32(x), nil
return x, nil
case float64:
return float32(x), nil
case int8:
@@ -675,7 +675,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case float32:
return float64(x), nil
case float64:
return float64(x), nil
return x, nil
case int8:
return float64(x), nil
case int16:
@@ -728,7 +728,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case float64:
return int8(x), nil
case int8:
return int8(x), nil
return x, nil
case int16:
return int8(x), nil
case int32:
@@ -777,7 +777,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case int8:
return int16(x), nil
case int16:
return int16(x), nil
return x, nil
case int32:
return int16(x), nil
case int64:
@@ -826,7 +826,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case int16:
return int32(x), nil
case int32:
return int32(x), nil
return x, nil
case int64:
return int32(x), nil
//case string:
@@ -875,7 +875,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case int32:
return int64(x), nil
case int64:
return int64(x), nil
return x, nil
//case string:
case uint8:
return int64(x), nil
@@ -917,7 +917,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case int64:
return string(x), nil
case string:
return string(x), nil
return x, nil
case uint8:
return string(x), nil
case uint16:
@@ -970,7 +970,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
return uint8(x), nil
//case string:
case uint8:
return uint8(x), nil
return x, nil
case uint16:
return uint8(x), nil
case uint32:
@@ -1019,7 +1019,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case uint8:
return uint16(x), nil
case uint16:
return uint16(x), nil
return x, nil
case uint32:
return uint16(x), nil
case uint64:
@@ -1068,7 +1068,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case uint16:
return uint32(x), nil
case uint32:
return uint32(x), nil
return x, nil
case uint64:
return uint32(x), nil
case *big.Int:
@@ -1117,7 +1117,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
case uint32:
return uint64(x), nil
case uint64:
return uint64(x), nil
return x, nil
case *big.Int:
return x.Uint64(), nil
case time.Duration:
@@ -1162,7 +1162,7 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
ii.Quo(ii, rr.Denom())
return ii, nil
case float64:
rr := big.NewRat(1, 1).SetFloat64(float64(x))
rr := big.NewRat(1, 1).SetFloat64(x)
ii := big.NewInt(0).Set(rr.Num())
ii.Quo(ii, rr.Denom())
return ii, nil
@@ -1365,7 +1365,7 @@ func typeCheck(rec []interface{}, cols []*col) (err error) {
rec[i] = complex64(y)
continue
case qComplex128:
rec[i] = complex128(y)
rec[i] = y
continue
case qFloat32, qFloat64, qInt8, qInt16, qInt32, qInt64, qUint8, qUint16, qUint32, qUint64:
return fmt.Errorf("constant %v truncated to real", y)
@@ -1378,13 +1378,13 @@ func typeCheck(rec []interface{}, cols []*col) (err error) {
rec[i] = complex(float32(y), 0)
continue
case qComplex128:
rec[i] = complex(float64(y), 0)
rec[i] = complex(y, 0)
continue
case qFloat32:
rec[i] = float32(y)
continue
case qFloat64:
rec[i] = float64(y)
rec[i] = y
continue
case qInt8:
if math.Floor(y) != y {
@@ -1532,7 +1532,7 @@ func typeCheck(rec []interface{}, cols []*col) (err error) {
return overflow(y, c.typ)
}
rec[i] = int64(y)
rec[i] = y
continue
case qString:
case qUint8:
@@ -1612,7 +1612,7 @@ func typeCheck(rec []interface{}, cols []*col) (err error) {
return overflow(y, c.typ)
}
rec[i] = int64(y)
rec[i] = y
continue
case qString:
case qUint8:
@@ -1719,7 +1719,7 @@ func typeCheck(rec []interface{}, cols []*col) (err error) {
rec[i] = uint32(y)
continue
case qUint64:
rec[i] = uint64(y)
rec[i] = y
continue
case qBigInt:
rec[i] = big.NewInt(0).SetUint64(y)
@@ -1788,7 +1788,7 @@ func collate1(a, b interface{}) int {
return 1
case complex64:
{
x, y := complex64(x), complex64(y)
x, y := complex64(x), y
if x == y {
return 0
}
@@ -1886,7 +1886,7 @@ func collate1(a, b interface{}) int {
}
case uint64:
{
x, y := uint64(x), uint64(y)
x, y := uint64(x), y
if x < y {
return -1
}
@@ -1968,7 +1968,7 @@ func collate1(a, b interface{}) int {
}
case int64:
{
x, y := int64(x), int64(y)
x, y := int64(x), y
if x < y {
return -1
}
@@ -2050,7 +2050,7 @@ func collate1(a, b interface{}) int {
}
case int64:
{
x, y := int64(x), int64(y)
x, y := int64(x), y
if x < y {
return -1
}
@@ -2106,7 +2106,7 @@ func collate1(a, b interface{}) int {
}
case float64:
{
x, y := float64(x), float64(y)
x, y := float64(x), y
if x < y {
return -1
}
@@ -2144,7 +2144,7 @@ func collate1(a, b interface{}) int {
return 1
case idealComplex:
{
x, y := complex64(x), complex64(y)
x, y := x, complex64(y)
if x == y {
return 0
}
@@ -2190,7 +2190,7 @@ func collate1(a, b interface{}) int {
return 1
case idealComplex:
{
x, y := complex128(x), complex128(y)
x, y := x, complex128(y)
if x == y {
return 0
}
@@ -2228,7 +2228,7 @@ func collate1(a, b interface{}) int {
return 1
case idealFloat:
{
x, y := float32(x), float32(y)
x, y := x, float32(y)
if x < y {
return -1
}
@@ -2258,7 +2258,7 @@ func collate1(a, b interface{}) int {
return 1
case idealFloat:
{
x, y := float64(x), float64(y)
x, y := x, float64(y)
if x < y {
return -1
}
@@ -2378,7 +2378,7 @@ func collate1(a, b interface{}) int {
return 1
case idealInt:
{
x, y := int64(x), int64(y)
x, y := x, int64(y)
if x < y {
return -1
}
@@ -2537,7 +2537,7 @@ func collate1(a, b interface{}) int {
return 1
case idealInt:
{
x, y := uint64(x), uint64(y)
x, y := x, uint64(y)
if x < y {
return -1
}
@@ -2550,7 +2550,7 @@ func collate1(a, b interface{}) int {
}
case idealUint:
{
x, y := uint64(x), uint64(y)
x, y := x, uint64(y)
if x < y {
return -1
}

vendor/github.com/cznic/ql/expr.go (generated, vendored, 4 changed lines)

@@ -1958,7 +1958,7 @@ func (o *binaryOperation) eval(execCtx *execCtx, ctx map[interface{}]interface{}
case uint32:
cnt = uint64(y)
case uint64:
cnt = uint64(y)
cnt = y
default:
return invOp2(a, b, op)
}
@@ -2057,7 +2057,7 @@ func (o *binaryOperation) eval(execCtx *execCtx, ctx map[interface{}]interface{}
case uint32:
cnt = uint64(y)
case uint64:
cnt = uint64(y)
cnt = y
default:
return invOp2(a, b, op)
}

vendor/github.com/cznic/ql/file.go (generated, vendored, 72 changed lines)

@@ -89,7 +89,7 @@ func OpenFile(name string, opt *Options) (db *DB, err error) {
}
}
fi, err := newFileFromOSFile(f) // always ACID
fi, err := newFileFromOSFile(f, opt.Headroom) // always ACID
if err != nil {
return
}
@@ -101,6 +101,8 @@ func OpenFile(name string, opt *Options) (db *DB, err error) {
}
}
fi.removeEmptyWAL = opt.RemoveEmptyWAL
return newDB(fi)
}
@@ -126,10 +128,25 @@ func OpenFile(name string, opt *Options) (db *DB, err error) {
// interface.
//
// If TempFile is nil it defaults to ioutil.TempFile.
//
// Headroom
//
// Headroom selects the minimum size a WAL file will have. The "extra"
// allocated file space serves as a headroom. Commits that fit into the
// headroom should not fail due to 'not enough space on the volume' errors. The
// headroom parameter is first rounded up to a non-negative multiple of the
// size of the lldb.Allocator atom.
//
// RemoveEmptyWAL
//
// RemoveEmptyWAL controls whether empty WAL files should be deleted on
// clean exit.
type Options struct {
CanCreate bool
OSFile lldb.OSFile
TempFile func(dir, prefix string) (f lldb.OSFile, err error)
CanCreate bool
OSFile lldb.OSFile
TempFile func(dir, prefix string) (f lldb.OSFile, err error)
Headroom int64
RemoveEmptyWAL bool
}
type fileBTreeIterator struct {
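
A minimal sketch of the Headroom and RemoveEmptyWAL options described above, used through the package API; the file name and the sizes are illustrative:

package main

import (
	"log"

	"github.com/cznic/ql"
)

func main() {
	db, err := ql.OpenFile("test.db", &ql.Options{
		CanCreate:      true,
		Headroom:       1 << 20, // rounded up to a multiple of the allocator atom
		RemoveEmptyWAL: true,    // delete a zero-size WAL on clean Close
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}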
@@ -258,7 +275,7 @@ func infer(from []interface{}, to *[]*col) {
case time.Duration:
c.typ = qDuration
case chunk:
vals, err := lldb.DecodeScalars([]byte(x.b))
vals, err := lldb.DecodeScalars(x.b)
if err != nil {
panic(err)
}
@@ -374,19 +391,20 @@ func (t *fileTemp) Set(k, v []interface{}) (err error) {
}
type file struct {
a *lldb.Allocator
codec *gobCoder
f lldb.Filer
f0 lldb.OSFile
id int64
lck io.Closer
mu sync.Mutex
name string
tempFile func(dir, prefix string) (f lldb.OSFile, err error)
wal *os.File
a *lldb.Allocator
codec *gobCoder
f lldb.Filer
f0 lldb.OSFile
id int64
lck io.Closer
mu sync.Mutex
name string
tempFile func(dir, prefix string) (f lldb.OSFile, err error)
wal *os.File
removeEmptyWAL bool // Whether empty WAL files should be removed on close
}
func newFileFromOSFile(f lldb.OSFile) (fi *file, err error) {
func newFileFromOSFile(f lldb.OSFile, headroom int64) (fi *file, err error) {
nm := lockName(f.Name())
lck, err := lock.Lock(nm)
if err != nil {
@@ -434,9 +452,7 @@ func newFileFromOSFile(f lldb.OSFile) (fi *file, err error) {
return nil, err
}
if st.Size() != 0 {
return nil, fmt.Errorf("(file-001) non empty WAL file %s exists", wn)
}
closew = st.Size() == 0
}
info, err := f.Stat()
@@ -454,7 +470,7 @@ func newFileFromOSFile(f lldb.OSFile) (fi *file, err error) {
filer := lldb.Filer(lldb.NewOSFiler(f))
filer = lldb.NewInnerFiler(filer, 16)
if filer, err = lldb.NewACIDFiler(filer, w); err != nil {
if filer, err = lldb.NewACIDFiler(filer, w, lldb.MinWAL(headroom)); err != nil {
return nil, err
}
@@ -508,7 +524,7 @@ func newFileFromOSFile(f lldb.OSFile) (fi *file, err error) {
filer := lldb.Filer(lldb.NewOSFiler(f))
filer = lldb.NewInnerFiler(filer, 16)
if filer, err = lldb.NewACIDFiler(filer, w); err != nil {
if filer, err = lldb.NewACIDFiler(filer, w, lldb.MinWAL(headroom)); err != nil {
return nil, err
}
@@ -589,12 +605,22 @@ func (s *file) Close() (err error) {
es := s.f0.Sync()
ef := s.f0.Close()
var ew error
var ew, estat, eremove error
if s.wal != nil {
remove := false
wn := s.wal.Name()
if s.removeEmptyWAL {
var stat os.FileInfo
stat, estat = s.wal.Stat()
remove = stat.Size() == 0
}
ew = s.wal.Close()
if remove {
eremove = os.Remove(wn)
}
}
el := s.lck.Close()
return errSet(&err, es, ef, ew, el)
return errSet(&err, es, ef, ew, el, estat, eremove)
}
func (s *file) Name() string { return s.name }

vendor/github.com/cznic/ql/ql.go (generated, vendored, 56 changed lines)

@@ -771,16 +771,17 @@ func cols2meta(f []*col) (s string) {
// DB represent the database capable of executing QL statements.
type DB struct {
cc *TCtx // Current transaction context
exprCache map[string]expression
exprCacheMu sync.Mutex
hasIndex2 int // 0: nope, 1: in progress, 2: yes.
isMem bool
mu sync.Mutex
queue []chan struct{}
root *root
rw bool // DB FSM
rwmu sync.RWMutex
store storage
tnl int // Transaction nesting level
exprCache map[string]expression
exprCacheMu sync.Mutex
hasIndex2 int // 0: nope, 1: in progress, 2: yes.
}
var selIndex2Expr = MustCompile("select Expr from __Index2_Expr where Index2_ID == $1")
@@ -1086,7 +1087,7 @@ func mustCompile(src string) List {
return list
}
// Execute executes statements in a list while substituting QL paramaters from
// Execute executes statements in a list while substituting QL parameters from
// arg.
//
// The resulting []Recordset corresponds to the SELECT FROM statements in the
@@ -1214,6 +1215,15 @@ func (db *DB) Execute(ctx *TCtx, l List, arg ...interface{}) (rs []Recordset, in
return
}
func (db *DB) muUnlock() {
if n := len(db.queue); n != 0 {
db.queue[0] <- struct{}{}
copy(db.queue, db.queue[1:])
db.queue = db.queue[:n-1]
}
db.mu.Unlock()
}
func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tnlb int, err error) {
db.mu.Lock()
tnla = db.tnl
@@ -1222,7 +1232,7 @@ func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tn
case false:
switch s.(type) {
case beginTransactionStmt:
defer db.mu.Unlock()
defer db.muUnlock()
if pc == nil {
return nil, tnla, tnlb, errors.New("BEGIN TRANSACTION: cannot start a transaction in nil TransactionCtx")
}
@@ -1239,19 +1249,19 @@ func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tn
db.rw = true
return
case commitStmt:
defer db.mu.Unlock()
defer db.muUnlock()
return nil, tnla, tnlb, errCommitNotInTransaction
case rollbackStmt:
defer db.mu.Unlock()
defer db.muUnlock()
return nil, tnla, tnlb, errRollbackNotInTransaction
default:
if s.isUpdating() {
db.mu.Unlock()
db.muUnlock()
return nil, tnla, tnlb, fmt.Errorf("attempt to update the DB outside of a transaction")
}
db.rwmu.RLock() // can safely grab before Unlock
db.mu.Unlock()
db.muUnlock()
defer db.rwmu.RUnlock()
rs, err = s.exec(&execCtx{db, arg}) // R/O tctx
return rs, tnla, tnlb, err
@@ -1259,7 +1269,7 @@ func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tn
default: // case true:
switch s.(type) {
case beginTransactionStmt:
defer db.mu.Unlock()
defer db.muUnlock()
if pc == nil {
return nil, tnla, tnlb, errBeginTransNoCtx
@@ -1267,12 +1277,16 @@ func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tn
if pc != db.cc {
for db.rw {
db.mu.Unlock() // Transaction isolation
ch := make(chan struct{}, 1)
db.queue = append(db.queue, ch)
db.mu.Unlock()
<-ch
db.mu.Lock()
}
db.rw = true
db.rwmu.Lock()
}
if err = db.store.BeginTransaction(); err != nil {
@@ -1285,7 +1299,7 @@ func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tn
tnlb = db.tnl
return
case commitStmt:
defer db.mu.Unlock()
defer db.muUnlock()
if pc != db.cc {
return nil, tnla, tnlb, fmt.Errorf("invalid passed transaction context")
}
@@ -1303,7 +1317,7 @@ func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tn
db.rwmu.Unlock()
return
case rollbackStmt:
defer db.mu.Unlock()
defer db.muUnlock()
defer func() { pc.LastInsertID = db.root.lastInsertID }()
if pc != db.cc {
return nil, tnla, tnlb, fmt.Errorf("invalid passed transaction context")
@@ -1324,18 +1338,18 @@ func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tn
default:
if pc == nil {
if s.isUpdating() {
db.mu.Unlock()
db.muUnlock()
return nil, tnla, tnlb, fmt.Errorf("attempt to update the DB outside of a transaction")
}
db.mu.Unlock() // must Unlock before RLock
db.muUnlock() // must Unlock before RLock
db.rwmu.RLock()
defer db.rwmu.RUnlock()
rs, err = s.exec(&execCtx{db, arg})
return rs, tnla, tnlb, err
}
defer db.mu.Unlock()
defer db.muUnlock()
defer func() { pc.LastInsertID = db.root.lastInsertID }()
if pc != db.cc {
return nil, tnla, tnlb, fmt.Errorf("invalid passed transaction context")
@@ -1361,7 +1375,7 @@ func (db *DB) Flush() (err error) {
// Close will close the DB. Successful Close is idempotent.
func (db *DB) Close() error {
db.mu.Lock()
defer db.mu.Unlock()
defer db.muUnlock()
if db.store == nil {
return nil
}
@@ -1380,17 +1394,17 @@ func (db *DB) do(r recordset, f func(data []interface{}) (bool, error)) (err err
switch db.rw {
case false:
db.rwmu.RLock() // can safely grab before Unlock
db.mu.Unlock()
db.muUnlock()
defer db.rwmu.RUnlock()
default: // case true:
if r.tx == nil {
db.mu.Unlock() // must Unlock before RLock
db.muUnlock() // must Unlock before RLock
db.rwmu.RLock()
defer db.rwmu.RUnlock()
break
}
defer db.mu.Unlock()
defer db.muUnlock()
if r.tx != db.cc {
return fmt.Errorf("invalid passed transaction context")
}
@@ -1569,7 +1583,7 @@ func (db *DB) info() (r *DbInfo, err error) {
// to obtain the result.
func (db *DB) Info() (r *DbInfo, err error) {
db.mu.Lock()
defer db.mu.Unlock()
defer db.muUnlock()
return db.info()
}

View File

@@ -11,7 +11,10 @@ import (
"encoding/base64"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
@@ -643,3 +646,42 @@ func prettyPrint(protect map[interface{}]struct{}, sf Formatter, prefix, suffix
sf.Format("%u}" + suffix)
}
}
// Gopath returns the value of the $GOPATH environment variable or its default
// value if not set.
func Gopath() string {
if r := os.Getenv("GOPATH"); r != "" {
return r
}
// go1.8: https://github.com/golang/go/blob/74628a8b9f102bddd5078ee426efe0fd57033115/doc/code.html#L122
switch runtime.GOOS {
case "plan9":
return os.Getenv("home")
case "windows":
return filepath.Join(os.Getenv("USERPROFILE"), "go")
default:
return filepath.Join(os.Getenv("HOME"), "go")
}
}
// ImportPath returns the import path of the caller or an error, if any.
func ImportPath() (string, error) {
_, file, _, ok := runtime.Caller(1)
if !ok {
return "", fmt.Errorf("runtime.Caller failed")
}
gopath := Gopath()
for _, v := range filepath.SplitList(gopath) {
gp := filepath.Join(v, "src")
path, err := filepath.Rel(gp, file)
if err != nil {
continue
}
return filepath.Dir(path), nil
}
return "", fmt.Errorf("cannot determine import path using GOPATH=%s", gopath)
}