vendor: Add dependencies for discosrv

This commit is contained in:
Jakob Borg
2016-05-31 22:35:35 +02:00
parent eacae83886
commit f9e2623fdc
126 changed files with 60401 additions and 0 deletions

27
vendor/github.com/cznic/ql/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,27 @@
Copyright (c) 2014 The ql Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

155
vendor/github.com/cznic/ql/blob.go generated vendored Normal file
View File

@@ -0,0 +1,155 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ql
import (
"bytes"
"encoding/gob"
"math/big"
"sync"
"time"
)
// shortBlob is the size threshold (in bytes) below which blob-typed
// values are stored inline rather than chunked externally.
const shortBlob = 256 // bytes

// Sample values of every gob-handled type; newGobCoder encodes/decodes
// them once so the shared streams are primed with type descriptors.
var (
	gobInitDuration = time.Duration(278)
	gobInitInt      = big.NewInt(42)
	gobInitRat      = big.NewRat(355, 113)
	gobInitTime     time.Time
)
// init fills gobInitTime with a fixed timestamp carrying a non-UTC,
// fixed-offset zone, then builds a throwaway gobCoder so any failure in
// the gob priming sequence panics at startup instead of at first use.
func init() {
	var err error
	if gobInitTime, err = time.ParseInLocation(
		"Jan 2, 2006 at 3:04pm (MST)",
		"Jul 9, 2012 at 5:02am (CEST)",
		time.FixedZone("XYZ", 1234),
	); err != nil {
		panic(err)
	}
	newGobCoder()
}
// gobCoder serializes blob-backed values (big.Int, big.Rat, time.Time,
// time.Duration) to and from []byte using one long-lived gob
// encoder/decoder pair sharing a single buffer. mu serializes all use.
type gobCoder struct {
	buf bytes.Buffer
	dec *gob.Decoder
	enc *gob.Encoder
	mu  sync.Mutex
}
// newGobCoder returns a gobCoder whose encoder and decoder streams have
// been primed with the type descriptors of every supported value kind,
// so later encodes/decodes never transmit descriptors again.
func newGobCoder() *gobCoder {
	c := &gobCoder{}
	c.enc = gob.NewEncoder(&c.buf)

	mustEncode := func(v interface{}) {
		if err := c.enc.Encode(v); err != nil {
			panic(err)
		}
	}
	mustEncode(gobInitInt)
	mustEncode(gobInitRat)
	mustEncode(gobInitTime)
	mustEncode(gobInitDuration)

	c.dec = gob.NewDecoder(&c.buf)
	mustDecode := func(v interface{}) {
		if err := c.dec.Decode(v); err != nil {
			panic(err)
		}
	}
	mustDecode(big.NewInt(0))
	mustDecode(big.NewRat(3, 5))
	t := time.Now()
	mustDecode(&t)
	var dur time.Duration
	mustDecode(&dur)
	return c
}
// isBlobType reports whether v must be stored as a blob and, if so,
// which ql Type tag it maps to. Non-blob values yield (false, -1).
func isBlobType(v interface{}) (bool, Type) {
	switch v.(type) {
	case time.Duration:
		return true, Duration
	case time.Time:
		return true, Time
	case *big.Rat:
		return true, BigRat
	case *big.Int:
		return true, BigInt
	case []byte:
		return true, Blob
	}
	return false, -1
}
// encode serializes v to bytes. []byte values pass through untouched;
// everything else goes through the shared gob encoder. NOTE(review):
// the returned slice aliases g.buf's storage and is only valid until
// the next encode/decode — confirm callers copy or persist immediately.
func (g *gobCoder) encode(v interface{}) ([]byte, error) {
	g.mu.Lock()
	defer g.mu.Unlock()

	g.buf.Reset()
	var err error
	switch x := v.(type) {
	case []byte:
		return x, nil
	case *big.Int:
		err = g.enc.Encode(x)
	case *big.Rat:
		err = g.enc.Encode(x)
	case time.Time:
		err = g.enc.Encode(x)
	case time.Duration:
		// Durations travel as their int64 nanosecond count.
		err = g.enc.Encode(int64(x))
	default:
		panic("internal error 002")
	}
	return g.buf.Bytes(), err
}
// decode reverses encode for a value of the given ql type tag. Raw
// blobs are returned as-is; other tags are gob-decoded from b.
func (g *gobCoder) decode(b []byte, typ int) (interface{}, error) {
	g.mu.Lock()
	defer g.mu.Unlock()

	g.buf.Reset()
	g.buf.Write(b)
	switch typ {
	case qBlob:
		return b, nil
	case qBigInt:
		x := big.NewInt(0)
		err := g.dec.Decode(&x)
		return x, err
	case qBigRat:
		x := big.NewRat(1, 1)
		err := g.dec.Decode(&x)
		return x, err
	case qTime:
		var x time.Time
		err := g.dec.Decode(&x)
		return x, err
	case qDuration:
		var x int64
		err := g.dec.Decode(&x)
		return time.Duration(x), err
	}
	panic("internal error 003")
}

725
vendor/github.com/cznic/ql/btree.go generated vendored Normal file
View File

@@ -0,0 +1,725 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ql
import (
"io"
)
const (
	// B+tree order parameters: an index page holds up to 2*kx+1
	// children; a data page holds up to 2*kd items.
	kx = 128 //DONE benchmark tune this number if using custom key/value type(s).
	kd = 64  //DONE benchmark tune this number if using custom key/value type(s).
)
type (
	// cmp compares a and b. Return value is:
	//
	//	< 0 if a < b
	//	  0 if a == b
	//	> 0 if a > b
	//
	cmp func(a, b []interface{}) int

	d struct { // data page
		c int          // number of items in use
		d [2*kd + 1]de // items; one slot of slack for insert-before-split
		n *d           // next data page in key order
		p *d           // previous data page in key order
	}

	de struct { // d element
		k []interface{} // key
		v []interface{} // value
	}

	// enumerator is a cursor over the leaf chain; it re-seeks when the
	// tree version changes underneath it.
	enumerator struct {
		err error
		hit bool
		i   int
		k   []interface{}
		q   *d
		t   *tree
		ver int64
	}

	// tree is a B+tree.
	tree struct {
		c     int         // total number of items
		cmp   cmp         // key collation function
		first *d          // leftmost data page
		last  *d          // rightmost data page
		r     interface{} // root: *x, *d, or nil when empty
		ver   int64       // bumped on mutation; invalidates enumerators
	}

	xe struct { // x element
		ch  interface{} // child: *x or *d
		sep *d          // separator data page
	}

	x struct { // index page
		c int          // number of separators in use
		x [2*kx + 2]xe // children/separators; slack for insert-before-split
	}
)
var ( // R/O zero values
	zd  d   // wipes data pages for the GC
	zde de  // wipes single d elements
	zx  x   // wipes index pages
	zxe xe  // wipes single x elements
)
// clr recursively zeroes the subtree rooted at q so the garbage
// collector can reclaim everything it referenced.
func clr(q interface{}) {
	if page, ok := q.(*x); ok {
		// Ch0 Sep0 ... Chn-1 Sepn-1 Chn
		for i := 0; i <= page.c; i++ {
			clr(page.x[i].ch)
		}
		*page = zx // GC
		return
	}
	if page, ok := q.(*d); ok {
		*page = zd // GC
	}
}
// -------------------------------------------------------------------------- x

// newX returns a fresh index page whose leftmost child is ch0.
func newX(ch0 interface{}) *x {
	page := &x{}
	page.x[0].ch = ch0
	return page
}
// extract removes separator i (and its right child slot) from q,
// shifting the tail left and nil-ing vacated slots for the GC.
func (q *x) extract(i int) {
	q.c--
	if i < q.c {
		copy(q.x[i:], q.x[i+1:q.c+1])
		// The child pointer one past the copied run is carried over
		// separately, then the vacated slots are cleared.
		q.x[q.c].ch = q.x[q.c+1].ch
		q.x[q.c].sep = nil // GC
		q.x[q.c+1] = zxe   // GC
	}
}
// insert places separator d with right child ch at position i of q,
// shifting existing separators right. The caller guarantees q has room.
func (q *x) insert(i int, d *d, ch interface{}) *x {
	c := q.c
	if i < c {
		// Open a gap: move the rightmost child, then the separator run.
		q.x[c+1].ch = q.x[c].ch
		copy(q.x[i+2:], q.x[i+1:c])
		q.x[i+1].sep = q.x[i].sep
	}
	c++
	q.c = c
	q.x[i].sep = d
	q.x[i+1].ch = ch
	return q
}
// siblings returns the data pages immediately left and right of child i
// of q; either may be nil at an edge, and both are nil when i < 0.
func (q *x) siblings(i int) (l, r *d) {
	if i < 0 {
		return nil, nil
	}
	if i > 0 {
		l = q.x[i-1].ch.(*d)
	}
	if i < q.c {
		r = q.x[i+1].ch.(*d)
	}
	return l, r
}
// -------------------------------------------------------------------------- d

// mvL moves the first c items of r to the end of l (rotate data left
// between sibling pages).
func (l *d) mvL(r *d, c int) {
	copy(l.d[l.c:], r.d[:c])
	copy(r.d[:], r.d[c:r.c])
	l.c += c
	r.c -= c
}
// mvR moves the last c items of l to the front of r (rotate data right
// between sibling pages).
func (l *d) mvR(r *d, c int) {
	copy(r.d[c:], r.d[:r.c])
	copy(r.d[:c], l.d[l.c-c:])
	r.c += c
	l.c -= c
}
// ----------------------------------------------------------------------- tree

// treeNew returns a newly created, empty tree. The compare function is
// used for key collation.
func treeNew(c cmp) *tree {
	return &tree{cmp: c}
}
// Clear removes all K/V pairs from the tree and releases every page to
// the garbage collector.
func (t *tree) Clear() {
	if t.r == nil {
		return
	}

	clr(t.r)
	t.c = 0
	t.first = nil
	t.last = nil
	t.r = nil
	t.ver++
}
// cat concatenates data page r into its left sibling q (children of
// parent p around separator pi), unlinks r from the leaf chain, and
// either removes the separator from p or collapses the root.
func (t *tree) cat(p *x, q, r *d, pi int) {
	t.ver++
	q.mvL(r, r.c)
	if r.n != nil {
		r.n.p = q
	} else {
		t.last = q
	}
	q.n = r.n
	if p.c > 1 {
		p.extract(pi)
		p.x[pi].ch = q
	} else {
		// p held a single separator: q becomes the new root.
		t.r = q
	}
}
// catX concatenates index page r into q (children of p around separator
// pi), pulling the separator at p.x[pi] down between them; if p thereby
// empties, q becomes the new root.
func (t *tree) catX(p, q, r *x, pi int) {
	t.ver++
	q.x[q.c].sep = p.x[pi].sep
	copy(q.x[q.c+1:], r.x[:r.c])
	q.c += r.c + 1
	q.x[q.c].ch = r.x[r.c].ch
	if p.c > 1 {
		p.c--
		pc := p.c
		if pi < pc {
			// Close the gap in p and clear vacated slots for the GC.
			p.x[pi].sep = p.x[pi+1].sep
			copy(p.x[pi+1:], p.x[pi+2:pc+1])
			p.x[pc].ch = p.x[pc+1].ch
			p.x[pc].sep = nil  // GC
			p.x[pc+1].ch = nil // GC
		}
		return
	}

	t.r = q
}
// Delete removes the k's KV pair, if it exists, in which case Delete
// returns true.
//
// The descent pre-emptively refills any index page at minimum occupancy
// (underflowX) so the eventual leaf deletion never propagates upward.
func (t *tree) Delete(k []interface{}) (ok bool) {
	pi := -1
	var p *x
	q := t.r
	if q == nil {
		return
	}

	for {
		var i int
		i, ok = t.find(q, k)
		if ok {
			switch z := q.(type) {
			case *x:
				// k matched a separator: either steal the first item of
				// the separator's data page, or descend to delete it in
				// place when that page is at minimum occupancy.
				dp := z.x[i].sep
				switch {
				case dp.c > kd:
					t.extract(dp, 0)
				default:
					if z.c < kx && q != t.r {
						t.underflowX(p, &z, pi, &i)
					}
					pi = i + 1
					p = z
					q = z.x[pi].ch
					ok = false
					continue
				}
			case *d:
				t.extract(z, i)
				if z.c >= kd {
					return
				}

				if q != t.r {
					t.underflow(p, z, pi)
				} else if t.c == 0 {
					t.Clear()
				}
			}
			return
		}

		switch z := q.(type) {
		case *x:
			if z.c < kx && q != t.r {
				t.underflowX(p, &z, pi, &i)
			}
			pi = i
			p = z
			q = z.x[i].ch
		case *d:
			// Reached a leaf without finding k: nothing to delete.
			return
		}
	}
}
// extract removes item i from data page q and decrements the tree's
// item count. (The commented-out pieces are scaffolding for a future
// Extract method that would also return the removed value.)
func (t *tree) extract(q *d, i int) { // (r []interface{}) {
	t.ver++
	//r = q.d[i].v // prepared for Extract
	q.c--
	if i < q.c {
		copy(q.d[i:], q.d[i+1:q.c+1])
	}
	q.d[q.c] = zde // GC
	t.c--
	return
}
// find binary-searches page q for key k. It returns the exact position
// and true on a hit; otherwise the child/insert index where the search
// should continue and false.
func (t *tree) find(q interface{}, k []interface{}) (int, bool) {
	lo := 0
	switch z := q.(type) {
	case *x:
		hi := z.c - 1
		for lo <= hi {
			mid := (lo + hi) >> 1
			switch r := t.cmp(k, z.x[mid].sep.d[0].k); {
			case r > 0:
				lo = mid + 1
			case r == 0:
				return mid, true
			default:
				hi = mid - 1
			}
		}
	case *d:
		hi := z.c - 1
		for lo <= hi {
			mid := (lo + hi) >> 1
			switch r := t.cmp(k, z.d[mid].k); {
			case r > 0:
				lo = mid + 1
			case r == 0:
				return mid, true
			default:
				hi = mid - 1
			}
		}
	}
	return lo, false
}
// First returns the first item of the tree in the key collating order, or
// (nil, nil) if the tree is empty.
func (t *tree) First() ([]interface{}, []interface{}) {
	q := t.first
	if q == nil {
		return nil, nil
	}
	e := &q.d[0]
	return e.k, e.v
}
// Get returns the value associated with k and true if it exists. Otherwise Get
// returns (nil, false).
func (t *tree) Get(k []interface{}) ([]interface{}, bool) {
	q := t.r
	for q != nil {
		i, hit := t.find(q, k)
		switch z := q.(type) {
		case *x:
			if hit {
				return z.x[i].sep.d[0].v, true
			}
			q = z.x[i].ch
		case *d:
			if hit {
				return z.d[i].v, true
			}
			return nil, false
		default:
			return nil, false
		}
	}
	return nil, false
}
// insert places (k, v) at position i of data page q, shifting the tail
// right, and bumps the tree's size and version. The caller guarantees q
// has room.
func (t *tree) insert(q *d, i int, k []interface{}, v []interface{}) *d {
	t.ver++
	c := q.c
	if i < c {
		copy(q.d[i+1:], q.d[i:c])
	}
	c++
	q.c = c
	q.d[i].k, q.d[i].v = k, v
	t.c++
	return q
}
// Last returns the last item of the tree in the key collating order, or (nil,
// nil) if the tree is empty.
func (t *tree) Last() ([]interface{}, []interface{}) {
	q := t.last
	if q == nil {
		return nil, nil
	}
	e := &q.d[q.c-1]
	return e.k, e.v
}
// Len returns the number of items in the tree. The count is maintained
// incrementally by insert/extract, so this is O(1).
func (t *tree) Len() int {
	return t.c
}
// overflow handles inserting (k, v) at position i of a full data page q
// (child pi of p): it first tries to rotate one item into a non-full
// sibling and only splits q when both siblings are full.
func (t *tree) overflow(p *x, q *d, pi, i int, k []interface{}, v []interface{}) {
	t.ver++
	l, r := p.siblings(pi)
	if l != nil && l.c < 2*kd && i > 0 {
		l.mvL(q, 1)
		t.insert(q, i-1, k, v)
		return
	}

	if r != nil && r.c < 2*kd {
		if i < 2*kd {
			q.mvR(r, 1)
			t.insert(q, i, k, v)
		} else {
			// Inserting past the end: the new item goes directly to r.
			t.insert(r, 0, k, v)
		}
		return
	}

	t.split(p, q, pi, i, k, v)
}
// Seek returns an enumerator positioned on an item such that k >=
// item's key. ok reports whether k == item.key. The enumerator's
// position may be past the last item in the tree.
func (t *tree) Seek(k []interface{}) (*enumerator, bool) {
	q := t.r
	if q == nil {
		return &enumerator{k: k, t: t, ver: t.ver}, false
	}

	for {
		i, hit := t.find(q, k)
		if hit {
			switch z := q.(type) {
			case *x:
				return &enumerator{hit: true, k: k, q: z.x[i].sep, t: t, ver: t.ver}, true
			case *d:
				return &enumerator{hit: true, i: i, k: k, q: z, t: t, ver: t.ver}, true
			}
		}

		switch z := q.(type) {
		case *x:
			q = z.x[i].ch
		case *d:
			return &enumerator{i: i, k: k, q: z, t: t, ver: t.ver}, false
		}
	}
}
// SeekFirst returns an enumerator positioned on the first KV pair in the tree,
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
func (t *tree) SeekFirst() (*enumerator, error) {
	q := t.first
	if q == nil {
		return nil, io.EOF
	}
	e := &enumerator{hit: true, k: q.d[0].k, q: q, t: t, ver: t.ver}
	return e, nil
}
// SeekLast returns an enumerator positioned on the last KV pair in the tree,
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
func (t *tree) SeekLast() (*enumerator, error) {
	q := t.last
	if q == nil {
		return nil, io.EOF
	}
	last := q.c - 1
	e := &enumerator{hit: true, i: last, k: q.d[last].k, q: q, t: t, ver: t.ver}
	return e, nil
}
// Set sets the value associated with k.
//
// The descent pre-emptively splits any over-full index page (splitX) so
// the final leaf insert never needs to propagate upward.
func (t *tree) Set(k []interface{}, v []interface{}) {
	pi := -1
	var p *x
	q := t.r
	if q != nil {
		for {
			i, ok := t.find(q, k)
			if ok {
				// Key already exists: overwrite the value in place.
				switch z := q.(type) {
				case *x:
					z.x[i].sep.d[0].v = v
				case *d:
					z.d[i].v = v
				}
				return
			}

			switch z := q.(type) {
			case *x:
				if z.c > 2*kx {
					t.splitX(p, &z, pi, &i)
				}
				pi = i
				p = z
				q = z.x[i].ch
			case *d:
				switch {
				case z.c < 2*kd:
					t.insert(z, i, k, v)
				default:
					t.overflow(p, z, pi, i, k, v)
				}
				return
			}
		}
	}

	// Empty tree: create the root data page.
	z := t.insert(&d{}, 0, k, v)
	t.r, t.first, t.last = z, z, z
	return
}
// split divides full data page q (child pi of p, or the root when
// pi < 0) into q and a new right sibling r, links r into the leaf chain
// and the parent, then performs the pending insert of (k, v) at logical
// position i into whichever half it now belongs.
func (t *tree) split(p *x, q *d, pi, i int, k []interface{}, v []interface{}) {
	t.ver++
	r := &d{}
	if q.n != nil {
		r.n = q.n
		r.n.p = r
	} else {
		t.last = r
	}
	q.n = r
	r.p = q

	copy(r.d[:], q.d[kd:2*kd])
	for i := range q.d[kd:] {
		q.d[kd+i] = zde // GC
	}
	q.c = kd
	r.c = kd
	if pi >= 0 {
		p.insert(pi, r, r)
	} else {
		// Splitting the root data page: grow the tree by one level.
		t.r = newX(q).insert(0, r, r)
	}
	if i > kd {
		t.insert(r, i-kd, k, v)
		return
	}

	t.insert(q, i, k, v)
}
// splitX divides over-full index page *pp (child pi of p, or the root
// when pi < 0) around its median separator, redirecting the caller's
// page pointer *pp and descent index *i into the new right half when
// the target lies there.
func (t *tree) splitX(p *x, pp **x, pi int, i *int) {
	t.ver++
	q := *pp
	r := &x{}
	copy(r.x[:], q.x[kx+1:])
	q.c = kx
	r.c = kx
	if pi >= 0 {
		p.insert(pi, q.x[kx].sep, r)
	} else {
		// Splitting the root index page: grow the tree by one level.
		t.r = newX(q).insert(0, q.x[kx].sep, r)
	}

	q.x[kx].sep = nil
	for i := range q.x[kx+1:] {
		q.x[kx+i+1] = zxe // GC
	}
	if *i > kx {
		*pp = r
		*i -= kx + 1
	}
}
// underflow refills data page q (child pi of p) that dropped below
// minimum occupancy: borrow one item from a sibling when possible,
// otherwise concatenate with a sibling (cat).
func (t *tree) underflow(p *x, q *d, pi int) {
	t.ver++
	l, r := p.siblings(pi)

	if l != nil && l.c+q.c >= 2*kd {
		l.mvR(q, 1)
	} else if r != nil && q.c+r.c >= 2*kd {
		q.mvL(r, 1)
		r.d[r.c] = zde // GC
	} else if l != nil {
		t.cat(p, l, q, pi-1)
	} else {
		t.cat(p, q, r, pi)
	}
}
// underflowX refills index page *pp (child pi of p) that is at minimum
// occupancy: rotate a separator/child through the parent from a sibling
// with slack, otherwise concatenate with a sibling (catX). The caller's
// page pointer *pp and descent index *i are adjusted to keep pointing
// at the same logical child.
func (t *tree) underflowX(p *x, pp **x, pi int, i *int) {
	t.ver++
	var l, r *x
	q := *pp

	if pi >= 0 {
		if pi > 0 {
			l = p.x[pi-1].ch.(*x)
		}
		if pi < p.c {
			r = p.x[pi+1].ch.(*x)
		}
	}

	if l != nil && l.c > kx {
		// Borrow from the left: shift q right, pull the parent
		// separator down, take the left page's last child.
		q.x[q.c+1].ch = q.x[q.c].ch
		copy(q.x[1:], q.x[:q.c])
		q.x[0].ch = l.x[l.c].ch
		q.x[0].sep = p.x[pi-1].sep
		q.c++
		*i++
		l.c--
		p.x[pi-1].sep = l.x[l.c].sep
		return
	}

	if r != nil && r.c > kx {
		// Borrow from the right: pull the parent separator down at the
		// end of q and take the right page's first child.
		q.x[q.c].sep = p.x[pi].sep
		q.c++
		q.x[q.c].ch = r.x[0].ch
		p.x[pi].sep = r.x[0].sep
		copy(r.x[:], r.x[1:r.c])
		r.c--
		rc := r.c
		r.x[rc].ch = r.x[rc+1].ch
		r.x[rc].sep = nil  // GC
		r.x[rc+1].ch = nil // GC
		return
	}

	if l != nil {
		*i += l.c + 1
		t.catX(p, l, q, pi-1)
		*pp = l
		return
	}

	t.catX(p, q, r, pi)
}
// ----------------------------------------------------------------- enumerator

// Next returns the currently enumerated item, if it exists and moves to the
// next item in the key collation order. If there is no item to return, err ==
// io.EOF is returned.
func (e *enumerator) Next() (k []interface{}, v []interface{}, err error) {
	if err = e.err; err != nil {
		return
	}

	if e.ver != e.t.ver {
		// The tree changed under us: re-seek to the remembered key.
		f, hit := e.t.Seek(e.k)
		if !e.hit && hit {
			// NOTE(review): appears to skip a key that was inserted
			// since the original seek, preserving iteration order —
			// confirm against upstream cznic/b.
			if err = f.next(); err != nil {
				return
			}
		}

		*e = *f
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}

	if e.i >= e.q.c {
		if err = e.next(); err != nil {
			return
		}
	}

	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, false
	e.next()
	return
}
// next advances the cursor by one item, moving to the following data
// page when the current one is exhausted; it records and returns io.EOF
// at the end of the tree.
func (e *enumerator) next() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}

	if e.i < e.q.c-1 {
		e.i++
		return e.err
	}

	e.q, e.i = e.q.n, 0
	if e.q == nil {
		e.err = io.EOF
	}
	return e.err
}
// Prev returns the currently enumerated item, if it exists and moves to the
// previous item in the key collation order. If there is no item to return, err
// == io.EOF is returned.
func (e *enumerator) Prev() (k []interface{}, v []interface{}, err error) {
	if err = e.err; err != nil {
		return
	}

	if e.ver != e.t.ver {
		// The tree changed under us: re-seek to the remembered key.
		f, hit := e.t.Seek(e.k)
		if !e.hit && hit {
			if err = f.prev(); err != nil {
				return
			}
		}

		*e = *f
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}

	if e.i >= e.q.c {
		// NOTE(review): a past-the-end cursor is clamped by stepping
		// forward (next), mirroring Next — confirm against upstream
		// cznic/b that Prev is intended to call next here.
		if err = e.next(); err != nil {
			return
		}
	}

	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, false
	e.prev()
	return
}
// prev steps the cursor back by one item, moving to the preceding data
// page when the current one is exhausted; it records and returns io.EOF
// at the front of the tree.
func (e *enumerator) prev() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}

	if e.i > 0 {
		e.i--
		return e.err
	}

	if e.q = e.q.p; e.q == nil {
		e.err = io.EOF
		return e.err
	}
	e.i = e.q.c - 1
	return e.err
}

991
vendor/github.com/cznic/ql/builtin.go generated vendored Normal file
View File

@@ -0,0 +1,991 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ql
import (
"fmt"
"math/rand"
"reflect"
"strconv"
"strings"
"time"
)
//TODO agg bigint, bigrat, time, duration

// builtin maps a QL function name to its implementation and call
// constraints: the allowed argument-count range, whether the call may
// be folded at compile time (isStatic), and whether it is an aggregate
// evaluated across grouped rows (isAggregate).
var builtin = map[string]struct {
	f           func([]interface{}, map[interface{}]interface{}) (interface{}, error)
	minArgs     int
	maxArgs     int
	isStatic    bool
	isAggregate bool
}{
	"__testBlob":   {builtinTestBlob, 1, 1, true, false},
	"__testString": {builtinTestString, 1, 1, true, false},
	"avg":          {builtinAvg, 1, 1, false, true},
	"complex":      {builtinComplex, 2, 2, true, false},
	"contains":     {builtinContains, 2, 2, true, false},
	"count":        {builtinCount, 0, 1, false, true},
	"date":         {builtinDate, 8, 8, true, false},
	"day":          {builtinDay, 1, 1, true, false},
	"formatTime":   {builtinFormatTime, 2, 2, true, false},
	"formatFloat":  {builtinFormatFloat, 1, 4, true, false},
	"formatInt":    {builtinFormatInt, 1, 2, true, false},
	"hasPrefix":    {builtinHasPrefix, 2, 2, true, false},
	"hasSuffix":    {builtinHasSuffix, 2, 2, true, false},
	"hour":         {builtinHour, 1, 1, true, false},
	"hours":        {builtinHours, 1, 1, true, false},
	"id":           {builtinID, 0, 1, false, false},
	"imag":         {builtinImag, 1, 1, true, false},
	"len":          {builtinLen, 1, 1, true, false},
	"max":          {builtinMax, 1, 1, false, true},
	"min":          {builtinMin, 1, 1, false, true},
	"minute":       {builtinMinute, 1, 1, true, false},
	"minutes":      {builtinMinutes, 1, 1, true, false},
	"month":        {builtinMonth, 1, 1, true, false},
	"nanosecond":   {builtinNanosecond, 1, 1, true, false},
	"nanoseconds":  {builtinNanoseconds, 1, 1, true, false},
	"now":          {builtinNow, 0, 0, false, false},
	"parseTime":    {builtinParseTime, 2, 2, true, false},
	"real":         {builtinReal, 1, 1, true, false},
	"second":       {builtinSecond, 1, 1, true, false},
	"seconds":      {builtinSeconds, 1, 1, true, false},
	"since":        {builtinSince, 1, 1, false, false},
	"sum":          {builtinSum, 1, 1, false, true},
	"timeIn":       {builtinTimeIn, 2, 2, true, false},
	"weekday":      {builtinWeekday, 1, 1, true, false},
	"year":         {builtinYear, 1, 1, true, false},
	"yearDay":      {builtinYearday, 1, 1, true, false},
}
// badNArgs builds the error for a call to s with the wrong number of
// arguments: "missing argument" when fewer than min were supplied, "too
// many arguments" otherwise. The offending arguments are echoed back.
func badNArgs(min int, s string, arg []interface{}) error {
	rendered := make([]string, 0, len(arg))
	for _, v := range arg {
		rendered = append(rendered, fmt.Sprintf("%v", v))
	}
	list := strings.Join(rendered, ", ")
	if len(arg) < min {
		return fmt.Errorf("missing argument to %s(%s)", s, list)
	}
	return fmt.Errorf("too many arguments to %s(%s)", s, list)
}
// invArg builds the error reported when builtin s receives an argument
// of an unsupported type.
func invArg(arg interface{}, s string) error {
	const format = "invalid argument %v (type %T) for %s"
	return fmt.Errorf(format, arg, arg, s)
}
// builtinTestBlob returns a deterministic pseudo-random blob of n bytes
// where n is the integer first argument; n also seeds the generator, so
// equal sizes yield equal blobs. Test helper only.
func builtinTestBlob(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	n, err := intExpr(arg[0])
	if err != nil {
		return nil, err
	}

	rng := rand.New(rand.NewSource(n))
	out := make([]byte, n)
	for i := range out {
		out[i] = byte(rng.Int())
	}
	return out, nil
}
// builtinTestString returns a deterministic pseudo-random string of n
// bytes where n is the integer first argument; n also seeds the
// generator. Test helper only.
func builtinTestString(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	n, err := intExpr(arg[0])
	if err != nil {
		return nil, err
	}

	rng := rand.New(rand.NewSource(n))
	out := make([]byte, n)
	for i := range out {
		out[i] = byte(rng.Int())
	}
	return string(out), nil
}
// builtinAvg implements the avg() aggregate. During row accumulation it
// keeps a running sum and count under ctx[$fn]; when ctx["$agg"] is set
// it finalizes and returns sum/count in the sum's own type. ctx["$agg0"]
// (empty group) yields nil, and nil arguments are skipped.
//
// Fixes vs. the previous revision: the complex128 average is no longer
// truncated to complex64; complex values are accepted when the
// accumulator starts (the finalize branch always supported them, as
// does builtinSum); "value if type" typo corrected in the error.
func builtinAvg(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	type avg struct {
		sum interface{}
		n   uint64
	}

	if _, ok := ctx["$agg0"]; ok {
		return
	}

	fn := ctx["$fn"]
	if _, ok := ctx["$agg"]; ok {
		data, ok := ctx[fn].(avg)
		if !ok {
			return
		}

		switch x := data.sum.(type) {
		case complex64:
			return complex64(complex128(x) / complex(float64(data.n), 0)), nil
		case complex128:
			// BUG fix: the result used to be converted to complex64.
			return complex128(x) / complex(float64(data.n), 0), nil
		case float32:
			return float32(float64(x) / float64(data.n)), nil
		case float64:
			return x / float64(data.n), nil
		case int8:
			return int8(int64(x) / int64(data.n)), nil
		case int16:
			return int16(int64(x) / int64(data.n)), nil
		case int32:
			return int32(int64(x) / int64(data.n)), nil
		case int64:
			return x / int64(data.n), nil
		case uint8:
			return uint8(uint64(x) / data.n), nil
		case uint16:
			return uint16(uint64(x) / data.n), nil
		case uint32:
			return uint32(uint64(x) / data.n), nil
		case uint64:
			return x / data.n, nil
		}
	}

	data, _ := ctx[fn].(avg)
	y := arg[0]
	if y == nil {
		return
	}

	switch x := data.sum.(type) {
	case nil:
		// First non-nil value establishes the accumulator type. BUG
		// fix: complex values are accepted here too, matching the
		// finalization branch above and builtinSum.
		switch y := y.(type) {
		case complex64, complex128, float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64:
			data = avg{y, 0}
		default:
			return nil, fmt.Errorf("avg: cannot accept %v (value of type %T)", y, y)
		}
	case complex64:
		data.sum = x + y.(complex64)
	case complex128:
		data.sum = x + y.(complex128)
	case float32:
		data.sum = x + y.(float32)
	case float64:
		data.sum = x + y.(float64)
	case int8:
		data.sum = x + y.(int8)
	case int16:
		data.sum = x + y.(int16)
	case int32:
		data.sum = x + y.(int32)
	case int64:
		data.sum = x + y.(int64)
	case uint8:
		data.sum = x + y.(uint8)
	case uint16:
		data.sum = x + y.(uint16)
	case uint32:
		data.sum = x + y.(uint32)
	case uint64:
		data.sum = x + y.(uint64)
	}
	data.n++
	ctx[fn] = data
	return
}
// builtinComplex implements complex(re, im): the operands are coerced
// to a common type and combined into a complex value. Ideal (untyped
// constant) operands yield an idealComplex; float32 pairs yield a
// complex64; other numeric pairs yield a complex128. A nil in either
// position propagates nil.
func builtinComplex(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
	re, im := arg[0], arg[1]
	if re == nil || im == nil {
		return nil, nil
	}

	re, im = coerce(re, im)
	// coerce is best-effort; reject operands it could not unify.
	if reflect.TypeOf(re) != reflect.TypeOf(im) {
		return nil, fmt.Errorf("complex(%T(%#v), %T(%#v)): invalid types", re, re, im, im)
	}

	switch re := re.(type) {
	case idealFloat:
		return idealComplex(complex(float64(re), float64(im.(idealFloat)))), nil
	case idealInt:
		return idealComplex(complex(float64(re), float64(im.(idealInt)))), nil
	case idealRune:
		return idealComplex(complex(float64(re), float64(im.(idealRune)))), nil
	case idealUint:
		return idealComplex(complex(float64(re), float64(im.(idealUint)))), nil
	case float32:
		return complex(float32(re), im.(float32)), nil
	case float64:
		return complex(float64(re), im.(float64)), nil
	case int8:
		return complex(float64(re), float64(im.(int8))), nil
	case int16:
		return complex(float64(re), float64(im.(int16))), nil
	case int32:
		return complex(float64(re), float64(im.(int32))), nil
	case int64:
		return complex(float64(re), float64(im.(int64))), nil
	case uint8:
		return complex(float64(re), float64(im.(uint8))), nil
	case uint16:
		return complex(float64(re), float64(im.(uint16))), nil
	case uint32:
		return complex(float64(re), float64(im.(uint32))), nil
	case uint64:
		return complex(float64(re), float64(im.(uint64))), nil
	default:
		return nil, invArg(re, "complex")
	}
}
// builtinContains implements contains(s, chars): true when chars is a
// substring of s. nil propagates; arg[0] is type-checked first.
func builtinContains(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	s, ok := arg[0].(string)
	if !ok {
		return nil, invArg(arg[0], "string")
	}

	if arg[1] == nil {
		return nil, nil
	}
	chars, ok := arg[1].(string)
	if !ok {
		return nil, invArg(arg[1], "string")
	}
	return strings.Contains(s, chars), nil
}
// builtinCount implements the count() aggregate. With no argument every
// row counts; with one argument only rows where it is non-nil count.
// ctx["$agg0"] (empty group) yields 0; ctx["$agg"] finalizes.
func builtinCount(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	if _, ok := ctx["$agg0"]; ok {
		return int64(0), nil
	}

	fn := ctx["$fn"]
	if _, ok := ctx["$agg"]; ok {
		return ctx[fn].(int64), nil
	}

	n, _ := ctx[fn].(int64)
	switch {
	case len(arg) == 0:
		n++
	case len(arg) == 1:
		if arg[0] != nil {
			n++
		}
	default:
		panic("internal error 067")
	}
	ctx[fn] = n
	return
}
// builtinDate implements date(year, month, day, hour, min, sec, nsec,
// location): the first seven arguments must be integers (idealInt is
// normalized to int64 in place), the eighth a location name — "local"
// selects time.Local, anything else is resolved via time.LoadLocation.
func builtinDate(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	for i, val := range arg {
		if i == 7 {
			if _, ok := val.(string); !ok {
				return nil, invArg(val, "date")
			}
			continue
		}

		switch x := val.(type) {
		case int64:
			// already canonical
		case idealInt:
			arg[i] = int64(x)
		default:
			return nil, invArg(x, "date")
		}
	}

	loc := time.Local
	if sloc := arg[7].(string); sloc != "local" {
		var err error
		if loc, err = time.LoadLocation(sloc); err != nil {
			return nil, err
		}
	}

	return time.Date(
		int(arg[0].(int64)),
		time.Month(arg[1].(int64)),
		int(arg[2].(int64)),
		int(arg[3].(int64)),
		int(arg[4].(int64)),
		int(arg[5].(int64)),
		int(arg[6].(int64)),
		loc,
	), nil
}
// builtinLen implements len(s) for strings (byte length); nil
// propagates.
func builtinLen(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	s, ok := arg[0].(string)
	if !ok {
		return nil, invArg(arg[0], "len")
	}
	return int64(len(s)), nil
}
// builtinDay implements day(t): the day of the month of t; nil
// propagates.
func builtinDay(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	t, ok := arg[0].(time.Time)
	if !ok {
		return nil, invArg(arg[0], "day")
	}
	return int64(t.Day()), nil
}
// builtinFormatTime implements formatTime(t, layout) via
// time.Time.Format; nil in either position propagates, and arg[0] is
// type-checked before arg[1].
func builtinFormatTime(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	t, ok := arg[0].(time.Time)
	if !ok {
		return nil, invArg(arg[0], "formatTime")
	}

	if arg[1] == nil {
		return nil, nil
	}
	layout, ok := arg[1].(string)
	if !ok {
		return nil, invArg(arg[1], "formatTime")
	}
	return t.Format(layout), nil
}
// builtinFormatFloat implements formatFloat(f[, fmt[, prec[, bitSize]]])
// via strconv.FormatFloat. Defaults: fmt 'g', prec -1, bitSize 64 (32
// when the value is a float32). A nil anywhere yields nil. Optional
// arguments are validated right-to-left, as in the original
// fallthrough chain.
func builtinFormatFloat(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	var (
		val     float64
		verb    byte = 'g'
		prec         = -1
		bitSize      = 64
	)

	switch x := arg[0].(type) {
	case nil:
		return nil, nil
	case float32:
		val = float64(x)
		bitSize = 32
	case float64:
		val = x
	default:
		return nil, invArg(x, "formatFloat")
	}

	n := len(arg)
	if n == 4 {
		switch y := coerce1(arg[3], int64(0)).(type) {
		case nil:
			return nil, nil
		case int64:
			bitSize = int(y)
		default:
			return nil, invArg(y, "formatFloat")
		}
	}
	if n == 4 || n == 3 {
		switch y := coerce1(arg[2], int64(0)).(type) {
		case nil:
			return nil, nil
		case int64:
			prec = int(y)
		default:
			return nil, invArg(y, "formatFloat")
		}
	}
	if n >= 2 && n <= 4 {
		switch y := coerce1(arg[1], byte(0)).(type) {
		case nil:
			return nil, nil
		case byte:
			verb = y
		default:
			return nil, invArg(y, "formatFloat")
		}
	}

	return strconv.FormatFloat(val, verb, prec, bitSize), nil
}
// builtinFormatInt implements formatInt(i[, base]) via
// strconv.FormatInt/FormatUint. Any sized signed or unsigned integer is
// accepted; base defaults to 10. A nil anywhere yields nil.
func builtinFormatInt(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	var intVal int64
	var uintVal uint64
	uintType := false
	base := 10
	switch x := arg[0].(type) {
	case nil:
		return nil, nil
	case int8:
		intVal = int64(x)
	case int16:
		intVal = int64(x)
	case int32:
		intVal = int64(x)
	case int64:
		intVal = x
	case uint8:
		uintType = true
		uintVal = uint64(x)
	case uint16:
		uintType = true
		uintVal = uint64(x)
	case uint32:
		uintType = true
		uintVal = uint64(x)
	case uint64:
		uintType = true
		uintVal = x
	default:
		return nil, invArg(x, "formatInt")
	}
	switch len(arg) {
	case 2:
		// Optional base argument, coerced to int64 first.
		arg1 := coerce1(arg[1], int64(0))
		switch y := arg1.(type) {
		case nil:
			return nil, nil
		case int64:
			base = int(y)
		default:
			return nil, invArg(y, "formatInt")
		}
	}
	if uintType {
		return strconv.FormatUint(uintVal, base), nil
	}
	return strconv.FormatInt(intVal, base), nil
}
// builtinHasPrefix implements hasPrefix(s, prefix); nil propagates, and
// arg[0] is type-checked before arg[1].
func builtinHasPrefix(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	s, ok := arg[0].(string)
	if !ok {
		return nil, invArg(arg[0], "string")
	}

	if arg[1] == nil {
		return nil, nil
	}
	prefix, ok := arg[1].(string)
	if !ok {
		return nil, invArg(arg[1], "string")
	}
	return strings.HasPrefix(s, prefix), nil
}
// builtinHasSuffix implements hasSuffix(s, suffix); nil propagates, and
// arg[0] is type-checked before arg[1].
func builtinHasSuffix(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	s, ok := arg[0].(string)
	if !ok {
		return nil, invArg(arg[0], "string")
	}

	if arg[1] == nil {
		return nil, nil
	}
	suffix, ok := arg[1].(string)
	if !ok {
		return nil, invArg(arg[1], "string")
	}
	return strings.HasSuffix(s, suffix), nil
}
// builtinHour implements hour(t): the hour within the day of t; nil
// propagates.
func builtinHour(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	t, ok := arg[0].(time.Time)
	if !ok {
		return nil, invArg(arg[0], "hour")
	}
	return int64(t.Hour()), nil
}
// builtinHours implements hours(d): the duration d as a floating-point
// number of hours; nil propagates.
func builtinHours(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	d, ok := arg[0].(time.Duration)
	if !ok {
		return nil, invArg(arg[0], "hours")
	}
	return d.Hours(), nil
}
// builtinID implements id([table]). In multi-table contexts ctx["$id"]
// is a map keyed by table name and the table argument is required;
// otherwise it is the current row's int64 id. Returns nil when no id is
// available.
func builtinID(arg []interface{}, ctx map[interface{}]interface{}) (interface{}, error) {
	switch x := ctx["$id"].(type) {
	case int64:
		return x, nil
	case map[string]interface{}:
		if len(arg) == 0 {
			return nil, nil
		}

		tab := arg[0].(*ident)
		id, ok := x[tab.s]
		if !ok {
			return nil, fmt.Errorf("value not available: id(%s)", tab)
		}

		if _, ok := id.(int64); ok {
			return id, nil
		}
		return nil, fmt.Errorf("value not available: id(%s)", tab)
	default:
		return nil, nil
	}
}
// builtinImag implements imag(c): the imaginary part of a complex
// value; nil propagates.
func builtinImag(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	switch c := arg[0].(type) {
	case idealComplex:
		return imag(c), nil
	case complex64:
		return imag(c), nil
	case complex128:
		return imag(c), nil
	case nil:
		return nil, nil
	default:
		return nil, invArg(c, "imag")
	}
}
// builtinMax implements the max() aggregate over ordered types
// (numbers, strings, time.Time). The running maximum lives under
// ctx[$fn]; ctx["$agg"] finalizes; ctx["$agg0"] (empty group) yields
// nil. nil arguments are skipped. The first non-nil value fixes the
// comparison type; subsequent rows must have the same type.
//
// Fix vs. the previous revision: "value if type" typo corrected in the
// error message.
func builtinMax(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	if _, ok := ctx["$agg0"]; ok {
		return
	}

	fn := ctx["$fn"]
	if _, ok := ctx["$agg"]; ok {
		if v, ok = ctx[fn]; ok {
			return
		}

		return nil, nil
	}

	max := ctx[fn]
	y := arg[0]
	if y == nil {
		return
	}
	switch x := max.(type) {
	case nil:
		switch y := y.(type) {
		case float32, float64, string, int8, int16, int32, int64, uint8, uint16, uint32, uint64, time.Time:
			max = y
		default:
			return nil, fmt.Errorf("max: cannot accept %v (value of type %T)", y, y)
		}
	case float32:
		if y := y.(float32); y > x {
			max = y
		}
	case float64:
		if y := y.(float64); y > x {
			max = y
		}
	case string:
		if y := y.(string); y > x {
			max = y
		}
	case int8:
		if y := y.(int8); y > x {
			max = y
		}
	case int16:
		if y := y.(int16); y > x {
			max = y
		}
	case int32:
		if y := y.(int32); y > x {
			max = y
		}
	case int64:
		if y := y.(int64); y > x {
			max = y
		}
	case uint8:
		if y := y.(uint8); y > x {
			max = y
		}
	case uint16:
		if y := y.(uint16); y > x {
			max = y
		}
	case uint32:
		if y := y.(uint32); y > x {
			max = y
		}
	case uint64:
		if y := y.(uint64); y > x {
			max = y
		}
	case time.Time:
		if y := y.(time.Time); y.After(x) {
			max = y
		}
	}
	ctx[fn] = max
	return
}
// builtinMin implements the min() aggregate over ordered types
// (numbers, strings, time.Time). The running minimum lives under
// ctx[$fn]; ctx["$agg"] finalizes; ctx["$agg0"] (empty group) yields
// nil. nil arguments are skipped. The first non-nil value fixes the
// comparison type; subsequent rows must have the same type.
//
// Fix vs. the previous revision: "value if type" typo corrected in the
// error message.
func builtinMin(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	if _, ok := ctx["$agg0"]; ok {
		return
	}

	fn := ctx["$fn"]
	if _, ok := ctx["$agg"]; ok {
		if v, ok = ctx[fn]; ok {
			return
		}

		return nil, nil
	}

	min := ctx[fn]
	y := arg[0]
	if y == nil {
		return
	}
	switch x := min.(type) {
	case nil:
		switch y := y.(type) {
		case float32, float64, string, int8, int16, int32, int64, uint8, uint16, uint32, uint64, time.Time:
			min = y
		default:
			return nil, fmt.Errorf("min: cannot accept %v (value of type %T)", y, y)
		}
	case float32:
		if y := y.(float32); y < x {
			min = y
		}
	case float64:
		if y := y.(float64); y < x {
			min = y
		}
	case string:
		if y := y.(string); y < x {
			min = y
		}
	case int8:
		if y := y.(int8); y < x {
			min = y
		}
	case int16:
		if y := y.(int16); y < x {
			min = y
		}
	case int32:
		if y := y.(int32); y < x {
			min = y
		}
	case int64:
		if y := y.(int64); y < x {
			min = y
		}
	case uint8:
		if y := y.(uint8); y < x {
			min = y
		}
	case uint16:
		if y := y.(uint16); y < x {
			min = y
		}
	case uint32:
		if y := y.(uint32); y < x {
			min = y
		}
	case uint64:
		if y := y.(uint64); y < x {
			min = y
		}
	case time.Time:
		if y := y.(time.Time); y.Before(x) {
			min = y
		}
	}
	ctx[fn] = min
	return
}
// builtinMinute implements minute(t): the minute within the hour of t;
// nil propagates.
func builtinMinute(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	t, ok := arg[0].(time.Time)
	if !ok {
		return nil, invArg(arg[0], "minute")
	}
	return int64(t.Minute()), nil
}
// builtinMinutes implements minutes(d): the duration d as a
// floating-point number of minutes; nil propagates.
func builtinMinutes(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	d, ok := arg[0].(time.Duration)
	if !ok {
		return nil, invArg(arg[0], "minutes")
	}
	return d.Minutes(), nil
}
// builtinMonth implements month(t): the month of the year of t
// (January = 1); nil propagates.
func builtinMonth(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	t, ok := arg[0].(time.Time)
	if !ok {
		return nil, invArg(arg[0], "month")
	}
	return int64(t.Month()), nil
}
// builtinNanosecond implements nanosecond(t): the nanosecond offset
// within the second of t; nil propagates.
func builtinNanosecond(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	t, ok := arg[0].(time.Time)
	if !ok {
		return nil, invArg(arg[0], "nanosecond")
	}
	return int64(t.Nanosecond()), nil
}
// builtinNanoseconds implements nanoseconds(d): the duration d as an
// integer nanosecond count; nil propagates.
func builtinNanoseconds(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	if arg[0] == nil {
		return nil, nil
	}
	d, ok := arg[0].(time.Duration)
	if !ok {
		return nil, invArg(arg[0], "nanoseconds")
	}
	return d.Nanoseconds(), nil
}
func builtinNow(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
return time.Now(), nil
}
// builtinParseTime implements parseTime(layout, value). When the
// initial parse yields a non-UTC zone the value is re-parsed in the
// zone's named location, so the result carries a full location rather
// than a synthetic fixed offset. nil in either position propagates.
func builtinParseTime(arg []interface{}, _ map[interface{}]interface{}) (interface{}, error) {
	var layoutValue [2]string
	for i, v := range arg {
		if v == nil {
			return nil, nil
		}
		s, ok := v.(string)
		if !ok {
			return nil, invArg(v, "parseTime")
		}
		layoutValue[i] = s
	}

	t, err := time.Parse(layoutValue[0], layoutValue[1])
	if err != nil {
		return nil, err
	}

	ls := t.Location().String()
	if ls == "UTC" {
		return t, nil
	}

	l, err := time.LoadLocation(ls)
	if err != nil {
		// Best effort: keep the originally parsed value.
		return t, nil
	}
	return time.ParseInLocation(layoutValue[0], layoutValue[1], l)
}
// builtinReal implements the real() SQL function: the real part of a complex
// value. A NULL argument yields NULL.
func builtinReal(arg []interface{}, _ map[interface{}]interface{}) (v interface{}, err error) {
	if arg[0] == nil {
		return nil, nil
	}

	switch x := arg[0].(type) {
	case idealComplex:
		return real(x), nil
	case complex64:
		return real(x), nil
	case complex128:
		return real(x), nil
	}

	return nil, invArg(arg[0], "real")
}
// builtinSecond implements the second() SQL function: the second within the
// minute of a time value. A NULL argument yields NULL.
func builtinSecond(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	if arg[0] == nil {
		return nil, nil
	}

	if t, ok := arg[0].(time.Time); ok {
		return int64(t.Second()), nil
	}

	return nil, invArg(arg[0], "second")
}

// builtinSeconds implements the seconds() SQL function: a duration expressed
// as floating point seconds. A NULL argument yields NULL.
func builtinSeconds(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	if arg[0] == nil {
		return nil, nil
	}

	if d, ok := arg[0].(time.Duration); ok {
		return d.Seconds(), nil
	}

	return nil, invArg(arg[0], "seconds")
}

// builtinSince implements the since() SQL function: the duration elapsed
// since the argument time. A NULL argument yields NULL.
func builtinSince(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	if arg[0] == nil {
		return nil, nil
	}

	if t, ok := arg[0].(time.Time); ok {
		return time.Since(t), nil
	}

	return nil, invArg(arg[0], "since")
}
// builtinSum implements the sum() aggregate function.
//
// The running total is kept in ctx under the per-invocation key ctx["$fn"].
// NULL arguments are ignored. The first non-NULL value fixes the type of the
// accumulator; later values are type asserted to that same type. During the
// final aggregation pass (ctx["$agg"] present) the accumulated total is
// returned, or NULL if no non-NULL value was seen; aggregation over an empty
// set (ctx["$agg0"] present) also yields NULL.
func builtinSum(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	// Aggregation over the empty set: sum is NULL.
	if _, ok := ctx["$agg0"]; ok {
		return
	}

	fn := ctx["$fn"]
	// Final pass: report the accumulated value, or NULL if none.
	if _, ok := ctx["$agg"]; ok {
		if v, ok = ctx[fn]; ok {
			return
		}

		return nil, nil
	}

	sum := ctx[fn]
	y := arg[0]
	if y == nil {
		return // NULL does not contribute to the total.
	}

	switch x := sum.(type) {
	case nil:
		// First non-NULL value: it becomes the initial total and fixes the
		// accumulator type for the rest of the aggregation.
		switch y := y.(type) {
		case complex64, complex128, float32, float64, int8, int16, int32, int64, uint8, uint16, uint32, uint64:
			sum = y
		default:
			// Fixed message: was "value if type", a typo for "value of type".
			return nil, fmt.Errorf("sum: cannot accept %v (value of type %T)", y, y)
		}
	case complex64:
		sum = x + y.(complex64)
	case complex128:
		sum = x + y.(complex128)
	case float32:
		sum = x + y.(float32)
	case float64:
		sum = x + y.(float64)
	case int8:
		sum = x + y.(int8)
	case int16:
		sum = x + y.(int16)
	case int32:
		sum = x + y.(int32)
	case int64:
		sum = x + y.(int64)
	case uint8:
		sum = x + y.(uint8)
	case uint16:
		sum = x + y.(uint16)
	case uint32:
		sum = x + y.(uint32)
	case uint64:
		sum = x + y.(uint64)
	}
	ctx[fn] = sum
	return
}
// builtinTimeIn implements the timeIn(t, zone) SQL function: t converted to
// the named time zone. zone is either the literal "local" or a name accepted
// by time.LoadLocation (e.g. "UTC", "Europe/Prague"). A NULL in either
// argument yields NULL.
func builtinTimeIn(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	switch x := arg[0].(type) {
	case nil:
		return nil, nil
	case time.Time:
		switch y := arg[1].(type) {
		case nil:
			return nil, nil
		case string:
			loc := time.Local
			switch y {
			case "local":
				// loc already is time.Local.
			default:
				loc, err = time.LoadLocation(y)
				if err != nil {
					return
				}
			}

			return x.In(loc), nil
		default:
			// Bug fix: report the offending second argument y, not the
			// (valid) time.Time held in x.
			return nil, invArg(y, "timeIn")
		}
	default:
		return nil, invArg(x, "timeIn")
	}
}
// builtinWeekday implements the weekday() SQL function: the day of the week
// of a time value (Sunday == 0). A NULL argument yields NULL.
func builtinWeekday(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	if arg[0] == nil {
		return nil, nil
	}

	if t, ok := arg[0].(time.Time); ok {
		return int64(t.Weekday()), nil
	}

	return nil, invArg(arg[0], "weekday")
}

// builtinYear implements the year() SQL function: the year of a time value.
// A NULL argument yields NULL.
func builtinYear(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	if arg[0] == nil {
		return nil, nil
	}

	if t, ok := arg[0].(time.Time); ok {
		return int64(t.Year()), nil
	}

	return nil, invArg(arg[0], "year")
}

// builtinYearday implements the yearDay() SQL function: the day of the year
// of a time value (1..365/366). A NULL argument yields NULL.
func builtinYearday(arg []interface{}, ctx map[interface{}]interface{}) (v interface{}, err error) {
	if arg[0] == nil {
		return nil, nil
	}

	if t, ok := arg[0].(time.Time); ok {
		return int64(t.YearDay()), nil
	}

	return nil, invArg(arg[0], "yearDay")
}

290
vendor/github.com/cznic/ql/coerce.go generated vendored Normal file
View File

@@ -0,0 +1,290 @@
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CAUTION: This file was generated automatically by
//
// $ go run helper/helper.go -o coerce.go
//
// DO NOT EDIT!
package ql
import (
"math"
"math/big"
"reflect"
"time"
)
// coerce normalizes the pair (a, b) so that, whenever possible, both sides of
// a binary operation share the same type. Untyped ("ideal") constants are
// converted towards the other, typed, operand; two ideal operands are
// converted towards each other. Operands that already share a type, or that
// cannot be coerced, are returned unchanged.
func coerce(a, b interface{}) (x, y interface{}) {
	if reflect.TypeOf(a) == reflect.TypeOf(b) {
		return a, b
	}

	switch a.(type) {
	case idealComplex, idealFloat, idealInt, idealRune, idealUint:
		switch b.(type) {
		case idealComplex, idealFloat, idealInt, idealRune, idealUint:
			// Both ideal: try converting a towards b; if that did not unify
			// the types, convert b towards a instead.
			x, y = coerce1(a, b), b
			if reflect.TypeOf(x) == reflect.TypeOf(y) {
				return
			}

			return a, coerce1(b, a)
		default:
			return coerce1(a, b), b
		}
	default:
		switch b.(type) {
		case idealComplex, idealFloat, idealInt, idealRune, idealUint:
			return a, coerce1(b, a)
		default:
			return a, b
		}
	}
}
// coerce1 attempts to convert inVal, an untyped ("ideal") constant, to the
// type of otherVal. The conversion is performed only when it is value
// preserving, i.e. when inVal fits the target type's range; otherwise inVal
// is returned unchanged. The commented-out cases spell out the full type
// matrix emitted by the generator (helper/helper.go) for combinations that
// are intentionally not converted.
func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
	coercedInVal = inVal
	if otherVal == nil {
		return
	}

	switch x := inVal.(type) {
	case nil:
		return
	case idealComplex:
		// Complex constants convert only to the complex types.
		switch otherVal.(type) {
		//case idealComplex:
		//case idealFloat:
		//case idealInt:
		//case idealRune:
		//case idealUint:
		//case bool:
		case complex64:
			return complex64(x)
		case complex128:
			return complex128(x)
		//case float32:
		//case float64:
		//case int8:
		//case int16:
		//case int32:
		//case int64:
		//case string:
		//case uint8:
		//case uint16:
		//case uint32:
		//case uint64:
		//case *big.Int:
		//case *big.Rat:
		//case time.Time:
		//case time.Duration:
		}
	case idealFloat:
		// Float constants convert to complex, float and big.Rat targets.
		switch otherVal.(type) {
		case idealComplex:
			return idealComplex(complex(float64(x), 0))
		case idealFloat:
			return idealFloat(float64(x))
		//case idealInt:
		//case idealRune:
		//case idealUint:
		//case bool:
		case complex64:
			return complex64(complex(float32(x), 0))
		case complex128:
			return complex128(complex(float64(x), 0))
		case float32:
			return float32(float64(x))
		case float64:
			return float64(float64(x))
		//case int8:
		//case int16:
		//case int32:
		//case int64:
		//case string:
		//case uint8:
		//case uint16:
		//case uint32:
		//case uint64:
		//case *big.Int:
		case *big.Rat:
			return big.NewRat(1, 1).SetFloat64(float64(x))
		//case time.Time:
		//case time.Duration:
		}
	case idealInt:
		// Integer constants convert to any numeric target they fit into.
		switch otherVal.(type) {
		case idealComplex:
			return idealComplex(complex(float64(x), 0))
		case idealFloat:
			return idealFloat(int64(x))
		case idealInt:
			return idealInt(int64(x))
		//case idealRune:
		case idealUint:
			if x >= 0 {
				return idealUint(int64(x))
			}
		//case bool:
		case complex64:
			return complex64(complex(float32(x), 0))
		case complex128:
			return complex128(complex(float64(x), 0))
		case float32:
			return float32(int64(x))
		case float64:
			return float64(int64(x))
		case int8:
			if x >= math.MinInt8 && x <= math.MaxInt8 {
				return int8(int64(x))
			}
		case int16:
			if x >= math.MinInt16 && x <= math.MaxInt16 {
				return int16(int64(x))
			}
		case int32:
			if x >= math.MinInt32 && x <= math.MaxInt32 {
				return int32(int64(x))
			}
		case int64:
			return int64(int64(x))
		//case string:
		case uint8:
			if x >= 0 && x <= math.MaxUint8 {
				return uint8(int64(x))
			}
		case uint16:
			if x >= 0 && x <= math.MaxUint16 {
				return uint16(int64(x))
			}
		case uint32:
			if x >= 0 && x <= math.MaxUint32 {
				return uint32(int64(x))
			}
		case uint64:
			if x >= 0 {
				return uint64(int64(x))
			}
		case *big.Int:
			return big.NewInt(int64(x))
		case *big.Rat:
			return big.NewRat(1, 1).SetInt64(int64(x))
		//case time.Time:
		case time.Duration:
			return time.Duration(int64(x))
		}
	case idealRune:
		// Rune constants convert unconditionally; no range checks here.
		switch otherVal.(type) {
		case idealComplex:
			return idealComplex(complex(float64(x), 0))
		case idealFloat:
			return idealFloat(int64(x))
		case idealInt:
			return idealInt(int64(x))
		case idealRune:
			return idealRune(int64(x))
		case idealUint:
			return idealUint(int64(x))
		//case bool:
		case complex64:
			return complex64(complex(float32(x), 0))
		case complex128:
			return complex128(complex(float64(x), 0))
		case float32:
			return float32(int64(x))
		case float64:
			return float64(int64(x))
		case int8:
			return int8(int64(x))
		case int16:
			return int16(int64(x))
		case int32:
			return int32(int64(x))
		case int64:
			return int64(int64(x))
		//case string:
		case uint8:
			return uint8(int64(x))
		case uint16:
			return uint16(int64(x))
		case uint32:
			return uint32(int64(x))
		case uint64:
			return uint64(int64(x))
		case *big.Int:
			return big.NewInt(int64(x))
		case *big.Rat:
			return big.NewRat(1, 1).SetInt64(int64(x))
		//case time.Time:
		case time.Duration:
			return time.Duration(int64(x))
		}
	case idealUint:
		// Unsigned constants convert to targets that can represent them.
		switch otherVal.(type) {
		case idealComplex:
			return idealComplex(complex(float64(x), 0))
		case idealFloat:
			return idealFloat(uint64(x))
		case idealInt:
			if x <= math.MaxInt64 {
				return idealInt(int64(x))
			}
		//case idealRune:
		case idealUint:
			return idealUint(uint64(x))
		//case bool:
		case complex64:
			return complex64(complex(float32(x), 0))
		case complex128:
			return complex128(complex(float64(x), 0))
		case float32:
			return float32(uint64(x))
		case float64:
			return float64(uint64(x))
		case int8:
			if x <= math.MaxInt8 {
				return int8(int64(x))
			}
		case int16:
			if x <= math.MaxInt16 {
				return int16(int64(x))
			}
		case int32:
			if x <= math.MaxInt32 {
				return int32(int64(x))
			}
		case int64:
			if x <= math.MaxInt64 {
				return int64(int64(x))
			}
		//case string:
		case uint8:
			if x >= 0 && x <= math.MaxUint8 {
				return uint8(int64(x))
			}
		case uint16:
			if x >= 0 && x <= math.MaxUint16 {
				return uint16(int64(x))
			}
		case uint32:
			if x >= 0 && x <= math.MaxUint32 {
				return uint32(int64(x))
			}
		case uint64:
			return uint64(uint64(x))
		case *big.Int:
			return big.NewInt(0).SetUint64(uint64(x))
		case *big.Rat:
			return big.NewRat(1, 1).SetInt(big.NewInt(0).SetUint64(uint64(x)))
		//case time.Time:
		case time.Duration:
			if x <= math.MaxInt64 {
				return time.Duration(int64(x))
			}
		}
	}
	return
}

298
vendor/github.com/cznic/ql/design/doc.go generated vendored Normal file
View File

@@ -0,0 +1,298 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package design describes some of the data structures used in QL.
Handles
A handle is a 7 byte "pointer" to a block in the DB[0].
Scalar encoding
Encoding of so called "scalars" provided by [1]. Unless specified otherwise,
all values discussed below are scalars, encoded scalars or encoding of scalar
arrays.
Database root
DB root is a 1-scalar found at a fixed handle (#1).
+---+------+--------+-----------------------+
| # | Name | Type | Description |
+---+------+--------+-----------------------+
| 0 | head | handle | First table meta data |
+---+------+--------+-----------------------+
Head is the head of a single linked list of table of meta data. It's zero if
there are no tables in the DB.
Table meta data
Table meta data are a 6-scalar.
+---+---------+--------+--------------------------+
| # | Name | Type | Description |
+---+---------+--------+--------------------------+
| 0 | next | handle | Next table meta data. |
| 1 | scols | string | Column definitions |
| 2 | hhead | handle | -> head -> first record |
| 3 | name | string | Table name |
| 4 | indices | string | Index definitions |
| 5 | hxroots | handle | Index B+Trees roots list |
+---+---------+--------+--------------------------+
Fields #4 and #5 are optional for backward compatibility with existing
databases. OTOH, forward compatibility will not work. Once any indices are
created using a newer QL version the older versions of QL, expecting only 4
fields of meta data will not be able to use the DB. That's the intended
behavior because the older versions of QL cannot update the indexes, which can
break queries run by the newer QL version which expect indices to be always
actualized on any table-with-indices mutation.
The handle of the next table meta data is in the field #0 (next). If there is
no next table meta data, the field is zero. Names and types of table columns
are stored in field #1 (scols). A single field is described by concatenating a
type tag and the column name. The type tags are
bool 'b'
complex64 'c'
complex128 'd'
float32 'f'
float64 'g', alias float
int8 'i'
int16 'j'
int32 'k'
int64 'l', alias int
string 's'
uint8 'u', alias byte
uint16 'v'
uint32 'w'
uint64 'x', alias uint
bigInt 'I'
bigRat 'R'
blob 'B'
duration 'D'
time 'T'
The scols value is the above described encoded fields joined using "|". For
example
CREATE TABLE t (Foo bool, Bar string, Baz float);
This statement adds a table meta data with scols
"bFool|sBar|gBaz"
Columns can be dropped from a table
ALTER TABLE t DROP COLUMN Bar;
This "erases" the field info in scols, so the value becomes
"bFool||gBaz"
Columns can be added to a table
ALTER TABLE t ADD Count uint;
New fields are always added to the end of scols
"bFool||gBaz|xCount"
Index of a field in strings.Split(scols, "|") is the index of the field in a
table record. The above discussed rules for column dropping and column adding
allow for schema evolution without a need to reshape any existing table data.
Dropped columns are left where they are and new records insert nil in their
place. The encoded nil is one byte. Added columns, when not present in
preexisting records are returned as nil values. If the overhead of dropped
columns becomes an issue and there's time/space and memory enough to move the
records of a table around:
BEGIN TRANSACTION;
CREATE TABLE new (column definitions);
INSERT INTO new SELECT * FROM old;
DROP TABLE old;
CREATE TABLE old (column definitions);
INSERT INTO old SELECT * FROM new;
DROP TABLE new;
END TRANSACTION;
This is not very time/space effective and for Big Data it can cause an OOM
because transactions are limited by memory resources available to the process.
Perhaps a method and/or QL statement to do this in-place should be added
(MAYBE consider adopting MySQL's OPTIMIZE TABLE syntax).
Field #2 (hhead) is a handle to a head of table records, i.e. not a handle to
the first record in the table. It is thus always non zero even for a table
having no records. The reason for this "double pointer" schema is to enable
adding (linking) a new record by updating a single value of the (hhead pointing
to) head.
tableMeta.hhead -> head -> firstTableRecord
The table name is stored in field #3 (name).
Indices
Consider an index named N, indexing column named C. The encoding of this
particular index is a string "<tag>N". <tag> is a string "n" for non unique
indices and "u" for unique indices. There is this index information for the
index possibly indexing the record id() and for all other columns of scols.
Where the column is not indexed, the index info is an empty string. Infos for
all indexes are joined with "|". For example
BEGIN TRANSACTION;
CREATE TABLE t (Foo int, Bar bool, Baz string);
CREATE INDEX X ON t (Baz);
CREATE UNIQUE INDEX Y ON t (Foo);
COMMIT;
The values of fields #1 and #4 for the above are
scols: "lFoo|bBar|sBaz"
indices: "|uY||nX"
Aligning properly the "|" split parts
id col #0 col#1 col#2
+----------+----+--------+--------+--------+
| scols: | | "lFoo" | "bBar" | "sBaz" |
+----------+----+--------+--------+--------+
| indices: | "" | "uY" | "" | "nX" |
+----------+----+--------+--------+--------+
shows that the record id() is not indexed for this table while the columns Foo
and Baz are.
Note that there cannot be two differently named indexes for the same column and
it's intended. The indices are B+Trees[2]. The list of handles to their roots
is pointed to by hxroots with zeros for non indexed columns. For the previous
example
tableMeta.hxroots -> {0, y, 0, x}
where x is the root of the B+Tree for the X index and y is the root of the
B+Tree for the Y index. If there would be an index for id(), its B+Tree root
will be present where the first zero is. Similarly to hhead, hxroots is never
zero, even when there are no indices for a table.
Table record
A table record is an N-scalar.
+-----+------------+--------+-------------------------------+
| # | Name | Type | Description |
+-----+------------+--------+-------------------------------+
| 0 | next | handle | Next record or zero. |
| 1 | id | int64 | Automatically assigned unique |
| | | | value obtainable by id(). |
| 2 | field #0 | scalar | First field of the record. |
| 3 | field #1 | scalar | Second field of the record. |
...
| N-1 | field #N-2 | scalar | Last field of the record. |
+-----+------------+--------+-------------------------------+
The linked "ordering" of table records has no semantics and it doesn't have to
correlate to the order of how the records were added to the table. In fact, an
efficient way of the linking leads to "ordering" which is actually reversed wrt
the insertion order.
Non unique index
The composite key of the B+Tree is {indexed values, record handle}. The B+Tree
value is not used.
B+Tree key B+Tree value
+----------------+---------------+ +--------------+
| Indexed Values | Record Handle | -> | not used |
+----------------+---------------+ +--------------+
Unique index
If the indexed values are all NULL then the composite B+Tree key is {nil,
record handle} and the B+Tree value is not used.
B+Tree key B+Tree value
+------+-----------------+ +--------------+
| NULL | Record Handle | -> | not used |
+------+-----------------+ +--------------+
If the indexed values are not all NULL then the B+Tree key is the indexed
values and the B+Tree value is the record handle.
B+Tree key B+Tree value
+----------------+ +---------------+
| Indexed Values | -> | Record Handle |
+----------------+ +---------------+
Non scalar types
Scalar types of [1] are bool, complex*, float*, int*, uint*, string and []byte
types. All other types are "blob-like".
QL type Go type
-----------------------------
blob []byte
bigint big.Int
bigrat big.Rat
time time.Time
duration time.Duration
Memory back-end stores the Go type directly. File back-end must resort to
encode all of the above as (tagged) []byte due to the lack of more types
supported natively by lldb. NULL values of blob-like types are encoded as nil
(gbNull in lldb/gb.go), exactly the same as the already existing QL types are.
Blob encoding
The values of the blob-like types are first encoded into a []byte slice:
+-----------------------+-------------------+
| blob | raw |
| bigint, bigrat, time | gob encoded |
| duration | gob encoded int64 |
+-----------------------+-------------------+
The gob encoding is "differential" wrt an initial encoding of all of the
blob-like type. IOW, the initial type descriptors which gob encoding must write
out are stripped off and "resupplied" on decoding transparently. See also
blob.go. If the length of the resulting slice is <= shortBlob, the first and
only chunk is the scalar encoding of
[]interface{}{typeTag, slice}. // initial (and last) chunk
The length of slice can be zero (for blob("")). If the resulting slice is long
(> shortBlob), the first chunk comes from encoding
[]interface{}{typeTag, nextHandle, firstPart}. // initial, but not final chunk
In this case len(firstPart) <= shortBlob. Second and other chunks: If the chunk
is the last one, src is
[]interface{}{lastPart}. // overflow chunk (last)
In this case len(lastPart) <= 64kB. If the chunk is not the last one, src is
[]interface{}{nextHandle, part}. // overflow chunk (not last)
In this case len(part) == 64kB.
Links
Referenced from above:
[0]: http://godoc.org/github.com/cznic/exp/lldb#hdr-Block_handles
[1]: http://godoc.org/github.com/cznic/exp/lldb#EncodeScalars
[2]: http://godoc.org/github.com/cznic/exp/lldb#BTree
Rationale
While these notes might be useful to anyone looking at QL sources, the
specifically intended reader is my future self.
*/
package design

2619
vendor/github.com/cznic/ql/doc.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

523
vendor/github.com/cznic/ql/driver.go generated vendored Normal file
View File

@@ -0,0 +1,523 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// database/sql/driver
package ql
import (
"bytes"
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"io"
"math/big"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
var (
	// Compile-time assertions that the driver types implement the
	// database/sql/driver interfaces they are used as.
	_ driver.Conn   = (*driverConn)(nil)
	_ driver.Driver = (*sqlDriver)(nil)
	_ driver.Execer = (*driverConn)(nil)
	_ driver.Queryer = (*driverConn)(nil)
	_ driver.Result = (*driverResult)(nil)
	_ driver.Rows   = (*driverRows)(nil)
	_ driver.Stmt   = (*driverStmt)(nil)
	_ driver.Tx     = (*driverConn)(nil)

	// Pre-compiled statement lists shared by all connections for
	// transaction control.
	txBegin    = MustCompile("BEGIN TRANSACTION;")
	txCommit   = MustCompile("COMMIT;")
	txRollback = MustCompile("ROLLBACK;")

	errNoResult = errors.New("query statement does not produce a result set (no top level SELECT)")
)
// errList collects several errors into a single error value; its Error
// method joins the individual messages with newlines.
type errList []error

// append adds err to the list; nil errors are discarded.
func (e *errList) append(err error) {
	if err == nil {
		return
	}

	*e = append(*e, err)
}

// error returns the list as an error, or nil when nothing was collected.
func (e errList) error() error {
	if len(e) != 0 {
		return e
	}

	return nil
}

// Error implements the error interface.
func (e errList) Error() string {
	msgs := make([]string, 0, len(e))
	for _, err := range e {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "\n")
}
// params converts a slice of driver.Value arguments into the []interface{}
// form expected by DB.Execute.
func params(args []driver.Value) []interface{} {
	out := make([]interface{}, len(args))
	for i := range args {
		out[i] = args[i]
	}
	return out
}
var (
	// Shared driver instances; each is registered with database/sql at
	// most once, guarded by the matching sync.Once below.
	fileDriver     = &sqlDriver{dbs: map[string]*driverDB{}}
	fileDriverOnce sync.Once
	memDriver      = &sqlDriver{isMem: true, dbs: map[string]*driverDB{}}
	memDriverOnce  sync.Once
)

// RegisterDriver registers a QL database/sql/driver[0] named "ql". The name
// parameter of
//
//	sql.Open("ql", name)
//
// is interpreted as a path name to a named DB file which will be created if
// not present. The underlying QL database data are persisted on db.Close().
// RegisterDriver can be safely called multiple times, it'll register the
// driver only once.
//
// The name argument can be optionally prefixed by "file://". In that case the
// prefix is stripped before interpreting it as a file name.
//
// The name argument can be optionally prefixed by "memory://". In that case
// the prefix is stripped before interpreting it as a name of a memory-only,
// volatile DB.
//
// [0]: http://golang.org/pkg/database/sql/driver/
func RegisterDriver() {
	fileDriverOnce.Do(func() { sql.Register("ql", fileDriver) })
}

// RegisterMemDriver registers a QL memory database/sql/driver[0] named
// "ql-mem". The name parameter of
//
//	sql.Open("ql-mem", name)
//
// is interpreted as an unique memory DB name which will be created if not
// present. The underlying QL memory database data are not persisted on
// db.Close(). RegisterMemDriver can be safely called multiple times, it'll
// register the driver only once.
//
// [0]: http://golang.org/pkg/database/sql/driver/
func RegisterMemDriver() {
	memDriverOnce.Do(func() { sql.Register("ql-mem", memDriver) })
}
// driverDB pairs an open *DB with its name and a count of the driver
// connections currently sharing it.
type driverDB struct {
	db       *DB
	name     string
	refcount int
}

// newDriverDB returns a driverDB for db with an initial reference count of 1.
func newDriverDB(db *DB, name string) *driverDB {
	return &driverDB{db: db, name: name, refcount: 1}
}

// sqlDriver implements the interface required by database/sql/driver.
type sqlDriver struct {
	dbs   map[string]*driverDB // open DBs keyed by cleaned name; guarded by mu
	isMem bool                 // true for the "ql-mem" driver
	mu    sync.Mutex
}

// lock acquires d.mu and returns the unlock function, enabling the
// `defer d.lock()()` idiom used throughout this file.
func (d *sqlDriver) lock() func() {
	d.mu.Lock()
	return d.mu.Unlock
}
// Open returns a new connection to the database. The name is a string in a
// driver-specific format.
//
// Open may return a cached connection (one previously closed), but doing so is
// unnecessary; the sql package maintains a pool of idle connections for
// efficient re-use.
//
// The returned connection is only used by one goroutine at a time.
func (d *sqlDriver) Open(name string) (driver.Conn, error) {
	if d != fileDriver && d != memDriver {
		return nil, fmt.Errorf("open: unexpected/unsupported instance of driver.Driver: %p", d)
	}

	// A "memory://" name given to the file driver switches to the memory
	// driver; a "file://" prefix is simply stripped.
	switch {
	case d == fileDriver && strings.HasPrefix(name, "file://"):
		name = name[len("file://"):]
	case d == fileDriver && strings.HasPrefix(name, "memory://"):
		d = memDriver
		name = name[len("memory://"):]
	}
	name = filepath.Clean(name)
	if name == "" || name == "." || name == string(os.PathSeparator) {
		return nil, fmt.Errorf("invalid DB name %q", name)
	}

	defer d.lock()()
	db := d.dbs[name]
	if db == nil {
		// First connection to this DB: open (or create) it.
		var err error
		var db0 *DB
		switch d.isMem {
		case true:
			db0, err = OpenMem()
		default:
			db0, err = OpenFile(name, &Options{CanCreate: true})
		}
		if err != nil {
			return nil, err
		}

		db = newDriverDB(db0, name)
		d.dbs[name] = db
		return newDriverConn(d, db), nil
	}

	// The DB is already open: share it and bump its reference count.
	db.refcount++
	return newDriverConn(d, db), nil
}
// driverConn is a connection to a database. It is not used concurrently by
// multiple goroutines.
//
// Conn is assumed to be stateful.
type driverConn struct {
	ctx    *TCtx                    // R/W transaction context; non-nil while a transaction is open
	db     *driverDB                // shared, reference counted DB
	driver *sqlDriver               // driver that created this connection
	stop   map[*driverStmt]struct{} // prepared statements to invalidate on Close
	tnl    int                      // transaction nesting level
}

// newDriverConn returns a new connection bound to ddb.
func newDriverConn(d *sqlDriver, ddb *driverDB) driver.Conn {
	r := &driverConn{
		db:     ddb,
		driver: d,
		stop:   map[*driverStmt]struct{}{},
	}
	return r
}
// Prepare returns a prepared statement, bound to this connection.
func (c *driverConn) Prepare(query string) (driver.Stmt, error) {
	compiled, err := Compile(query)
	if err != nil {
		return nil, err
	}

	// Track the statement so driverConn.Close can invalidate it.
	stmt := &driverStmt{conn: c, stmt: compiled}
	c.stop[stmt] = struct{}{}
	return stmt, nil
}
// Close invalidates and potentially stops any current prepared statements and
// transactions, marking this connection as no longer in use.
//
// Because the sql package maintains a free pool of connections and only calls
// Close when there's a surplus of idle connections, it shouldn't be necessary
// for drivers to do their own connection caching.
func (c *driverConn) Close() error {
	var err errList
	// Invalidate every statement prepared on this connection.
	for s := range c.stop {
		err.append(s.Close())
	}
	defer c.driver.lock()()

	// Drop this connection's reference to the shared DB; the last
	// reference closes the DB itself and forgets it.
	dbs, name := c.driver.dbs, c.db.name
	v := dbs[name]
	v.refcount--
	if v.refcount == 0 {
		err.append(c.db.db.Close())
		delete(dbs, name)
	}
	return err.error()
}
// Begin starts and returns a new transaction.
func (c *driverConn) Begin() (driver.Tx, error) {
	// Lazily create the R/W context shared by all statements of this
	// connection's transactions.
	if c.ctx == nil {
		c.ctx = NewRWCtx()
	}

	if _, _, err := c.db.db.Execute(c.ctx, txBegin); err != nil {
		return nil, err
	}

	c.tnl++ // QL transactions nest; track the depth
	return c, nil
}

// Commit commits the innermost open transaction; the context is released
// when the outermost transaction ends.
func (c *driverConn) Commit() error {
	if c.tnl == 0 || c.ctx == nil {
		return errCommitNotInTransaction
	}

	if _, _, err := c.db.db.Execute(c.ctx, txCommit); err != nil {
		return err
	}

	c.tnl--
	if c.tnl == 0 {
		c.ctx = nil
	}
	return nil
}

// Rollback rolls back the innermost open transaction; the context is
// released when the outermost transaction ends.
func (c *driverConn) Rollback() error {
	if c.tnl == 0 || c.ctx == nil {
		return errRollbackNotInTransaction
	}

	if _, _, err := c.db.db.Execute(c.ctx, txRollback); err != nil {
		return err
	}

	c.tnl--
	if c.tnl == 0 {
		c.ctx = nil
	}
	return nil
}
// Execer is an optional interface that may be implemented by a Conn.
//
// If a Conn does not implement Execer, the sql package's DB.Exec will first
// prepare a query, execute the statement, and then close the statement.
//
// Exec may return driver.ErrSkip.
func (c *driverConn) Exec(query string, args []driver.Value) (driver.Result, error) {
	list, err := Compile(query)
	if err != nil {
		return nil, err
	}

	return driverExec(c.db, c.ctx, list, args)
}

// driverExec executes the compiled statement list against db and converts
// the outcome into a driver.Result. Single DDL statements report
// driver.ResultNoRows.
func driverExec(db *driverDB, ctx *TCtx, list List, args []driver.Value) (driver.Result, error) {
	if _, _, err := db.db.Execute(ctx, list, params(args)...); err != nil {
		return nil, err
	}

	if len(list.l) == 1 {
		switch list.l[0].(type) {
		case *createTableStmt, *dropTableStmt, *alterTableAddStmt,
			*alterTableDropColumnStmt, *truncateTableStmt:
			return driver.ResultNoRows, nil
		}
	}

	r := &driverResult{}
	if ctx != nil {
		// LastInsertID/RowsAffected are only tracked when a transaction
		// context exists.
		r.lastInsertID, r.rowsAffected = ctx.LastInsertID, ctx.RowsAffected
	}
	return r, nil
}
// Queryer is an optional interface that may be implemented by a Conn.
//
// If a Conn does not implement Queryer, the sql package's DB.Query will first
// prepare a query, execute the statement, and then close the statement.
//
// Query may return driver.ErrSkip.
func (c *driverConn) Query(query string, args []driver.Value) (driver.Rows, error) {
	list, err := Compile(query)
	if err != nil {
		return nil, err
	}

	return driverQuery(c.db, c.ctx, list, args)
}

// driverQuery executes the compiled statement list and wraps its single
// result set in a driver.Rows iterator. Zero result sets (no top level
// SELECT) or more than one are errors.
func driverQuery(db *driverDB, ctx *TCtx, list List, args []driver.Value) (driver.Rows, error) {
	rss, _, err := db.db.Execute(ctx, list, params(args)...)
	if err != nil {
		return nil, err
	}

	switch n := len(rss); n {
	case 0:
		return nil, errNoResult
	case 1:
		return newdriverRows(rss[len(rss)-1]), nil
	default:
		return nil, fmt.Errorf("query produced %d result sets, expected only one", n)
	}
}
// driverResult is the result of a query execution.
type driverResult struct {
	lastInsertID int64
	rowsAffected int64
}

// LastInsertId returns the database's auto-generated ID after, for example, an
// INSERT into a table with primary key.
func (r *driverResult) LastInsertId() (int64, error) { // -golint
	return r.lastInsertID, nil
}

// RowsAffected returns the number of rows affected by the query.
func (r *driverResult) RowsAffected() (int64, error) {
	return r.rowsAffected, nil
}
// driverRows is an iterator over an executed query's results.
//
// Rows are produced by a background goroutine started in newdriverRows and
// delivered over rows; closing done tells that goroutine to stop when the
// iterator is abandoned early.
type driverRows struct {
	rs   Recordset
	done chan int
	rows chan interface{}
}

// newdriverRows starts iterating rs in a background goroutine. The goroutine
// sends each row as a []interface{} on r.rows and finishes by sending exactly
// one error value: io.EOF on normal exhaustion, or the iteration error.
func newdriverRows(rs Recordset) *driverRows {
	r := &driverRows{
		rs:   rs,
		done: make(chan int),
		rows: make(chan interface{}, 500),
	}
	go func() {
		err := io.EOF
		if e := r.rs.Do(false, func(data []interface{}) (bool, error) {
			select {
			case r.rows <- data:
				return true, nil
			case <-r.done:
				// Consumer closed the iterator; stop producing.
				return false, nil
			}
		}); e != nil {
			err = e
		}

		// Deliver the terminating error unless the consumer already left.
		select {
		case r.rows <- err:
		case <-r.done:
		}
	}()
	return r
}
// Columns returns the names of the columns. The number of columns of the
// result is inferred from the length of the slice. If a particular column
// name isn't known, an empty string should be returned for that entry.
func (r *driverRows) Columns() []string {
	f, _ := r.rs.Fields()
	return f
}

// Close closes the rows iterator. Closing done also releases the producer
// goroutine started by newdriverRows.
func (r *driverRows) Close() error {
	close(r.done)
	return nil
}
// Next is called to populate the next row of data into the provided slice. The
// provided slice will be the same size as the Columns() are wide.
//
// The dest slice may be populated only with a driver Value type, but excluding
// string. All string values must be converted to []byte.
//
// Next should return io.EOF when there are no more rows.
func (r *driverRows) Next(dest []driver.Value) error {
	select {
	case rx := <-r.rows:
		switch x := rx.(type) {
		case error:
			// The producer goroutine sends io.EOF or the iteration
			// error as its final value.
			return x
		case []interface{}:
			if g, e := len(x), len(dest); g != e {
				return fmt.Errorf("field count mismatch: got %d, need %d", g, e)
			}

			// Map QL values onto the restricted driver.Value set.
			for i, xi := range x {
				switch v := xi.(type) {
				case nil, int64, float64, bool, []byte, time.Time:
					dest[i] = v
				case complex64, complex128, *big.Int, *big.Rat:
					// No driver representation; render as text.
					var buf bytes.Buffer
					fmt.Fprintf(&buf, "%v", v)
					dest[i] = buf.Bytes()
				case int8:
					dest[i] = int64(v)
				case int16:
					dest[i] = int64(v)
				case int32:
					dest[i] = int64(v)
				case int:
					dest[i] = int64(v)
				case uint8:
					dest[i] = int64(v)
				case uint16:
					dest[i] = int64(v)
				case uint32:
					dest[i] = int64(v)
				case uint64:
					dest[i] = int64(v)
				case uint:
					dest[i] = int64(v)
				case time.Duration:
					dest[i] = int64(v)
				case string:
					dest[i] = []byte(v)
				default:
					return fmt.Errorf("internal error 004")
				}
			}
			return nil
		default:
			return fmt.Errorf("internal error 005")
		}
	case <-r.done:
		// Iterator was closed before the next row arrived.
		return io.EOF
	}
}
// driverStmt is a prepared statement. It is bound to a driverConn and not used
// by multiple goroutines concurrently.
type driverStmt struct {
	conn *driverConn
	stmt List
}

// Close closes the statement.
//
// As of Go 1.1, a Stmt will not be closed if it's in use by any queries.
func (s *driverStmt) Close() error {
	// Deregister from the owning connection so driverConn.Close does not
	// close this statement again.
	delete(s.conn.stop, s)
	return nil
}

// NumInput returns the number of placeholder parameters.
//
// If NumInput returns >= 0, the sql package will sanity check argument counts
// from callers and return errors to the caller before the statement's Exec or
// Query methods are called.
//
// NumInput may also return -1, if the driver doesn't know its number of
// placeholders. In that case, the sql package will not sanity check Exec or
// Query argument counts.
func (s *driverStmt) NumInput() int {
	// The parameter count is only known for single-statement lists.
	if x := s.stmt; len(x.l) == 1 {
		return x.params
	}

	return -1
}

// Exec executes a query that doesn't return rows, such as an INSERT or UPDATE.
func (s *driverStmt) Exec(args []driver.Value) (driver.Result, error) {
	c := s.conn
	return driverExec(c.db, c.ctx, s.stmt, args)
}

// Query executes a query that may return rows, such as a SELECT.
func (s *driverStmt) Query(args []driver.Value) (driver.Rows, error) {
	c := s.conn
	return driverQuery(c.db, c.ctx, s.stmt, args)
}

61
vendor/github.com/cznic/ql/driver/driver.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package driver registers a QL sql/driver named "ql" and a memory driver named "ql-mem".
See also [0], [1] and [3].
Usage
A skeleton program using ql/driver.
package main
import (
"database/sql"
_ "github.com/cznic/ql/driver"
)
func main() {
...
// Disk file DB
db, err := sql.Open("ql", "ql.db") // [2]
// alternatively
db, err := sql.Open("ql", "file://ql.db")
// and/or
// RAM DB
mdb, err := sql.Open("ql-mem", "mem.db")
// alternatively
mdb, err := sql.Open("ql", "memory://mem.db")
if err != nil {
log.Fatal(err)
}
// Use db/mdb here
...
}
This package exports nothing.
Links
Referenced from above:
[0]: http://godoc.org/github.com/cznic/ql
[1]: http://golang.org/pkg/database/sql/
[2]: http://golang.org/pkg/database/sql/#Open
[3]: http://golang.org/pkg/database/sql/driver
*/
package driver

import "github.com/cznic/ql"

// init registers the file-backed "ql" driver and the in-memory "ql-mem"
// driver with database/sql when this package is imported for side effects.
func init() {
	ql.RegisterDriver()
	ql.RegisterMemDriver()
}

18
vendor/github.com/cznic/ql/errors.go generated vendored Normal file
View File

@@ -0,0 +1,18 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ql

import (
	"errors"
)

// Sentinel errors reported by the engine. The messages name the SQL
// statement or operation that triggered them.
var (
	errBeginTransNoCtx          = errors.New("BEGIN TRANSACTION: Must use R/W context, have nil")
	errCommitNotInTransaction   = errors.New("COMMIT: Not in transaction")
	errDivByZero                = errors.New("division by zero")
	errIncompatibleDBFormat     = errors.New("incompatible DB format")
	errNoDataForHandle          = errors.New("read: no data for handle")
	errRollbackNotInTransaction = errors.New("ROLLBACK: Not in transaction")
)

2805
vendor/github.com/cznic/ql/etc.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

4025
vendor/github.com/cznic/ql/expr.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1323
vendor/github.com/cznic/ql/file.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

338
vendor/github.com/cznic/ql/helper/helper.go generated vendored Normal file
View File

@@ -0,0 +1,338 @@
// +build ignore
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bufio"
"flag"
"fmt"
"io"
"log"
"os"
)
// t enumerates the value types the code generator emits coercions for. The
// order is significant: genCoerce1 iterates from idealComplex up to the qEnd
// sentinel.
type t int

const (
	qNil t = iota
	idealComplex
	idealFloat
	idealInt
	idealRune
	idealUint
	qBool
	qComplex64
	qComplex128
	qFloat32
	qFloat64
	qInt8
	qInt16
	qInt32
	qInt64
	qString
	qUint8
	qUint16
	qUint32
	qUint64
	qBigInt
	qBigRat
	qTime
	qDuration
	qEnd // sentinel: one past the last real type
)
// String returns the Go source spelling of the type n. It panics for qEnd or
// any value outside the enumeration.
func (n t) String() string {
	names := [...]string{
		qNil:         "nil",
		idealComplex: "idealComplex",
		idealFloat:   "idealFloat",
		idealInt:     "idealInt",
		idealRune:    "idealRune",
		idealUint:    "idealUint",
		qBool:        "bool",
		qComplex64:   "complex64",
		qComplex128:  "complex128",
		qFloat32:     "float32",
		qFloat64:     "float64",
		qInt8:        "int8",
		qInt16:       "int16",
		qInt32:       "int32",
		qInt64:       "int64",
		qString:      "string",
		qUint8:       "uint8",
		qUint16:      "uint16",
		qUint32:      "uint32",
		qUint64:      "uint64",
		qBigInt:      "*big.Int",
		qBigRat:      "*big.Rat",
		qTime:        "time.Time",
		qDuration:    "time.Duration",
	}
	if n < qNil || n >= qEnd {
		panic("internal error 046")
	}
	return names[n]
}
// coerceIdealComplex returns the source snippet coercing an ideal complex
// constant x to typ, or "" when no coercion exists.
func coerceIdealComplex(typ t) string {
	if typ == qComplex64 || typ == qComplex128 {
		return fmt.Sprintf("return %s(x)\n", typ)
	}
	return ""
}
// coerceIdealFloat returns the source snippet coercing an ideal float
// constant x to typ, or "" when no coercion exists.
//
// Fixes: the trailing "return \"\"" after the switch was unreachable (every
// arm returns) and the qBigRat arm used fmt.Sprintf with no formatting verbs;
// a plain string literal is equivalent.
func coerceIdealFloat(typ t) string {
	switch typ {
	case idealComplex:
		return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
	case qComplex64:
		return fmt.Sprintf("return %s(complex(float32(x), 0))\n", typ)
	case qComplex128:
		return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
	case idealFloat, qFloat32, qFloat64:
		return fmt.Sprintf("return %s(float64(x))\n", typ)
	case qBigRat:
		return "return big.NewRat(1, 1).SetFloat64(float64(x))\n"
	default:
		return ""
	}
}
// coerceIdealInt returns the source snippet coercing an ideal int constant x
// to typ, range-checked where narrowing, or "" when no coercion exists.
//
// Fixes: the trailing "return \"\"" after the switch was unreachable and the
// qBigInt/qBigRat/qDuration arms used fmt.Sprintf with no formatting verbs;
// plain string literals are equivalent.
func coerceIdealInt(typ t) string {
	switch typ {
	case idealComplex:
		return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
	case qComplex64:
		return fmt.Sprintf("return %s(complex(float32(x), 0))\n", typ)
	case qComplex128:
		return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
	case idealFloat, idealInt, qFloat32, qFloat64, qInt64:
		return fmt.Sprintf("return %s(int64(x))\n", typ)
	case idealUint:
		return fmt.Sprintf("if x >= 0 { return %s(int64(x)) }\n", typ)
	case qInt8:
		return fmt.Sprintf("if x >= math.MinInt8 && x<= math.MaxInt8 { return %s(int64(x)) }\n", typ)
	case qInt16:
		return fmt.Sprintf("if x >= math.MinInt16 && x<= math.MaxInt16 { return %s(int64(x)) }\n", typ)
	case qInt32:
		return fmt.Sprintf("if x >= math.MinInt32 && x<= math.MaxInt32 { return %s(int64(x)) }\n", typ)
	case qUint8:
		return fmt.Sprintf("if x >= 0 && x<= math.MaxUint8 { return %s(int64(x)) }\n", typ)
	case qUint16:
		return fmt.Sprintf("if x >= 0 && x<= math.MaxUint16 { return %s(int64(x)) }\n", typ)
	case qUint32:
		return fmt.Sprintf("if x >= 0 && x<= math.MaxUint32 { return %s(int64(x)) }\n", typ)
	case qUint64:
		return fmt.Sprintf("if x >= 0 { return %s(int64(x)) }\n", typ)
	case qBigInt:
		return "return big.NewInt(int64(x))\n"
	case qBigRat:
		return "return big.NewRat(1, 1).SetInt64(int64(x))\n"
	case qDuration:
		return "return time.Duration(int64(x))\n"
	default:
		return ""
	}
}
// coerceIdealRune returns the source snippet coercing an ideal rune constant
// x to typ, or "" when no coercion exists. Runes always fit every numeric
// type, so no range checks are emitted.
//
// Fixes: the trailing "return \"\"" after the switch was unreachable and the
// qBigInt/qBigRat/qDuration arms used fmt.Sprintf with no formatting verbs.
func coerceIdealRune(typ t) string {
	switch typ {
	case idealComplex:
		return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
	case qComplex64:
		return fmt.Sprintf("return %s(complex(float32(x), 0))\n", typ)
	case qComplex128:
		return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
	case idealFloat, idealInt, idealRune, idealUint, qFloat32, qFloat64, qInt8, qInt16, qInt32, qInt64, qUint8, qUint16, qUint32, qUint64:
		return fmt.Sprintf("return %s(int64(x))\n", typ)
	case qBigInt:
		return "return big.NewInt(int64(x))\n"
	case qBigRat:
		return "return big.NewRat(1, 1).SetInt64(int64(x))\n"
	case qDuration:
		return "return time.Duration(int64(x))\n"
	default:
		return ""
	}
}
// coerceIdealUint returns the source snippet coercing an ideal uint constant
// x to typ, range-checked where narrowing, or "" when no coercion exists.
//
// Fixes: the trailing "return \"\"" after the switch was unreachable and the
// qBigInt/qBigRat/qDuration arms used fmt.Sprintf with no formatting verbs.
// NOTE(review): the emitted "x >= 0 &&" guards for qUint8/16/32 are always
// true for an unsigned x; preserved verbatim so the generated output does not
// change — confirm before simplifying.
func coerceIdealUint(typ t) string {
	switch typ {
	case idealComplex:
		return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
	case qComplex64:
		return fmt.Sprintf("return %s(complex(float32(x), 0))\n", typ)
	case qComplex128:
		return fmt.Sprintf("return %s(complex(float64(x), 0))\n", typ)
	case idealFloat, idealUint, qFloat32, qFloat64, qUint64:
		return fmt.Sprintf("return %s(uint64(x))\n", typ)
	case idealInt:
		return fmt.Sprintf("if x <= math.MaxInt64 { return %s(int64(x)) }\n", typ)
	case qInt8:
		return fmt.Sprintf("if x <= math.MaxInt8 { return %s(int64(x)) }\n", typ)
	case qInt16:
		return fmt.Sprintf("if x<= math.MaxInt16 { return %s(int64(x)) }\n", typ)
	case qInt32:
		return fmt.Sprintf("if x<= math.MaxInt32 { return %s(int64(x)) }\n", typ)
	case qInt64:
		return fmt.Sprintf("if x<= math.MaxInt64 { return %s(int64(x)) }\n", typ)
	case qUint8:
		return fmt.Sprintf("if x >= 0 && x<= math.MaxUint8 { return %s(int64(x)) }\n", typ)
	case qUint16:
		return fmt.Sprintf("if x >= 0 && x<= math.MaxUint16 { return %s(int64(x)) }\n", typ)
	case qUint32:
		return fmt.Sprintf("if x >= 0 && x<= math.MaxUint32 { return %s(int64(x)) }\n", typ)
	case qBigInt:
		return "return big.NewInt(0).SetUint64(uint64(x))\n"
	case qBigRat:
		return "return big.NewRat(1, 1).SetInt(big.NewInt(0).SetUint64(uint64(x)))\n"
	case qDuration:
		return "if x <= math.MaxInt64 { return time.Duration(int64(x)) }\n"
	default:
		return ""
	}
}
// genCoerce1 emits one outer "case in:" arm of the generated coerce1 type
// switch. For each candidate target type it writes either the snippet
// produced by f, or a commented-out case when f yields no coercion.
func genCoerce1(w io.Writer, in t, f func(out t) string) {
	fmt.Fprintf(w, "\tcase %s:\n", in)
	fmt.Fprintf(w, "\t\tswitch otherVal.(type) {\n")
	for out := idealComplex; out < qEnd; out++ {
		code := f(out)
		if code == "" {
			fmt.Fprintf(w, "\t\t//case %s:\n", out)
			continue
		}
		fmt.Fprintf(w, "\t\tcase %s:\n", out)
		fmt.Fprintf(w, "\t\t\t%s", code)
	}
	fmt.Fprintf(w, "\t\t}\n") // switch
}
// genCoerce writes the generated coerce1 function to w: a type switch over
// inVal's ideal-constant type, with each arm produced by the coerce* helpers
// via genCoerce1.
func genCoerce(w io.Writer) {
	fmt.Fprintf(w,
		`
func coerce1(inVal, otherVal interface{}) (coercedInVal interface{}) {
	coercedInVal = inVal
	if otherVal == nil {
		return
	}
	switch x := inVal.(type) {
	case nil:
		return
`)
	genCoerce1(w, idealComplex, coerceIdealComplex)
	genCoerce1(w, idealFloat, coerceIdealFloat)
	genCoerce1(w, idealInt, coerceIdealInt)
	genCoerce1(w, idealRune, coerceIdealRune)
	genCoerce1(w, idealUint, coerceIdealUint)
	fmt.Fprintf(w, "\t}\n")         // switch
	fmt.Fprintf(w, "\treturn\n}\n") // func
}
// main generates coerce.go: it writes the generated-file header, the coerce
// dispatcher and the coerce1 type switch, either to stdout or to the file
// named by -o (which must not already exist).
func main() {
	ofn := flag.String("o", "", "")
	flag.Parse()
	// Refuse to clobber an existing output file.
	_, err := os.Stat(*ofn)
	if err == nil {
		log.Fatalf("%s exists", *ofn)
	}
	w := bufio.NewWriter(os.Stdout)
	if s := *ofn; s != "" {
		f, err := os.Create(s)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		w = bufio.NewWriter(f) // Flush (deferred below) runs before Close
	}
	defer w.Flush()
	fmt.Fprintf(w, `// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CAUTION: This file was generated automatically by
//
//	$ go run helper/helper.go -o coerce.go
//
// DO NOT EDIT!
package ql
import (
	"math"
	"math/big"
	"reflect"
	"time"
)
func coerce(a, b interface{}) (x, y interface{}) {
	if reflect.TypeOf(a) == reflect.TypeOf(b) {
		return a, b
	}
	switch a.(type) {
	case idealComplex, idealFloat, idealInt, idealRune, idealUint:
		switch b.(type) {
		case idealComplex, idealFloat, idealInt, idealRune, idealUint:
			x, y = coerce1(a, b), b
			if reflect.TypeOf(x) == reflect.TypeOf(y) {
				return
			}
			return a, coerce1(b, a)
		default:
			return coerce1(a, b), b
		}
	default:
		switch b.(type) {
		case idealComplex, idealFloat, idealInt, idealRune, idealUint:
			return a, coerce1(b, a)
		default:
			return a, b
		}
	}
}
`)
	genCoerce(w)
}

302
vendor/github.com/cznic/ql/httpfs.go generated vendored Normal file
View File

@@ -0,0 +1,302 @@
// Copyright (c) 2014 ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ql
import (
"fmt"
"io"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/cznic/mathutil"
)
// Compile-time assertions that the HTTP filesystem types implement the
// interfaces they are served through.
var (
	_ http.FileSystem = (*HTTPFS)(nil)
	_ http.File       = (*HTTPFile)(nil)
	_ os.FileInfo     = (*HTTPFile)(nil)
	_ os.FileInfo     = (*dirEntry)(nil)
)
// dirEntry is a synthetic os.FileInfo for a directory derived from stored
// pathnames; the string value is the directory's name. The size is unknown
// (-1) and the modification time is the zero time.
type dirEntry string

func (d dirEntry) Name() string       { return string(d) }
func (d dirEntry) Size() int64        { return -1 }
func (d dirEntry) Mode() os.FileMode  { return os.ModeDir }
func (d dirEntry) ModTime() time.Time { return time.Time{} }
func (d dirEntry) IsDir() bool        { return true }
func (d dirEntry) Sys() interface{}   { return interface{}(nil) }
// A HTTPFile is returned by the HTTPFS's Open method and can be served by the
// http.FileServer implementation.
type HTTPFile struct {
	closed     bool          // Close was called; further operations fail with os.ErrInvalid
	content    []byte        // file data (unset for directories)
	dirEntries []os.FileInfo // remaining Readdir entries (directories only)
	isFile     bool          // regular file vs. directory
	name       string        // cleaned, rooted path of the entry
	off        int           // current read offset into content
	sz         int           // NOTE(review): appears unused in this file — possibly vestigial
}
// Close implements http.File. Closing an already-closed file reports
// os.ErrInvalid.
func (f *HTTPFile) Close() error {
	if !f.closed {
		f.closed = true
		return nil
	}
	return os.ErrInvalid
}
// IsDir implements os.FileInfo. An HTTPFile is a directory iff it is not a file.
func (f *HTTPFile) IsDir() bool { return !f.isFile }
// Mode implements os.FileInfo. Directories are reported with os.ModeDir set;
// regular files as read-only (0444).
//
// Fix: the branches were inverted — files returned os.ModeDir and directories
// returned 0444, contradicting IsDir/Size, so Mode().IsDir() was wrong for
// every entry.
func (f *HTTPFile) Mode() os.FileMode {
	if f.isFile {
		return os.FileMode(0444)
	}
	return os.ModeDir
}
// ModTime implements os.FileInfo. Modification times are not tracked, so the
// zero time is returned.
func (f *HTTPFile) ModTime() time.Time {
	return time.Time{}
}
// Name implements os.FileInfo. It returns the final element of the entry's path.
func (f *HTTPFile) Name() string { return path.Base(f.name) }
// Size implements os.FileInfo. Directories report -1 (unknown size); files
// report the length of their content.
func (f *HTTPFile) Size() int64 {
	if f.isFile {
		return int64(len(f.content))
	}
	return -1
}
// Stat implements http.File. The file acts as its own os.FileInfo.
func (f *HTTPFile) Stat() (os.FileInfo, error) { return f, nil }

// Sys implements os.FileInfo. There is no underlying system data source.
func (f *HTTPFile) Sys() interface{} { return interface{}(nil) }
// Readdir implements http.File.
//
// With count <= 0 all remaining entries are returned in one slice and the
// internal list is emptied; with count > 0 up to count entries are returned
// per call, and io.EOF once the list is exhausted.
func (f *HTTPFile) Readdir(count int) ([]os.FileInfo, error) {
	if f.isFile {
		return nil, fmt.Errorf("not a directory: %s", f.name)
	}
	if count <= 0 {
		r := f.dirEntries
		f.dirEntries = f.dirEntries[:0] // mark everything as consumed
		return r, nil
	}
	rq := mathutil.Min(count, len(f.dirEntries))
	r := f.dirEntries[:rq]
	f.dirEntries = f.dirEntries[rq:]
	if len(r) != 0 {
		return r, nil
	}
	return nil, io.EOF
}
// Read implements http.File. It copies as much of the remaining content as
// fits into b and reports io.EOF when nothing was copied.
func (f *HTTPFile) Read(b []byte) (int, error) {
	if f.closed {
		return 0, os.ErrInvalid
	}
	rest := f.content[f.off:]
	cnt := copy(b, rest)
	f.off += cnt
	if cnt == 0 {
		return 0, io.EOF
	}
	return cnt, nil
}
// Seek implements http.File.
//
// NOTE(review): the whence-2 (from end) case interprets offset as a positive
// distance back from the end (pos = len - offset), the opposite sign
// convention from io.Seeker; and the whence-0 overflow guard tests
// f.off+offset rather than offset alone. Both quirks are preserved here —
// confirm against upstream before changing.
func (f *HTTPFile) Seek(offset int64, whence int) (int64, error) {
	if f.closed {
		return 0, os.ErrInvalid
	}
	if offset < 0 {
		return int64(f.off), fmt.Errorf("cannot seek before start of file")
	}
	switch whence {
	case 0: // absolute offset
		noff := int64(f.off) + offset
		if noff > mathutil.MaxInt {
			return int64(f.off), fmt.Errorf("seek target overflows int: %d", noff)
		}
		// Clamp to the end of content; io.EOF signals the clamp happened.
		f.off = mathutil.Min(int(offset), len(f.content))
		if f.off == int(offset) {
			return offset, nil
		}
		return int64(f.off), io.EOF
	case 1: // relative to the current offset
		noff := int64(f.off) + offset
		if noff > mathutil.MaxInt {
			return int64(f.off), fmt.Errorf("seek target overflows int: %d", noff)
		}
		off := mathutil.Min(f.off+int(offset), len(f.content))
		if off == f.off+int(offset) {
			f.off = off
			return int64(off), nil
		}
		f.off = off
		return int64(off), io.EOF
	case 2: // relative to the end (see NOTE above)
		noff := int64(f.off) - offset
		if noff < 0 {
			return int64(f.off), fmt.Errorf("cannot seek before start of file")
		}
		f.off = len(f.content) - int(offset)
		return int64(f.off), nil
	default:
		return int64(f.off), fmt.Errorf("seek: invalid whence %d", whence)
	}
}
// HTTPFS implements a http.FileSystem backed by data in a DB.
type HTTPFS struct {
	db       *DB  // the backing database
	dir, get List // compiled queries: list paths under a prefix / fetch content by exact path
}
// NewHTTPFS returns a http.FileSystem backed by a result record set of query.
// The record set provides two mandatory fields: path and content (the field
// names are case sensitive). Type of name must be string and type of content
// must be blob (ie. []byte). Field 'path' value is the "file" pathname, which
// must be rooted; and field 'content' value is its "data".
func (db *DB) NewHTTPFS(query string) (*HTTPFS, error) {
	// Validate the caller's query before embedding it in derived queries.
	if _, err := Compile(query); err != nil {
		return nil, err
	}
	// dir lists every path sharing a directory prefix; get fetches one file's content.
	dir, err := Compile(fmt.Sprintf("SELECT path FROM (%s) WHERE hasPrefix(path, $1)", query))
	if err != nil {
		return nil, err
	}
	get, err := Compile(fmt.Sprintf("SELECT content FROM (%s) WHERE path == $1", query))
	if err != nil {
		return nil, err
	}
	return &HTTPFS{db: db, dir: dir, get: get}, nil
}
// Open implements http.FileSystem. The name parameter represents a file path.
// The elements in a file path are separated by slash ('/', U+002F) characters,
// regardless of host operating system convention.
func (f *HTTPFS) Open(name string) (http.File, error) {
	// Reject OS-specific separators and NUL in the requested path.
	if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
		strings.Contains(name, "\x00") {
		return nil, fmt.Errorf("invalid character in file path: %q", name)
	}
	name = path.Clean("/" + name)
	// First try to open name as a regular file.
	rs, _, err := f.db.Execute(nil, f.get, name)
	if err != nil {
		return nil, err
	}
	n := 0
	var fdata []byte
	if err = rs[0].Do(false, func(data []interface{}) (more bool, err error) {
		switch n {
		case 0:
			var ok bool
			fdata, ok = data[0].([]byte)
			if !ok {
				return false, fmt.Errorf("open: expected blob, got %T", data[0])
			}
			n++
			return true, nil
		default:
			// Paths are expected to be unique in the backing record set.
			return false, fmt.Errorf("open: more than one result was returned for %s", name)
		}
	}); err != nil {
		return nil, err
	}
	if n == 1 { // file found
		return &HTTPFile{name: name, isFile: true, content: fdata}, nil
	}
	// Not a file: treat name as a directory and synthesize its listing.
	// NOTE(review): filepath.Separator is used here even though the paths are
	// documented as slash-separated — verify the intended behavior on Windows.
	dirName := name
	if dirName[len(dirName)-1] != filepath.Separator {
		dirName += string(filepath.Separator)
	}
	// Open("/a/b"): {/a/b/c.x,/a/b/d.x,/a/e.x,/a/b/f/g.x} -> {c.x,d.x,f}
	rs, _, err = f.db.Execute(nil, f.dir, dirName)
	if err != nil {
		return nil, err
	}
	n = 0
	r := &HTTPFile{name: dirName}
	m := map[string]bool{} // subdirectories already emitted
	x := len(dirName)
	if err = rs[0].Do(false, func(data []interface{}) (more bool, err error) {
		n++
		switch name := data[0].(type) {
		case string:
			if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
				strings.Contains(name, "\x00") {
				return false, fmt.Errorf("invalid character in file path: %q", name)
			}
			name = path.Clean("/" + name)
			rest := name[x:] // path relative to the opened directory
			parts := strings.Split(rest, "/")
			if len(parts) == 0 {
				return true, nil
			}
			nm := parts[0]
			switch len(parts) {
			case 1: // file
				r.dirEntries = append(r.dirEntries, &HTTPFile{isFile: true, name: nm})
			default: // directory
				if !m[nm] {
					r.dirEntries = append(r.dirEntries, dirEntry(nm))
				}
				m[nm] = true
			}
			return true, nil
		default:
			return false, fmt.Errorf("expected string path, got %T(%v)", name, name)
		}
	}); err != nil {
		return nil, err
	}
	if n != 0 {
		return r, nil
	}
	return nil, os.ErrNotExist
}

625
vendor/github.com/cznic/ql/introspection.go generated vendored Normal file
View File

@@ -0,0 +1,625 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ql
import (
"bytes"
"fmt"
"go/ast"
"reflect"
"strings"
"sync"
)
// schemaCache memoizes the StructInfo computed for each reflect.Type;
// access is guarded by schemaMu.
var (
	schemaCache = map[reflect.Type]*StructInfo{}
	schemaMu    sync.RWMutex
)
// StructInfo describes a struct type. An instance of StructInfo obtained from
// StructSchema is shared and must not be mutated. That includes the values
// pointed to by the elements of Fields and Indices.
type StructInfo struct {
	Fields  []*StructField // Fields describe the considered fields of a struct type.
	HasID   bool           // Whether the struct has a considered field named ID of type int64.
	Indices []*StructIndex // Indices describe indices defined by the index or uindex ql tags.
	IsPtr   bool           // Whether the StructInfo was derived from a pointer to a struct.
}

// StructIndex describes an index defined by the ql tag index or uindex.
type StructIndex struct {
	ColumnName string // Name of the column the index is on.
	Name       string // Name of the index.
	Unique     bool   // Whether the index is unique.
}

// StructField describes a considered field of a struct type. For pointer
// fields, ReflectType is the dereferenced element type; IsPtr and ZeroPtr
// record the original pointer nature.
type StructField struct {
	Index         int               // Index is the index of the field for reflect.Value.Field.
	IsID          bool              // Whether the field corresponds to record id().
	IsPtr         bool              // Whether the field is a pointer type.
	MarshalType   reflect.Type      // The reflect.Type a field must be converted to when marshaling or nil when it is assignable directly. (Field->value)
	Name          string            // Field name or value of the name tag (like in `ql:"name foo"`).
	ReflectType   reflect.Type      // The reflect.Type of the field.
	Tags          map[string]string // QL tags of this field. (`ql:"a, b c, d"` -> {"a": "", "b": "c", "d": ""})
	Type          Type              // QL type of the field.
	UnmarshalType reflect.Type      // The reflect.Type a value must be converted to when unmarshaling or nil when it is assignable directly. (Field<-value)
	ZeroPtr       reflect.Value     // The reflect.Zero value of the field if it's a pointer type.
}
// check verifies that the field's type converts to and from the QL
// representative value v, recording the conversion types (MarshalType,
// UnmarshalType) when a conversion, rather than direct assignment, is needed.
//
// Fix: both error messages used %T with a string argument (the result of
// Name()), so they always printed "string" instead of the type's name; %s is
// the correct verb.
func (s *StructField) check(v interface{}) error {
	t := reflect.TypeOf(v)
	if !s.ReflectType.AssignableTo(t) {
		if !s.ReflectType.ConvertibleTo(t) {
			return fmt.Errorf("type %s (%v) cannot be converted to %s", s.ReflectType.Name(), s.ReflectType.Kind(), t.Name())
		}
		s.MarshalType = t
	}
	if !t.AssignableTo(s.ReflectType) {
		if !t.ConvertibleTo(s.ReflectType) {
			return fmt.Errorf("type %s (%v) cannot be converted to %s", t.Name(), t.Kind(), s.ReflectType.Name())
		}
		s.UnmarshalType = s.ReflectType
	}
	return nil
}
// parseTag splits a QL struct tag on commas into a key/value map. Each
// comma-separated item is either a bare key ("a" -> {"a": ""}) or a key and
// value separated by the first space ("b c" -> {"b": "c"}).
func parseTag(s string) map[string]string {
	tags := make(map[string]string)
	for _, part := range strings.Split(s, ",") {
		part = strings.TrimSpace(part)
		if i := strings.IndexRune(part, ' '); i >= 0 {
			tags[part[:i]] = part[i+1:]
		} else {
			tags[part] = ""
		}
	}
	return tags
}
// StructSchema returns StructInfo for v which must be a struct instance or a
// pointer to a struct. The info is computed only once for every type.
// Subsequent calls to StructSchema for the same type return a cached
// StructInfo.
//
// Note: The returned StructSchema is shared and must be not mutated, including
// any other data structures it may point to.
func StructSchema(v interface{}) (*StructInfo, error) {
	if v == nil {
		return nil, fmt.Errorf("cannot derive schema for %T(%v)", v, v)
	}
	typ := reflect.TypeOf(v)
	// Fast path: schema already computed for this exact type.
	schemaMu.RLock()
	if r, ok := schemaCache[typ]; ok {
		schemaMu.RUnlock()
		return r, nil
	}
	schemaMu.RUnlock()
	var schemaPtr bool
	t := typ
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
		schemaPtr = true
	}
	if k := t.Kind(); k != reflect.Struct {
		return nil, fmt.Errorf("cannot derive schema for type %T (%v)", v, k)
	}
	r := &StructInfo{IsPtr: schemaPtr}
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fn := f.Name
		if !ast.IsExported(fn) {
			continue
		}
		tags := parseTag(f.Tag.Get("ql"))
		if _, ok := tags["-"]; ok {
			// `ql:"-"` excludes the field from the schema.
			continue
		}
		if s := tags["name"]; s != "" {
			fn = s
		}
		if fn == "ID" && f.Type.Kind() == reflect.Int64 {
			r.HasID = true
		}
		// Collect index/uindex tags; the two are mutually exclusive.
		var ix, unique bool
		var xn string
		xfn := fn
		if s := tags["index"]; s != "" {
			if _, ok := tags["uindex"]; ok {
				return nil, fmt.Errorf("both index and uindex in QL struct tag")
			}
			ix, xn = true, s
		} else if s := tags["uindex"]; s != "" {
			if _, ok := tags["index"]; ok {
				return nil, fmt.Errorf("both index and uindex in QL struct tag")
			}
			ix, unique, xn = true, true, s
		}
		if ix {
			if fn == "ID" && r.HasID {
				// An index on the ID field is an index on id().
				xfn = "id()"
			}
			r.Indices = append(r.Indices, &StructIndex{Name: xn, ColumnName: xfn, Unique: unique})
		}
		sf := &StructField{Index: i, Name: fn, Tags: tags, Type: Type(-1), ReflectType: f.Type}
		fk := sf.ReflectType.Kind()
		if fk == reflect.Ptr {
			// Pointer fields are described by their element type.
			sf.IsPtr = true
			sf.ZeroPtr = reflect.Zero(sf.ReflectType)
			sf.ReflectType = sf.ReflectType.Elem()
			fk = sf.ReflectType.Kind()
		}
		// Map the Go kind to a QL type and verify two-way convertibility.
		switch fk {
		case reflect.Bool:
			sf.Type = Bool
			if err := sf.check(false); err != nil {
				return nil, err
			}
		case reflect.Int, reflect.Uint:
			return nil, fmt.Errorf("only integers of fixed size can be used to derive a schema: %v", fk)
		case reflect.Int8:
			sf.Type = Int8
			if err := sf.check(int8(0)); err != nil {
				return nil, err
			}
		case reflect.Int16:
			if err := sf.check(int16(0)); err != nil {
				return nil, err
			}
			sf.Type = Int16
		case reflect.Int32:
			if err := sf.check(int32(0)); err != nil {
				return nil, err
			}
			sf.Type = Int32
		case reflect.Int64:
			if sf.ReflectType.Name() == "Duration" && sf.ReflectType.PkgPath() == "time" {
				sf.Type = Duration
				break
			}
			sf.Type = Int64
			if err := sf.check(int64(0)); err != nil {
				return nil, err
			}
		case reflect.Uint8:
			sf.Type = Uint8
			if err := sf.check(uint8(0)); err != nil {
				return nil, err
			}
		case reflect.Uint16:
			sf.Type = Uint16
			if err := sf.check(uint16(0)); err != nil {
				return nil, err
			}
		case reflect.Uint32:
			sf.Type = Uint32
			if err := sf.check(uint32(0)); err != nil {
				return nil, err
			}
		case reflect.Uint64:
			sf.Type = Uint64
			if err := sf.check(uint64(0)); err != nil {
				return nil, err
			}
		case reflect.Float32:
			sf.Type = Float32
			if err := sf.check(float32(0)); err != nil {
				return nil, err
			}
		case reflect.Float64:
			sf.Type = Float64
			if err := sf.check(float64(0)); err != nil {
				return nil, err
			}
		case reflect.Complex64:
			sf.Type = Complex64
			if err := sf.check(complex64(0)); err != nil {
				return nil, err
			}
		case reflect.Complex128:
			sf.Type = Complex128
			if err := sf.check(complex128(0)); err != nil {
				return nil, err
			}
		case reflect.Slice:
			sf.Type = Blob
			if err := sf.check([]byte(nil)); err != nil {
				return nil, err
			}
		case reflect.Struct:
			// Only big.Int, big.Rat and time.Time struct types are supported.
			switch sf.ReflectType.PkgPath() {
			case "math/big":
				switch sf.ReflectType.Name() {
				case "Int":
					sf.Type = BigInt
				case "Rat":
					sf.Type = BigRat
				}
			case "time":
				switch sf.ReflectType.Name() {
				case "Time":
					sf.Type = Time
				}
			}
		case reflect.String:
			sf.Type = String
			if err := sf.check(""); err != nil {
				return nil, err
			}
		}
		if sf.Type < 0 {
			return nil, fmt.Errorf("cannot derive schema for type %s (%v)", sf.ReflectType.Name(), fk)
		}
		sf.IsID = fn == "ID" && r.HasID
		r.Fields = append(r.Fields, sf)
	}
	schemaMu.Lock()
	schemaCache[typ] = r
	if t != typ {
		// Also cache the non-pointer variant of the same struct type.
		r2 := *r
		r2.IsPtr = false
		schemaCache[t] = &r2
	}
	schemaMu.Unlock()
	return r, nil
}
// MustStructSchema is like StructSchema but panics on error. It simplifies
// safe initialization of global variables holding StructInfo.
//
// MustStructSchema is safe for concurrent use by multiple goroutines.
func MustStructSchema(v interface{}) *StructInfo {
	si, err := StructSchema(v)
	if err == nil {
		return si
	}
	panic(err)
}
// SchemaOptions amend the result of Schema.
type SchemaOptions struct {
	// Don't wrap the CREATE statement(s) in a transaction.
	NoTransaction bool
	// Don't insert the IF NOT EXISTS clause in the CREATE statement(s).
	NoIfNotExists bool
	// Do not strip the "pkg." part from type name "pkg.Type", produce
	// "pkg_Type" table name instead. Applies only when no name is passed
	// to Schema().
	KeepPrefix bool
}

// zeroSchemaOptions is substituted when the caller passes nil options.
var zeroSchemaOptions SchemaOptions
// Schema returns a CREATE TABLE/INDEX statement(s) for a table derived from a
// struct or an error, if any. The table is named using the name parameter. If
// name is an empty string then the type name of the struct is used while non
// conforming characters are replaced by underscores. Value v can be also a
// pointer to a struct.
//
// Every considered struct field type must be one of the QL types or a type
// convertible to string, bool, int*, uint*, float* or complex* type or pointer
// to such type. Integers with a width dependent on the architecture can not be
// used. Only exported fields are considered. If an exported field QL tag
// contains "-" (`ql:"-"`) then such field is not considered. A field with name
// ID, having type int64, corresponds to id() - and is thus not a part of the
// CREATE statement. A field QL tag containing "index name" or "uindex name"
// triggers additionally creating an index or unique index on the respective
// field. Fields can be renamed using a QL tag "name newName". Fields are
// considered in the order of appearance. A QL tag is a struct tag part
// prefixed by "ql:". Tags can be combined, for example:
//
// type T struct {
// Foo string `ql:"index xFoo, name Bar"`
// }
//
// If opts.NoTransaction == true then the statement(s) are not wrapped in a
// transaction. If opt.NoIfNotExists == true then the CREATE statement(s) omits
// the IF NOT EXISTS clause. Passing nil opts is equal to passing
// &SchemaOptions{}
//
// Schema is safe for concurrent use by multiple goroutines.
func Schema(v interface{}, name string, opt *SchemaOptions) (List, error) {
	if opt == nil {
		opt = &zeroSchemaOptions
	}
	s, err := StructSchema(v)
	if err != nil {
		return List{}, err
	}
	var buf bytes.Buffer
	if !opt.NoTransaction {
		buf.WriteString("BEGIN TRANSACTION; ")
	}
	buf.WriteString("CREATE TABLE ")
	if !opt.NoIfNotExists {
		buf.WriteString("IF NOT EXISTS ")
	}
	if name == "" {
		// Derive the table name from the dynamic type ("pkg.T" -> "T", or
		// "pkg_T" with KeepPrefix), replacing non-identifier runes with '_'.
		name = fmt.Sprintf("%T", v)
		if !opt.KeepPrefix {
			a := strings.Split(name, ".")
			if l := len(a); l > 1 {
				name = a[l-1]
			}
		}
		nm := []rune{}
		for _, v := range name {
			switch {
			case v >= '0' && v <= '9' || v == '_' || v >= 'a' && v <= 'z' || v >= 'A' && v <= 'Z':
				// ok
			default:
				v = '_'
			}
			nm = append(nm, v)
		}
		name = string(nm)
	}
	buf.WriteString(name + " (")
	for _, v := range s.Fields {
		if v.IsID {
			// The ID field maps to id() and is not a declared column.
			continue
		}
		buf.WriteString(fmt.Sprintf("%s %s, ", v.Name, v.Type))
	}
	buf.WriteString("); ")
	// Emit one CREATE [UNIQUE] INDEX per tagged index.
	for _, v := range s.Indices {
		buf.WriteString("CREATE ")
		if v.Unique {
			buf.WriteString("UNIQUE ")
		}
		buf.WriteString("INDEX ")
		if !opt.NoIfNotExists {
			buf.WriteString("IF NOT EXISTS ")
		}
		buf.WriteString(fmt.Sprintf("%s ON %s (%s); ", v.Name, name, v.ColumnName))
	}
	if !opt.NoTransaction {
		buf.WriteString("COMMIT; ")
	}
	l, err := Compile(buf.String())
	if err != nil {
		return List{}, fmt.Errorf("%s: %v", buf.String(), err)
	}
	return l, nil
}
// MustSchema is like Schema but panics on error. It simplifies safe
// initialization of global variables holding compiled schemas.
//
// MustSchema is safe for concurrent use by multiple goroutines.
func MustSchema(v interface{}, name string, opt *SchemaOptions) List {
	list, err := Schema(v, name, opt)
	if err == nil {
		return list
	}
	panic(err)
}
// Marshal converts, in the order of appearance, fields of a struct instance v
// to []interface{} or an error, if any. Value v can be also a pointer to a
// struct.
//
// Every considered struct field type must be one of the QL types or a type
// convertible to string, bool, int*, uint*, float* or complex* type or pointer
// to such type. Integers with a width dependent on the architecture can not be
// used. Only exported fields are considered. If an exported field QL tag
// contains "-" then such field is not considered. A QL tag is a struct tag
// part prefixed by "ql:". Field with name ID, having type int64, corresponds
// to id() - and is thus not part of the result.
//
// Marshal is safe for concurrent use by multiple goroutines.
func Marshal(v interface{}) ([]interface{}, error) {
	s, err := StructSchema(v)
	if err != nil {
		return nil, err
	}
	val := reflect.ValueOf(v)
	if s.IsPtr {
		val = val.Elem()
	}
	n := len(s.Fields)
	if s.HasID {
		n-- // the ID field corresponds to id() and is not marshaled
	}
	r := make([]interface{}, n)
	j := 0
	for _, v := range s.Fields {
		if v.IsID {
			continue
		}
		f := val.Field(v.Index)
		if v.IsPtr {
			if f.IsNil() {
				// A nil pointer field marshals as NULL.
				r[j] = nil
				j++
				continue
			}
			f = f.Elem()
		}
		if m := v.MarshalType; m != nil {
			// Convert to the QL-compatible representation recorded by check.
			f = f.Convert(m)
		}
		r[j] = f.Interface()
		j++
	}
	return r, nil
}
// MustMarshal is like Marshal but panics on error. It simplifies marshaling of
// "safe" types, like eg. those which were already verified by Schema or
// MustSchema. When the underlying Marshal returns an error, MustMarshal
// panics.
//
// MustMarshal is safe for concurrent use by multiple goroutines.
func MustMarshal(v interface{}) []interface{} {
	fields, err := Marshal(v)
	if err == nil {
		return fields
	}
	panic(err)
}
// Unmarshal stores data from []interface{} in the struct value pointed to by
// v.
//
// Every considered struct field type must be one of the QL types or a type
// convertible to string, bool, int*, uint*, float* or complex* type or pointer
// to such type. Integers with a width dependent on the architecture can not be
// used. Only exported fields are considered. If an exported field QL tag
// contains "-" then such field is not considered. A QL tag is a struct tag
// part prefixed by "ql:". Fields are considered in the order of appearance.
// Types of values in data must be compatible with the corresponding considered
// field of v.
//
// If the struct has no ID field then the number of values in data must be equal
// to the number of considered fields of v.
//
// type T struct {
// A bool
// B string
// }
//
// Assuming the schema is
//
// CREATE TABLE T (A bool, B string);
//
// Data might be a result of queries like
//
// SELECT * FROM T;
// SELECT A, B FROM T;
//
// If the struct has a considered ID field then the number of values in data
// must be equal to the number of considered fields in v - or one less. In the
// later case the ID field is not set.
//
// type U struct {
// ID int64
// A bool
// B string
// }
//
// Assuming the schema is
//
// CREATE TABLE T (A bool, B string);
//
// Data might be a result of queries like
//
// SELECT * FROM T; // ID not set
// SELECT A, B FROM T; // ID not set
// SELECT id(), A, B FROM T; // ID is set
//
// To unmarshal a value from data into a pointer field of v, Unmarshal first
// handles the case of the value being nil. In that case, Unmarshal sets the
// pointer to nil. Otherwise, Unmarshal unmarshals the data value into value
// pointed at by the pointer. If the pointer is nil, Unmarshal allocates a new
// value for it to point to.
//
// Unmarshal is safe for concurrent use by multiple goroutines.
func Unmarshal(v interface{}, data []interface{}) (err error) {
	// Convert reflection panics (incompatible types, unsettable fields) into
	// a returned "unmarshal: ..." error.
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(error); !ok {
				err = fmt.Errorf("%v", r)
			}
			err = fmt.Errorf("unmarshal: %v", err)
		}
	}()
	s, err := StructSchema(v)
	if err != nil {
		return err
	}
	if !s.IsPtr {
		return fmt.Errorf("unmarshal: need a pointer to a struct")
	}
	// Determine whether data includes a value for the ID field (see the
	// function documentation for the accepted value counts).
	id := false
	nv, nf := len(data), len(s.Fields)
	switch s.HasID {
	case true:
		switch {
		case nv == nf:
			id = true
		case nv == nf-1:
			// ok
		default:
			return fmt.Errorf("unmarshal: got %d values, need %d or %d", nv, nf-1, nf)
		}
	default:
		switch {
		case nv == nf:
			// ok
		default:
			return fmt.Errorf("unmarshal: got %d values, need %d", nv, nf)
		}
	}
	j := 0
	vVal := reflect.ValueOf(v)
	if s.IsPtr {
		vVal = vVal.Elem()
	}
	for _, sf := range s.Fields {
		if sf.IsID && !id {
			// No value supplied for the ID field; leave it unset.
			continue
		}
		d := data[j]
		val := reflect.ValueOf(d)
		j++
		fVal := vVal.Field(sf.Index)
		if u := sf.UnmarshalType; u != nil {
			val = val.Convert(u)
		}
		if !sf.IsPtr {
			fVal.Set(val)
			continue
		}
		if d == nil {
			// NULL unmarshals into a nil pointer.
			fVal.Set(sf.ZeroPtr)
			continue
		}
		if fVal.IsNil() {
			// Allocate a value for a nil pointer before setting through it.
			fVal.Set(reflect.New(sf.ReflectType))
		}
		fVal.Elem().Set(val)
	}
	return nil
}

1277
vendor/github.com/cznic/ql/mem.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

2749
vendor/github.com/cznic/ql/parser.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

2800
vendor/github.com/cznic/ql/plan.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1729
vendor/github.com/cznic/ql/ql.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

219
vendor/github.com/cznic/ql/ql/main.go generated vendored Normal file
View File

@@ -0,0 +1,219 @@
// Copyright 2014 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Command ql is a utility to explore a database, prototype a schema or test
// drive a query, etc.
//
// Installation:
//
// $ go get github.com/cznic/ql/ql
//
// Usage:
//
// ql [-db name] [-schema regexp] [-tables regexp] [-fld] statement_list
//
// Options:
//
// -db name Name of the database to use. Defaults to "ql.db".
// If the DB file does not exists it is created automatically.
//
// -schema re If re != "" show the CREATE statements of matching tables and exit.
//
// -tables re If re != "" show the matching table names and exit.
//
// -fld First row of a query result set will show field names.
//
// statement_list QL statements to execute.
// If no non flag arguments are present, ql reads from stdin.
// The list is wrapped into an automatic transaction.
//
// -t Report and measure time to execute, including creating/opening and closing the DB.
//
// Example:
//
// $ ql 'create table t (i int, s string)'
// $ ql << EOF
// > insert into t values
// > (1, "a"),
// > (2, "b"),
// > (3, "c"),
// > EOF
// $ ql 'select * from t'
// 3, "c"
// 2, "b"
// 1, "a"
// $ ql -fld 'select * from t where i != 2 order by s'
// "i", "s"
// 1, "a"
// 3, "c"
// $
package main
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"regexp"
"sort"
"strings"
"time"
"github.com/cznic/ql"
)
// str renders one result-set row as a comma separated list. String values
// are quoted with %q; everything else is printed with fmt.Sprint.
func str(data []interface{}) string {
	parts := make([]string, 0, len(data))
	for _, item := range data {
		if s, ok := item.(string); ok {
			parts = append(parts, fmt.Sprintf("%q", s))
			continue
		}
		parts = append(parts, fmt.Sprint(item))
	}
	return strings.Join(parts, ", ")
}
// main runs do and exits with a non-zero status on failure.
func main() {
	err := do()
	if err != nil {
		log.Fatal(err)
	}
}
// do implements the ql command: it parses flags, opens (or creates) the DB,
// optionally prints matching schema/table names, and otherwise executes the
// statement list (from args or stdin) inside one automatic transaction.
func do() (err error) {
	oDB := flag.String("db", "ql.db", "The DB file to open. It'll be created if missing.")
	oFlds := flag.Bool("fld", false, "Show recordset's field names.")
	oSchema := flag.String("schema", "", "If non empty, show the CREATE statements of matching tables and exit.")
	oTables := flag.String("tables", "", "If non empty, list matching table names and exit.")
	oTime := flag.Bool("t", false, "Measure and report time to execute the statement(s) including DB create/open/close.")
	flag.Parse()
	t0 := time.Now()
	if *oTime {
		// Deferred so the reported time includes db.Close below.
		defer func() {
			fmt.Fprintf(os.Stderr, "%s\n", time.Since(t0))
		}()
	}
	db, err := ql.OpenFile(*oDB, &ql.Options{CanCreate: true})
	if err != nil {
		return err
	}
	defer func() {
		// Propagate the Close error unless an earlier error is already
		// being returned, in which case the Close error is only logged.
		ec := db.Close()
		switch {
		case ec != nil && err != nil:
			log.Println(ec)
		case ec != nil:
			err = ec
		}
	}()
	// -schema: print CREATE statements for tables matching the regexp.
	if pat := *oSchema; pat != "" {
		re, err := regexp.Compile(pat)
		if err != nil {
			return err
		}
		nfo, err := db.Info()
		if err != nil {
			return err
		}
		r := []string{}
		for _, ti := range nfo.Tables {
			if !re.MatchString(ti.Name) {
				continue
			}
			a := []string{}
			for _, ci := range ti.Columns {
				a = append(a, fmt.Sprintf("%s %s", ci.Name, ci.Type))
			}
			r = append(r, fmt.Sprintf("CREATE TABLE %s (%s);", ti.Name, strings.Join(a, ", ")))
		}
		sort.Strings(r)
		if len(r) != 0 {
			fmt.Println(strings.Join(r, "\n"))
		}
		return nil
	}
	// -tables: print the names of tables matching the regexp.
	if pat := *oTables; pat != "" {
		re, err := regexp.Compile(pat)
		if err != nil {
			return err
		}
		nfo, err := db.Info()
		if err != nil {
			return err
		}
		r := []string{}
		for _, ti := range nfo.Tables {
			if !re.MatchString(ti.Name) {
				continue
			}
			r = append(r, ti.Name)
		}
		sort.Strings(r)
		if len(r) != 0 {
			fmt.Println(strings.Join(r, "\n"))
		}
		return nil
	}
	// Statement source: non-flag args joined by spaces, or stdin.
	var src string
	switch n := flag.NArg(); n {
	case 0:
		b, err := ioutil.ReadAll(bufio.NewReader(os.Stdin))
		if err != nil {
			return err
		}
		src = string(b)
	default:
		a := make([]string, n)
		for i := range a {
			a[i] = flag.Arg(i)
		}
		src = strings.Join(a, " ")
	}
	// Wrap the statement list into one automatic transaction.
	src = "BEGIN TRANSACTION; " + src + "; COMMIT;"
	l, err := ql.Compile(src)
	if err != nil {
		log.Println(src)
		return err
	}
	rs, i, err := db.Execute(ql.NewRWCtx(), l)
	if err != nil {
		// i indexes the failing statement within the compiled list.
		a := strings.Split(strings.TrimSpace(fmt.Sprint(l)), "\n")
		return fmt.Errorf("%v: %s", err, a[i])
	}
	if len(rs) == 0 {
		return
	}
	// Print only the last recordset; EXPLAIN output is one column of text.
	switch {
	case l.IsExplainStmt():
		return rs[len(rs)-1].Do(*oFlds, func(data []interface{}) (bool, error) {
			fmt.Println(data[0])
			return true, nil
		})
	default:
		return rs[len(rs)-1].Do(*oFlds, func(data []interface{}) (bool, error) {
			fmt.Println(str(data))
			return true, nil
		})
	}
}

4129
vendor/github.com/cznic/ql/scanner.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1268
vendor/github.com/cznic/ql/stmt.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

991
vendor/github.com/cznic/ql/storage.go generated vendored Normal file
View File

@@ -0,0 +1,991 @@
// Copyright (c) 2014 ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ql
import (
"fmt"
"strings"
)
// storage is the contract between the database engine and a backend
// (file or memory). It covers transactions, record CRUD by handle,
// index lifecycle and integrity verification.
type storage interface {
	Acid() bool
	BeginTransaction() error
	Close() error
	Commit() error
	Create(data ...interface{}) (h int64, err error)
	CreateIndex(unique bool) (handle int64, x btreeIndex, err error)
	CreateTemp(asc bool) (bt temp, err error)
	Delete(h int64, blobCols ...*col) error //LATER split the nil blobCols case
	ID() (id int64, err error)
	Name() string
	OpenIndex(unique bool, handle int64) (btreeIndex, error) // Never called on the memory backend.
	Read(dst []interface{}, h int64, cols ...*col) (data []interface{}, err error)
	ResetID() (err error)
	Rollback() error
	Update(h int64, data ...interface{}) error
	UpdateRow(h int64, blobCols []*col, data ...interface{}) error
	Verify() (allocs int64, err error)
}

// btreeIterator walks key/value pairs of a temp btree in order.
type btreeIterator interface {
	Next() (k, v []interface{}, err error)
}

// temp is a transient, droppable store used for intermediate results
// (e.g. ORDER BY, GROUP BY spill areas).
type temp interface {
	BeginTransaction() error
	Create(data ...interface{}) (h int64, err error)
	Drop() (err error)
	Get(k []interface{}) (v []interface{}, err error)
	Read(dst []interface{}, h int64, cols ...*col) (data []interface{}, err error)
	SeekFirst() (e btreeIterator, err error)
	Set(k, v []interface{}) (err error)
}

// indexIterator walks an index in either direction, yielding the indexed
// key and the record handle it points at.
type indexIterator interface {
	Next() (k []interface{}, h int64, err error)
	Prev() (k []interface{}, h int64, err error)
}

// btreeIndex is the operations an index must support to back the
// statements noted on each method.
type btreeIndex interface {
	Clear() error // supports truncate table statement
	Create(indexedValues []interface{}, h int64) error // supports insert into statement
	Delete(indexedValues []interface{}, h int64) error // supports delete from statement
	Drop() error // supports drop table, drop index statements
	Seek(indexedValues []interface{}) (iter indexIterator, hit bool, err error) // supports where clause
	SeekFirst() (iter indexIterator, err error) // supports aggregate min / ascending order by
	SeekLast() (iter indexIterator, err error) // supports aggregate max / descending order by
}

// indexedCol describes a simple index over one column (or over id()).
type indexedCol struct { // Column name or id() index.
	name string
	unique bool
	x btreeIndex
	xroot int64
}

// index2 describes an expression-list index; sources holds the textual
// form of exprList for round-tripping through the catalog.
type index2 struct { // Expression list index.
	unique bool
	x btreeIndex
	xroot int64
	sources []string
	exprList []expression
}
// eval computes the indexed value list for one record: it binds $id and
// every column value into an environment, expanding blob chunks when the
// backend is a file, then evaluates each index expression. Blob-like
// expression results are rejected because they cannot be index keys.
func (x *index2) eval(ctx *execCtx, cols []*col, id int64, r []interface{}) ([]interface{}, error) {
	f, isFile := ctx.db.store.(*file)
	vlist := make([]interface{}, len(x.exprList))
	m := map[interface{}]interface{}{"$id": id}
	for _, col := range cols {
		ci := col.index
		v := interface{}(nil)
		if ci < len(r) {
			v = r[ci]
		}
		// On the file backend []byte is a chunk reference; inflate it to
		// the real value before evaluation.
		if b, ok := v.([]byte); ok && isFile {
			var err error
			if v, err = expand1(chunk{f: f, b: b}, nil); err != nil {
				return nil, err
			}
		}
		m[col.name] = v
	}
	for i, e := range x.exprList {
		v, err := e.eval(ctx, m)
		if err != nil {
			return nil, err
		}
		if ok, typ := isBlobType(v); ok {
			return nil, fmt.Errorf("value of a complex index cannot be of blob-like type: %v", typ)
		}
		vlist[i] = v
	}
	return vlist, nil
}
// indexKey pairs an indexed value with the handle of the record it
// belongs to.
type indexKey struct {
	value []interface{}
	h int64
}
// storage fields
// 0: next int64
// 1: scols string
// 2: hhead int64
// 3: name string
// 4: indices string - optional
// 5: hxroots int64 - optional
//
// table is the in-memory representation of one table's metadata: its
// logical/physical columns, record-list heads, indices and the doubly
// linked list of loaded tables (tprev/tnext).
type table struct {
	cols []*col // logical
	cols0 []*col // physical
	h int64 //
	head int64 // head of the single linked record list
	hhead int64 // handle of the head of the single linked record list
	hxroots int64
	indices []*indexedCol
	indices2 map[string]*index2
	name string
	next int64 // single linked table list
	store storage
	tnext *table
	tprev *table
	xroots []interface{}
	constraints []*constraint
	defaults []expression
}
// hasIndices reports whether the table has any column or expression-list
// indices.
func (t *table) hasIndices() bool {
	if len(t.indices) != 0 {
		return true
	}
	return len(t.indices2) != 0
}

// hasIndices2 reports whether the table has any expression-list indices.
func (t *table) hasIndices2() bool { return len(t.indices2) > 0 }
// constraintsAndDefaults loads per-column NOT NULL/CHECK constraints and
// DEFAULT expressions from the __Column2 system table into t.constraints
// and t.defaults. It is a no-op for system tables and for databases that
// predate __Column2.
func (t *table) constraintsAndDefaults(ctx *execCtx) error {
	if isSystemName[t.name] {
		return nil
	}
	_, ok := ctx.db.root.tables["__Column2"]
	if !ok {
		return nil
	}
	cols := t.cols
	constraints := make([]*constraint, len(cols))
	defaults := make([]expression, len(cols))
	arg := []interface{}{t.name}
	// Rows: [name, notNull, constraintExpr, defaultExpr] for this table.
	rs, err := selectColumn2.l[0].exec(&execCtx{db: ctx.db, arg: arg})
	if err != nil {
		return err
	}
	var rows [][]interface{}
	ok = false // NOTE(review): dead store - ok is not read again below
	if err := rs.(recordset).do(
		&execCtx{db: ctx.db, arg: arg},
		func(id interface{}, data []interface{}) (more bool, err error) {
			rows = append(rows, data)
			return true, nil
		},
	); err != nil {
		return err
	}
	for _, row := range rows {
		nm := row[0].(string)
		nonNull := row[1].(bool)
		cexpr := row[2].(string)
		dexpr := row[3].(string)
		for i, c := range cols {
			if c.name == nm {
				var co *constraint
				// A nil co.expr encodes a plain NOT NULL constraint.
				if nonNull || cexpr != "" {
					co = &constraint{}
					constraints[i] = co
					if cexpr != "" {
						if co.expr, err = ctx.db.str2expr(cexpr); err != nil {
							return fmt.Errorf("constraint %q: %v", cexpr, err)
						}
					}
					t.constraints = constraints
				}
				if dexpr != "" {
					if defaults[i], err = ctx.db.str2expr(dexpr); err != nil {
						return fmt.Errorf("constraint %q: %v", dexpr, err)
					}
					t.defaults = defaults
				}
			}
		}
	}
	return nil
}
// checkConstraintsAndDefaults fills in DEFAULT expressions for nil values
// in row and then enforces NOT NULL / CHECK constraints, evaluating each
// against the environment m (column name -> value). It mutates row in
// place and returns the first constraint violation, if any.
func (t *table) checkConstraintsAndDefaults(ctx *execCtx, row []interface{}, m map[interface{}]interface{}) error {
	cols := t.cols
	if len(t.defaults) != 0 {
		// 1. Bind current column values into the expression environment.
		for _, c := range cols {
			m[c.name] = row[c.index]
		}
		// 2. Replace nil values with their evaluated defaults, type
		// checking each substituted value.
		for i, c := range cols {
			val := row[c.index]
			expr := t.defaults[i]
			if val != nil || expr == nil {
				continue
			}
			dval, err := expr.eval(ctx, m)
			if err != nil {
				return err
			}
			row[c.index] = dval
			if err = typeCheck(row, []*col{c}); err != nil {
				return err
			}
		}
	}
	if len(t.constraints) != 0 {
		// 3. Re-bind values (defaults may have changed them above).
		for _, c := range cols {
			m[c.name] = row[c.index]
		}
		// 4. Enforce each column's constraint.
		for i, c := range cols {
			constraint := t.constraints[i]
			if constraint == nil {
				continue
			}
			val := row[c.index]
			expr := constraint.expr
			if expr == nil { // Constraint: NOT NULL
				if val == nil {
					return fmt.Errorf("column %s: constraint violation: NOT NULL", c.name)
				}
				continue
			}
			// Constraint is an expression; it must evaluate to a non-nil
			// bool and be true.
			cval, err := expr.eval(ctx, m)
			if err != nil {
				return err
			}
			if cval == nil {
				return fmt.Errorf("column %s: constraint violation: %s", c.name, expr)
			}
			bval, ok := cval.(bool)
			if !ok {
				return fmt.Errorf("column %s: non bool constraint expression: %s", c.name, expr)
			}
			if !bval {
				return fmt.Errorf("column %s: constraint violation: %s", c.name, expr)
			}
		}
	}
	return nil
}
// clone returns a deep-ish copy of t: columns, index descriptors and
// xroots are copied, while the underlying btreeIndex values and store are
// shared. The copy is detached from the table list (tnext/tprev nil).
func (t *table) clone() *table {
	r := &table{}
	*r = *t
	r.constraints = append([]*constraint(nil), t.constraints...)
	r.defaults = append([]expression(nil), t.defaults...)
	r.indices2 = nil
	if n := len(t.indices2); n != 0 {
		r.indices2 = make(map[string]*index2, n)
		for k, v := range t.indices2 {
			r.indices2[k] = v
		}
	}
	r.cols = make([]*col, len(t.cols))
	for i, v := range t.cols {
		c := &col{}
		*c = *v
		r.cols[i] = c
	}
	r.cols0 = make([]*col, len(t.cols0))
	for i, v := range t.cols0 {
		c := &col{}
		*c = *v
		r.cols0[i] = c
	}
	r.indices = make([]*indexedCol, len(t.indices))
	for i, v := range t.indices {
		if v != nil {
			c := &indexedCol{}
			*c = *v
			r.indices[i] = c
		}
	}
	r.xroots = make([]interface{}, len(t.xroots))
	copy(r.xroots, t.xroots)
	// Detach the clone from the loaded-table linked list.
	r.tnext, r.tprev = nil, nil
	return r
}
// findIndexByColName returns the column and the index defined on it.
// Slot 0 of t.indices is the implicit id() index, addressed by the name
// "id()"; slot i+1 belongs to t.cols[i]. Returns (nil, nil) when no
// matching index exists.
func (t *table) findIndexByColName(name string) (*col, *indexedCol) {
	for i, ix := range t.indices {
		switch {
		case ix == nil:
			// empty slot, nothing indexed here
		case i == 0:
			if name == "id()" {
				return idCol, ix
			}
		default:
			if c := t.cols[i-1]; c.name == name {
				return c, ix
			}
		}
	}
	return nil, nil
}
// findIndexByName looks up an index by its name, checking simple column
// indices first and expression-list indices second. The result is either
// a *indexedCol, a *index2, or nil when no index has that name.
func (t *table) findIndexByName(name string) interface{} {
	for _, ix := range t.indices {
		if ix == nil || ix.name != name {
			continue
		}
		return ix
	}
	if x2, ok := t.indices2[name]; ok {
		return x2
	}
	return nil
}
// load populates t from its on-disk metadata record at handle t.h (see
// the "storage fields" layout above): linked-list next pointer, column
// metadata, record-list head, name and - when present - the index
// definitions and their btree roots. Every malformed field is reported
// as a "corrupted DB" error.
func (t *table) load() (err error) {
	data, err := t.store.Read(nil, t.h)
	if err != nil {
		return
	}
	// 4 fields = no indices; 6 fields = indices + xroots present.
	var hasIndices bool
	switch n := len(data); n {
	case 4:
	case 6:
		hasIndices = true
	default:
		return fmt.Errorf("corrupted DB: table data len %d", n)
	}
	var ok bool
	if t.next, ok = data[0].(int64); !ok {
		return fmt.Errorf("corrupted DB: table data[0] of type %T", data[0])
	}
	scols, ok := data[1].(string)
	if !ok {
		return fmt.Errorf("corrupted DB: table data[1] of type %T", data[1])
	}
	if t.hhead, ok = data[2].(int64); !ok {
		return fmt.Errorf("corrupted DB: table data[2] of type %T", data[2])
	}
	if t.name, ok = data[3].(string); !ok {
		return fmt.Errorf("corrupted DB: table data[3] of type %T", data[3])
	}
	// The record-list head lives in its own single-field record.
	var head []interface{}
	if head, err = t.store.Read(nil, t.hhead); err != nil {
		return err
	}
	if len(head) != 1 {
		return fmt.Errorf("corrupted DB: table head data len %d", len(head))
	}
	if t.head, ok = head[0].(int64); !ok {
		return fmt.Errorf("corrupted DB: table head data[0] of type %T", head[0])
	}
	// Column metadata: "|"-separated entries of one type byte + name;
	// an empty name marks a dropped (physical-only) column.
	a := strings.Split(scols, "|")
	t.cols0 = make([]*col, len(a))
	for i, v := range a {
		if len(v) < 1 {
			return fmt.Errorf("corrupted DB: field info %q", v)
		}
		col := &col{name: v[1:], typ: int(v[0]), index: i}
		t.cols0[i] = col
		if col.name != "" {
			t.cols = append(t.cols, col)
		}
	}
	if !hasIndices {
		return
	}
	if t.hxroots, ok = data[5].(int64); !ok {
		return fmt.Errorf("corrupted DB: table data[5] of type %T", data[5])
	}
	xroots, err := t.store.Read(nil, t.hxroots)
	if err != nil {
		return err
	}
	// One root slot per physical column plus one for the id() index.
	if g, e := len(xroots), len(t.cols0)+1; g != e {
		return fmt.Errorf("corrupted DB: got %d index roots, expected %d", g, e)
	}
	indices, ok := data[4].(string)
	if !ok {
		return fmt.Errorf("corrupted DB: table data[4] of type %T", data[4])
	}
	a = strings.Split(indices, "|")
	if g, e := len(a), len(t.cols0)+1; g != e {
		return fmt.Errorf("corrupted DB: got %d index definitions, expected %d", g, e)
	}
	t.indices = make([]*indexedCol, len(a))
	for i, v := range a {
		if v == "" {
			continue // no index on this column
		}
		if len(v) < 2 {
			return fmt.Errorf("corrupted DB: invalid index definition %q", v)
		}
		// Definition format: one flag byte ('u' unique / 'n' non-unique)
		// followed by the index name.
		nm := v[1:]
		h, ok := xroots[i].(int64)
		if !ok {
			return fmt.Errorf("corrupted DB: table index root of type %T", xroots[i])
		}
		if h == 0 {
			return fmt.Errorf("corrupted DB: missing root for index %s", nm)
		}
		unique := v[0] == 'u'
		x, err := t.store.OpenIndex(unique, h)
		if err != nil {
			return err
		}
		t.indices[i] = &indexedCol{nm, unique, x, h}
	}
	t.xroots = xroots
	return
}
// newTable persists a brand new table: it creates the record-list head
// (initially 0, i.e. empty) and the table metadata record, then returns
// the in-memory *table linked between tprev and tnext.
func newTable(store storage, name string, next int64, cols []*col, tprev, tnext *table) (t *table, err error) {
	hhead, err := store.Create(int64(0))
	if err != nil {
		return
	}
	scols := cols2meta(cols)
	h, err := store.Create(next, scols, hhead, name)
	if err != nil {
		return
	}
	t = &table{
		cols0: cols,
		h: h,
		hhead: hhead,
		name: name,
		next: next,
		store: store,
		tnext: tnext,
		tprev: tprev,
	}
	// updateCols derives the logical column list from cols0.
	return t.updateCols(), nil
}
// blobCols returns the physical columns whose values are stored out of
// band (blob, bigint, bigrat, time and duration types).
func (t *table) blobCols() []*col {
	var out []*col
	for _, c := range t.cols0 {
		typ := c.typ
		if typ == qBlob || typ == qBigInt || typ == qBigRat || typ == qTime || typ == qDuration {
			out = append(out, c)
		}
	}
	return out
}
// truncate deletes every record of the table (following the single linked
// record list from t.head), resets the persisted list head to empty and
// clears all simple and expression-list indices.
func (t *table) truncate() (err error) {
	h := t.head
	// rec is a reusable read buffer passed back into store.Read each
	// iteration. The original code shadowed it with ":=" inside the loop,
	// which kept this buffer permanently nil and defeated the reuse.
	var rec []interface{}
	blobCols := t.blobCols()
	for h != 0 {
		rec, err = t.store.Read(rec, h)
		if err != nil {
			return err
		}
		// rec[0] is the next-record handle; grab it before deleting.
		nh := rec[0].(int64)
		if err = t.store.Delete(h, blobCols...); err != nil { //LATER remove double read for len(blobCols) != 0
			return err
		}
		h = nh
	}
	// NOTE(review): the literal 0 here is an untyped int while load
	// asserts the head as int64 - the backends appear to normalize this;
	// confirm before changing.
	if err = t.store.Update(t.hhead, 0); err != nil {
		return
	}
	for _, v := range t.indices {
		if v == nil {
			continue
		}
		if err := v.x.Clear(); err != nil {
			return err
		}
	}
	for _, ix := range t.indices2 {
		if err := ix.x.Clear(); err != nil {
			return err
		}
	}
	t.head = 0
	return t.updated()
}
// addIndex0 creates the btree for a new simple index on column colIndex
// (-1 means the id() column; slot colIndex+1 in t.indices/t.xroots) and
// persists the updated root table. It lazily allocates the indices and
// xroots structures the first time any index is added.
func (t *table) addIndex0(unique bool, indexName string, colIndex int) (int64, btreeIndex, error) {
	switch len(t.indices) {
	case 0:
		// First index ever on this table: allocate one slot per physical
		// column plus one for id(), and persist the xroots record.
		indices := make([]*indexedCol, len(t.cols0)+1)
		h, x, err := t.store.CreateIndex(unique)
		if err != nil {
			return -1, nil, err
		}
		indices[colIndex+1] = &indexedCol{indexName, unique, x, h}
		xroots := make([]interface{}, len(indices))
		xroots[colIndex+1] = h
		hx, err := t.store.Create(xroots...)
		if err != nil {
			return -1, nil, err
		}
		t.hxroots, t.xroots, t.indices = hx, xroots, indices
		return h, x, t.updated()
	default:
		// Reject a second index on the same column.
		ex := t.indices[colIndex+1]
		if ex != nil && ex.name != "" {
			colName := "id()"
			if colIndex >= 0 {
				colName = t.cols0[colIndex].name
			}
			return -1, nil, fmt.Errorf("column %s already has an index: %s", colName, ex.name)
		}
		h, x, err := t.store.CreateIndex(unique)
		if err != nil {
			return -1, nil, err
		}
		t.xroots[colIndex+1] = h
		if err := t.store.Update(t.hxroots, t.xroots...); err != nil {
			return -1, nil, err
		}
		t.indices[colIndex+1] = &indexedCol{indexName, unique, x, h}
		return h, x, t.updated()
	}
}
// addIndex creates a simple index via addIndex0 and back-fills it by
// walking every existing record of the table. Returns the index handle.
func (t *table) addIndex(unique bool, indexName string, colIndex int) (int64, error) {
	hx, x, err := t.addIndex0(unique, indexName, colIndex)
	if err != nil {
		return -1, err
	}
	// Must fill the new index.
	ncols := len(t.cols0)
	h, store := t.head, t.store
	for h != 0 {
		rec, err := store.Read(nil, h, t.cols...)
		if err != nil {
			return -1, err
		}
		// Pad short records so rec[colIndex+2] is addressable; record
		// layout is [next, id, data...], hence the +2 offsets.
		if n := ncols + 2 - len(rec); n > 0 {
			rec = append(rec, make([]interface{}, n)...)
		}
		if err = x.Create([]interface{}{rec[colIndex+2]}, h); err != nil {
			return -1, err
		}
		h = rec[0].(int64)
	}
	return hx, nil
}
// addIndex2 creates an expression-list index named indexName and
// back-fills it by evaluating exprList against every existing record.
// The caller guarantees the name is not already taken (panics otherwise).
func (t *table) addIndex2(execCtx *execCtx, unique bool, indexName string, exprList []expression) (int64, error) {
	if _, ok := t.indices2[indexName]; ok {
		panic("internal error 009")
	}
	hx, x, err := t.store.CreateIndex(unique)
	if err != nil {
		return -1, err
	}
	// Keep the textual form of each expression for the catalog.
	var a []string
	for _, v := range exprList {
		a = append(a, v.String())
	}
	x2 := &index2{unique, x, hx, a, exprList}
	if t.indices2 == nil {
		t.indices2 = map[string]*index2{}
	}
	t.indices2[indexName] = x2
	// Must fill the new index.
	m := map[interface{}]interface{}{}
	h, store := t.head, t.store
	for h != 0 {
		// Record layout is [next, id, data...], hence the +2 offsets.
		rec, err := store.Read(nil, h, t.cols...)
		if err != nil {
			return -1, err
		}
		for _, col := range t.cols {
			ci := col.index
			v := interface{}(nil)
			if ci < len(rec) {
				v = rec[ci+2]
			}
			m[col.name] = v
		}
		id := rec[1].(int64)
		vlist, err := x2.eval(execCtx, t.cols, id, rec[2:])
		if err != nil {
			return -1, err
		}
		if err := x2.x.Create(vlist, h); err != nil {
			return -1, err
		}
		h = rec[0].(int64)
	}
	return hx, nil
}
// dropIndex drops the simple index in slot xIndex: it zeroes the root,
// drops the btree, clears the slot and persists the table metadata.
func (t *table) dropIndex(xIndex int) error {
	t.xroots[xIndex] = 0
	if err := t.indices[xIndex].x.Drop(); err != nil {
		return err
	}
	t.indices[xIndex] = nil
	return t.updated()
}
// updated rewrites the table's metadata record. When indices exist, their
// definitions are serialized as "|"-joined entries of a 'u'/'n' unique
// flag plus the index name (empty entry = no index on that slot) and the
// 6-field layout including hxroots is written; otherwise the 4-field
// layout is used. See the "storage fields" comment on type table.
func (t *table) updated() (err error) {
	switch {
	case len(t.indices) != 0:
		a := []string{}
		for _, v := range t.indices {
			if v == nil {
				a = append(a, "")
				continue
			}
			s := "n"
			if v.unique {
				s = "u"
			}
			a = append(a, s+v.name)
		}
		return t.store.Update(t.h, t.next, cols2meta(t.updateCols().cols0), t.hhead, t.name, strings.Join(a, "|"), t.hxroots)
	default:
		return t.store.Update(t.h, t.next, cols2meta(t.updateCols().cols0), t.hhead, t.name)
	}
}
// storage fields
// 0: next record handle int64
// 1: record id int64
// 2...: data row
//
// addRecord allocates a fresh record id, prepends the record to the
// table's single linked record list, registers it with every simple and
// expression-list index, and persists the new list head.
func (t *table) addRecord(execCtx *execCtx, r []interface{}) (id int64, err error) {
	if id, err = t.store.ID(); err != nil {
		return
	}
	// Prepend [next, id] per the record layout documented above.
	r = append([]interface{}{t.head, id}, r...)
	h, err := t.store.Create(r...)
	if err != nil {
		return
	}
	// Simple indices: slot 0 indexes r[1] (the id), slot i+1 indexes the
	// i-th physical column, i.e. r[i+1] after the prepend.
	for i, v := range t.indices {
		if v == nil {
			continue
		}
		if err = v.x.Create([]interface{}{r[i+1]}, h); err != nil {
			return
		}
	}
	for _, ix := range t.indices2 {
		vlist, err := ix.eval(execCtx, t.cols, id, r[2:])
		if err != nil {
			return -1, err
		}
		if err := ix.x.Create(vlist, h); err != nil {
			return -1, err
		}
	}
	if err = t.store.Update(t.hhead, h); err != nil {
		return
	}
	t.head = h
	return
}
// flds returns one result-set field descriptor per logical column, each
// backed by an identifier expression of the column's name.
func (t *table) flds() []*fld {
	fields := make([]*fld, 0, len(t.cols))
	for _, c := range t.cols {
		fields = append(fields, &fld{expr: &ident{c.name}, name: c.name})
	}
	return fields
}
// fieldNames returns the names of the table's logical columns, in order.
func (t *table) fieldNames() []string {
	names := make([]string, 0, len(t.cols))
	for _, c := range t.cols {
		names = append(names, c.name)
	}
	return names
}
// updateCols rebuilds t.cols (the logical column list) from t.cols0 (the
// physical one): dropped columns (empty name) are skipped and each kept
// column's physical index is refreshed. Returns t for call chaining.
func (t *table) updateCols() *table {
	logical := t.cols[:0]
	for i, c := range t.cols0 {
		if c.name == "" {
			continue
		}
		c.index = i
		logical = append(logical, c)
	}
	t.cols = logical
	return t
}
// row0 reads the raw record at handle h ([next, id, data...]) and pads it
// with nils so every logical column is addressable.
func (t *table) row0(ctx *execCtx, h int64) ([]interface{}, error) {
	rec, err := ctx.db.store.Read(nil, h, t.cols...)
	if err != nil {
		return nil, err
	}
	// len(rec)-2 skips the next-handle and id fields of the layout.
	if d := len(t.cols) - (len(rec) - 2); d > 0 {
		rec = append(rec, make([]interface{}, d)...)
	}
	return rec, nil
}
// row reads the record at handle h and splits it into its id and its data
// columns (the record layout is [next, id, data...]).
func (t *table) row(ctx *execCtx, h int64) (int64, []interface{}, error) {
	rec, err := t.row0(ctx, h)
	if err != nil {
		return -1, nil, err
	}
	id := rec[1].(int64)
	return id, rec[2:], nil
}
// storage fields
// 0: handle of first table in DB int64
//
// root is the catalog of an open database: the persisted head of the
// table list plus the in-memory name->table map and per-transaction
// bookkeeping (parent points at the enclosing transaction's root).
type root struct {
	head int64 // Single linked table list
	lastInsertID int64
	parent *root
	rowsAffected int64 //LATER implement
	store storage
	tables map[string]*table
	thead *table
}
// newRoot opens the catalog stored at handle 1. An empty record means a
// brand new database (an empty table list is committed); a single int64
// is the head of the table list, which is walked to load every table;
// anything else is an incompatible format.
func newRoot(store storage) (r *root, err error) {
	data, err := store.Read(nil, 1)
	if err != nil {
		return
	}
	switch len(data) {
	case 0: // new empty DB, create empty table list
		if err = store.BeginTransaction(); err != nil {
			return
		}
		if err = store.Update(1, int64(0)); err != nil {
			store.Rollback()
			return
		}
		if err = store.Commit(); err != nil {
			return
		}
		return &root{
			store: store,
			tables: map[string]*table{},
		}, nil
	case 1: // existing DB, load tables
		// NOTE(review): this length check is unreachable inside case 1.
		if len(data) != 1 {
			return nil, fmt.Errorf("corrupted DB: root is an %d-scalar", len(data))
		}
		p, ok := data[0].(int64)
		if !ok {
			return nil, fmt.Errorf("corrupted DB: root head has type %T", data[0])
		}
		r := &root{
			head: p,
			store: store,
			tables: map[string]*table{},
		}
		// Walk the single linked table list, wiring up the in-memory
		// doubly linked list (tprev/tnext) as we go.
		var tprev *table
		for p != 0 {
			t := &table{
				h: p,
				store: store,
				tprev: tprev,
			}
			if r.thead == nil {
				r.thead = t
			}
			if tprev != nil {
				tprev.tnext = t
			}
			tprev = t
			if err = t.load(); err != nil {
				return nil, err
			}
			if r.tables[t.name] != nil { // duplicate
				return nil, fmt.Errorf("corrupted DB: duplicate table metadata for table %s", t.name)
			}
			r.tables[t.name] = t
			p = t.next
		}
		return r, nil
	default:
		return nil, errIncompatibleDBFormat
	}
}
// findIndexByName searches every table for an index named name and
// returns the owning table with the index, or (nil, nil) when not found.
// Note: r.tables is a map, so ties between tables are resolved in
// unspecified order - index names are expected to be DB-unique.
func (r *root) findIndexByName(name string) (*table, interface{}) {
	for _, tbl := range r.tables {
		if x := tbl.findIndexByName(name); x != nil {
			return tbl, x
		}
	}
	return nil, nil
}
// updated persists the head of the table list into the catalog record at
// handle 1.
func (r *root) updated() (err error) {
	return r.store.Update(1, r.head)
}
// createTable persists a new table and prepends it to both the on-disk
// table list (catalog head at handle 1) and the in-memory list. The
// caller guarantees the name is unused (panics otherwise).
func (r *root) createTable(name string, cols []*col) (t *table, err error) {
	if _, ok := r.tables[name]; ok {
		panic("internal error 065")
	}
	// New table links to the current head; old head becomes its tnext.
	if t, err = newTable(r.store, name, r.head, cols, nil, r.thead); err != nil {
		return nil, err
	}
	if err = r.store.Update(1, t.h); err != nil {
		return nil, err
	}
	if p := r.thead; p != nil {
		p.tprev = t
	}
	r.tables[name], r.head, r.thead = t, t.h, t
	return
}
// dropTable removes t from the database: records are truncated, the
// head/metadata/xroots records deleted, all indices dropped, and the
// table unlinked from both the persisted and in-memory table lists. The
// catalog map entry is removed only if everything succeeded.
func (r *root) dropTable(t *table) (err error) {
	defer func() {
		if err != nil {
			return
		}
		delete(r.tables, t.name)
	}()
	if err = t.truncate(); err != nil {
		return
	}
	if err = t.store.Delete(t.hhead); err != nil {
		return
	}
	if err = t.store.Delete(t.h); err != nil {
		return
	}
	for _, v := range t.indices {
		if v != nil && v.x != nil {
			if err = v.x.Drop(); err != nil {
				return
			}
		}
	}
	for _, v := range t.indices2 {
		if err = v.x.Drop(); err != nil {
			return
		}
	}
	if h := t.hxroots; h != 0 {
		if err = t.store.Delete(h); err != nil {
			return
		}
	}
	// Unlink t; four cases by its position in the doubly linked list.
	switch {
	case t.tprev == nil && t.tnext == nil:
		// Last table in the DB: empty the list and reset the id counter.
		r.head = 0
		r.thead = nil
		err = r.updated()
		return errSet(&err, r.store.ResetID())
	case t.tprev == nil && t.tnext != nil:
		// Head of the list.
		next := t.tnext
		next.tprev = nil
		r.head = next.h
		r.thead = next
		if err = r.updated(); err != nil {
			return
		}
		return next.updated()
	case t.tprev != nil && t.tnext == nil: // last in list
		prev := t.tprev
		prev.next = 0
		prev.tnext = nil
		return prev.updated()
	default: //case t.tprev != nil && t.tnext != nil:
		prev, next := t.tprev, t.tnext
		prev.next = next.h
		prev.tnext = next
		next.tprev = prev
		if err = prev.updated(); err != nil {
			return
		}
		return next.updated()
	}
}

View File

@@ -0,0 +1,186 @@
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package lock is a file locking library.
package lock
import (
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"sync"
)
// Lock locks the given file, creating the file if necessary. If the
// file already exists, it must have zero size or an error is returned.
// The lock is an exclusive lock (a write lock), but locked files
// should neither be read from nor written to. Such files should have
// zero size and only exist to co-ordinate ownership across processes.
//
// A nil Closer is returned if an error occurred. Otherwise, close that
// Closer to release the lock.
//
// On Linux, FreeBSD and OSX, a lock has the same semantics as fcntl(2)'s
// advisory locks. In particular, closing any other file descriptor for the
// same file will release the lock prematurely.
//
// Attempting to lock a file that is already locked by the current process
// has undefined behavior.
//
// On other operating systems, lock will fallback to using the presence and
// content of a file named name + '.lock' to implement locking behavior.
func Lock(name string) (io.Closer, error) {
	abs, err := filepath.Abs(name)
	if err != nil {
		return nil, err
	}
	// The process-local registry guards against the undefined behavior of
	// double-locking the same path within this process.
	lockmu.Lock()
	defer lockmu.Unlock()
	if locked[abs] {
		return nil, fmt.Errorf("file %q already locked", abs)
	}
	// lockFn is the platform-specific implementation (fcntl or portable).
	c, err := lockFn(abs)
	if err != nil {
		return nil, fmt.Errorf("cannot acquire lock: %v", err)
	}
	locked[abs] = true
	return c, nil
}
// lockFn is overridden by platform-specific init functions; the portable
// fallback is used where no fcntl-based implementation exists.
var lockFn = lockPortable

// lockPortable is a portable version not using fcntl. Doesn't handle crashes as gracefully,
// since it can leave stale lock files.
//
// It encodes the owner's pid as JSON into the lock file so a later caller
// can detect stale locks left behind by dead processes.
func lockPortable(name string) (io.Closer, error) {
	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		st := portableLockStatus(name)
		switch st {
		case statusLocked:
			return nil, fmt.Errorf("file %q already locked", name)
		case statusStale:
			// Owner died; remove the leftover so we can re-create it.
			os.Remove(name)
		case statusInvalid:
			return nil, fmt.Errorf("can't Lock file %q: has invalid contents", name)
		}
	}
	f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0666)
	if err != nil {
		return nil, fmt.Errorf("failed to create lock file %s %v", name, err)
	}
	if err := json.NewEncoder(f).Encode(&pidLockMeta{OwnerPID: os.Getpid()}); err != nil {
		// Fix: the original leaked the open fd and left behind an
		// empty/partial lock file that would block future Lock calls.
		f.Close()
		os.Remove(name)
		return nil, fmt.Errorf("cannot write owner pid: %v", err)
	}
	return &unlocker{
		f: f,
		abs: name,
		portable: true,
	}, nil
}
// lockStatus classifies the contents of a portable lock file.
type lockStatus int

const (
	statusInvalid lockStatus = iota // unparseable or missing owner pid
	statusLocked // owner process is alive
	statusUnlocked // lock file does not exist / can't be opened
	statusStale // owner process is gone; safe to remove
)

// pidLockMeta is the JSON payload written into a portable lock file,
// identifying the process that owns the lock.
type pidLockMeta struct {
	OwnerPID int
}
// portableLockStatus inspects the lock file at path and reports whether
// it is held by a live process, stale, invalid, or absent.
func portableLockStatus(path string) lockStatus {
	f, err := os.Open(path)
	if err != nil {
		return statusUnlocked
	}
	defer f.Close()
	var meta pidLockMeta
	if json.NewDecoder(f).Decode(&meta) != nil {
		return statusInvalid
	}
	if meta.OwnerPID == 0 {
		return statusInvalid
	}
	p, err := os.FindProcess(meta.OwnerPID)
	if err != nil {
		// e.g. on Windows
		return statusStale
	}
	// On unix, os.FindProcess always is true, so we have to send
	// it a signal to see if it's alive.
	if signalZero != nil {
		if p.Signal(signalZero) != nil {
			return statusStale
		}
	}
	return statusLocked
}
var signalZero os.Signal // nil or set by lock_sigzero.go

// lockmu guards locked, the process-local registry of held lock paths.
var (
	lockmu sync.Mutex
	locked = map[string]bool{} // abs path -> true
)

// unlocker releases a lock acquired by Lock when closed. Close is
// idempotent: the underlying cleanup runs at most once.
type unlocker struct {
	portable bool
	f *os.File
	abs string
	// once guards the close method call.
	once sync.Once
	// err holds the error returned by Close.
	err error
}
// Close releases the lock. It is safe to call multiple times; the first
// call performs the release and subsequent calls return the same error.
func (u *unlocker) Close() error {
	u.once.Do(u.close)
	return u.err
}
// close performs the actual release: it deregisters the path from the
// process-local registry, then closes and (where appropriate) removes
// the lock file. Any error is stored in u.err for Close to return.
func (u *unlocker) close() {
	lockmu.Lock()
	defer lockmu.Unlock()
	delete(locked, u.abs)
	if u.portable {
		// In the portable lock implementation, it's
		// important to close before removing because
		// Windows won't allow us to remove an open
		// file.
		if err := u.f.Close(); err != nil {
			u.err = err
		}
		if err := os.Remove(u.abs); err != nil {
			// Note that if both Close and Remove fail,
			// we care more about the latter than the former
			// so we'll return that error.
			u.err = err
		}
		return
	}
	// In other implementations, it's nice for us to clean up.
	// If we do this, though, it needs to be before the
	// u.f.Close below.
	os.Remove(u.abs)
	u.err = u.f.Close()
}

View File

@@ -0,0 +1,32 @@
// +build appengine
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"errors"
"io"
)
// init installs the App Engine stub as the platform lock implementation.
func init() {
	lockFn = lockAppEngine
}

// lockAppEngine always fails: App Engine offers no usable file locking.
func lockAppEngine(name string) (io.Closer, error) {
	return nil, errors.New("Lock not available on App Engine")
}

View File

@@ -0,0 +1,67 @@
// +build darwin,amd64
// +build !appengine
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"fmt"
"io"
"os"
"syscall"
"unsafe"
)
func init() {
lockFn = lockFcntl
}
// lockFcntl acquires an exclusive advisory lock on name via fcntl(2)
// F_SETLK (darwin/amd64 flock layout). The file must be empty or absent;
// it is created if needed and the returned unlocker releases the lock.
func lockFcntl(name string) (io.Closer, error) {
	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err := os.Create(name)
	if err != nil {
		return nil, fmt.Errorf("Lock Create of %s failed: %v", name, err)
	}
	// This type matches C's "struct flock" defined in /usr/include/sys/fcntl.h.
	// TODO: move this into the standard syscall package.
	k := struct {
		Start uint64 // sizeof(off_t): 8
		Len uint64 // sizeof(off_t): 8
		Pid uint32 // sizeof(pid_t): 4
		Type uint16 // sizeof(short): 2
		Whence uint16 // sizeof(short): 2
	}{
		Type: syscall.F_WRLCK,
		Whence: uint16(os.SEEK_SET),
		Start: 0,
		Len: 0, // 0 means to lock the entire file.
		Pid: uint32(os.Getpid()),
	}
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
	if errno != 0 {
		// F_SETLK is non-blocking: a held lock surfaces here as an errno.
		f.Close()
		return nil, errno
	}
	return &unlocker{f: f, abs: name}, nil
}

View File

@@ -0,0 +1,66 @@
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock

import (
	"fmt"
	"io"
	"os"
	"syscall"
	"unsafe"
)

// Register the fcntl-based implementation as this platform's lock function.
func init() {
	lockFn = lockFcntl
}

// lockFcntl acquires an exclusive advisory write lock on the file at name
// via fcntl(F_SETLK), creating the file if necessary. A file with non-zero
// size is refused, on the assumption that a lock file is always zero sized.
// The returned io.Closer (an unlocker, declared elsewhere in this package)
// releases the lock.
func lockFcntl(name string) (io.Closer, error) {
	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err := os.Create(name)
	if err != nil {
		return nil, err
	}
	// This type matches C's "struct flock" defined in /usr/include/fcntl.h.
	// TODO: move this into the standard syscall package.
	// NOTE(review): layout (including the trailing Sysid field) is
	// ABI-specific — confirm against the platform headers before changing.
	k := struct {
		Start  int64 /* off_t starting offset */
		Len    int64 /* off_t len = 0 means until end of file */
		Pid    int32 /* pid_t lock owner */
		Type   int16 /* short lock type: read/write, etc. */
		Whence int16 /* short type of l_start */
		Sysid  int32 /* int remote system id or zero for local */
	}{
		Start:  0,
		Len:    0, // 0 means to lock the entire file.
		Pid:    int32(os.Getpid()),
		Type:   syscall.F_WRLCK,
		Whence: int16(os.SEEK_SET),
		Sysid:  0,
	}
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
	if errno != 0 {
		f.Close()
		return nil, errno
	}
	return &unlocker{f: f, abs: name}, nil
}

View File

@@ -0,0 +1,67 @@
// +build linux,amd64
// +build !appengine
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock

import (
	"fmt"
	"io"
	"os"
	"syscall"
	"unsafe"
)

// Register the fcntl-based implementation as this platform's lock function.
func init() {
	lockFn = lockFcntl
}

// lockFcntl acquires an exclusive advisory write lock on the file at name
// via fcntl(F_SETLK), creating the file if necessary. A file with non-zero
// size is refused, on the assumption that a lock file is always zero sized.
// The returned io.Closer (an unlocker, declared elsewhere in this package)
// releases the lock.
func lockFcntl(name string) (io.Closer, error) {
	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err := os.Create(name)
	if err != nil {
		return nil, err
	}
	// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
	// TODO: move this into the standard syscall package.
	// NOTE(review): this layout targets the linux/amd64 ABI (see the file's
	// build tags) — confirm against the platform headers before changing.
	k := struct {
		Type   uint32
		Whence uint32
		Start  uint64
		Len    uint64
		Pid    uint32
	}{
		Type:   syscall.F_WRLCK,
		Whence: uint32(os.SEEK_SET),
		Start:  0,
		Len:    0, // 0 means to lock the entire file.
		Pid:    uint32(os.Getpid()),
	}
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
	if errno != 0 {
		f.Close()
		return nil, errno
	}
	return &unlocker{f: f, abs: name}, nil
}

View File

@@ -0,0 +1,68 @@
// +build linux,arm
// +build !appengine
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock

import (
	"fmt"
	"io"
	"os"
	"syscall"
	"unsafe"
)

// Register the fcntl-based implementation as this platform's lock function.
func init() {
	lockFn = lockFcntl
}

// lockFcntl acquires an exclusive advisory write lock on the file at name
// via fcntl(F_SETLK), creating the file if necessary. A file with non-zero
// size is refused, on the assumption that a lock file is always zero sized.
// The returned io.Closer (an unlocker, declared elsewhere in this package)
// releases the lock.
func lockFcntl(name string) (io.Closer, error) {
	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err := os.Create(name)
	if err != nil {
		return nil, err
	}
	// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
	// TODO: move this into the standard syscall package.
	// NOTE(review): this layout targets the linux/arm ABI (see the file's
	// build tags) — confirm against the platform headers before changing.
	k := struct {
		Type   uint16
		Whence uint16
		Start  uint32
		Len    uint32
		Pid    uint32
	}{
		Type:   syscall.F_WRLCK,
		Whence: uint16(os.SEEK_SET),
		Start:  0,
		Len:    0, // 0 means to lock the entire file.
		Pid:    uint32(os.Getpid()),
	}
	// The syscall package's F_SETLK value is wrong on this platform; use the
	// correct numeric constant directly.
	const F_SETLK = 6 // actual value. syscall package is wrong: golang.org/issue/7059
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(F_SETLK), uintptr(unsafe.Pointer(&k)))
	if errno != 0 {
		f.Close()
		return nil, errno
	}
	return &unlocker{f: f, abs: name}, nil
}

View File

@@ -0,0 +1,41 @@
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock

import (
	"fmt"
	"io"
	"os"
)

// Register the Plan 9 implementation as this platform's lock function.
func init() {
	lockFn = lockPlan9
}

// lockPlan9 locks the file at name by opening it with os.ModeExclusive,
// letting the Plan 9 kernel enforce exclusive access. A file with non-zero
// size is refused, on the assumption that a lock file is always zero sized.
// The returned io.Closer (an unlocker, declared elsewhere in this package)
// releases the lock.
func lockPlan9(name string) (io.Closer, error) {
	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
	if err != nil {
		return nil, fmt.Errorf("Lock Create of %s failed: %v", name, err)
	}
	return &unlocker{f: f, abs: name}, nil
}

View File

@@ -0,0 +1,26 @@
// +build !appengine
// +build linux darwin freebsd openbsd netbsd dragonfly
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock

import "syscall"

// On Unix-like systems "signal 0" can be sent to a pid to probe for process
// existence without actually signalling it; register it as this package's
// probe signal. NOTE(review): signalZero is declared elsewhere in this
// package.
func init() {
	signalZero = syscall.Signal(0)
}

View File

@@ -0,0 +1,324 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Two Phase Commit & Structural ACID
package lldb
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"os"
"github.com/cznic/fileutil"
"github.com/cznic/mathutil"
)
var _ Filer = &ACIDFiler0{} // Ensure ACIDFiler0 is a Filer

// acidWrite records one WriteAt performed during the current transaction so
// it can be replayed to the DB during phase 2 of the commit.
type acidWrite struct {
	b   []byte // data written
	off int64  // absolute offset of the write
}

// acidWriter0 adapts *ACIDFiler0 into the writer that mirrors every write
// into the write ahead log before it reaches the DB.
type acidWriter0 ACIDFiler0
// WriteAt logs the write to the WAL and queues it in memory; the data reaches
// the underlying DB only at commit (checkpoint) time.
func (a *acidWriter0) WriteAt(b []byte, off int64) (n int, err error) {
	f := (*ACIDFiler0)(a)
	if f.bwal == nil { // new epoch
		// First write of a transaction: reset the queued writes and start a
		// fresh buffered WAL stream, opened by a header packet.
		f.data = f.data[:0]
		f.bwal = bufio.NewWriter(f.wal)
		if err = a.writePacket([]interface{}{wpt00Header, walTypeACIDFiler0, ""}); err != nil {
			return
		}
	}
	if err = a.writePacket([]interface{}{wpt00WriteData, b, off}); err != nil {
		return
	}
	// Remember the write so the checkpoint callback can apply it to the DB.
	f.data = append(f.data, acidWrite{b, off})
	return len(b), nil
}
// writePacket gb-encodes items, prefixes the result with a 4 byte big-endian
// payload length, writes it to the buffered WAL, and zero-pads the packet to
// a multiple of 16 bytes (the on-disk WAL invariant checked by recoverDb).
func (a *acidWriter0) writePacket(items []interface{}) (err error) {
	f := (*ACIDFiler0)(a)
	b, err := EncodeScalars(items...)
	if err != nil {
		return
	}
	var b4 [4]byte
	binary.BigEndian.PutUint32(b4[:], uint32(len(b)))
	if _, err = f.bwal.Write(b4[:]); err != nil {
		return
	}
	if _, err = f.bwal.Write(b); err != nil {
		return
	}
	// Pad the packet (length prefix included) to a 16 byte boundary.
	if m := (4 + len(b)) % 16; m != 0 {
		var pad [15]byte
		_, err = f.bwal.Write(pad[:16-m])
	}
	return
}
// WAL Packet Tags
const (
	wpt00Header     = iota // first packet of every WAL file
	wpt00WriteData         // payload of one WriteAt
	wpt00Checkpoint        // final packet; carries the committed file size
)

// WAL file format types.
const (
	walTypeACIDFiler0 = iota // format written and understood by ACIDFiler0
)

// ACIDFiler0 is a very simple, synchronous implementation of 2PC. It uses a
// single write ahead log file to provide the structural atomicity
// (BeginUpdate/EndUpdate/Rollback) and durability (DB can be recovered from
// WAL if a crash occurred).
//
// ACIDFiler0 is a Filer.
//
// NOTE: Durable synchronous 2PC involves three fsyncs in this implementation
// (WAL, DB, zero truncated WAL). Where possible, it's recommended to collect
// transactions for, say one second before performing the two phase commit as
// the typical performance for rotational hard disks is about few tens of
// fsyncs per second atmost. For an example of such collective transaction
// approach please see the colecting FSM STT in Dbm's documentation[1].
//
// [1]: http://godoc.org/github.com/cznic/exp/dbm
type ACIDFiler0 struct {
	*RollbackFiler
	wal               *os.File      // the write ahead log file
	bwal              *bufio.Writer // buffered writer over wal; nil between transactions
	data              []acidWrite   // writes queued for phase 2 of the current transaction
	testHook          bool          // keeps WAL untruncated (once)
	peakWal           int64         // tracks WAL maximum used size
	peakBitFilerPages int           // track maximum transaction memory
}
// NewACIDFiler returns a newly created ACIDFiler0 with WAL in wal.
//
// If the WAL is zero sized then a previous clean shutdown of db is taken for
// granted and no recovery procedure is taken.
//
// If the WAL is of non zero size then it is checked for having a
// committed/fully finished transaction not yet been reflected in db. If such
// transaction exists it's committed to db. If the recovery process finishes
// successfully, the WAL is truncated to zero size and fsync'ed prior to return
// from NewACIDFiler.
func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
	fi, err := wal.Stat()
	if err != nil {
		return
	}
	r = &ACIDFiler0{wal: wal}
	if fi.Size() != 0 {
		// Non-empty WAL: a crash interrupted a previous 2PC; replay it first.
		if err = r.recoverDb(db); err != nil {
			return
		}
	}
	acidWriter := (*acidWriter0)(r)
	if r.RollbackFiler, err = NewRollbackFiler(
		db,
		// The checkpoint callback: invoked when the outermost update commits.
		func(sz int64) (err error) {
			// Checkpoint
			if err = acidWriter.writePacket([]interface{}{wpt00Checkpoint, sz}); err != nil {
				return
			}
			if err = r.bwal.Flush(); err != nil {
				return
			}
			r.bwal = nil // next WriteAt starts a new WAL epoch
			if err = r.wal.Sync(); err != nil {
				return
			}
			wfi, err := r.wal.Stat()
			switch err != nil {
			case true:
				// unexpected, but ignored
			case false:
				r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
			}
			// Phase 1 commit complete
			for _, v := range r.data {
				if _, err := db.WriteAt(v.b, v.off); err != nil {
					return err
				}
			}
			if err = db.Truncate(sz); err != nil {
				return
			}
			if err = db.Sync(); err != nil {
				return
			}
			// Phase 2 commit complete
			if !r.testHook {
				// Empty the WAL: the transaction is fully in the DB now.
				if err = r.wal.Truncate(0); err != nil {
					return
				}
				if _, err = r.wal.Seek(0, 0); err != nil {
					return
				}
			}
			r.testHook = false
			return r.wal.Sync()
		},
		acidWriter,
	); err != nil {
		return
	}
	return r, nil
}
// PeakWALSize reports the maximum size WAL has ever used.
// NOTE(review): value receiver copies the struct; the method only reads
// peakWal, so this is harmless but inconsistent with the pointer methods.
func (a ACIDFiler0) PeakWALSize() int64 {
	return a.peakWal
}
// readPacket reads one WAL packet from f: a 4 byte big-endian payload length,
// the gb-encoded payload and the zero padding up to the next 16 byte
// boundary. It returns the decoded payload scalars.
func (a *ACIDFiler0) readPacket(f *bufio.Reader) (items []interface{}, err error) {
	var b4 [4]byte
	n, err := io.ReadAtLeast(f, b4[:], 4)
	if n != 4 {
		return // err from ReadAtLeast explains the short read
	}
	ln := int(binary.BigEndian.Uint32(b4[:]))
	m := (4 + ln) % 16
	padd := (16 - m) % 16 // number of padding bytes following the payload
	b := make([]byte, ln+padd)
	if n, err = io.ReadAtLeast(f, b, len(b)); n != len(b) {
		return
	}
	// Decode only the payload; padding bytes are discarded.
	return DecodeScalars(b[:ln])
}
// recoverDb replays a non-empty WAL into db: it validates the WAL header,
// collects all logged writes into a BTree keyed by offset (a later write to
// the same offset replaces an earlier one), and on reaching the checkpoint
// packet applies the writes to db, truncates and syncs db, then empties and
// syncs the WAL.
func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
	fi, err := a.wal.Stat()
	if err != nil {
		return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: err}
	}
	if sz := fi.Size(); sz%16 != 0 {
		// Every packet is padded to 16 bytes, so the file size must be too.
		return &ErrILSEQ{Type: ErrFileSize, Name: a.wal.Name(), Arg: sz}
	}
	f := bufio.NewReader(a.wal)
	items, err := a.readPacket(f)
	if err != nil {
		return
	}
	if len(items) != 3 || items[0] != int64(wpt00Header) || items[1] != int64(walTypeACIDFiler0) {
		return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
	}
	tr := NewBTree(nil)
	for {
		items, err = a.readPacket(f)
		if err != nil {
			return
		}
		if len(items) < 2 {
			return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("too few packet items %#v", items)}
		}
		switch items[0] {
		case int64(wpt00WriteData):
			if len(items) != 3 {
				return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid data packet items %#v", items)}
			}
			b, off := items[1].([]byte), items[2].(int64)
			var key [8]byte
			binary.BigEndian.PutUint64(key[:], uint64(off))
			if err = tr.Set(key[:], b); err != nil {
				return
			}
		case int64(wpt00Checkpoint):
			// The checkpoint must be the very last packet; any trailing byte
			// means the WAL is corrupt.
			var b1 [1]byte
			if n, err := f.Read(b1[:]); n != 0 || err == nil {
				return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint n %d, err %v", n, err)}
			}
			if len(items) != 2 {
				return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint packet invalid items %#v", items)}
			}
			sz := items[1].(int64)
			enum, err := tr.seekFirst()
			if err != nil {
				return err
			}
			// Apply the collected writes to db in ascending offset order.
			for {
				k, v, err := enum.current()
				if err != nil {
					if fileutil.IsEOF(err) {
						break
					}
					return err
				}
				if _, err = db.WriteAt(v, int64(binary.BigEndian.Uint64(k))); err != nil {
					return err
				}
				if err = enum.next(); err != nil {
					if fileutil.IsEOF(err) {
						break
					}
					return err
				}
			}
			if err = db.Truncate(sz); err != nil {
				return err
			}
			if err = db.Sync(); err != nil {
				return err
			}
			// Recovery complete
			if err = a.wal.Truncate(0); err != nil {
				return err
			}
			return a.wal.Sync()
		default:
			return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("packet tag %v", items[0])}
		}
	}
}

View File

@@ -0,0 +1,44 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Anatomy of a WAL file
WAL file
A sequence of packets
WAL packet, parts in slice notation
[0:4], 4 bytes: N uint32 // network byte order
[4:4+N], N bytes: payload []byte // gb encoded scalars
Packets, including the 4 byte 'size' prefix, MUST BE padded to size == 0 (mod
16). The values of the padding bytes MUST BE zero.
Encoded scalars first item is a packet type number (packet tag). The meaning of
any other item(s) of the payload depends on the packet tag.
Packet definitions
{wpt00Header int, typ int, s string}
typ: Must be zero (ACIDFiler0 file).
s: Any comment string, empty string is okay.
This packet must be present only once - as the first packet of
a WAL file.
{wpt00WriteData int, b []byte, off int64}
Write data (WriteAt(b, off)).
{wpt00Checkpoint int, sz int64}
Checkpoint (Truncate(sz)).
This packet must be present only once - as the last packet of
a WAL file.
*/
package lldb
//TODO optimize bitfiler/wal/2pc data above final size

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,170 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Some errors returned by this package.
//
// Note that this package can return more errors than declared here, for
// example io.EOF from Filer.ReadAt().
package lldb
import (
"fmt"
)
// ErrDecodeScalars is possibly returned from DecodeScalars.
type ErrDecodeScalars struct {
	B []byte // Data being decoded
	I int    // offending offset
}

// Error implements the built in error type.
func (d *ErrDecodeScalars) Error() string {
	total := len(d.B)
	return fmt.Sprintf("DecodeScalars: corrupted data @ %d/%d", d.I, total)
}
// ErrINVAL reports invalid values passed as parameters, for example negative
// offsets where only non-negative ones are allowed or read from the DB.
type ErrINVAL struct {
	Src string
	Val interface{}
}

// Error implements the built in error type.
func (iv *ErrINVAL) Error() string {
	detail := fmt.Sprintf("%+v", iv.Val)
	return iv.Src + ": " + detail
}
// ErrPERM is for example reported when a Filer is closed while BeginUpdate(s)
// are not balanced with EndUpdate(s)/Rollback(s) or when EndUpdate or Rollback
// is invoked which is not paired with a BeginUpdate.
type ErrPERM struct {
	Src string // context of the refused operation
}

// Error implements the built in error type.
func (e *ErrPERM) Error() string {
	// e.Src is already a string; the former string(e.Src) conversion was a
	// redundant no-op.
	return fmt.Sprintf("%s: Operation not permitted", e.Src)
}
// ErrType represents an ErrILSEQ kind.
type ErrType int

// ErrILSEQ types
const (
	ErrOther                 ErrType = iota
	ErrAdjacentFree          // Adjacent free blocks (.Off and .Arg)
	ErrDecompress            // Used compressed block: corrupted compression
	ErrExpFreeTag            // Expected a free block tag, got .Arg
	ErrExpUsedTag            // Expected a used block tag, got .Arg
	ErrFLT                   // Free block is invalid or referenced multiple times
	ErrFLTLoad               // FLT truncated to .Off, need size >= .Arg
	ErrFLTSize               // Free block size (.Arg) doesn't belong to its list min size: .Arg2
	ErrFileSize              // File .Name size (.Arg) != 0 (mod 16)
	ErrFreeChaining          // Free block, .prev.next doesn't point back to this block
	ErrFreeTailBlock         // Last block is free
	ErrHead                  // Head of a free block list has non zero Prev (.Arg)
	ErrInvalidRelocTarget    // Reloc doesn't target (.Arg) a short or long used block
	ErrInvalidWAL            // Corrupted write ahead log. .Name: file name, .More: more
	ErrLongFreeBlkTooLong    // Long free block spans beyond EOF, size .Arg
	ErrLongFreeBlkTooShort   // Long free block must have at least 2 atoms, got only .Arg
	ErrLongFreeNextBeyondEOF // Long free block .Next (.Arg) spans beyond EOF
	ErrLongFreePrevBeyondEOF // Long free block .Prev (.Arg) spans beyond EOF
	ErrLongFreeTailTag       // Expected a long free block tail tag, got .Arg
	ErrLostFreeBlock         // Free block is not in any FLT list
	ErrNullReloc             // Used reloc block with nil target
	ErrRelocBeyondEOF        // Used reloc points (.Arg) beyond EOF
	ErrShortFreeTailTag      // Expected a short free block tail tag, got .Arg
	ErrSmall                 // Request for a free block (.Arg) returned a too small one (.Arg2) at .Off
	ErrTailTag               // Block at .Off has invalid tail CC (compression code) tag, got .Arg
	ErrUnexpReloc            // Unexpected reloc block referred to from reloc block .Arg
	ErrVerifyPadding         // Used block has nonzero padding
	ErrVerifyTailSize        // Long free block size .Arg but tail size .Arg2
	ErrVerifyUsedSpan        // Used block size (.Arg) spans beyond EOF
)

// ErrILSEQ reports a corrupted file format. Details in fields according to Type.
type ErrILSEQ struct {
	Type ErrType
	Off  int64       // offending file offset, when applicable
	Arg  int64       // first numeric detail (tag, size, link, ...)
	Arg2 int64       // second numeric detail
	Arg3 int64       // third numeric detail
	Name string      // file name, when applicable
	More interface{} // free-form extra detail
}

// Error implements the built in error type.
//
// Fixed message typos: "ofset" -> "offset" (ErrExpUsedTag), "doesn" ->
// "doesn't" (ErrFreeChaining) and "Request for of free block" -> "Request
// for a free block" (ErrSmall).
func (e *ErrILSEQ) Error() string {
	switch e.Type {
	case ErrAdjacentFree:
		return fmt.Sprintf("Adjacent free blocks at offset %#x and %#x", e.Off, e.Arg)
	case ErrDecompress:
		return fmt.Sprintf("Compressed block at offset %#x: Corrupted compressed content", e.Off)
	case ErrExpFreeTag:
		return fmt.Sprintf("Block at offset %#x: Expected a free block tag, got %#2x", e.Off, e.Arg)
	case ErrExpUsedTag:
		return fmt.Sprintf("Block at offset %#x: Expected a used block tag, got %#2x", e.Off, e.Arg)
	case ErrFLT:
		return fmt.Sprintf("Free block at offset %#x is invalid or referenced multiple times", e.Off)
	case ErrFLTLoad:
		return fmt.Sprintf("FLT truncated to size %d, expected at least %d", e.Off, e.Arg)
	case ErrFLTSize:
		return fmt.Sprintf("Free block at offset %#x has size (%#x) should be at least (%#x)", e.Off, e.Arg, e.Arg2)
	case ErrFileSize:
		return fmt.Sprintf("File %q size (%#x) != 0 (mod 16)", e.Name, e.Arg)
	case ErrFreeChaining:
		return fmt.Sprintf("Free block at offset %#x: .prev.next doesn't point back here.", e.Off)
	case ErrFreeTailBlock:
		return fmt.Sprintf("Free block at offset %#x: Cannot be last file block", e.Off)
	case ErrHead:
		return fmt.Sprintf("Block at offset %#x: Head of free block list has non zero .prev %#x", e.Off, e.Arg)
	case ErrInvalidRelocTarget:
		return fmt.Sprintf("Used reloc block at offset %#x: Target (%#x) is not a short or long used block", e.Off, e.Arg)
	case ErrInvalidWAL:
		return fmt.Sprintf("Corrupted write ahead log file: %q %v", e.Name, e.More)
	case ErrLongFreeBlkTooLong:
		return fmt.Sprintf("Long free block at offset %#x: Size (%#x) beyond EOF", e.Off, e.Arg)
	case ErrLongFreeBlkTooShort:
		return fmt.Sprintf("Long free block at offset %#x: Size (%#x) too small", e.Off, e.Arg)
	case ErrLongFreeNextBeyondEOF:
		return fmt.Sprintf("Long free block at offset %#x: Next (%#x) points beyond EOF", e.Off, e.Arg)
	case ErrLongFreePrevBeyondEOF:
		return fmt.Sprintf("Long free block at offset %#x: Prev (%#x) points beyond EOF", e.Off, e.Arg)
	case ErrLongFreeTailTag:
		return fmt.Sprintf("Block at offset %#x: Expected long free tail tag, got %#2x", e.Off, e.Arg)
	case ErrLostFreeBlock:
		return fmt.Sprintf("Free block at offset %#x: not in any FLT list", e.Off)
	case ErrNullReloc:
		return fmt.Sprintf("Used reloc block at offset %#x: Nil target", e.Off)
	case ErrRelocBeyondEOF:
		return fmt.Sprintf("Used reloc block at offset %#x: Link (%#x) points beyond EOF", e.Off, e.Arg)
	case ErrShortFreeTailTag:
		return fmt.Sprintf("Block at offset %#x: Expected short free tail tag, got %#2x", e.Off, e.Arg)
	case ErrSmall:
		return fmt.Sprintf("Request for a free block of size %d returned a too small (%d) one at offset %#x", e.Arg, e.Arg2, e.Off)
	case ErrTailTag:
		return fmt.Sprintf("Block at offset %#x: Invalid tail CC tag, got %#2x", e.Off, e.Arg)
	case ErrUnexpReloc:
		return fmt.Sprintf("Block at offset %#x: Unexpected reloc block. Referred to from reloc block at offset %#x", e.Off, e.Arg)
	case ErrVerifyPadding:
		return fmt.Sprintf("Used block at offset %#x: Nonzero padding", e.Off)
	case ErrVerifyTailSize:
		return fmt.Sprintf("Long free block at offset %#x: Size %#x, but tail size %#x", e.Off, e.Arg, e.Arg2)
	case ErrVerifyUsedSpan:
		return fmt.Sprintf("Used block at offset %#x: Size %#x spans beyond EOF", e.Off, e.Arg)
	}
	// Unknown/other type: generic message with whatever detail is present.
	more := ""
	if e.More != nil {
		more = fmt.Sprintf(", %v", e.More)
	}
	off := ""
	if e.Off != 0 {
		off = fmt.Sprintf(", off: %#x", e.Off)
	}
	return fmt.Sprintf("Error%s%s", off, more)
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,192 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// An abstraction of file like (persistent) storage with optional (abstracted)
// support for structural integrity.
package lldb
import (
"fmt"
"github.com/cznic/mathutil"
)
func doubleTrouble(first, second error) error {
return fmt.Errorf("%q. Additionally, while attempting to recover (rollback): %q", first, second)
}
// A Filer is a []byte-like model of a file or similar entity. It may
// optionally implement support for structural transaction safety. In contrast
// to a file stream, a Filer is not sequentially accessible. ReadAt and WriteAt
// are always "addressed" by an offset and are assumed to perform atomically.
// A Filer is not safe for concurrent access, it's designed for consumption by
// the other objects in package, which should use a Filer from one goroutine
// only or via a mutex. BeginUpdate, EndUpdate and Rollback must be either all
// implemented by a Filer for structural integrity - or they should be all
// no-ops; where/if that requirement is relaxed.
//
// If a Filer wraps another Filer implementation, it usually invokes the same
// methods on the "inner" one, after some possible argument translations etc.
// If a Filer implements the structural transactions handling methods
// (BeginUpdate, EndUpdate and Rollback) as no-ops _and_ wraps another Filer:
// it then still MUST invoke those methods on the inner Filer. This is
// important for the case where a RollbackFiler exists somewhere down the
// chain. It's also important for an Allocator - to know when it must
// invalidate its FLT cache.
type Filer interface {
	// BeginUpdate increments the "nesting" counter (initially zero). Every
	// call to BeginUpdate must be eventually "balanced" by exactly one of
	// EndUpdate or Rollback. Calls to BeginUpdate may nest.
	BeginUpdate() error

	// Analogous to os.File.Close().
	Close() error

	// EndUpdate decrements the "nesting" counter. If it's zero after that
	// then assume the "storage" has reached structural integrity (after a
	// batch of partial updates). If a Filer implements some support for
	// that (write ahead log, journal, etc.) then the appropriate actions
	// are to be taken for nesting == 0. Invocation of an unbalanced
	// EndUpdate is an error.
	EndUpdate() error

	// Analogous to os.File.Name().
	Name() string

	// PunchHole deallocates space inside a "file" in the byte range
	// starting at off and continuing for size bytes. The actual hole
	// created by PunchHole may be smaller than requested. The Filer size
	// (as reported by `Size()` does not change when hole punching, even
	// when punching the end of a file off. In contrast to the Linux
	// implementation of FALLOC_FL_PUNCH_HOLE in `fallocate`(2); a Filer is
	// free not only to ignore `PunchHole()` (implement it as a nop), but
	// additionally no guarantees about the content of the hole, when
	// eventually read back, are required, i.e. any data, not only zeros,
	// can be read from the "hole", including just anything what was left
	// there - with all of the possible security problems.
	PunchHole(off, size int64) error

	// As os.File.ReadAt. Note: `off` is an absolute "file pointer"
	// address and cannot be negative even when a Filer is a InnerFiler.
	ReadAt(b []byte, off int64) (n int, err error)

	// Rollback cancels and undoes the innermost pending update level.
	// Rollback decrements the "nesting" counter. If a Filer implements
	// some support for keeping structural integrity (write ahead log,
	// journal, etc.) then the appropriate actions are to be taken.
	// Invocation of an unbalanced Rollback is an error.
	Rollback() error

	// Analogous to os.File.Stat().Size().
	Size() (int64, error)

	// Analogous to os.File.Sync().
	Sync() (err error)

	// Analogous to os.File.Truncate().
	Truncate(size int64) error

	// Analogous to os.File.WriteAt(). Note: `off` is an absolute "file
	// pointer" address and cannot be negative even when a Filer is a
	// InnerFiler.
	WriteAt(b []byte, off int64) (n int, err error)
}
var _ Filer = &InnerFiler{} // Ensure InnerFiler is a Filer.

// A InnerFiler is a Filer with added addressing/size translation.
type InnerFiler struct {
	outer Filer // the wrapped Filer
	off   int64 // offset added to every access
}

// NewInnerFiler returns a new InnerFiler wrapped by `outer` in a way which
// adds `off` to every access.
//
// For example, considering:
//
//	inner := NewInnerFiler(outer, 10)
//
// then
//
//	inner.WriteAt([]byte{42}, 4)
//
// translates to
//
//	outer.WriteAt([]byte{42}, 14)
//
// But an attempt to emulate
//
//	outer.WriteAt([]byte{17}, 9)
//
// by
//
//	inner.WriteAt([]byte{17}, -1)
//
// will fail as the `off` parameter can never be < 0. Also note that
//
//	inner.Size() == outer.Size() - off,
//
// i.e. `inner` pretends no `outer` exists. Finally, after e.g.
//
//	inner.Truncate(7)
//	outer.Size() == 17
//
// will be true.
func NewInnerFiler(outer Filer, off int64) *InnerFiler { return &InnerFiler{outer, off} }

// BeginUpdate implements Filer.
func (f *InnerFiler) BeginUpdate() error { return f.outer.BeginUpdate() }

// Close implements Filer.
func (f *InnerFiler) Close() (err error) { return f.outer.Close() }

// EndUpdate implements Filer.
func (f *InnerFiler) EndUpdate() error { return f.outer.EndUpdate() }

// Name implements Filer.
func (f *InnerFiler) Name() string { return f.outer.Name() }

// PunchHole implements Filer. `off`, `size` must be >= 0.
func (f *InnerFiler) PunchHole(off, size int64) error { return f.outer.PunchHole(f.off+off, size) }

// ReadAt implements Filer. `off` must be >= 0.
func (f *InnerFiler) ReadAt(b []byte, off int64) (n int, err error) {
	if off < 0 {
		return 0, &ErrINVAL{f.outer.Name() + ":ReadAt invalid off", off}
	}
	return f.outer.ReadAt(b, f.off+off)
}

// Rollback implements Filer.
func (f *InnerFiler) Rollback() error { return f.outer.Rollback() }

// Size implements Filer.
func (f *InnerFiler) Size() (int64, error) {
	sz, err := f.outer.Size()
	if err != nil {
		return 0, err
	}
	// Clamp to zero: the outer Filer may be smaller than the offset.
	return mathutil.MaxInt64(sz-f.off, 0), nil
}

// Sync implements Filer.
func (f *InnerFiler) Sync() (err error) {
	return f.outer.Sync()
}

// Truncate implements Filer.
func (f *InnerFiler) Truncate(size int64) error { return f.outer.Truncate(size + f.off) }

// WriteAt implements Filer. `off` must be >= 0.
func (f *InnerFiler) WriteAt(b []byte, off int64) (n int, err error) {
	if off < 0 {
		return 0, &ErrINVAL{f.outer.Name() + ":WriteAt invalid off", off}
	}
	return f.outer.WriteAt(b, f.off+off)
}

View File

@@ -0,0 +1,812 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Utilities to encode/decode and collate Go predeclared scalar types (and the
// typeless nil and []byte). The encoding format is a variation of the one
// used by the "encoding/gob" package.
package lldb
import (
"bytes"
"fmt"
"math"
"github.com/cznic/mathutil"
)
// gb encoding tag bytes. The tag both identifies the type of the following
// value and, for short strings/[]byte and small ints, encodes the value or
// length directly.
const (
	gbNull     = iota // 0x00
	gbFalse           // 0x01
	gbTrue            // 0x02
	gbFloat0          // 0x03
	gbFloat1          // 0x04
	gbFloat2          // 0x05
	gbFloat3          // 0x06
	gbFloat4          // 0x07
	gbFloat5          // 0x08
	gbFloat6          // 0x09
	gbFloat7          // 0x0a
	gbFloat8          // 0x0b
	gbComplex0        // 0x0c
	gbComplex1        // 0x0d
	gbComplex2        // 0x0e
	gbComplex3        // 0x0f
	gbComplex4        // 0x10
	gbComplex5        // 0x11
	gbComplex6        // 0x12
	gbComplex7        // 0x13
	gbComplex8        // 0x14
	gbBytes00         // 0x15
	gbBytes01         // 0x16
	gbBytes02         // 0x17
	gbBytes03         // 0x18
	gbBytes04         // 0x19
	gbBytes05         // 0x1a
	gbBytes06         // 0x1b
	gbBytes07         // 0x1c
	gbBytes08         // 0x1d
	gbBytes09         // 0x1e
	gbBytes10         // 0x1f
	gbBytes11         // 0x20
	gbBytes12         // 0x21
	gbBytes13         // 0x22
	gbBytes14         // 0x23
	gbBytes15         // 0x24
	gbBytes16         // 0x25
	gbBytes17         // 0x26
	gbBytes1          // 0x27
	gbBytes2          // 0x28: Offset by one to allow 64kB sized []byte.
	gbString00        // 0x29
	gbString01        // 0x2a
	gbString02        // 0x2b
	gbString03        // 0x2c
	gbString04        // 0x2d
	gbString05        // 0x2e
	gbString06        // 0x2f
	gbString07        // 0x30
	gbString08        // 0x31
	gbString09        // 0x32
	gbString10        // 0x33
	gbString11        // 0x34
	gbString12        // 0x35
	gbString13        // 0x36
	gbString14        // 0x37
	gbString15        // 0x38
	gbString16        // 0x39
	gbString17        // 0x3a
	gbString1         // 0x3b
	gbString2         // 0x3c
	gbUintP1          // 0x3d
	gbUintP2          // 0x3e
	gbUintP3          // 0x3f
	gbUintP4          // 0x40
	gbUintP5          // 0x41
	gbUintP6          // 0x42
	gbUintP7          // 0x43
	gbUintP8          // 0x44
	gbIntM8           // 0x45
	gbIntM7           // 0x46
	gbIntM6           // 0x47
	gbIntM5           // 0x48
	gbIntM4           // 0x49
	gbIntM3           // 0x4a
	gbIntM2           // 0x4b
	gbIntM1           // 0x4c
	gbIntP1           // 0x4d
	gbIntP2           // 0x4e
	gbIntP3           // 0x4f
	gbIntP4           // 0x50
	gbIntP5           // 0x51
	gbIntP6           // 0x52
	gbIntP7           // 0x53
	gbIntP8           // 0x54
	gbInt0            // 0x55

	gbIntMax = 255 - gbInt0 // tag 0xff encodes 170, the largest tag-embedded int
)
// EncodeScalars encodes a vector of predeclared scalar type values to a
// []byte, making it suitable to store it as a "record" in a DB or to use it as
// a key of a BTree.
//
// Supported types: nil, bool, all int/uint widths, float32/64, complex64/128,
// string (up to 65535 bytes) and []byte (up to 65536 bytes).
func EncodeScalars(scalars ...interface{}) (b []byte, err error) {
	for _, scalar := range scalars {
		switch x := scalar.(type) {
		default:
			return nil, &ErrINVAL{"EncodeScalars: unsupported type", fmt.Sprintf("%T in `%#v`", x, scalars)}
		case nil:
			b = append(b, gbNull)
		case bool:
			switch x {
			case false:
				b = append(b, gbFalse)
			case true:
				b = append(b, gbTrue)
			}
		case float32:
			encFloat(float64(x), &b)
		case float64:
			encFloat(x, &b)
		case complex64:
			encComplex(complex128(x), &b)
		case complex128:
			encComplex(x, &b)
		case string:
			n := len(x)
			if n <= 17 {
				// Short string: the length is folded into the tag byte.
				b = append(b, byte(gbString00+n))
				b = append(b, []byte(x)...)
				break
			}
			// gbString2 stores the length in at most two bytes, so 65535 is
			// the hard cap. (Message previously claimed 65536, which is the
			// []byte limit, not the string limit.)
			if n > 65535 {
				return nil, fmt.Errorf("EncodeScalars: cannot encode string of length %d (limit 65535)", n)
			}
			pref := byte(gbString1)
			if n > 255 {
				pref++
			}
			b = append(b, pref)
			encUint0(uint64(n), &b)
			b = append(b, []byte(x)...)
		case int8:
			encInt(int64(x), &b)
		case int16:
			encInt(int64(x), &b)
		case int32:
			encInt(int64(x), &b)
		case int64:
			encInt(x, &b)
		case int:
			encInt(int64(x), &b)
		case uint8:
			encUint(uint64(x), &b)
		case uint16:
			encUint(uint64(x), &b)
		case uint32:
			encUint(uint64(x), &b)
		case uint64:
			encUint(x, &b)
		case uint:
			encUint(uint64(x), &b)
		case []byte:
			n := len(x)
			if n <= 17 {
				// Short []byte: the length is folded into the tag byte.
				b = append(b, byte(gbBytes00+n))
				b = append(b, []byte(x)...)
				break
			}
			// BUG FIX: was `n > 655356` (typo). gbBytes2 stores length-1 in
			// two bytes, so the maximum representable []byte length is 65536;
			// longer slices previously passed the check and produced a
			// corrupt, undecodable encoding.
			if n > 65536 {
				return nil, fmt.Errorf("EncodeScalars: cannot encode []byte of length %d (limit 65536)", n)
			}
			pref := byte(gbBytes1)
			if n > 255 {
				pref++
			}
			b = append(b, pref)
			if n <= 255 {
				b = append(b, byte(n))
			} else {
				n-- // offset by one so that 65536 still fits in two bytes
				b = append(b, byte(n>>8), byte(n))
			}
			b = append(b, x...)
		}
	}
	return
}
// encComplex appends the gb encoding of f: the real part followed by the
// imaginary part, each as a gbComplex0-based prefixed float encoding.
func encComplex(f complex128, b *[]byte) {
	encFloatPrefix(gbComplex0, real(f), b)
	encFloatPrefix(gbComplex0, imag(f), b)
}
// encFloatPrefix appends f's IEEE-754 bits with their byte order reversed,
// tagged with prefix+<number of significant bytes>. The byte reversal makes
// common floats (short mantissas) compress down to few bytes; +0.0 encodes
// as the bare prefix byte.
func encFloatPrefix(prefix byte, f float64, b *[]byte) {
	u := math.Float64bits(f)
	var n uint64
	// Reverse the byte order of u into n.
	for i := 0; i < 8; i++ {
		n <<= 8
		n |= u & 0xFF
		u >>= 8
	}
	bits := mathutil.BitLenUint64(n)
	if bits == 0 {
		*b = append(*b, prefix)
		return
	}
	// Map bit length to byte count (1..8) for the tag offset:
	// 0 1 2 3 4 5 6 7 8 9
	// . 1 1 1 1 1 1 1 1 2
	encUintPrefix(prefix+1+byte((bits-1)>>3), n, b)
}
// encFloat appends the gbFloat-tagged encoding of f.
func encFloat(f float64, b *[]byte) {
	encFloatPrefix(gbFloat0, f, b)
}
// encUint0 appends the big-endian bytes of n to *b using the minimal
// number of bytes (1..8) needed to represent the value; n == 0 is encoded
// as a single zero byte.
func encUint0(n uint64, b *[]byte) {
	switch {
	case n <= 0xff:
		*b = append(*b, byte(n))
	case n <= 0xffff:
		*b = append(*b, byte(n>>8), byte(n))
	case n <= 0xffffff:
		*b = append(*b, byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffff:
		*b = append(*b, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffff:
		*b = append(*b, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffffff:
		*b = append(*b, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffffffff:
		*b = append(*b, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	default:
		// All remaining values need the full 8 bytes. The previous
		// `case n <= math.MaxUint64` was a tautology for uint64 and
		// obscured that this arm is simply the catch-all.
		*b = append(*b, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	}
}
// encUintPrefix appends the tag byte followed by the minimal big-endian
// representation of v (see encUint0).
func encUintPrefix(tag byte, v uint64, dst *[]byte) {
	*dst = append(*dst, tag)
	encUint0(v, dst)
}
// encUint appends n to *b tagged with gbUintP1..gbUintP8 according to the
// number of significant bytes. At least one payload byte is always
// emitted (mathutil.Max(1, ...)), so 0 encodes as tag + one zero byte.
func encUint(n uint64, b *[]byte) {
	bits := mathutil.Max(1, mathutil.BitLenUint64(n))
	encUintPrefix(gbUintP1+byte((bits-1)>>3), n, b)
}
// encInt appends a variable-length encoding of n to *b. Small
// non-negative values (0..gbIntMax) are folded into the tag byte itself
// (gbInt0+n); other values get a gbIntMx/gbIntPx tag followed by the
// minimal two's-complement big-endian payload. The case order matters:
// the ranges are probed from most negative to most positive.
func encInt(n int64, b *[]byte) {
	switch {
	case n < -0x100000000000000:
		*b = append(*b, byte(gbIntM8), byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x1000000000000:
		*b = append(*b, byte(gbIntM7), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x10000000000:
		*b = append(*b, byte(gbIntM6), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x100000000:
		*b = append(*b, byte(gbIntM5), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x1000000:
		*b = append(*b, byte(gbIntM4), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x10000:
		*b = append(*b, byte(gbIntM3), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x100:
		*b = append(*b, byte(gbIntM2), byte(n>>8), byte(n))
	case n < 0:
		*b = append(*b, byte(gbIntM1), byte(n))
	case n <= gbIntMax:
		// Small non-negative ints live entirely in the tag byte.
		*b = append(*b, byte(gbInt0+n))
	case n <= 0xff:
		*b = append(*b, gbIntP1, byte(n))
	case n <= 0xffff:
		*b = append(*b, gbIntP2, byte(n>>8), byte(n))
	case n <= 0xffffff:
		*b = append(*b, gbIntP3, byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffff:
		*b = append(*b, gbIntP4, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffff:
		*b = append(*b, gbIntP5, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffffff:
		*b = append(*b, gbIntP6, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffffffff:
		*b = append(*b, gbIntP7, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0x7fffffffffffffff:
		// Catch-all for the remaining positive int64 range.
		*b = append(*b, gbIntP8, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	}
}
// decodeFloat reconstructs a float64 from its variable-length encoding:
// b holds the most-significant non-zero bytes of the byte-reversed
// IEEE-754 representation produced by encFloatPrefix; missing low-order
// bytes are implicitly zero. An empty b yields +0.0.
func decodeFloat(b []byte) float64 {
	var bits uint64
	shift := uint((8 - len(b)) * 8)
	for _, v := range b {
		bits |= uint64(v) << shift
		shift += 8
	}
	return math.Float64frombits(bits)
}
// DecodeScalars decodes a []byte produced by EncodeScalars.
//
// The first byte of each item is its tag; the tag either fully determines
// the value (nil, booleans, small ints, +0.0) or selects a payload length
// that follows. On any malformed input the function reports an
// ErrDecodeScalars carrying the original buffer and the offset reached.
func DecodeScalars(b []byte) (scalars []interface{}, err error) {
	b0 := b
	for len(b) != 0 {
		switch tag := b[0]; tag {
		//default:
		//return nil, fmt.Errorf("tag %d(%#x) not supported", b[0], b[0])
		case gbNull:
			scalars = append(scalars, nil)
			b = b[1:]
		case gbFalse:
			scalars = append(scalars, false)
			b = b[1:]
		case gbTrue:
			scalars = append(scalars, true)
			b = b[1:]
		case gbFloat0:
			// Bare prefix: +0.0 with no payload (see encFloatPrefix).
			scalars = append(scalars, 0.0)
			b = b[1:]
		case gbFloat1, gbFloat2, gbFloat3, gbFloat4, gbFloat5, gbFloat6, gbFloat7, gbFloat8:
			// n-1 payload bytes follow the tag.
			// NOTE(review): the check looks off by one — len(b) == n-1
			// would pass and then b[1:n] panics; confirm against upstream.
			n := 1 + int(tag) - gbFloat0
			if len(b) < n-1 {
				goto corrupted
			}
			scalars = append(scalars, decodeFloat(b[1:n]))
			b = b[n:]
		case gbComplex0, gbComplex1, gbComplex2, gbComplex3, gbComplex4, gbComplex5, gbComplex6, gbComplex7, gbComplex8:
			// Two consecutive float encodings: real part, then imaginary.
			n := 1 + int(tag) - gbComplex0
			if len(b) < n-1 {
				goto corrupted
			}
			re := decodeFloat(b[1:n])
			b = b[n:]
			if len(b) == 0 {
				goto corrupted
			}
			tag = b[0]
			if tag < gbComplex0 || tag > gbComplex8 {
				goto corrupted
			}
			n = 1 + int(tag) - gbComplex0
			if len(b) < n-1 {
				goto corrupted
			}
			scalars = append(scalars, complex(re, decodeFloat(b[1:n])))
			b = b[n:]
		case gbBytes00, gbBytes01, gbBytes02, gbBytes03, gbBytes04,
			gbBytes05, gbBytes06, gbBytes07, gbBytes08, gbBytes09,
			gbBytes10, gbBytes11, gbBytes12, gbBytes13, gbBytes14,
			gbBytes15, gbBytes16, gbBytes17:
			// Short []byte: length 0..17 folded into the tag.
			n := int(tag - gbBytes00)
			if len(b) < n+1 {
				goto corrupted
			}
			scalars = append(scalars, append([]byte(nil), b[1:n+1]...))
			b = b[n+1:]
		case gbBytes1:
			// []byte with a 1-byte length.
			if len(b) < 2 {
				goto corrupted
			}
			n := int(b[1])
			b = b[2:]
			if len(b) < n {
				goto corrupted
			}
			scalars = append(scalars, append([]byte(nil), b[:n]...))
			b = b[n:]
		case gbBytes2:
			// []byte with a 2-byte length; the encoder stores n-1, hence
			// the +1 here (allows lengths up to 65536).
			if len(b) < 3 {
				goto corrupted
			}
			n := int(b[1])<<8 | int(b[2]) + 1
			b = b[3:]
			if len(b) < n {
				goto corrupted
			}
			scalars = append(scalars, append([]byte(nil), b[:n]...))
			b = b[n:]
		case gbString00, gbString01, gbString02, gbString03, gbString04,
			gbString05, gbString06, gbString07, gbString08, gbString09,
			gbString10, gbString11, gbString12, gbString13, gbString14,
			gbString15, gbString16, gbString17:
			// Short string: length 0..17 folded into the tag.
			n := int(tag - gbString00)
			if len(b) < n+1 {
				goto corrupted
			}
			scalars = append(scalars, string(b[1:n+1]))
			b = b[n+1:]
		case gbString1:
			// String with a 1-byte length.
			if len(b) < 2 {
				goto corrupted
			}
			n := int(b[1])
			b = b[2:]
			if len(b) < n {
				goto corrupted
			}
			scalars = append(scalars, string(b[:n]))
			b = b[n:]
		case gbString2:
			// String with a 2-byte length (stored verbatim, max 65535).
			if len(b) < 3 {
				goto corrupted
			}
			n := int(b[1])<<8 | int(b[2])
			b = b[3:]
			if len(b) < n {
				goto corrupted
			}
			scalars = append(scalars, string(b[:n]))
			b = b[n:]
		case gbUintP1, gbUintP2, gbUintP3, gbUintP4, gbUintP5, gbUintP6, gbUintP7, gbUintP8:
			// Unsigned int: 1..8 big-endian payload bytes.
			b = b[1:]
			n := 1 + int(tag) - gbUintP1
			if len(b) < n {
				goto corrupted
			}
			var u uint64
			for _, v := range b[:n] {
				u = u<<8 | uint64(v)
			}
			scalars = append(scalars, u)
			b = b[n:]
		case gbIntM8, gbIntM7, gbIntM6, gbIntM5, gbIntM4, gbIntM3, gbIntM2, gbIntM1:
			// Negative int: payload bytes shifted into an all-ones
			// accumulator to sign-extend the two's-complement value.
			b = b[1:]
			n := 8 - (int(tag) - gbIntM8)
			if len(b) < n {
				goto corrupted
			}
			u := uint64(math.MaxUint64)
			for _, v := range b[:n] {
				u = u<<8 | uint64(v)
			}
			scalars = append(scalars, int64(u))
			b = b[n:]
		case gbIntP1, gbIntP2, gbIntP3, gbIntP4, gbIntP5, gbIntP6, gbIntP7, gbIntP8:
			// Positive int: 1..8 big-endian payload bytes.
			b = b[1:]
			n := 1 + int(tag) - gbIntP1
			if len(b) < n {
				goto corrupted
			}
			i := int64(0)
			for _, v := range b[:n] {
				i = i<<8 | int64(v)
			}
			scalars = append(scalars, i)
			b = b[n:]
		default:
			// Any remaining tag is a small non-negative int folded into
			// the tag byte as gbInt0+n (see encInt).
			scalars = append(scalars, int64(b[0])-gbInt0)
			b = b[1:]
		}
	}
	return append([]interface{}(nil), scalars...), nil
corrupted:
	return nil, &ErrDecodeScalars{append([]byte(nil), b0...), len(b0) - len(b)}
}
// collateComplex orders two complex numbers by real part first, then by
// imaginary part. Unordered comparisons (NaN in either component) report 1,
// matching the original switch's fall-through behavior.
func collateComplex(x, y complex128) int {
	rx, ry := real(x), real(y)
	if rx < ry {
		return -1
	}
	if rx != ry {
		// rx > ry, or the pair is unordered (NaN).
		return 1
	}
	ix, iy := imag(x), imag(y)
	if ix < iy {
		return -1
	}
	if ix == iy {
		return 0
	}
	return 1
}
// collateFloat returns -1, 0 or 1 according to x < y, x == y, x > y.
// Unordered pairs (NaN) report 1, exactly as the original switch did.
func collateFloat(x, y float64) int {
	if x < y {
		return -1
	}
	if x == y {
		return 0
	}
	return 1
}
// collateInt returns -1, 0 or 1 according to x < y, x == y, x > y.
func collateInt(x, y int64) int {
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// collateUint returns -1, 0 or 1 according to x < y, x == y, x > y.
func collateUint(x, y uint64) int {
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// collateIntUint compares a signed x with an unsigned y. A y above
// math.MaxInt64 cannot be reached by any int64, so x always sorts first;
// otherwise the comparison is performed in int64 (the helper call of the
// original is inlined here).
func collateIntUint(x int64, y uint64) int {
	if y > math.MaxInt64 {
		return -1
	}
	yy := int64(y)
	switch {
	case x < yy:
		return -1
	case x > yy:
		return 1
	default:
		return 0
	}
}
// collateUintInt compares an unsigned x with a signed y by delegating to
// collateIntUint with the operands swapped and negating the result.
func collateUintInt(x uint64, y int64) int {
	r := collateIntUint(y, x)
	return -r
}
// collateType canonicalizes a scalar for collation: signed integers widen
// to int64, unsigned ones to uint64, float32 to float64 and complex64 to
// complex128; values already in canonical form pass through unchanged.
// Anything else is rejected with an error.
func collateType(i interface{}) (r interface{}, err error) {
	switch x := i.(type) {
	case nil, bool, int64, uint64, float64, complex128, []byte, string:
		// Already canonical.
		return i, nil
	case int8:
		return int64(x), nil
	case int16:
		return int64(x), nil
	case int32:
		return int64(x), nil
	case int:
		return int64(x), nil
	case uint8:
		return uint64(x), nil
	case uint16:
		return uint64(x), nil
	case uint32:
		return uint64(x), nil
	case uint:
		return uint64(x), nil
	case float32:
		return float64(x), nil
	case complex64:
		return complex128(x), nil
	default:
		return nil, fmt.Errorf("invalid collate type %T", x)
	}
}
// Collate collates two arrays of Go predeclared scalar types (and the typeless
// nil or []byte). If any other type appears in x or y, Collate will return a
// non nil error. String items are collated using strCollate or lexically
// byte-wise (as when using Go comparison operators) when strCollate is nil.
// []byte items are collated using bytes.Compare.
//
// Collate returns:
//
//	-1 if x < y
//	0 if x == y
//	+1 if x > y
//
// The same value as defined above must be returned from strCollate.
//
// The "outer" ordering is: nil, bool, number, []byte, string. IOW, nil is
// "smaller" than anything else except other nil, numbers collate before
// []byte, []byte collate before strings, etc.
//
// Integers and real numbers collate as expected in math. However, complex
// numbers are not ordered in Go. Here the ordering is defined: Complex numbers
// are in comparison considered first only by their real part. Iff the result
// is equality then the imaginary part is used to determine the ordering. In
// this "second order" comparing, integers and real numbers are considered as
// complex numbers with a zero imaginary part.
func Collate(x, y []interface{}, strCollate func(string, string) int) (r int, err error) {
	nx, ny := len(x), len(y)
	// An empty array sorts before any non-empty one.
	switch {
	case nx == 0 && ny != 0:
		return -1, nil
	case nx == 0 && ny == 0:
		return 0, nil
	case nx != 0 && ny == 0:
		return 1, nil
	}
	// Iterate over the shorter array; if x is longer, swap the operands
	// and flip the sign of every result via r.
	r = 1
	if nx > ny {
		x, y, r = y, x, -r
	}
	var c int
	for i, xi0 := range x {
		yi0 := y[i]
		// Canonicalize both items (int64/uint64/float64/complex128/...).
		xi, err := collateType(xi0)
		if err != nil {
			return 0, err
		}
		yi, err := collateType(yi0)
		if err != nil {
			return 0, err
		}
		// The outer type switch implements the cross-type ordering
		// nil < bool < number < []byte < string; within a type class the
		// collateXxx helpers decide.
		switch x := xi.(type) {
		default:
			panic(fmt.Errorf("internal error: %T", x))
		case nil:
			switch yi.(type) {
			case nil:
				// nop
			default:
				return -r, nil
			}
		case bool:
			switch y := yi.(type) {
			case nil:
				return r, nil
			case bool:
				// false < true
				switch {
				case !x && y:
					return -r, nil
				case x == y:
					// nop
				case x && !y:
					return r, nil
				}
			default:
				return -r, nil
			}
		case int64:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateInt(x, y)
			case uint64:
				c = collateIntUint(x, y)
			case float64:
				c = collateFloat(float64(x), y)
			case complex128:
				c = collateComplex(complex(float64(x), 0), y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}
			if c != 0 {
				return c * r, nil
			}
		case uint64:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateUintInt(x, y)
			case uint64:
				c = collateUint(x, y)
			case float64:
				c = collateFloat(float64(x), y)
			case complex128:
				c = collateComplex(complex(float64(x), 0), y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}
			if c != 0 {
				return c * r, nil
			}
		case float64:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateFloat(x, float64(y))
			case uint64:
				c = collateFloat(x, float64(y))
			case float64:
				c = collateFloat(x, y)
			case complex128:
				c = collateComplex(complex(x, 0), y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}
			if c != 0 {
				return c * r, nil
			}
		case complex128:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateComplex(x, complex(float64(y), 0))
			case uint64:
				c = collateComplex(x, complex(float64(y), 0))
			case float64:
				c = collateComplex(x, complex(y, 0))
			case complex128:
				c = collateComplex(x, y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}
			if c != 0 {
				return c * r, nil
			}
		case []byte:
			switch y := yi.(type) {
			case nil, bool, int64, uint64, float64, complex128:
				return r, nil
			case []byte:
				c = bytes.Compare(x, y)
			case string:
				return -r, nil
			}
			if c != 0 {
				return c * r, nil
			}
		case string:
			switch y := yi.(type) {
			case nil, bool, int64, uint64, float64, complex128:
				return r, nil
			case []byte:
				return r, nil
			case string:
				switch {
				case strCollate != nil:
					c = strCollate(x, y)
				case x < y:
					return -r, nil
				case x == y:
					c = 0
				case x > y:
					return r, nil
				}
			}
			if c != 0 {
				return c * r, nil
			}
		}
	}
	// All compared items were equal; the shorter (possibly swapped) array
	// sorts first.
	if nx == ny {
		return 0, nil
	}
	return -r, nil
}

View File

@@ -0,0 +1,155 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lldb (WIP) implements a low level database engine. The database
// model used could be considered a specific implementation of some small(est)
// intersection of models listed in [1]. As a settled term is lacking, it'll be
// called here a 'Virtual memory model' (VMM).
//
// Experimental release notes
//
// This is an experimental release. Don't open a DB from two applications or
// two instances of an application - it will get corrupted (no file locking is
// implemented and this task is delegated to lldb's clients).
//
// WARNING: THE LLDB API IS SUBJECT TO CHANGE.
//
// Filers
//
// A Filer is an abstraction of storage. A Filer may be a part of some process'
// virtual address space, an OS file, a networked, remote file etc. Persistence
// of the storage is optional, opaque to VMM and it is specific to a concrete
// Filer implementation.
//
// Space management
//
// Mechanism to allocate, reallocate (resize), deallocate (and later reclaim
// the unused) contiguous parts of a Filer, called blocks. Blocks are
// identified and referred to by a handle, an int64.
//
// BTrees
//
// In addition to the VMM like services, lldb provides volatile and
// non-volatile BTrees. Keys and values of a BTree are limited in size to 64kB
// each (a bit more actually). Support for larger keys/values, if desired, can
// be built atop a BTree to certain limits.
//
// Handles vs pointers
//
// A handle is the abstracted storage counterpart of a memory address. There
// is one fundamental difference, though. Resizing a block never results in a
// change to the handle which refers to the resized block, so a handle is more
// akin to an unique numeric id/key. Yet it shares one property of pointers -
// handles can be associated again with blocks after the original handle block
// was deallocated. In other words, a handle uniqueness domain is the state of
// the database and is not something comparable to e.g. an ever growing
// numbering sequence.
//
// Also, as with memory pointers, dangling handles can be created and blocks
// overwritten when such handles are used. Using a zero handle to refer to a
// block will not panic; however, the resulting error is effectively the same
// exceptional situation as dereferencing a nil pointer.
//
// Blocks
//
// Allocated/used blocks, are limited in size to only a little bit more than
// 64kB. Bigger semantic entities/structures must be built in lldb's client
// code. The content of a block has no semantics attached, it's only a fully
// opaque `[]byte`.
//
// Scalars
//
// Use of "scalars" applies to EncodeScalars, DecodeScalars and Collate. Those
// first two "to bytes" and "from bytes" functions are suggested for handling
// multi-valued Allocator content items and/or keys/values of BTrees (using
// Collate for keys). Types called "scalar" are:
//
// nil (the typeless one)
// bool
// all integral types: [u]int8, [u]int16, [u]int32, [u]int, [u]int64
// all floating point types: float32, float64
// all complex types: complex64, complex128
// []byte (64kB max)
// string (64kB max)
//
// Specific implementations
//
// Included are concrete implementations of some of the VMM interfaces included
// to ease serving simple client code or for testing and possibly as an
// example. More details in the documentation of such implementations.
//
// [1]: http://en.wikipedia.org/wiki/Database_model
package lldb
const (
	fltSz = 0x70 // size of the FLT
	maxShort = 251 // max content length stored without a length field (see n2atoms/m2n)
	maxRq = 65787 // max request size: maxShort + 0x10000 (see m2n)
	maxFLTRq = 4112
	maxHandle = 1<<56 - 1 // handles are 7 bytes wide (see b2h/h2b)
	atomLen = 16 // bytes per atom; all block arithmetic is atom-granular
	// Block tag bytes; interpreted by the allocator (not shown in this file).
	tagUsedLong = 0xfc
	tagUsedRelocated = 0xfd
	tagFreeShort = 0xfe
	tagFreeLong = 0xff
	tagNotCompressed = 0
	tagCompressed = 1
)
// Content size n -> blocksize in atoms. Long blocks (content larger than
// maxShort) reserve 2 extra bytes for their length field before rounding
// up to whole 16-byte atoms.
func n2atoms(n int) int {
	size := n
	if size > maxShort {
		size += 2
	}
	return (size+1)/16 + 1
}
// Content size n -> number of padding zeros needed to fill the last atom.
// Long blocks (content larger than maxShort) account for their 2-byte
// length field first.
func n2padding(n int) int {
	size := n
	if size > maxShort {
		size += 2
	}
	return 15 - (size+1)&15
}
// Handle <-> offset. The first 6 atoms (96 bytes) of a file are skipped,
// so handle h maps to byte offset (h+6)*16 and back.
func h2off(h int64) int64 {
	return (h + 6) * 16
}

func off2h(off int64) int64 {
	return off/16 - 6
}
// b2h decodes the 7-byte big-endian handle stored at the start of b.
// Bytes beyond the first seven are ignored.
func b2h(b []byte) int64 {
	var h int64
	for _, c := range b[:7] {
		h = h<<8 | int64(c)
	}
	return h
}
// h2b stores h as a 7-byte big-endian value at the start of dst and
// returns dst unchanged. Bytes past the seventh are left untouched.
func h2b(dst []byte, h int64) []byte {
	b := dst[:7]
	for i := range b {
		b[i] = byte(h >> uint(8*(6-i)))
	}
	return dst
}
// Content length N (must be in [252, 65787]) to long used block M field.
// The field simply wraps the length modulo 0x10000; m2n reverses this.
func n2m(contentLen int) int {
	const wrap = 0x10000
	return contentLen % wrap
}
// Long used block M (must be in [0, 65535]) field to content length N.
// Values small enough to have fitted a short block (<= maxShort) must have
// wrapped in n2m, so the modulus is added back.
func m2n(m int) int {
	if m > maxShort {
		return m
	}
	return m + 0x10000
}
// bpack returns a tightly packed copy of a when its backing array has
// spare capacity; a slice already filled to capacity is returned as is.
func bpack(a []byte) []byte {
	if cap(a) == len(a) {
		return a
	}
	return append([]byte(nil), a...)
}

View File

@@ -0,0 +1,344 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A memory-only implementation of Filer.
/*
pgBits: 8
BenchmarkMemFilerWrSeq 100000 19430 ns/op 1646.93 MB/s
BenchmarkMemFilerRdSeq 100000 17390 ns/op 1840.13 MB/s
BenchmarkMemFilerWrRand 1000000 1903 ns/op 133.94 MB/s
BenchmarkMemFilerRdRand 1000000 1153 ns/op 221.16 MB/s
pgBits: 9
BenchmarkMemFilerWrSeq 100000 16195 ns/op 1975.80 MB/s
BenchmarkMemFilerRdSeq 200000 13011 ns/op 2459.39 MB/s
BenchmarkMemFilerWrRand 1000000 2248 ns/op 227.28 MB/s
BenchmarkMemFilerRdRand 1000000 1177 ns/op 433.94 MB/s
pgBits: 10
BenchmarkMemFilerWrSeq 100000 16169 ns/op 1979.04 MB/s
BenchmarkMemFilerRdSeq 200000 12673 ns/op 2524.91 MB/s
BenchmarkMemFilerWrRand 1000000 5550 ns/op 184.30 MB/s
BenchmarkMemFilerRdRand 1000000 1699 ns/op 601.79 MB/s
pgBits: 11
BenchmarkMemFilerWrSeq 100000 13449 ns/op 2379.31 MB/s
BenchmarkMemFilerRdSeq 200000 12058 ns/op 2653.80 MB/s
BenchmarkMemFilerWrRand 500000 4335 ns/op 471.47 MB/s
BenchmarkMemFilerRdRand 1000000 2843 ns/op 719.47 MB/s
pgBits: 12
BenchmarkMemFilerWrSeq 200000 11976 ns/op 2672.00 MB/s
BenchmarkMemFilerRdSeq 200000 12255 ns/op 2611.06 MB/s
BenchmarkMemFilerWrRand 200000 8058 ns/op 507.14 MB/s
BenchmarkMemFilerRdRand 500000 4365 ns/op 936.15 MB/s
pgBits: 13
BenchmarkMemFilerWrSeq 200000 10852 ns/op 2948.69 MB/s
BenchmarkMemFilerRdSeq 200000 11561 ns/op 2767.77 MB/s
BenchmarkMemFilerWrRand 200000 9748 ns/op 840.15 MB/s
BenchmarkMemFilerRdRand 500000 7236 ns/op 1131.59 MB/s
pgBits: 14
BenchmarkMemFilerWrSeq 200000 10328 ns/op 3098.12 MB/s
BenchmarkMemFilerRdSeq 200000 11292 ns/op 2833.66 MB/s
BenchmarkMemFilerWrRand 100000 16768 ns/op 978.75 MB/s
BenchmarkMemFilerRdRand 200000 13033 ns/op 1258.43 MB/s
pgBits: 15
BenchmarkMemFilerWrSeq 200000 10309 ns/op 3103.93 MB/s
BenchmarkMemFilerRdSeq 200000 11126 ns/op 2876.12 MB/s
BenchmarkMemFilerWrRand 50000 31985 ns/op 1021.74 MB/s
BenchmarkMemFilerRdRand 100000 25217 ns/op 1297.65 MB/s
pgBits: 16
BenchmarkMemFilerWrSeq 200000 10324 ns/op 3099.45 MB/s
BenchmarkMemFilerRdSeq 200000 11201 ns/op 2856.80 MB/s
BenchmarkMemFilerWrRand 20000 55226 ns/op 1184.76 MB/s
BenchmarkMemFilerRdRand 50000 48316 ns/op 1355.16 MB/s
pgBits: 17
BenchmarkMemFilerWrSeq 200000 10377 ns/op 3083.53 MB/s
BenchmarkMemFilerRdSeq 200000 11018 ns/op 2904.18 MB/s
BenchmarkMemFilerWrRand 10000 143425 ns/op 913.12 MB/s
BenchmarkMemFilerRdRand 20000 95267 ns/op 1376.99 MB/s
pgBits: 18
BenchmarkMemFilerWrSeq 200000 10312 ns/op 3102.96 MB/s
BenchmarkMemFilerRdSeq 200000 11069 ns/op 2890.84 MB/s
BenchmarkMemFilerWrRand 5000 280910 ns/op 934.14 MB/s
BenchmarkMemFilerRdRand 10000 188500 ns/op 1388.17 MB/s
*/
package lldb
import (
"bytes"
"fmt"
"io"
"github.com/cznic/fileutil"
"github.com/cznic/mathutil"
)
const (
	pgBits = 16
	pgSize = 1 << pgBits // 64 KiB pages
	pgMask = pgSize - 1
)
var _ Filer = &MemFiler{} // Ensure MemFiler is a Filer.
// memFilerMap maps page index -> page content; absent pages read as zeros.
type memFilerMap map[int64]*[pgSize]byte
// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and
// Rollback as no-ops. MemFiler is not automatically persistent, but it has
// ReadFrom and WriteTo methods.
type MemFiler struct {
	m memFilerMap // sparse page store; all-zero pages are not kept
	nest int // BeginUpdate/EndUpdate nesting level
	size int64 // current logical file size in bytes
}
// NewMemFiler returns a new MemFiler.
func NewMemFiler() *MemFiler {
	f := &MemFiler{}
	f.m = make(memFilerMap)
	return f
}
// BeginUpdate implements Filer. It only bumps the nesting counter; no
// transactional state is kept.
func (f *MemFiler) BeginUpdate() error {
	f.nest++
	return nil
}
// Close implements Filer. Closing while an update is still open is refused.
func (f *MemFiler) Close() (err error) {
	if f.nest != 0 {
		return &ErrPERM{(f.Name() + ":Close")}
	}
	return
}
// EndUpdate implements Filer. It fails if there is no matching BeginUpdate.
func (f *MemFiler) EndUpdate() (err error) {
	if f.nest == 0 {
		return &ErrPERM{(f.Name() + ": EndUpdate")}
	}
	f.nest--
	return
}
// Name implements Filer. The name is synthesized from the receiver's
// address, so it is unique per MemFiler instance.
func (f *MemFiler) Name() string {
	return fmt.Sprintf("%p.memfiler", f)
}
// PunchHole implements Filer: pages fully covered by [off, off+size) are
// dropped from the page map so they read back as zeros. Partially covered
// edge pages are left intact (hole punching is best-effort).
func (f *MemFiler) PunchHole(off, size int64) (err error) {
	if off < 0 {
		return &ErrINVAL{f.Name() + ": PunchHole off", off}
	}
	if size < 0 || off+size > f.size {
		return &ErrINVAL{f.Name() + ": PunchHole size", size}
	}
	first := off >> pgBits
	if off&pgMask != 0 {
		first++ // round start up to the next page boundary
	}
	off += size - 1
	last := off >> pgBits
	if off&pgMask != 0 {
		// Conservatively skip the trailing page unless off is page
		// aligned. NOTE(review): a page whose very last byte is covered
		// (off&pgMask == pgMask) is also skipped here — confirm intended.
		last--
	}
	if limit := f.size >> pgBits; last > limit {
		last = limit
	}
	for pg := first; pg <= last; pg++ {
		delete(f.m, pg)
	}
	return
}
// zeroPage backs reads of pages that were never written; it must never be
// written to.
var zeroPage [pgSize]byte
// ReadAt implements Filer. Reads reaching past f.size return io.EOF
// together with the bytes that were available; missing pages are served
// from zeroPage.
func (f *MemFiler) ReadAt(b []byte, off int64) (n int, err error) {
	avail := f.size - off
	pgI := off >> pgBits
	pgO := int(off & pgMask)
	rem := len(b)
	if int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	for rem != 0 && avail > 0 {
		pg := f.m[pgI]
		if pg == nil {
			pg = &zeroPage
		}
		nc := copy(b[:mathutil.Min(rem, pgSize)], pg[pgO:])
		pgI++
		pgO = 0 // only the first page may start mid-page
		rem -= nc
		n += nc
		b = b[nc:]
	}
	return
}
// ReadFrom is a helper to populate MemFiler's content from r. 'n' reports the
// number of bytes read from 'r'. Any previous content is discarded first.
func (f *MemFiler) ReadFrom(r io.Reader) (n int64, err error) {
	if err = f.Truncate(0); err != nil {
		return
	}
	var (
		b [pgSize]byte
		rn int
		off int64
	)
	var rerr error
	for rerr == nil {
		if rn, rerr = r.Read(b[:]); rn != 0 {
			// MemFiler.WriteAt never returns a non-nil error, so the
			// return values are deliberately ignored here.
			f.WriteAt(b[:rn], off)
			off += int64(rn)
			n += int64(rn)
		}
	}
	// EOF terminates the copy normally; anything else is reported.
	if !fileutil.IsEOF(rerr) {
		err = rerr
	}
	return
}
// Rollback implements Filer as a no-op; MemFiler keeps no transactional state.
func (f *MemFiler) Rollback() (err error) { return }
// Size implements Filer.
func (f *MemFiler) Size() (int64, error) {
	return f.size, nil
}
// Sync implements Filer as a no-op; there is no backing store to flush.
func (f *MemFiler) Sync() error {
	return nil
}
// Truncate implements Filer. Truncating to zero resets the page map;
// otherwise every page lying wholly beyond the new size is dropped.
func (f *MemFiler) Truncate(size int64) (err error) {
	switch {
	case size < 0:
		return &ErrINVAL{"Truncate size", size}
	case size == 0:
		f.m = memFilerMap{}
		f.size = 0
		return
	}
	first := size >> pgBits
	if size&pgMask != 0 {
		first++ // keep the partially retained page
	}
	last := f.size >> pgBits
	if f.size&pgMask != 0 {
		last++
	}
	for ; first < last; first++ {
		delete(f.m, first)
	}
	f.size = size
	return
}
// WriteAt implements Filer. A write of a whole page of zeros removes the
// page from the map instead of storing it (keeping the store sparse);
// other writes allocate pages on demand. The logical size grows to cover
// the written range. This implementation never returns a non-nil error.
func (f *MemFiler) WriteAt(b []byte, off int64) (n int, err error) {
	pgI := off >> pgBits
	pgO := int(off & pgMask)
	n = len(b)
	rem := n
	var nc int
	for rem != 0 {
		if pgO == 0 && rem >= pgSize && bytes.Equal(b[:pgSize], zeroPage[:]) {
			// Full zero page: represent it by absence.
			delete(f.m, pgI)
			nc = pgSize
		} else {
			pg := f.m[pgI]
			if pg == nil {
				pg = new([pgSize]byte)
				f.m[pgI] = pg
			}
			nc = copy((*pg)[pgO:], b)
		}
		pgI++
		pgO = 0 // only the first page may start mid-page
		rem -= nc
		b = b[nc:]
	}
	f.size = mathutil.MaxInt64(f.size, off+int64(n))
	return
}
// WriteTo is a helper to copy/persist MemFiler's content to w. If w is also
// an io.WriterAt then WriteTo may attempt to _not_ write any big, for some
// value of big, runs of zeros, i.e. it will attempt to punch holes, where
// possible, in `w` if that happens to be a freshly created or to zero length
// truncated OS file. 'n' reports the number of bytes written to 'w'.
func (f *MemFiler) WriteTo(w io.Writer) (n int64, err error) {
	var (
		b [pgSize]byte
		wn, rn int
		off int64
		rerr error
	)
	if wa, ok := w.(io.WriterAt); ok {
		// Fast path: emit only the pages present in the map; absent
		// (all-zero) pages are skipped, leaving holes in a sparse file.
		lastPgI := f.size >> pgBits
		for pgI := int64(0); pgI <= lastPgI; pgI++ {
			sz := pgSize
			if pgI == lastPgI {
				sz = int(f.size & pgMask) // final, possibly partial page
			}
			pg := f.m[pgI]
			if pg != nil {
				wn, err = wa.WriteAt(pg[:sz], off)
				if err != nil {
					return
				}
				n += int64(wn)
				off += int64(sz)
				if wn != sz {
					return n, io.ErrShortWrite
				}
			}
		}
		return
	}
	// Generic path: stream everything (zeros included) through ReadAt.
	var werr error
	for rerr == nil {
		if rn, rerr = f.ReadAt(b[:], off); rn != 0 {
			off += int64(rn)
			if wn, werr = w.Write(b[:rn]); werr != nil {
				return n, werr
			}
			n += int64(wn)
		}
	}
	if !fileutil.IsEOF(rerr) {
		err = rerr
	}
	return
}

View File

@@ -0,0 +1,130 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lldb
import (
"io"
"os"
"github.com/cznic/mathutil"
)
var _ Filer = (*OSFiler)(nil) // Ensure OSFiler is a Filer.
// OSFile is an os.File like minimal set of methods allowing to construct a
// Filer.
type OSFile interface {
	Name() string
	Stat() (fi os.FileInfo, err error)
	Sync() (err error)
	Truncate(size int64) (err error)
	io.Closer
	io.Reader
	io.ReaderAt
	io.Seeker
	io.Writer
	io.WriterAt
}
// OSFiler is like a SimpleFileFiler but based on an OSFile.
type OSFiler struct {
	f OSFile // the wrapped file
	nest int // BeginUpdate/EndUpdate nesting level
	size int64 // cached size; not set if < 0
}
// NewOSFiler returns a Filer from an OSFile. This Filer is like the
// SimpleFileFiler, it does not implement the transaction related methods.
func NewOSFiler(f OSFile) (r *OSFiler) {
	r = &OSFiler{f: f}
	r.size = -1 // size is determined lazily on first use
	return r
}
// BeginUpdate implements Filer. It only bumps the nesting counter; no
// transactional state is kept.
func (f *OSFiler) BeginUpdate() (err error) {
	f.nest++
	return nil
}
// Close implements Filer. Closing while an update is still open is refused.
func (f *OSFiler) Close() (err error) {
	if f.nest != 0 {
		return &ErrPERM{(f.Name() + ":Close")}
	}
	return f.f.Close()
}
// EndUpdate implements Filer. It fails if there is no matching BeginUpdate.
func (f *OSFiler) EndUpdate() (err error) {
	if f.nest == 0 {
		return &ErrPERM{(f.Name() + ":EndUpdate")}
	}
	f.nest--
	return
}
// Name implements Filer, delegating to the wrapped file.
func (f *OSFiler) Name() string {
	return f.f.Name()
}
// PunchHole implements Filer as a no-op: OSFile exposes no hole punching
// facility, and punching holes is an optional optimization.
func (f *OSFiler) PunchHole(off, size int64) (err error) {
	return
}
// ReadAt implements Filer, delegating to the wrapped file.
func (f *OSFiler) ReadAt(b []byte, off int64) (n int, err error) {
	return f.f.ReadAt(b, off)
}
// Rollback implements Filer as a no-op; OSFiler keeps no transactional state.
func (f *OSFiler) Rollback() (err error) { return }
// Size implements Filer. The size is read from the file once and cached;
// subsequent Truncate/WriteAt calls keep the cache current.
func (f *OSFiler) Size() (n int64, err error) {
	if f.size < 0 { // boot
		fi, err := f.f.Stat()
		if err != nil {
			return 0, err
		}
		f.size = fi.Size()
	}
	return f.size, nil
}
// Sync implements Filer, delegating to the wrapped file.
func (f *OSFiler) Sync() (err error) {
	return f.f.Sync()
}
// Truncate implements Filer.
// NOTE(review): the cached size is updated before the underlying Truncate
// is attempted, so the cache is wrong if that call fails — confirm intended.
func (f *OSFiler) Truncate(size int64) (err error) {
	if size < 0 {
		return &ErrINVAL{"Truncate size", size}
	}
	f.size = size
	return f.f.Truncate(size)
}
// WriteAt implements Filer. On first use it determines the current file
// size, then keeps the cached size covering the written range.
//
// Fix: the size bootstrap now uses f.f.Stat() on the open file instead of
// os.Stat(f.f.Name()). Re-resolving the path could stat the wrong file (or
// fail) if the file had been renamed or removed since opening, and it was
// inconsistent with Size, which already stats the handle.
func (f *OSFiler) WriteAt(b []byte, off int64) (n int, err error) {
	if f.size < 0 { // boot
		fi, err := f.f.Stat()
		if err != nil {
			return 0, err
		}
		f.size = fi.Size()
	}
	f.size = mathutil.MaxInt64(f.size, int64(len(b))+off)
	return f.f.WriteAt(b, off)
}

View File

@@ -0,0 +1,123 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A basic os.File backed Filer.
package lldb
import (
"os"
"github.com/cznic/fileutil"
"github.com/cznic/mathutil"
)
var _ Filer = &SimpleFileFiler{} // Ensure SimpleFileFiler is a Filer.
// SimpleFileFiler is an os.File backed Filer intended for use where structural
// consistency can be reached by other means (SimpleFileFiler is for example
// wrapped in eg. an RollbackFiler or ACIDFiler0) or where persistence is not
// required (temporary/working data sets).
//
// SimpleFileFiler is the most simple os.File backed Filer implementation as it
// does not really implement BeginUpdate and EndUpdate/Rollback in any way
// which would protect the structural integrity of data. If misused e.g. as a
// real database storage w/o other measures, it can easily cause data loss
// when, for example, a power outage occurs or the updating process terminates
// abruptly.
type SimpleFileFiler struct {
	file *os.File // the wrapped file
	nest int // BeginUpdate/EndUpdate nesting level
	size int64 // cached size; not set if < 0
}
// NewSimpleFileFiler returns a new SimpleFileFiler.
func NewSimpleFileFiler(f *os.File) *SimpleFileFiler {
	sf := &SimpleFileFiler{file: f}
	sf.size = -1 // size is determined lazily on first use
	return sf
}
// BeginUpdate implements Filer. It only bumps the nesting counter; no
// transactional state is kept.
func (f *SimpleFileFiler) BeginUpdate() error {
	f.nest++
	return nil
}
// Close implements Filer. Closing while an update is still open is refused.
func (f *SimpleFileFiler) Close() (err error) {
	if f.nest != 0 {
		return &ErrPERM{(f.Name() + ":Close")}
	}
	return f.file.Close()
}
// EndUpdate implements Filer. It fails if there is no matching BeginUpdate.
func (f *SimpleFileFiler) EndUpdate() (err error) {
	if f.nest == 0 {
		return &ErrPERM{(f.Name() + ":EndUpdate")}
	}
	f.nest--
	return
}
// Name implements Filer, delegating to the wrapped file.
func (f *SimpleFileFiler) Name() string {
	return f.file.Name()
}
// PunchHole implements Filer by delegating to fileutil's platform-specific
// hole punching support.
func (f *SimpleFileFiler) PunchHole(off, size int64) (err error) {
	return fileutil.PunchHole(f.file, off, size)
}
// ReadAt implements Filer, delegating to the wrapped file.
func (f *SimpleFileFiler) ReadAt(b []byte, off int64) (n int, err error) {
	return f.file.ReadAt(b, off)
}
// Rollback implements Filer as a no-op; SimpleFileFiler keeps no
// transactional state.
func (f *SimpleFileFiler) Rollback() (err error) { return }
// Size implements Filer. The size is read once and cached; Truncate and
// WriteAt keep the cache current.
// NOTE(review): this stats by path rather than via f.file.Stat(); it would
// misbehave if the file has been renamed or removed since opening — confirm.
func (f *SimpleFileFiler) Size() (int64, error) {
	if f.size < 0 { // boot
		fi, err := os.Stat(f.file.Name())
		if err != nil {
			return 0, err
		}
		f.size = fi.Size()
	}
	return f.size, nil
}
// Sync implements Filer, delegating to the wrapped file.
func (f *SimpleFileFiler) Sync() error {
	return f.file.Sync()
}
// Truncate implements Filer.
// NOTE(review): the cached size is updated before the underlying Truncate
// is attempted, so the cache is wrong if that call fails — confirm intended.
func (f *SimpleFileFiler) Truncate(size int64) (err error) {
	if size < 0 {
		return &ErrINVAL{"Truncate size", size}
	}
	f.size = size
	return f.file.Truncate(size)
}
// WriteAt implements Filer. On first use the current size is determined,
// then the cache is kept covering the written range.
// NOTE(review): like Size, this stats by path (os.Stat on the file's name)
// rather than via f.file.Stat() — confirm behavior for renamed files.
func (f *SimpleFileFiler) WriteAt(b []byte, off int64) (n int, err error) {
	if f.size < 0 { // boot
		fi, err := os.Stat(f.file.Name())
		if err != nil {
			return 0, err
		}
		f.size = fi.Size()
	}
	f.size = mathutil.MaxInt64(f.size, int64(len(b))+off)
	return f.file.WriteAt(b, off)
}

View File

@@ -0,0 +1,642 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Structural transactions.
package lldb
//DONE+ TransactionalMemoryFiler
// ----
// Use NewRollbackFiler(myMemFiler, ...)
/*
bfBits: 3
BenchmarkRollbackFiler 20000000 102 ns/op 9.73 MB/s
bfBits: 4
BenchmarkRollbackFiler 50000000 55.7 ns/op 17.95 MB/s
bfBits: 5
BenchmarkRollbackFiler 100000000 32.2 ns/op 31.06 MB/s
bfBits: 6
BenchmarkRollbackFiler 100000000 20.6 ns/op 48.46 MB/s
bfBits: 7
BenchmarkRollbackFiler 100000000 15.1 ns/op 66.12 MB/s
bfBits: 8
BenchmarkRollbackFiler 100000000 10.5 ns/op 95.66 MB/s
bfBits: 9
BenchmarkRollbackFiler 200000000 8.02 ns/op 124.74 MB/s
bfBits: 10
BenchmarkRollbackFiler 200000000 9.25 ns/op 108.09 MB/s
bfBits: 11
BenchmarkRollbackFiler 100000000 11.7 ns/op 85.47 MB/s
bfBits: 12
BenchmarkRollbackFiler 100000000 17.2 ns/op 57.99 MB/s
bfBits: 13
BenchmarkRollbackFiler 100000000 32.7 ns/op 30.58 MB/s
bfBits: 14
BenchmarkRollbackFiler 50000000 39.6 ns/op 25.27 MB/s
*/
import (
"fmt"
"io"
"sync"
"github.com/cznic/fileutil"
"github.com/cznic/mathutil"
)
var (
	_ Filer = &bitFiler{} // Ensure bitFiler is a Filer.
	_ Filer = &RollbackFiler{} // ditto
)
const (
	bfBits = 9 // log2 of the bitFiler page size (chosen per the benchmarks above)
	bfSize = 1 << bfBits // 512-byte pages
	bfMask = bfSize - 1
)
var (
	bitmask = [8]byte{1, 2, 4, 8, 16, 32, 64, 128} // selects bit i&7 within a flags byte
	bitZeroPage bitPage
	allDirtyFlags [bfSize >> 3]byte // a flags array with every dirty bit set; see init
)
// init precomputes the all-dirty flags template used by bitFiler.PunchHole.
func init() {
	for i := range allDirtyFlags {
		allDirtyFlags[i] = 0xff
	}
}
type (
	// bitPage is one bfSize-byte page of a bitFiler together with a
	// per-byte dirty bitmap (one flag bit per data byte).
	bitPage struct {
		prev, next *bitPage // linkage managed by bitFiler.link (not shown here)
		data [bfSize]byte
		flags [bfSize >> 3]byte // dirty bits for data
		dirty bool // whether any byte of this page was written
	}
	// bitFilerMap maps page index -> page.
	bitFilerMap map[int64]*bitPage
	// bitFiler overlays a parent Filer: reads fault pages in from parent,
	// writes are buffered in m along with their dirty flags. The embedded
	// mutex guards m during concurrent access.
	bitFiler struct {
		parent Filer
		m bitFilerMap
		size int64
		sync.Mutex
	}
)
// newBitFiler returns a bitFiler overlaying parent, starting out with the
// parent's size and an empty page cache.
func newBitFiler(parent Filer) (f *bitFiler, err error) {
	sz, err := parent.Size()
	if err != nil {
		return
	}
	return &bitFiler{parent: parent, m: bitFilerMap{}, size: sz}, nil
}
// The transaction-control methods must never be invoked on a bitFiler
// directly — presumably it is only driven internally by RollbackFiler (see
// the package notes above); hence they panic.
func (f *bitFiler) BeginUpdate() error { panic("internal error") }
func (f *bitFiler) EndUpdate() error { panic("internal error") }
func (f *bitFiler) Rollback() error { panic("internal error") }
func (f *bitFiler) Sync() error { panic("internal error") }
// Close is a no-op; the overlay owns no resources of its own.
func (f *bitFiler) Close() (err error) { return }
// Name synthesizes a unique name from the receiver's address.
func (f *bitFiler) Name() string { return fmt.Sprintf("%p.bitfiler", f) }
func (f *bitFiler) Size() (int64, error) { return f.size, nil }
// PunchHole zeroes [off, off+size) by installing fresh all-zero pages, with
// every write flag set, for each page the hole covers. Partially covered
// head pages are left untouched.
func (f *bitFiler) PunchHole(off, size int64) (err error) {
	// First page wholly inside the hole.
	first := off >> bfBits
	if off&bfMask != 0 {
		first++
	}
	// off now addresses the last byte of the hole.
	off += size - 1
	last := off >> bfBits
	if off&bfMask != 0 {
		// NOTE(review): this drops the tail page unless the hole ends exactly
		// at a page's first byte — asymmetric with the head handling above;
		// confirm against upstream lldb before changing.
		last--
	}
	if limit := f.size >> bfBits; last > limit {
		last = limit
	}
	f.Lock()
	for pgI := first; pgI <= last; pgI++ {
		// Replace any faulted-in page with a zero page marked fully written.
		// NOTE(review): pg.dirty is left false here, so dumpDirty will skip
		// these pages — verify that is intended.
		pg := &bitPage{}
		pg.flags = allDirtyFlags
		f.m[pgI] = pg
	}
	f.Unlock()
	return
}
// ReadAt reads from the overlay at off, lazily faulting in pages from the
// parent Filer the first time they are touched. Reads reaching the logical
// end of file return io.EOF with the bytes available.
func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) {
	avail := f.size - off
	pgI := off >> bfBits
	pgO := int(off & bfMask) // In-page offset of the first byte; only non-zero for the first page.
	rem := len(b)
	if int64(rem) >= avail {
		rem = int(avail)
		err = io.EOF
	}
	for rem != 0 && avail > 0 {
		// The lock only guards the page map; the copy below runs unlocked.
		f.Lock()
		pg := f.m[pgI]
		if pg == nil {
			// Fault the page in from the parent; EOF from the parent just
			// means the page lies (partly) past its end and stays zero.
			pg = &bitPage{}
			if f.parent != nil {
				_, err = f.parent.ReadAt(pg.data[:], off&^bfMask)
				if err != nil && !fileutil.IsEOF(err) {
					f.Unlock()
					return
				}

				// NOTE(review): this also clears an io.EOF set before the
				// loop, so a short read may report err == nil — confirm
				// callers tolerate that.
				err = nil
			}
			f.m[pgI] = pg
		}
		f.Unlock()
		nc := copy(b[:mathutil.Min(rem, bfSize)], pg.data[pgO:])
		pgI++
		pgO = 0
		rem -= nc
		n += nc
		b = b[nc:]
		off += int64(nc)
	}
	return
}
// Truncate changes the overlay's logical size to size, dropping every cached
// page that lies entirely beyond the new end of file. A negative size is
// rejected with ErrINVAL; size 0 discards the whole page map.
func (f *bitFiler) Truncate(size int64) (err error) {
	f.Lock()
	defer f.Unlock()

	if size < 0 {
		return &ErrINVAL{"Truncate size", size}
	}

	if size == 0 {
		f.m = bitFilerMap{}
		f.size = 0
		return nil
	}

	// Pages in [lo, hi) can no longer hold live data: lo is the first page
	// index past the new size, hi is one past the last page of the old size.
	lo := size >> bfBits
	if size&bfMask != 0 {
		lo++
	}
	hi := f.size >> bfBits
	if f.size&bfMask != 0 {
		hi++
	}
	for pgI := lo; pgI < hi; pgI++ {
		delete(f.m, pgI)
	}

	f.size = size
	return nil
}
// WriteAt copies b into the overlay at off. Untouched pages are first
// faulted in from the parent so the unwritten parts of each page stay
// correct; every written byte is recorded in the page's flags bitmap and the
// page is marked dirty. The overlay grows as needed.
func (f *bitFiler) WriteAt(b []byte, off int64) (n int, err error) {
	off0 := off // Remember the start so the final size can be computed.
	pgI := off >> bfBits
	pgO := int(off & bfMask) // In-page offset; only non-zero for the first page.
	n = len(b)
	rem := n
	var nc int
	for rem != 0 {
		// The lock only guards the page map; the copy below runs unlocked.
		f.Lock()
		pg := f.m[pgI]
		if pg == nil {
			// Pre-fill with parent content; EOF just means the page lies
			// (partly) past the parent's end and the remainder stays zero.
			pg = &bitPage{}
			if f.parent != nil {
				_, err = f.parent.ReadAt(pg.data[:], off&^bfMask)
				if err != nil && !fileutil.IsEOF(err) {
					f.Unlock()
					return
				}

				err = nil
			}
			f.m[pgI] = pg
		}
		f.Unlock()
		nc = copy(pg.data[pgO:], b)
		pgI++
		pg.dirty = true
		// Record exactly which bytes of this page were written.
		for i := pgO; i < pgO+nc; i++ {
			pg.flags[i>>3] |= bitmask[i&7]
		}
		pgO = 0
		rem -= nc
		b = b[nc:]
		off += int64(nc)
	}
	f.size = mathutil.MaxInt64(f.size, off0+int64(n))
	return
}
// link chains runs of adjacent dirty pages together through their prev/next
// pointers so dumpDirty can emit each run with ascending-offset writes.
func (f *bitFiler) link() {
	for pgI, pg := range f.m {
		next, ok := f.m[pgI+1]
		if ok && next.dirty {
			next.prev = pg
			pg.next = next
		}
	}
}
// dumpDirty writes all written bytes of the overlay to w, using the per-byte
// flags to issue one WriteAt per contiguous run of written bytes. Dirty-page
// chains (built by link) are walked from their first page so a run crossing
// page boundaries is written page-by-page at ascending offsets. nwr counts
// the WriteAt calls issued; pages are marked clean as they are dumped.
func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
	f.Lock()
	defer f.Unlock()
	f.link()
	for pgI, pg := range f.m {
		if !pg.dirty {
			continue
		}

		// Rewind to the first dirty page of this chain.
		for pg.prev != nil && pg.prev.dirty {
			pg = pg.prev
			pgI--
		}

		for pg != nil && pg.dirty {
			// Scan the flags bitmap, detecting runs by edge transitions.
			last := false
			var off int64
			first := -1
			for i := 0; i < bfSize; i++ {
				flag := pg.flags[i>>3]&bitmask[i&7] != 0
				switch {
				case flag && !last: // Leading edge detected
					off = pgI<<bfBits + int64(i)
					first = i
				case !flag && last: // Trailing edge detected
					n, err := w.WriteAt(pg.data[first:i], off)
					if n != i-first {
						// NOTE(review): a short write with err == nil returns
						// a nil error here — confirm callers tolerate that.
						return 0, err
					}

					first = -1
					nwr++
				}

				last = flag
			}
			if first >= 0 {
				// The run extends to the end of the page: flush the tail.
				i := bfSize
				n, err := w.WriteAt(pg.data[first:i], off)
				if n != i-first {
					return 0, err
				}

				nwr++
			}
			pg.dirty = false
			pg = pg.next
			pgI++
		}
	}
	return
}
// RollbackFiler is a Filer implementing structural transaction handling.
// Structural transactions should be small and short lived because all non
// committed data are held in memory until committed or discarded by a
// Rollback.
//
// While using RollbackFiler, every intended update of the wrapped Filer, by
// WriteAt, Truncate or PunchHole, _must_ be made within a transaction.
// Attempts to do it outside of a transaction will return ErrPERM. OTOH,
// invoking ReadAt outside of a transaction is not a problem.
//
// No nested transactions: All updates within a transaction are held in memory.
// On a matching EndUpdate the updates held in memory are actually written to
// the wrapped Filer.
//
// Nested transactions: Correct data will be seen from RollbackFiler when any
// level of a nested transaction is rollbacked. The actual writing to the
// wrapped Filer happens only when the outer most transaction nesting level is
// closed.
//
// Invoking Rollback is an alternative to EndUpdate. It discards all changes
// made at the current transaction level and returns the "state" (possibly not
// yet persisted) of the Filer to what it was before the corresponding
// BeginUpdate.
//
// During an open transaction, all reads (using ReadAt) are "dirty" reads,
// seeing the uncommitted changes made to the Filer's data.
//
// Lldb databases should be based upon a RollbackFiler.
//
// With a wrapped MemFiler one gets transactional memory. With, for example a
// wrapped disk based SimpleFileFiler it protects against at least some HW
// errors - if Rollback is properly invoked on such failures and/or if there's
// some WAL or 2PC or whatever other safe mechanism based recovery procedure
// used by the client.
//
// The "real" writes to the wrapped Filer (or WAL instead) go through the
// writerAt supplied to NewRollbackFiler.
//
// List of functions/methods which are recommended to be wrapped in a
// BeginUpdate/EndUpdate structural transaction:
//
// Allocator.Alloc
// Allocator.Free
// Allocator.Realloc
//
// CreateBTree
// RemoveBTree
// BTree.Clear
// BTree.Delete
// BTree.DeleteAny
// BTree.Clear
// BTree.Extract
// BTree.Get (it can mutate the DB)
// BTree.Put
// BTree.Set
//
// NOTE: RollbackFiler is a generic solution intended to wrap Filers provided
// by this package which do not implement any of the transactional methods.
// RollbackFiler thus _does not_ invoke any of the transactional methods of its
// wrapped Filer.
//
// RollbackFiler is safe for concurrent use by multiple goroutines.
type RollbackFiler struct {
	mu           sync.RWMutex      // Guards all fields below.
	inCallback   bool              // True while afterRollback is executing.
	inCallbackMu sync.RWMutex      // Guards inCallback so ReadAt can run inside the callback without retaking mu.
	bitFiler     *bitFiler         // Overlay of the innermost open transaction; nil at tlevel 0.
	checkpoint   func(int64) error // Invoked by EndUpdate after the outermost commit was written.
	closed       bool              // Set by Close; guards against double Close.
	f            Filer             // The wrapped Filer.
	parent       Filer             // NOTE(review): not referenced in this chunk — confirm use elsewhere.
	tlevel       int               // transaction nesting level, 0 == not in transaction
	writerAt     io.WriterAt       // Destination of the "real" commit writes.

	// afterRollback, if not nil, is called after performing Rollback
	// without errors.
	afterRollback func() error
}
// NewRollbackFiler returns a RollbackFiler wrapping f.
//
// The checkpoint parameter
//
// The checkpoint function is called after closing (by EndUpdate) the upper
// most level open transaction if all calls of writerAt were successful and the
// DB (or eg. a WAL) is thus now in a consistent state (virtually, in the ideal
// world with no write caches, no HW failures, no process crashes, ...).
//
// NOTE: In, for example, a 2PC it is necessary to reflect also the sz
// parameter as the new file size (as in the parameter to Truncate). All
// changes were successfully written already by writerAt before invoking
// checkpoint.
//
// The writerAt parameter
//
// The writerAt interface is used to commit the updates of the wrapped Filer.
// If any invocation of writerAt fails then a non nil error will be returned
// from EndUpdate and checkpoint will _not_ be called. Neither is necessary to
// call Rollback. The rule of thumb: The [structural] transaction [level] is
// closed by invoking exactly once one of EndUpdate _or_ Rollback.
//
// It is presumed that writerAt uses WAL or 2PC or whatever other safe
// mechanism to physically commit the updates.
//
// Updates performed by invocations of writerAt are byte-precise, but not
// necessarily maximum possible length precise. IOW, for example an update
// crossing page boundaries may be performed by more than one writerAt
// invocation. No offset sorting is performed. This may change if it proves
// to be a problem. Such change would be considered backward compatible.
//
// NOTE: Using RollbackFiler, but failing to ever invoke a matching "closing"
// EndUpdate after an "opening" BeginUpdate means neither writerAt or
// checkpoint will ever get called - with all the possible data loss
// consequences.
func NewRollbackFiler(f Filer, checkpoint func(sz int64) error, writerAt io.WriterAt) (r *RollbackFiler, err error) {
	// All three collaborators are mandatory.
	switch {
	case f == nil, checkpoint == nil, writerAt == nil:
		return nil, &ErrINVAL{Src: "lldb.NewRollbackFiler, nil argument"}
	}

	r = &RollbackFiler{
		checkpoint: checkpoint,
		f:          f,
		writerAt:   writerAt,
	}
	return r, nil
}
// Implements Filer.
//
// BeginUpdate opens a new transaction nesting level backed by a fresh
// in-memory overlay.
func (r *RollbackFiler) BeginUpdate() (err error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Level 1 overlays the wrapped Filer; deeper levels overlay the
	// previous level's bitFiler.
	src := r.f
	if r.tlevel != 0 {
		src = r.bitFiler
	}
	if r.bitFiler, err = newBitFiler(src); err != nil {
		return err
	}

	r.tlevel++
	return nil
}
// Implements Filer.
//
// Close returns an error if not invoked at nesting level 0. However, to
// allow emergency closing from eg. a signal handler, the Close of the
// wrapped Filer is always performed first; any uncommitted transaction data
// are simply lost.
func (r *RollbackFiler) Close() (err error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.closed {
		return &ErrPERM{r.f.Name() + ": Already closed"}
	}

	r.closed = true
	// Close the wrapped Filer unconditionally; only complain about an open
	// transaction when that succeeded.
	err = r.f.Close()
	if err == nil && r.tlevel != 0 {
		err = &ErrPERM{r.f.Name() + ": Close inside an open transaction"}
	}
	return err
}
// Implements Filer.
//
// EndUpdate closes one transaction nesting level: the current overlay's
// dirty bytes are dumped either to writerAt (outermost level, followed by
// checkpoint when anything was written) or into the parent overlay (nested
// level, whose size is then synchronized).
func (r *RollbackFiler) EndUpdate() (err error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.tlevel == 0 {
		return &ErrPERM{r.f.Name() + " : EndUpdate outside of a transaction"}
	}

	sz, err := r.size() // Cannot call .Size() -> deadlock
	if err != nil {
		return
	}

	r.tlevel--
	bf := r.bitFiler
	parent := bf.parent
	// Nested levels flush into the parent overlay instead of the real writer.
	w := r.writerAt
	if r.tlevel != 0 {
		w = parent
	}
	nwr, err := bf.dumpDirty(w)
	if err != nil {
		return
	}

	switch {
	case r.tlevel == 0:
		// Outermost commit: drop the overlay; checkpoint only if something
		// was actually written.
		r.bitFiler = nil
		if nwr == 0 {
			return
		}

		return r.checkpoint(sz)
	default:
		// Still nested: the parent overlay becomes current again and takes
		// over the (possibly changed) size.
		r.bitFiler = parent.(*bitFiler)
		sz, _ := bf.Size() // bitFiler.Size() never returns err != nil
		return parent.Truncate(sz)
	}
}
// Implements Filer.
//
// Name reports the name of the wrapped Filer.
func (r *RollbackFiler) Name() string {
	r.mu.RLock()
	defer r.mu.RUnlock()

	return r.f.Name()
}
// Implements Filer.
//
// PunchHole forwards to the current transaction overlay after validating
// that a transaction is open and that [off, off+size) is in range.
func (r *RollbackFiler) PunchHole(off, size int64) error {
	r.mu.Lock()
	defer r.mu.Unlock()

	switch {
	case r.tlevel == 0:
		return &ErrPERM{r.f.Name() + ": PunchHole outside of a transaction"}
	case off < 0:
		return &ErrINVAL{r.f.Name() + ": PunchHole off", off}
	case size < 0 || off+size > r.bitFiler.size:
		return &ErrINVAL{r.f.Name() + ": PunchHole size", size}
	}

	return r.bitFiler.PunchHole(off, size)
}
// Implements Filer.
//
// ReadAt reads from the innermost open transaction overlay (a dirty read),
// or from the wrapped Filer when no transaction is open.
func (r *RollbackFiler) ReadAt(b []byte, off int64) (n int, err error) {
	r.inCallbackMu.RLock()
	defer r.inCallbackMu.RUnlock()
	// When invoked from within the afterRollback callback, r.mu is already
	// write-locked by Rollback; retaking it here would deadlock, so skip it.
	if !r.inCallback {
		r.mu.RLock()
		defer r.mu.RUnlock()
	}
	if r.tlevel == 0 {
		return r.f.ReadAt(b, off)
	}

	return r.bitFiler.ReadAt(b, off)
}
// Implements Filer.
//
// Rollback discards the innermost transaction level's overlay and, when set,
// runs the afterRollback callback, returning its error.
func (r *RollbackFiler) Rollback() (err error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.tlevel == 0 {
		return &ErrPERM{r.f.Name() + ": Rollback outside of a transaction"}
	}

	// Step back to the enclosing overlay; at level 1 the stale bitFiler is
	// simply abandoned (replaced by the next BeginUpdate).
	if r.tlevel > 1 {
		r.bitFiler = r.bitFiler.parent.(*bitFiler)
	}
	r.tlevel--

	if f := r.afterRollback; f != nil {
		// Mark the callback window so ReadAt invoked from within f knows
		// r.mu is already held and must not be retaken.
		r.inCallbackMu.Lock()
		r.inCallback = true
		r.inCallbackMu.Unlock()
		defer func() {
			r.inCallbackMu.Lock()
			r.inCallback = false
			r.inCallbackMu.Unlock()
		}()
		return f()
	}
	return
}
// size reports the current size without acquiring r.mu; callers must
// already hold it.
func (r *RollbackFiler) size() (sz int64, err error) {
	if r.tlevel != 0 {
		return r.bitFiler.Size()
	}

	return r.f.Size()
}
// Implements Filer.
//
// Size reports the size seen by the innermost open transaction, or of the
// wrapped Filer when none is open.
func (r *RollbackFiler) Size() (sz int64, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	sz, err = r.size()
	return sz, err
}
// Implements Filer.
//
// Sync forwards to the wrapped Filer.
func (r *RollbackFiler) Sync() error {
	r.mu.Lock()
	defer r.mu.Unlock()

	err := r.f.Sync()
	return err
}
// Implements Filer.
//
// Truncate resizes the current transaction overlay; outside of a
// transaction it fails with ErrPERM.
func (r *RollbackFiler) Truncate(size int64) error {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.tlevel != 0 {
		return r.bitFiler.Truncate(size)
	}

	return &ErrPERM{r.f.Name() + ": Truncate outside of a transaction"}
}
// Implements Filer.
//
// WriteAt writes into the current transaction overlay; outside of a
// transaction it fails with ErrPERM.
func (r *RollbackFiler) WriteAt(b []byte, off int64) (n int, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.tlevel != 0 {
		return r.bitFiler.WriteAt(b, off)
	}

	return 0, &ErrPERM{r.f.Name() + ": WriteAt outside of a transaction"}
}