cmd/stdiscosrv: New discovery server (fixes #4618)
This is a new revision of the discovery server. Relevant changes and non-changes: - Protocol towards clients is unchanged. - Recommended large scale design is still to be deployed behind nginx (I tested, and it's still a lot faster at terminating TLS). - Database backend is leveldb again, only. It scales enough, is easy to setup, and we don't need any backend to take care of. - Server supports replication. This is a simple TCP channel - protect it with a firewall when deploying over the internet. (We deploy this within the same datacenter, and with firewall.) Any incoming client announces are sent over the replication channel(s) to other peer discosrvs. Incoming replication changes are applied to the database as if they came from clients, but without the TLS/certificate overhead. - Metrics are exposed using the prometheus library, when enabled. - The database values and replication protocol is protobuf, because JSON was quite CPU intensive when I tried that and benchmarked it. - The "Retry-After" value for failed lookups gets slowly increased from a default of 120 seconds, by 5 seconds for each failed lookup, independently by each discosrv. This lowers the query load over time for clients that are never seen. The Retry-After maxes out at 3600 after a couple of weeks of this increase. The number of failed lookups is stored in the database, now and then (avoiding making each lookup a database put). All in all this means clients can be pointed towards a cluster using just multiple A / AAAA records to gain both load sharing and redundancy (if one is down, clients will talk to the remaining ones). GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4648
This commit is contained in:
21
vendor/github.com/BurntSushi/toml/COPYING
generated
vendored
Normal file
21
vendor/github.com/BurntSushi/toml/COPYING
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 TOML authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
90
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
generated
vendored
Normal file
90
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
// Command toml-test-decoder satisfies the toml-test interface for testing
// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"os"
	"path"
	"time"

	"github.com/BurntSushi/toml"
)

// init strips the timestamp prefix from log output and parses the
// command-line flags before main runs.
func init() {
	log.SetFlags(0)

	flag.Usage = usage
	flag.Parse()
}

// usage prints invocation help plus flag defaults, then exits with
// status 1.
func usage() {
	log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
	flag.PrintDefaults()

	os.Exit(1)
}

// main decodes TOML from stdin into an untyped value, converts it to the
// toml-test tagged-JSON representation via translate, and writes the JSON
// to stdout. Any error terminates the process via log.Fatalf.
func main() {
	// No positional arguments are accepted; usage exits the process.
	if flag.NArg() != 0 {
		flag.Usage()
	}

	var tmp interface{}
	if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
		log.Fatalf("Error decoding TOML: %s", err)
	}

	typedTmp := translate(tmp)
	if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
		log.Fatalf("Error encoding JSON: %s", err)
	}
}
|
||||
|
||||
// translate converts a value produced by the toml decoder into the
// "tagged" form required by the toml-test suite: primitives become
// {"type": ..., "value": ...} objects (via tag), tables stay plain JSON
// objects, and arrays of tables stay arrays of objects.
func translate(tomlData interface{}) interface{} {
	switch orig := tomlData.(type) {
	case map[string]interface{}:
		// A table: translate each value, keep the map shape.
		typed := make(map[string]interface{}, len(orig))
		for k, v := range orig {
			typed[k] = translate(v)
		}
		return typed
	case []map[string]interface{}:
		// An array of tables: each element must translate to a table.
		typed := make([]map[string]interface{}, len(orig))
		for i, v := range orig {
			typed[i] = translate(v).(map[string]interface{})
		}
		return typed
	case []interface{}:
		typed := make([]interface{}, len(orig))
		for i, v := range orig {
			typed[i] = translate(v)
		}

		// We don't really need to tag arrays, but let's be future proof.
		// (If TOML ever supports tuples, we'll need this.)
		return tag("array", typed)
	case time.Time:
		return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
	case bool:
		return tag("bool", fmt.Sprintf("%v", orig))
	case int64:
		return tag("integer", fmt.Sprintf("%d", orig))
	case float64:
		return tag("float", fmt.Sprintf("%v", orig))
	case string:
		return tag("string", orig)
	}

	// Any other type indicates a bug in the toml decoder.
	panic(fmt.Sprintf("Unknown type: %T", tomlData))
}
|
||||
|
||||
// tag wraps data in the two-key object form used by the toml-test
// suite: {"type": typeName, "value": data}.
func tag(typeName string, data interface{}) map[string]interface{} {
	tagged := make(map[string]interface{}, 2)
	tagged["type"] = typeName
	tagged["value"] = data
	return tagged
}
|
||||
131
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
generated
vendored
Normal file
131
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Command toml-test-encoder satisfies the toml-test interface for testing
// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
package main

import (
	"encoding/json"
	"flag"
	"log"
	"os"
	"path"
	"strconv"
	"time"

	"github.com/BurntSushi/toml"
)

// init strips the timestamp prefix from log output and parses the
// command-line flags before main runs.
func init() {
	log.SetFlags(0)

	flag.Usage = usage
	flag.Parse()
}

// usage prints invocation help plus flag defaults, then exits with
// status 1.
func usage() {
	log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
	flag.PrintDefaults()

	os.Exit(1)
}

// main decodes tagged toml-test JSON from stdin, converts it back into
// plain Go values via translate, and encodes those values as TOML on
// stdout. Any error terminates the process via log.Fatalf.
func main() {
	// No positional arguments are accepted; usage exits the process.
	if flag.NArg() != 0 {
		flag.Usage()
	}

	var tmp interface{}
	if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
		log.Fatalf("Error decoding JSON: %s", err)
	}

	tomlData := translate(tmp)
	if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
		log.Fatalf("Error encoding TOML: %s", err)
	}
}
|
||||
|
||||
// translate converts untyped JSON (as decoded by encoding/json) in the
// toml-test tagged format back into plain Go values that the TOML
// encoder can handle. Unsupported shapes terminate the program.
func translate(typedJson interface{}) interface{} {
	switch v := typedJson.(type) {
	case map[string]interface{}:
		// An object with exactly the two keys "type" and "value" is a
		// tagged primitive; anything else is a regular table.
		if len(v) == 2 && in("type", v) && in("value", v) {
			return untag(v)
		}
		m := make(map[string]interface{}, len(v))
		for k, v2 := range v {
			m[k] = translate(v2)
		}
		return m
	case []interface{}:
		// Top-level JSON arrays map to TOML table arrays, so every
		// element must itself translate to a table.
		tabArray := make([]map[string]interface{}, len(v))
		for i := range v {
			if m, ok := translate(v[i]).(map[string]interface{}); ok {
				tabArray[i] = m
			} else {
				log.Fatalf("JSON arrays may only contain objects. This " +
					"corresponds to only tables being allowed in " +
					"TOML table arrays.")
			}
		}
		return tabArray
	}
	log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
	panic("unreachable")
}
|
||||
|
||||
// untag converts a single tagged toml-test JSON object — a map of the
// form {"type": ..., "value": ...} — back into the corresponding Go
// value: string, int, float64, time.Time, bool, or []interface{}.
// Malformed input terminates the program via log.Fatalf.
func untag(typed map[string]interface{}) interface{} {
	t := typed["type"].(string)
	v := typed["value"]
	switch t {
	case "string":
		return v.(string)
	case "integer":
		v := v.(string)
		n, err := strconv.Atoi(v)
		if err != nil {
			log.Fatalf("Could not parse '%s' as integer: %s", v, err)
		}
		return n
	case "float":
		v := v.(string)
		f, err := strconv.ParseFloat(v, 64)
		if err != nil {
			log.Fatalf("Could not parse '%s' as float64: %s", v, err)
		}
		return f
	case "datetime":
		v := v.(string)
		t, err := time.Parse("2006-01-02T15:04:05Z", v)
		if err != nil {
			log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
		}
		return t
	case "bool":
		v := v.(string)
		switch v {
		case "true":
			return true
		case "false":
			return false
		}
		log.Fatalf("Could not parse '%s' as a boolean.", v)
	case "array":
		v := v.([]interface{})
		array := make([]interface{}, len(v))
		for i := range v {
			if m, ok := v[i].(map[string]interface{}); ok {
				array[i] = untag(m)
			} else {
				// Fix: report the type of the offending element. The
				// original passed `m`, which is always the zero-value
				// map here (the type assertion just failed), so the
				// message always printed the map type instead of the
				// actual element type.
				log.Fatalf("Arrays may only contain other arrays or "+
					"primitive values, but found a '%T'.", v[i])
			}
		}
		return array
	}
	log.Fatalf("Unrecognized tag type '%s'.", t)
	panic("unreachable")
}
|
||||
|
||||
// in reports whether key is present in m.
func in(key string, m map[string]interface{}) bool {
	if _, found := m[key]; found {
		return true
	}
	return false
}
|
||||
61
vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
generated
vendored
Normal file
61
vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
// Command tomlv validates TOML documents and prints each key's type.
package main

import (
	"flag"
	"fmt"
	"log"
	"os"
	"path"
	"strings"
	"text/tabwriter"

	"github.com/BurntSushi/toml"
)

var (
	// flagTypes controls whether key types are printed after a
	// successful validation (-types flag).
	flagTypes = false
)

// init strips the timestamp prefix from log output, registers the
// -types flag, and parses the command line before main runs.
func init() {
	log.SetFlags(0)

	flag.BoolVar(&flagTypes, "types", flagTypes,
		"When set, the types of every defined key will be shown.")

	flag.Usage = usage
	flag.Parse()
}

// usage prints invocation help plus flag defaults, then exits with
// status 1.
func usage() {
	log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
		path.Base(os.Args[0]))
	flag.PrintDefaults()

	os.Exit(1)
}

// main decodes each TOML file named on the command line, exiting on the
// first parse error; with -types it also prints every key's type.
func main() {
	if flag.NArg() < 1 {
		flag.Usage()
	}
	for _, f := range flag.Args() {
		var tmp interface{}
		md, err := toml.DecodeFile(f, &tmp)
		if err != nil {
			log.Fatalf("Error in '%s': %s", f, err)
		}
		if flagTypes {
			printTypes(md)
		}
	}
}

// printTypes writes a tab-aligned key/type listing to stdout, one line
// per key, indented one space per level of key depth.
func printTypes(md toml.MetaData) {
	tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	for _, key := range md.Keys() {
		fmt.Fprintf(tabw, "%s%s\t%s\n",
			strings.Repeat(" ", len(key)-1), key, md.Type(key...))
	}
	tabw.Flush()
}
|
||||
509
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
Normal file
509
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
Normal file
@@ -0,0 +1,509 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// e builds an error whose message is the formatted input prefixed with
// "toml: ".
func e(format string, args ...interface{}) error {
	msg := fmt.Sprintf(format, args...)
	return fmt.Errorf("toml: %s", msg)
}
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
	UnmarshalTOML(interface{}) error
}

// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
// It is a convenience wrapper around Decode that discards the MetaData.
func Unmarshal(p []byte, v interface{}) error {
	_, err := Decode(string(p), v)
	return err
}

// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
	undecoded interface{} // raw parsed value, not yet unified into a Go type
	context   Key         // key path at which the value was found
}

// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
	// A throwaway MetaData is used here, so decoded-key bookkeeping done
	// during this call is lost — one reason this function is deprecated.
	md := MetaData{decoded: make(map[string]bool)}
	return md.unify(primValue.undecoded, rvalue(v))
}

// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
	// Restore the key context captured when the primitive was parsed, so
	// decoded-key tracking attributes keys to the right location.
	md.context = primValue.context
	defer func() { md.context = nil }()
	return md.unify(primValue.undecoded, rvalue(v))
}
|
||||
|
||||
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
	// The target must be a non-nil pointer so the decoded value can be
	// written back to the caller.
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
	}
	if rv.IsNil() {
		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
	}
	p, err := parse(data)
	if err != nil {
		return MetaData{}, err
	}
	// Positional MetaData fields: mapping, types, keys, decoded, context.
	md := MetaData{
		p.mapping, p.types, p.ordered,
		make(map[string]bool, len(p.ordered)), nil,
	}
	return md, md.unify(p.mapping, indirect(rv))
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
// The entire file is buffered in memory before decoding.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
	bs, err := ioutil.ReadFile(fpath)
	if err != nil {
		return MetaData{}, err
	}
	return Decode(string(bs), v)
}

// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
// The entire stream is buffered in memory before decoding.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
	bs, err := ioutil.ReadAll(r)
	if err != nil {
		return MetaData{}, err
	}
	return Decode(string(bs), v)
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
//
// The order of the special cases below is significant: Primitive capture,
// Unmarshaler, time.Time, and TextUnmarshaler are all checked before the
// generic kind-based dispatch.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {

	// Special case. Look for a `Primitive` value.
	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
		// Save the undecoded data and the key context into the primitive
		// value.
		context := make(Key, len(md.context))
		copy(context, md.context)
		rv.Set(reflect.ValueOf(Primitive{
			undecoded: data,
			context:   context,
		}))
		return nil
	}

	// Special case. Unmarshaler Interface support.
	if rv.CanAddr() {
		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
			return v.UnmarshalTOML(data)
		}
	}

	// Special case. Handle time.Time values specifically.
	// TODO: Remove this code when we decide to drop support for Go 1.1.
	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
	// interfaces.
	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
		return md.unifyDatetime(data, rv)
	}

	// Special case. Look for a value satisfying the TextUnmarshaler interface.
	if v, ok := rv.Interface().(TextUnmarshaler); ok {
		return md.unifyText(data, v)
	}
	// BUG(burntsushi)
	// The behavior here is incorrect whenever a Go type satisfies the
	// encoding.TextUnmarshaler interface but also corresponds to a TOML
	// hash or array. In particular, the unmarshaler should only be applied
	// to primitive TOML values. But at this point, it will be applied to
	// all kinds of values and produce an incorrect error whenever those values
	// are hashes or arrays (including arrays of tables).

	k := rv.Kind()

	// laziness
	// All integer kinds (signed and unsigned) fall in this contiguous
	// reflect.Kind range, so one check covers them all.
	if k >= reflect.Int && k <= reflect.Uint64 {
		return md.unifyInt(data, rv)
	}
	switch k {
	case reflect.Ptr:
		// Allocate a fresh element, unify into it, then store the pointer.
		elem := reflect.New(rv.Type().Elem())
		err := md.unify(data, reflect.Indirect(elem))
		if err != nil {
			return err
		}
		rv.Set(elem)
		return nil
	case reflect.Struct:
		return md.unifyStruct(data, rv)
	case reflect.Map:
		return md.unifyMap(data, rv)
	case reflect.Array:
		return md.unifyArray(data, rv)
	case reflect.Slice:
		return md.unifySlice(data, rv)
	case reflect.String:
		return md.unifyString(data, rv)
	case reflect.Bool:
		return md.unifyBool(data, rv)
	case reflect.Interface:
		// we only support empty interfaces.
		if rv.NumMethod() > 0 {
			return e("unsupported type %s", rv.Type())
		}
		return md.unifyAnything(data, rv)
	case reflect.Float32:
		fallthrough
	case reflect.Float64:
		return md.unifyFloat64(data, rv)
	}
	return e("unsupported type %s", rv.Kind())
}
|
||||
|
||||
// unifyStruct copies a parsed TOML table (map[string]interface{}) into
// the struct value rv. Field lookup tries an exact name match first and
// falls back to a case-insensitive match. Unexported fields that a key
// targets produce an error; nil input is silently accepted as empty.
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
	tmap, ok := mapping.(map[string]interface{})
	if !ok {
		if mapping == nil {
			return nil
		}
		return e("type mismatch for %s: expected table but found %T",
			rv.Type().String(), mapping)
	}

	for key, datum := range tmap {
		var f *field
		// fields includes tag-renamed and embedded fields; results are
		// cached per type by cachedTypeFields.
		fields := cachedTypeFields(rv.Type())
		for i := range fields {
			ff := &fields[i]
			if ff.name == key {
				f = ff
				break
			}
			// Remember the first case-insensitive match, but keep
			// scanning in case an exact match appears later.
			if f == nil && strings.EqualFold(ff.name, key) {
				f = ff
			}
		}
		if f != nil {
			subv := rv
			// Walk the field index path (handles embedded structs).
			for _, i := range f.index {
				subv = indirect(subv.Field(i))
			}
			if isUnifiable(subv) {
				md.decoded[md.context.add(key).String()] = true
				md.context = append(md.context, key)
				if err := md.unify(datum, subv); err != nil {
					return err
				}
				md.context = md.context[0 : len(md.context)-1]
			} else if f.name != "" {
				// Bad user! No soup for you!
				return e("cannot write unexported field %s.%s",
					rv.Type().String(), f.name)
			}
		}
	}
	return nil
}
|
||||
|
||||
// unifyMap copies a parsed TOML table into the map value rv, allocating
// the map if it is nil. Keys are set as strings via SetString; values
// are unified recursively into the map's element type.
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
	tmap, ok := mapping.(map[string]interface{})
	if !ok {
		if tmap == nil {
			return nil
		}
		return badtype("map", mapping)
	}
	if rv.IsNil() {
		rv.Set(reflect.MakeMap(rv.Type()))
	}
	for k, v := range tmap {
		// Record the key as decoded and push it onto the context for
		// the duration of the recursive unify.
		md.decoded[md.context.add(k).String()] = true
		md.context = append(md.context, k)

		rvkey := indirect(reflect.New(rv.Type().Key()))
		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
		if err := md.unify(v, rvval); err != nil {
			return err
		}
		md.context = md.context[0 : len(md.context)-1]

		rvkey.SetString(k)
		rv.SetMapIndex(rvkey, rvval)
	}
	return nil
}
|
||||
|
||||
// unifyArray copies a parsed TOML array into the fixed-size Go array rv.
// The lengths must match exactly; an invalid (nil) input is accepted as
// a no-op.
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
	datav := reflect.ValueOf(data)
	if datav.Kind() != reflect.Slice {
		if !datav.IsValid() {
			return nil
		}
		return badtype("slice", data)
	}
	sliceLen := datav.Len()
	if sliceLen != rv.Len() {
		return e("expected array length %d; got TOML array of length %d",
			rv.Len(), sliceLen)
	}
	return md.unifySliceArray(datav, rv)
}

// unifySlice copies a parsed TOML array into the Go slice rv, growing
// (or allocating) the slice as needed to hold all elements.
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
	datav := reflect.ValueOf(data)
	if datav.Kind() != reflect.Slice {
		if !datav.IsValid() {
			return nil
		}
		return badtype("slice", data)
	}
	n := datav.Len()
	// Reuse the existing backing array when it is large enough.
	if rv.IsNil() || rv.Cap() < n {
		rv.Set(reflect.MakeSlice(rv.Type(), n, n))
	}
	rv.SetLen(n)
	return md.unifySliceArray(datav, rv)
}

// unifySliceArray is the shared element-wise copy used by unifyArray and
// unifySlice; it unifies each source element into the matching index of
// rv, which is assumed to already have the right length.
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
	sliceLen := data.Len()
	for i := 0; i < sliceLen; i++ {
		v := data.Index(i).Interface()
		sliceval := indirect(rv.Index(i))
		if err := md.unify(v, sliceval); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// unifyDatetime stores a parsed TOML datetime (already a time.Time) into rv.
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
	if _, ok := data.(time.Time); ok {
		rv.Set(reflect.ValueOf(data))
		return nil
	}
	return badtype("time.Time", data)
}

// unifyString stores a parsed TOML string into rv.
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
	if s, ok := data.(string); ok {
		rv.SetString(s)
		return nil
	}
	return badtype("string", data)
}

// unifyFloat64 stores a parsed TOML float (always float64 from the
// parser) into a float32 or float64 destination. Any other kind here is
// a dispatch bug in unify, hence the panic.
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
	if num, ok := data.(float64); ok {
		switch rv.Kind() {
		case reflect.Float32:
			fallthrough
		case reflect.Float64:
			rv.SetFloat(num)
		default:
			panic("bug")
		}
		return nil
	}
	return badtype("float", data)
}

// unifyInt stores a parsed TOML integer (always int64 from the parser)
// into any signed or unsigned integer destination, with explicit bounds
// checks for the narrower widths. unify guarantees rv is an integer
// kind, so the final else branch is unreachable.
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
	if num, ok := data.(int64); ok {
		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
			switch rv.Kind() {
			case reflect.Int, reflect.Int64:
				// No bounds checking necessary.
			case reflect.Int8:
				if num < math.MinInt8 || num > math.MaxInt8 {
					return e("value %d is out of range for int8", num)
				}
			case reflect.Int16:
				if num < math.MinInt16 || num > math.MaxInt16 {
					return e("value %d is out of range for int16", num)
				}
			case reflect.Int32:
				if num < math.MinInt32 || num > math.MaxInt32 {
					return e("value %d is out of range for int32", num)
				}
			}
			rv.SetInt(num)
		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
			unum := uint64(num)
			switch rv.Kind() {
			case reflect.Uint, reflect.Uint64:
				// No bounds checking necessary.
			case reflect.Uint8:
				// num < 0 must be checked on the signed value; unum
				// would wrap around to a huge positive number.
				if num < 0 || unum > math.MaxUint8 {
					return e("value %d is out of range for uint8", num)
				}
			case reflect.Uint16:
				if num < 0 || unum > math.MaxUint16 {
					return e("value %d is out of range for uint16", num)
				}
			case reflect.Uint32:
				if num < 0 || unum > math.MaxUint32 {
					return e("value %d is out of range for uint32", num)
				}
			}
			rv.SetUint(unum)
		} else {
			panic("unreachable")
		}
		return nil
	}
	return badtype("integer", data)
}

// unifyBool stores a parsed TOML boolean into rv.
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
	if b, ok := data.(bool); ok {
		rv.SetBool(b)
		return nil
	}
	return badtype("boolean", data)
}

// unifyAnything stores the parsed value as-is into an empty-interface
// destination (no conversion is possible or needed).
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
	rv.Set(reflect.ValueOf(data))
	return nil
}

// unifyText converts a primitive parsed value to its textual form and
// hands it to the destination's UnmarshalText method. Non-primitive
// values (tables, arrays) are rejected; see the BUG note in unify.
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
	var s string
	switch sdata := data.(type) {
	case TextMarshaler:
		text, err := sdata.MarshalText()
		if err != nil {
			return err
		}
		s = string(text)
	case fmt.Stringer:
		s = sdata.String()
	case string:
		s = sdata
	case bool:
		s = fmt.Sprintf("%v", sdata)
	case int64:
		s = fmt.Sprintf("%d", sdata)
	case float64:
		s = fmt.Sprintf("%f", sdata)
	default:
		return badtype("primitive (string-like)", data)
	}
	if err := v.UnmarshalText([]byte(s)); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
	return indirect(reflect.ValueOf(v))
}

// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
	if v.Kind() != reflect.Ptr {
		if v.CanSet() {
			pv := v.Addr()
			// Prefer returning the addressable pointer when the pointer
			// type implements TextUnmarshaler, so unify can use it.
			if _, ok := pv.Interface().(TextUnmarshaler); ok {
				return pv
			}
		}
		return v
	}
	if v.IsNil() {
		v.Set(reflect.New(v.Type().Elem()))
	}
	return indirect(reflect.Indirect(v))
}

// isUnifiable reports whether rv can receive a decoded value: either it
// is settable, or it implements TextUnmarshaler (which mutates through
// the method rather than by assignment).
func isUnifiable(rv reflect.Value) bool {
	if rv.CanSet() {
		return true
	}
	if _, ok := rv.Interface().(TextUnmarshaler); ok {
		return true
	}
	return false
}

// badtype builds the standard type-mismatch error for unify helpers.
func badtype(expected string, data interface{}) error {
	return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
|
||||
121
vendor/github.com/BurntSushi/toml/decode_meta.go
generated
vendored
Normal file
121
vendor/github.com/BurntSushi/toml/decode_meta.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
package toml
|
||||
|
||||
import "strings"
|
||||
|
||||
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
	mapping map[string]interface{} // parsed key/value tree
	types   map[string]tomlType    // TOML type per dotted key
	keys    []Key                  // all keys, in document order
	decoded map[string]bool        // dotted keys that have been decoded
	context Key                    // Used only during decoding.
}

// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchially. e.g.,
//
//	// access the TOML key 'a.b.c'
//	IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
	if len(key) == 0 {
		return false
	}

	// Walk the mapping tree one key segment at a time; every
	// intermediate segment must itself be a table.
	var hash map[string]interface{}
	var ok bool
	var hashOrVal interface{} = md.mapping
	for _, k := range key {
		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
			return false
		}
		if hashOrVal, ok = hash[k]; !ok {
			return false
		}
	}
	return true
}

// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
	fullkey := strings.Join(key, ".")
	if typ, ok := md.types[fullkey]; ok {
		return typ.typeString()
	}
	return ""
}
|
||||
|
||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string

// String joins the key segments with dots, without quoting.
func (k Key) String() string {
	return strings.Join(k, ".")
}

// maybeQuotedAll renders the full dotted key, quoting any segment that
// is not a bare TOML key.
func (k Key) maybeQuotedAll() string {
	var ss []string
	for i := range k {
		ss = append(ss, k.maybeQuoted(i))
	}
	return strings.Join(ss, ".")
}

// maybeQuoted renders segment i, wrapping it in double quotes (with
// embedded quotes escaped) when it contains any non-bare-key character.
func (k Key) maybeQuoted(i int) string {
	quote := false
	for _, c := range k[i] {
		if !isBareKeyChar(c) {
			quote = true
			break
		}
	}
	if quote {
		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
	}
	return k[i]
}

// add returns a new Key with piece appended; the receiver is copied, not
// mutated, so callers may keep using it.
func (k Key) add(piece string) Key {
	newKey := make(Key, len(k)+1)
	copy(newKey, k)
	newKey[len(k)] = piece
	return newKey
}
|
||||
|
||||
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
	return md.keys
}

// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
	undecoded := make([]Key, 0, len(md.keys))
	for _, key := range md.keys {
		// A key is undecoded unless unify marked its dotted form.
		if !md.decoded[key.String()] {
			undecoded = append(undecoded, key)
		}
	}
	return undecoded
}
|
||||
27
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
Normal file
27
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
/*
|
||||
Package toml provides facilities for decoding and encoding TOML configuration
|
||||
files via reflection. There is also support for delaying decoding with
|
||||
the Primitive type, and querying the set of keys in a TOML document with the
|
||||
MetaData type.
|
||||
|
||||
The specification implemented: https://github.com/toml-lang/toml
|
||||
|
||||
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
|
||||
whether a file is a valid TOML document. It can also be used to print the
|
||||
type of each key in a TOML document.
|
||||
|
||||
Testing
|
||||
|
||||
There are two important types of tests used for this package. The first is
|
||||
contained inside '*_test.go' files and uses the standard Go unit testing
|
||||
framework. These tests are primarily devoted to holistically testing the
|
||||
decoder and encoder.
|
||||
|
||||
The second type of testing is used to verify the implementation's adherence
|
||||
to the TOML specification. These tests have been factored into their own
|
||||
project: https://github.com/BurntSushi/toml-test
|
||||
|
||||
The reason the tests are in a separate project is so that they can be used by
|
||||
any implementation of TOML. Namely, it is language agnostic.
|
||||
*/
|
||||
package toml
|
||||
568
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
Normal file
568
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
Normal file
@@ -0,0 +1,568 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type tomlEncodeError struct{ error }
|
||||
|
||||
var (
|
||||
errArrayMixedElementTypes = errors.New(
|
||||
"toml: cannot encode array with mixed element types")
|
||||
errArrayNilElement = errors.New(
|
||||
"toml: cannot encode array with nil element")
|
||||
errNonString = errors.New(
|
||||
"toml: cannot encode a map with non-string key type")
|
||||
errAnonNonStruct = errors.New(
|
||||
"toml: cannot encode an anonymous field that is not a struct")
|
||||
errArrayNoTable = errors.New(
|
||||
"toml: TOML array element cannot contain a table")
|
||||
errNoKey = errors.New(
|
||||
"toml: top-level values must be Go maps or structs")
|
||||
errAnything = errors.New("") // used in testing
|
||||
)
|
||||
|
||||
var quotedReplacer = strings.NewReplacer(
|
||||
"\t", "\\t",
|
||||
"\n", "\\n",
|
||||
"\r", "\\r",
|
||||
"\"", "\\\"",
|
||||
"\\", "\\\\",
|
||||
)
|
||||
|
||||
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
	// A single indentation level. By default it is two spaces.
	Indent string

	// hasWritten is whether we have written any output to w yet.
	hasWritten bool
	w          *bufio.Writer
}

// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
	enc := &Encoder{Indent: "  "}
	enc.w = bufio.NewWriter(w)
	return enc
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the underlying
|
||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
||||
// then an error is returned.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same
|
||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
||||
// arbitrary binary data then you will need to use something like base64 since
|
||||
// TOML does not have any binary types.)
|
||||
//
|
||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
||||
// sub-hashes are encoded first.
|
||||
//
|
||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
||||
// deterministic output. More control over this behavior may be provided if
|
||||
// there is demand for it.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation---like map
|
||||
// types with non-string keys---will cause an error to be returned. Similarly
|
||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
||||
// non-struct types and nested slices containing maps or structs.
|
||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
||||
// and so is []map[string][]string.)
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
return err
|
||||
}
|
||||
return enc.w.Flush()
|
||||
}
|
||||
|
||||
// safeEncode runs enc.encode and converts any tomlEncodeError panic raised
// during encoding (via encPanic) back into an ordinary returned error.
// Panics of any other type are re-raised untouched, since they indicate a
// programmer bug rather than an encoding failure.
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if terr, ok := r.(tomlEncodeError); ok {
				err = terr.error
				return
			}
			panic(r)
		}
	}()
	enc.encode(key, rv)
	return nil
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case. Time needs to be in ISO8601 format.
|
||||
// Special case. If we can marshal the type to text, then we used that.
|
||||
// Basically, this prevents the encoder for handling these types as
|
||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time, TextMarshaler:
|
||||
enc.keyEqElement(key, rv)
|
||||
return
|
||||
}
|
||||
|
||||
k := rv.Kind()
|
||||
switch k {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
||||
enc.keyEqElement(key, rv)
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
||||
enc.eArrayOfTables(key, rv)
|
||||
} else {
|
||||
enc.keyEqElement(key, rv)
|
||||
}
|
||||
case reflect.Interface:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Map:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.eTable(key, rv)
|
||||
case reflect.Ptr:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Struct:
|
||||
enc.eTable(key, rv)
|
||||
default:
|
||||
panic(e("unsupported type for key '%s': %s", key, k))
|
||||
}
|
||||
}
|
||||
|
||||
// eElement encodes any value that can be an array element (primitives and
|
||||
// arrays).
|
||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
switch v := rv.Interface().(type) {
|
||||
case time.Time:
|
||||
// Special case time.Time as a primitive. Has to come before
|
||||
// TextMarshaler below because time.Time implements
|
||||
// encoding.TextMarshaler, but we need to always use UTC.
|
||||
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
|
||||
return
|
||||
case TextMarshaler:
|
||||
// Special case. Use text marshaler if it's available for this value.
|
||||
if s, err := v.MarshalText(); err != nil {
|
||||
encPanic(err)
|
||||
} else {
|
||||
enc.writeQuoted(string(s))
|
||||
}
|
||||
return
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64:
|
||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
||||
reflect.Uint32, reflect.Uint64:
|
||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
||||
case reflect.Float32:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
||||
case reflect.Float64:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
||||
case reflect.Array, reflect.Slice:
|
||||
enc.eArrayOrSliceElement(rv)
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
default:
|
||||
panic(e("unexpected primitive type: %s", rv.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// floatAddDecimal ensures a formatted float carries a decimal point: by the
// TOML spec, all floats must have a decimal with at least one number on
// either side. Strings that already contain '.' are returned unchanged.
func floatAddDecimal(fstr string) string {
	if strings.ContainsRune(fstr, '.') {
		return fstr
	}
	return fstr + ".0"
}
|
||||
|
||||
// writeQuoted writes s as a basic (double-quoted) TOML string, escaping
// special characters through quotedReplacer.
func (enc *Encoder) writeQuoted(s string) {
	enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
}
|
||||
enc.wf("]")
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
panicIfInvalidKey(key)
|
||||
enc.newline()
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.newline()
|
||||
enc.eMapOrStruct(key, trv)
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
panicIfInvalidKey(key)
|
||||
if len(key) == 1 {
|
||||
// Output an extra newline between top-level tables.
|
||||
// (The newline isn't written if nothing else has been written though.)
|
||||
enc.newline()
|
||||
}
|
||||
if len(key) > 0 {
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.newline()
|
||||
}
|
||||
enc.eMapOrStruct(key, rv)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv)
|
||||
case reflect.Struct:
|
||||
enc.eStruct(key, rv)
|
||||
default:
|
||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
||||
rt := rv.Type()
|
||||
if rt.Key().Kind() != reflect.String {
|
||||
encPanic(errNonString)
|
||||
}
|
||||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string) {
|
||||
sort.Strings(mapKeys)
|
||||
for _, mapKey := range mapKeys {
|
||||
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
if isNil(mrv) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
enc.encode(key.add(mapKey), mrv)
|
||||
}
|
||||
}
|
||||
writeMapKeys(mapKeysDirect)
|
||||
writeMapKeys(mapKeysSub)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table, then all keys under it will be in that
|
||||
// table (not the one we're writing here).
|
||||
rt := rv.Type()
|
||||
var fieldsDirect, fieldsSub [][]int
|
||||
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
// skip unexported fields
|
||||
if f.PkgPath != "" && !f.Anonymous {
|
||||
continue
|
||||
}
|
||||
frv := rv.Field(i)
|
||||
if f.Anonymous {
|
||||
t := f.Type
|
||||
switch t.Kind() {
|
||||
case reflect.Struct:
|
||||
// Treat anonymous struct fields with
|
||||
// tag names as though they are not
|
||||
// anonymous, like encoding/json does.
|
||||
if getOptions(f.Tag).name == "" {
|
||||
addFields(t, frv, f.Index)
|
||||
continue
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct &&
|
||||
getOptions(f.Tag).name == "" {
|
||||
if !frv.IsNil() {
|
||||
addFields(t.Elem(), frv.Elem(), f.Index)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Fall through to the normal field encoding logic below
|
||||
// for non-struct anonymous fields.
|
||||
}
|
||||
}
|
||||
|
||||
if typeIsHash(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
var writeFields = func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
sft := rt.FieldByIndex(fieldIndex)
|
||||
sf := rv.FieldByIndex(fieldIndex)
|
||||
if isNil(sf) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
opts := getOptions(sft.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
keyName := sft.Name
|
||||
if opts.name != "" {
|
||||
keyName = opts.name
|
||||
}
|
||||
if opts.omitempty && isEmpty(sf) {
|
||||
continue
|
||||
}
|
||||
if opts.omitzero && isZero(sf) {
|
||||
continue
|
||||
}
|
||||
|
||||
enc.encode(key.add(keyName), sf)
|
||||
}
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
}
|
||||
|
||||
// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found (for example, a nil
// pointer/interface or an invalid reflect.Value).
func tomlTypeOfGo(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() {
		return nil
	}
	switch rv.Kind() {
	case reflect.Bool:
		return tomlBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64:
		return tomlInteger
	case reflect.Float32, reflect.Float64:
		return tomlFloat
	case reflect.Array, reflect.Slice:
		// A slice/array whose elements are tables is the distinct
		// "array of tables" TOML type.
		if typeEqual(tomlHash, tomlArrayType(rv)) {
			return tomlArrayHash
		}
		return tomlArray
	case reflect.Ptr, reflect.Interface:
		// Unwrap and classify the underlying value.
		return tomlTypeOfGo(rv.Elem())
	case reflect.String:
		return tomlString
	case reflect.Map:
		return tomlHash
	case reflect.Struct:
		switch rv.Interface().(type) {
		case time.Time:
			return tomlDatetime
		case TextMarshaler:
			// TextMarshaler values are encoded as TOML strings.
			return tomlString
		default:
			return tomlHash
		}
	default:
		panic("unexpected reflect.Kind: " + rv.Kind().String())
	}
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
		return nil
	}
	firstType := tomlTypeOfGo(rv.Index(0))
	if firstType == nil {
		encPanic(errArrayNilElement)
	}

	// Every remaining element must have the same TOML type as the first;
	// mixed-type or nil elements are rejected via encPanic.
	rvlen := rv.Len()
	for i := 1; i < rvlen; i++ {
		elem := rv.Index(i)
		switch elemType := tomlTypeOfGo(elem); {
		case elemType == nil:
			encPanic(errArrayNilElement)
		case !typeEqual(firstType, elemType):
			encPanic(errArrayMixedElementTypes)
		}
	}
	// If we have a nested array, then we must make sure that the nested
	// array contains ONLY primitives.
	// This checks arbitrarily nested arrays.
	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
		nest := tomlArrayType(eindirect(rv.Index(0)))
		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
			encPanic(errArrayNoTable)
		}
	}
	return firstType
}
|
||||
|
||||
type tagOptions struct {
|
||||
skip bool // "-"
|
||||
name string
|
||||
omitempty bool
|
||||
omitzero bool
|
||||
}
|
||||
|
||||
func getOptions(tag reflect.StructTag) tagOptions {
|
||||
t := tag.Get("toml")
|
||||
if t == "-" {
|
||||
return tagOptions{skip: true}
|
||||
}
|
||||
var opts tagOptions
|
||||
parts := strings.Split(t, ",")
|
||||
opts.name = parts[0]
|
||||
for _, s := range parts[1:] {
|
||||
switch s {
|
||||
case "omitempty":
|
||||
opts.omitempty = true
|
||||
case "omitzero":
|
||||
opts.omitzero = true
|
||||
}
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
func isZero(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return rv.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return rv.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return rv.Float() == 0.0
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmpty(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||
return rv.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !rv.Bool()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newline writes a newline separator, but only once some output has been
// produced; this prevents a leading blank line at the top of the document.
func (enc *Encoder) newline() {
	if enc.hasWritten {
		enc.wf("\n")
	}
}
|
||||
|
||||
// keyEqElement writes a single "key = value" line for a non-table value,
// indented for the key's depth. An empty key or an empty key piece aborts
// encoding via encPanic.
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	panicIfInvalidKey(key)
	// Only the last key piece is written here; parent pieces were emitted
	// as table headers.
	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
	enc.eElement(val)
	enc.newline()
}
|
||||
|
||||
// wf writes a formatted string to the buffered writer and records that
// output has been produced (see newline). A write error aborts encoding
// via encPanic, which safeEncode converts back into a returned error.
func (enc *Encoder) wf(format string, v ...interface{}) {
	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
		encPanic(err)
	}
	enc.hasWritten = true
}
|
||||
|
||||
// indentStr returns the indentation prefix for a key: one Indent unit per
// nesting level below the top (len(key)-1 repetitions).
func (enc *Encoder) indentStr(key Key) string {
	return strings.Repeat(enc.Indent, len(key)-1)
}
|
||||
|
||||
// encPanic aborts encoding by panicking with a tomlEncodeError wrapper,
// which safeEncode recovers and converts into an ordinary error.
func encPanic(err error) {
	panic(tomlEncodeError{err})
}
|
||||
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func isNil(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return rv.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// panicIfInvalidKey aborts encoding (via encPanic, so the error is
// recoverable by safeEncode) when any piece of the key is the empty string,
// which TOML does not allow as a key or table name.
func panicIfInvalidKey(key Key) {
	for _, k := range key {
		if len(k) == 0 {
			encPanic(e("Key '%s' is not a valid table name. Key names "+
				"cannot be empty.", key.maybeQuotedAll()))
		}
	}
}
|
||||
|
||||
// isValidKeyName reports whether s may be used as a key name; only the
// empty string is rejected.
func isValidKeyName(s string) bool {
	return s != ""
}
|
||||
19
vendor/github.com/BurntSushi/toml/encoding_types.go
generated
vendored
Normal file
19
vendor/github.com/BurntSushi/toml/encoding_types.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// +build go1.2
|
||||
|
||||
package toml
|
||||
|
||||
// In order to support Go 1.1, we define our own TextMarshaler and
|
||||
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
|
||||
// standard library interfaces.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
)
|
||||
|
||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextMarshaler encoding.TextMarshaler
|
||||
|
||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
|
||||
// here so that Go 1.1 can be supported.
|
||||
type TextUnmarshaler encoding.TextUnmarshaler
|
||||
18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
generated
vendored
Normal file
18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
// +build !go1.2
|
||||
|
||||
package toml
|
||||
|
||||
// These interfaces were introduced in Go 1.2, so we add them manually when
|
||||
// compiling for Go 1.1.
|
||||
|
||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextMarshaler interface {
|
||||
MarshalText() (text []byte, err error)
|
||||
}
|
||||
|
||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
|
||||
// here so that Go 1.1 can be supported.
|
||||
type TextUnmarshaler interface {
|
||||
UnmarshalText(text []byte) error
|
||||
}
|
||||
953
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
Normal file
953
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
Normal file
@@ -0,0 +1,953 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota
|
||||
itemNIL // used in the parser to indicate no type
|
||||
itemEOF
|
||||
itemText
|
||||
itemString
|
||||
itemRawString
|
||||
itemMultilineString
|
||||
itemRawMultilineString
|
||||
itemBool
|
||||
itemInteger
|
||||
itemFloat
|
||||
itemDatetime
|
||||
itemArray // the start of an array
|
||||
itemArrayEnd
|
||||
itemTableStart
|
||||
itemTableEnd
|
||||
itemArrayTableStart
|
||||
itemArrayTableEnd
|
||||
itemKeyStart
|
||||
itemCommentStart
|
||||
itemInlineTableStart
|
||||
itemInlineTableEnd
|
||||
)
|
||||
|
||||
const (
|
||||
eof = 0
|
||||
comma = ','
|
||||
tableStart = '['
|
||||
tableEnd = ']'
|
||||
arrayTableStart = '['
|
||||
arrayTableEnd = ']'
|
||||
tableSep = '.'
|
||||
keySep = '='
|
||||
arrayStart = '['
|
||||
arrayEnd = ']'
|
||||
commentStart = '#'
|
||||
stringStart = '"'
|
||||
stringEnd = '"'
|
||||
rawStringStart = '\''
|
||||
rawStringEnd = '\''
|
||||
inlineTableStart = '{'
|
||||
inlineTableEnd = '}'
|
||||
)
|
||||
|
||||
type stateFn func(lx *lexer) stateFn
|
||||
|
||||
type lexer struct {
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
|
||||
// Allow for backing up up to three runes.
|
||||
// This is necessary because TOML contains 3-rune tokens (""" and ''').
|
||||
prevWidths [3]int
|
||||
nprev int // how many of prevWidths are in use
|
||||
// If we emit an eof, we can still back up, but it is not OK to call
|
||||
// next again.
|
||||
atEOF bool
|
||||
|
||||
// A stack of state functions used to maintain context.
|
||||
// The idea is to reuse parts of the state machine in various places.
|
||||
// For example, values can appear at the top level or within arbitrarily
|
||||
// nested arrays. The last state on the stack is used after a value has
|
||||
// been lexed. Similarly for comments.
|
||||
stack []stateFn
|
||||
}
|
||||
|
||||
type item struct {
|
||||
typ itemType
|
||||
val string
|
||||
line int
|
||||
}
|
||||
|
||||
func (lx *lexer) nextItem() item {
|
||||
for {
|
||||
select {
|
||||
case item := <-lx.items:
|
||||
return item
|
||||
default:
|
||||
lx.state = lx.state(lx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func lex(input string) *lexer {
|
||||
lx := &lexer{
|
||||
input: input,
|
||||
state: lexTop,
|
||||
line: 1,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
}
|
||||
return lx
|
||||
}
|
||||
|
||||
func (lx *lexer) push(state stateFn) {
|
||||
lx.stack = append(lx.stack, state)
|
||||
}
|
||||
|
||||
func (lx *lexer) pop() stateFn {
|
||||
if len(lx.stack) == 0 {
|
||||
return lx.errorf("BUG in lexer: no states to pop")
|
||||
}
|
||||
last := lx.stack[len(lx.stack)-1]
|
||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
||||
return last
|
||||
}
|
||||
|
||||
func (lx *lexer) current() string {
|
||||
return lx.input[lx.start:lx.pos]
|
||||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
lx.items <- item{typ, lx.current(), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) emitTrim(typ itemType) {
|
||||
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) next() (r rune) {
|
||||
if lx.atEOF {
|
||||
panic("next called after EOF")
|
||||
}
|
||||
if lx.pos >= len(lx.input) {
|
||||
lx.atEOF = true
|
||||
return eof
|
||||
}
|
||||
|
||||
if lx.input[lx.pos] == '\n' {
|
||||
lx.line++
|
||||
}
|
||||
lx.prevWidths[2] = lx.prevWidths[1]
|
||||
lx.prevWidths[1] = lx.prevWidths[0]
|
||||
if lx.nprev < 3 {
|
||||
lx.nprev++
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
lx.prevWidths[0] = w
|
||||
lx.pos += w
|
||||
return r
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (lx *lexer) ignore() {
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can be called only twice between calls to next.
|
||||
func (lx *lexer) backup() {
|
||||
if lx.atEOF {
|
||||
lx.atEOF = false
|
||||
return
|
||||
}
|
||||
if lx.nprev < 1 {
|
||||
panic("backed up too far")
|
||||
}
|
||||
w := lx.prevWidths[0]
|
||||
lx.prevWidths[0] = lx.prevWidths[1]
|
||||
lx.prevWidths[1] = lx.prevWidths[2]
|
||||
lx.nprev--
|
||||
lx.pos -= w
|
||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
||||
lx.line--
|
||||
}
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's equal to `valid`.
|
||||
func (lx *lexer) accept(valid rune) bool {
|
||||
if lx.next() == valid {
|
||||
return true
|
||||
}
|
||||
lx.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (lx *lexer) peek() rune {
|
||||
r := lx.next()
|
||||
lx.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// skip ignores all input that matches the given predicate.
|
||||
func (lx *lexer) skip(pred func(rune) bool) {
|
||||
for {
|
||||
r := lx.next()
|
||||
if pred(r) {
|
||||
continue
|
||||
}
|
||||
lx.backup()
|
||||
lx.ignore()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
||||
// Note that any value that is a character is escaped if it's a special
|
||||
// character (newlines, tabs, etc.).
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
lx.items <- item{
|
||||
itemError,
|
||||
fmt.Sprintf(format, values...),
|
||||
lx.line,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTop consumes elements at the top level of TOML data.
|
||||
func lexTop(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isWhitespace(r) || isNL(r) {
|
||||
return lexSkip(lx, lexTop)
|
||||
}
|
||||
switch r {
|
||||
case commentStart:
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case tableStart:
|
||||
return lexTableStart
|
||||
case eof:
|
||||
if lx.pos > lx.start {
|
||||
return lx.errorf("unexpected EOF")
|
||||
}
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// At this point, the only valid item can be a key, so we back up
|
||||
// and let the key lexer do the rest.
|
||||
lx.backup()
|
||||
lx.push(lexTopEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
|
||||
// or a table.) It must see only whitespace, and will turn back to lexTop
|
||||
// upon a newline. If it sees EOF, it will quit the lexer successfully.
|
||||
func lexTopEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == commentStart:
|
||||
// a comment will read to a newline for us.
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case isWhitespace(r):
|
||||
return lexTopEnd
|
||||
case isNL(r):
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
case r == eof:
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
return lx.errorf("expected a top-level item to end with a newline, "+
|
||||
"comment, or EOF, but got %q instead", r)
|
||||
}
|
||||
|
||||
// lexTable lexes the beginning of a table. Namely, it makes sure that
|
||||
// it starts with a character other than '.' and ']'.
|
||||
// It assumes that '[' has already been consumed.
|
||||
// It also handles the case that this is an item in an array of tables.
|
||||
// e.g., '[[name]]'.
|
||||
func lexTableStart(lx *lexer) stateFn {
|
||||
if lx.peek() == arrayTableStart {
|
||||
lx.next()
|
||||
lx.emit(itemArrayTableStart)
|
||||
lx.push(lexArrayTableEnd)
|
||||
} else {
|
||||
lx.emit(itemTableStart)
|
||||
lx.push(lexTableEnd)
|
||||
}
|
||||
return lexTableNameStart
|
||||
}
|
||||
|
||||
func lexTableEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
if r := lx.next(); r != arrayTableEnd {
|
||||
return lx.errorf("expected end of table array name delimiter %q, "+
|
||||
"but got %q instead", arrayTableEnd, r)
|
||||
}
|
||||
lx.emit(itemArrayTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexTableNameStart(lx *lexer) stateFn {
|
||||
lx.skip(isWhitespace)
|
||||
switch r := lx.peek(); {
|
||||
case r == tableEnd || r == eof:
|
||||
return lx.errorf("unexpected end of table name " +
|
||||
"(table names cannot be empty)")
|
||||
case r == tableSep:
|
||||
return lx.errorf("unexpected table separator " +
|
||||
"(table names cannot be empty)")
|
||||
case r == stringStart || r == rawStringStart:
|
||||
lx.ignore()
|
||||
lx.push(lexTableNameEnd)
|
||||
return lexValue // reuse string lexing
|
||||
default:
|
||||
return lexBareTableName
|
||||
}
|
||||
}
|
||||
|
||||
// lexBareTableName lexes the name of a table. It assumes that at least one
|
||||
// valid character for the table has already been read.
|
||||
func lexBareTableName(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isBareKeyChar(r) {
|
||||
return lexBareTableName
|
||||
}
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexTableNameEnd
|
||||
}
|
||||
|
||||
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.next(); {
	case isWhitespace(r):
		return lexTableNameEnd
	case r == tableSep:
		// '.' separates dotted name parts; lex the next part.
		lx.ignore()
		return lexTableNameStart
	case r == tableEnd:
		// Pop back to lexTableEnd or lexArrayTableEnd, whichever was
		// pushed by lexTableStart.
		return lx.pop()
	default:
		return lx.errorf("expected '.' or ']' to end table name, "+
			"but got %q instead", r)
	}
}
|
||||
|
||||
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace. Both bare and quoted keys are handled.
func lexKeyStart(lx *lexer) stateFn {
	r := lx.peek()
	switch {
	case r == keySep:
		// '=' with no key before it.
		return lx.errorf("unexpected key separator %q", keySep)
	case isWhitespace(r) || isNL(r):
		lx.next()
		return lexSkip(lx, lexKeyStart)
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.emit(itemKeyStart)
		lx.push(lexKeyEnd)
		return lexValue // reuse string lexing
	default:
		lx.ignore()
		lx.emit(itemKeyStart)
		return lexBareKey
	}
}
|
||||
|
||||
// lexBareKey consumes the text of a bare key. Assumes that the first character
|
||||
// (which is not whitespace) has not yet been consumed.
|
||||
func lexBareKey(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case isBareKeyChar(r):
|
||||
return lexBareKey
|
||||
case isWhitespace(r):
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexKeyEnd
|
||||
case r == keySep:
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexKeyEnd
|
||||
default:
|
||||
return lx.errorf("bare keys cannot contain %q", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
	switch r := lx.next(); {
	case r == keySep:
		// Found '='; a value must follow.
		return lexSkip(lx, lexValue)
	case isWhitespace(r):
		return lexSkip(lx, lexKeyEnd)
	default:
		return lx.errorf("expected key separator %q, but got %q instead",
			keySep, r)
	}
}
|
||||
|
||||
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
	// We allow whitespace to precede a value, but NOT newlines.
	// In array syntax, the array states are responsible for ignoring newlines.
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexValue)
	case isDigit(r):
		lx.backup() // avoid an extra state and use the same as above
		return lexNumberOrDateStart
	}
	switch r {
	case arrayStart:
		lx.ignore()
		lx.emit(itemArray)
		return lexArrayValue
	case inlineTableStart:
		lx.ignore()
		lx.emit(itemInlineTableStart)
		return lexInlineTableValue
	case stringStart:
		if lx.accept(stringStart) {
			if lx.accept(stringStart) {
				lx.ignore() // Ignore """
				return lexMultilineString
			}
			// Only two quotes: an empty basic string. Re-lex the
			// second quote as the closing delimiter.
			lx.backup()
		}
		lx.ignore() // ignore the '"'
		return lexString
	case rawStringStart:
		if lx.accept(rawStringStart) {
			if lx.accept(rawStringStart) {
				lx.ignore() // Ignore '''
				return lexMultilineRawString
			}
			// Only two quotes: an empty literal string.
			lx.backup()
		}
		lx.ignore() // ignore the "'"
		return lexRawString
	case '+', '-':
		return lexNumberStart
	case '.': // special error case, be kind to users
		return lx.errorf("floats must start with a digit, not '.'")
	}
	if unicode.IsLetter(r) {
		// Be permissive here; lexBool will give a nice error if the
		// user wrote something like
		//   x = foo
		// (i.e. not 'true' or 'false' but is something else word-like.)
		lx.backup()
		return lexBool
	}
	return lx.errorf("expected value but found %q instead", r)
}
|
||||
|
||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValue)
	case r == commentStart:
		lx.push(lexArrayValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == arrayEnd:
		// NOTE(caleb): The spec isn't clear about whether you can have
		// a trailing comma or not, so we'll allow it.
		return lexArrayEnd
	}

	// Anything else starts a value; control returns here via
	// lexArrayValueEnd after the value is lexed.
	lx.backup()
	lx.push(lexArrayValueEnd)
	return lexValue
}
|
||||
|
||||
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValueEnd)
	case r == commentStart:
		lx.push(lexArrayValueEnd)
		return lexCommentStart
	case r == comma:
		// More values to come.
		lx.ignore()
		return lexArrayValue // move on to the next value
	case r == arrayEnd:
		return lexArrayEnd
	}
	return lx.errorf(
		"expected a comma or array terminator %q, but got %q instead",
		arrayEnd, r,
	)
}
|
||||
|
||||
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemArrayEnd)
	// Resume whatever state started the value (e.g. lexKeyEnd pushed it).
	return lx.pop()
}
|
||||
|
||||
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValue)
	case isNL(r):
		// Inline tables must fit on a single line per the spec.
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	// Anything else begins a key; control returns here via
	// lexInlineTableValueEnd after the pair is lexed.
	lx.backup()
	lx.push(lexInlineTableValueEnd)
	return lexKeyStart
}
|
||||
|
||||
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValueEnd)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValueEnd)
		return lexCommentStart
	case r == comma:
		// More pairs to come.
		lx.ignore()
		return lexInlineTableValue
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	return lx.errorf("expected a comma or an inline table terminator %q, "+
		"but got %q instead", inlineTableEnd, r)
}
|
||||
|
||||
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemInlineTableEnd)
	return lx.pop()
}
|
||||
|
||||
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == '\\':
		// Defer to the escape states; they pop back to lexString.
		lx.push(lexString)
		return lexStringEscape
	case r == stringEnd:
		// Emit the contents without the closing quote, then re-consume
		// and discard the quote itself.
		lx.backup()
		lx.emit(itemString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexString
}
|
||||
|
||||
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case '\\':
		return lexMultilineStringEscape
	case stringEnd:
		if lx.accept(stringEnd) {
			if lx.accept(stringEnd) {
				// Three '"' in a row terminate the string: back up
				// over the delimiter, emit the contents, then
				// re-consume and discard the three quotes.
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			// Only two quotes: part of the string body.
			lx.backup()
		}
	}
	return lexMultilineString
}
|
||||
|
||||
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == rawStringEnd:
		// Emit the contents without the closing quote, then discard it.
		lx.backup()
		lx.emit(itemRawString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexRawString
}
|
||||
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case rawStringEnd:
		if lx.accept(rawStringEnd) {
			if lx.accept(rawStringEnd) {
				// Three "'" in a row terminate the string: back up
				// over the delimiter, emit the contents, then
				// re-consume and discard the three quotes.
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemRawMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			// Only two quotes: part of the string body.
			lx.backup()
		}
	}
	return lexMultilineRawString
}
|
||||
|
||||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
	// Handle the special case first: a backslash followed by a newline
	// ("line ending backslash") is swallowed, so just keep lexing.
	if isNL(lx.next()) {
		return lexMultilineString
	}
	// Otherwise it is a regular escape; pop back to lexMultilineString
	// once lexStringEscape is done.
	lx.backup()
	lx.push(lexMultilineString)
	return lexStringEscape(lx)
}
|
||||
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'b':
|
||||
fallthrough
|
||||
case 't':
|
||||
fallthrough
|
||||
case 'n':
|
||||
fallthrough
|
||||
case 'f':
|
||||
fallthrough
|
||||
case 'r':
|
||||
fallthrough
|
||||
case '"':
|
||||
fallthrough
|
||||
case '\\':
|
||||
return lx.pop()
|
||||
case 'u':
|
||||
return lexShortUnicodeEscape
|
||||
case 'U':
|
||||
return lexLongUnicodeEscape
|
||||
}
|
||||
return lx.errorf("invalid escape character %q; only the following "+
|
||||
"escape characters are allowed: "+
|
||||
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
|
||||
}
|
||||
|
||||
// lexShortUnicodeEscape consumes the four hex digits of a '\uXXXX' escape
// and pops back to the calling string state.
func lexShortUnicodeEscape(lx *lexer) stateFn {
	var r rune
	for i := 0; i < 4; i++ {
		r = lx.next()
		if !isHexadecimal(r) {
			return lx.errorf(`expected four hexadecimal digits after '\u', `+
				"but got %q instead", lx.current())
		}
	}
	return lx.pop()
}
|
||||
|
||||
// lexLongUnicodeEscape consumes the eight hex digits of a '\UXXXXXXXX' escape
// and pops back to the calling string state.
func lexLongUnicodeEscape(lx *lexer) stateFn {
	var r rune
	for i := 0; i < 8; i++ {
		r = lx.next()
		if !isHexadecimal(r) {
			return lx.errorf(`expected eight hexadecimal digits after '\U', `+
				"but got %q instead", lx.current())
		}
	}
	return lx.pop()
}
|
||||
|
||||
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
// lexValue backs up before dispatching here, so the first character is
// consumed by this state.
func lexNumberOrDateStart(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '_':
		return lexNumber
	case 'e', 'E':
		return lexFloat
	case '.':
		return lx.errorf("floats must start with a digit, not '.'")
	}
	return lx.errorf("expected a digit but got %q", r)
}
|
||||
|
||||
// lexNumberOrDate consumes either an integer, float or datetime.
// A '-' disambiguates in favor of a datetime; '_', '.', 'e', 'E' commit to a
// number; anything else terminates an integer.
func lexNumberOrDate(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '-':
		return lexDatetime
	case '_':
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}
|
||||
|
||||
// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexDatetime
	}
	switch r {
	// Punctuation that can appear in RFC 3339 timestamps.
	case '-', 'T', ':', '.', 'Z', '+':
		return lexDatetime
	}

	lx.backup()
	lx.emit(itemDatetime)
	return lx.pop()
}
|
||||
|
||||
// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
	// We MUST see a digit. Even floats have to start with a digit.
	r := lx.next()
	if !isDigit(r) {
		if r == '.' {
			return lx.errorf("floats must start with a digit, not '.'")
		}
		return lx.errorf("expected a digit but got %q", r)
	}
	return lexNumber
}
|
||||
|
||||
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumber
	}
	switch r {
	case '_':
		// Underscore separators; the parser validates their placement.
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}
|
||||
|
||||
// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexFloat
	}
	switch r {
	case '_', '.', '-', '+', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemFloat)
	return lx.pop()
}
|
||||
|
||||
// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
	var rs []rune
	for {
		r := lx.next()
		if !unicode.IsLetter(r) {
			lx.backup()
			break
		}
		rs = append(rs, r)
	}
	s := string(rs)
	switch s {
	case "true", "false":
		lx.emit(itemBool)
		return lx.pop()
	}
	// Any other word-like token is an error (see lexValue).
	return lx.errorf("expected value but found %q instead", s)
}
|
||||
|
||||
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemCommentStart)
	return lexComment
}
|
||||
|
||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
	r := lx.peek()
	if isNL(r) || r == eof {
		// Comment text is emitted without the trailing newline.
		lx.emit(itemText)
		return lx.pop()
	}
	lx.next()
	return lexComment
}
|
||||
|
||||
// lexSkip ignores all slurped input and moves on to the next state.
// Note: the outer lx parameter is intentionally shadowed by the closure's own
// parameter; only the lexer passed to the returned state is used.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
	return func(lx *lexer) stateFn {
		lx.ignore()
		return nextState
	}
}
|
||||
|
||||
// isWhitespace returns true if `r` is a whitespace character according
// to the spec (tab or space only; newlines are handled by isNL).
func isWhitespace(r rune) bool {
	switch r {
	case '\t', ' ':
		return true
	}
	return false
}
|
||||
|
||||
// isNL reports whether r is a newline character ('\n' or '\r').
func isNL(r rune) bool {
	switch r {
	case '\n', '\r':
		return true
	}
	return false
}
|
||||
|
||||
// isDigit reports whether r is an ASCII decimal digit.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}
|
||||
|
||||
// isHexadecimal reports whether r is an ASCII hexadecimal digit,
// in either case.
func isHexadecimal(r rune) bool {
	switch {
	case '0' <= r && r <= '9',
		'a' <= r && r <= 'f',
		'A' <= r && r <= 'F':
		return true
	}
	return false
}
|
||||
|
||||
// isBareKeyChar reports whether r may appear in a bare (unquoted) key:
// ASCII letters, ASCII digits, '_' and '-'.
func isBareKeyChar(r rune) bool {
	switch {
	case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', '0' <= r && r <= '9':
		return true
	case r == '_', r == '-':
		return true
	}
	return false
}
|
||||
|
||||
func (itype itemType) String() string {
|
||||
switch itype {
|
||||
case itemError:
|
||||
return "Error"
|
||||
case itemNIL:
|
||||
return "NIL"
|
||||
case itemEOF:
|
||||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
case itemInteger:
|
||||
return "Integer"
|
||||
case itemFloat:
|
||||
return "Float"
|
||||
case itemDatetime:
|
||||
return "DateTime"
|
||||
case itemTableStart:
|
||||
return "TableStart"
|
||||
case itemTableEnd:
|
||||
return "TableEnd"
|
||||
case itemKeyStart:
|
||||
return "KeyStart"
|
||||
case itemArray:
|
||||
return "Array"
|
||||
case itemArrayEnd:
|
||||
return "ArrayEnd"
|
||||
case itemCommentStart:
|
||||
return "CommentStart"
|
||||
}
|
||||
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
|
||||
}
|
||||
|
||||
// String renders the item as "(Type, value)" for debugging. Note the
// receiver deliberately shadows the type name 'item'.
func (item item) String() string {
	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
|
||||
592
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
Normal file
592
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
Normal file
@@ -0,0 +1,592 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// parser consumes items from the lexer and builds the decoded key/value
// tree along with per-key TOML type information.
type parser struct {
	mapping map[string]interface{} // decoded document rooted at the top level
	types   map[string]tomlType    // TOML type for each fully-qualified key
	lx      *lexer                 // token source

	// A list of keys in the order that they appear in the TOML data.
	ordered []Key

	// the full key for the current hash in scope
	context Key

	// the base key name for everything except hashes
	currentKey string

	// rough approximation of line number
	approxLine int

	// A map of 'key.group.names' to whether they were created implicitly.
	implicits map[string]bool
}
|
||||
|
||||
// parseError is panicked inside the parser and recovered by parse(),
// converting internal panics into ordinary errors for callers.
type parseError string

func (pe parseError) Error() string {
	return string(pe)
}
|
||||
|
||||
// parse lexes and parses the TOML document in data. Internal panics of type
// parseError are converted back into ordinary errors; any other panic is
// re-raised as a genuine bug.
func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(parseError); ok {
				return
			}
			panic(r)
		}
	}()

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]bool),
	}
	// Consume top-level items until the lexer signals EOF.
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}
|
||||
|
||||
// panicf reports a user-facing parse error (with approximate line and key
// context) by panicking with a parseError, which parse() recovers.
func (p *parser) panicf(format string, v ...interface{}) {
	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
		p.approxLine, p.current(), fmt.Sprintf(format, v...))
	panic(parseError(msg))
}
|
||||
|
||||
// next returns the next item from the lexer, converting lexer errors into
// parse errors.
func (p *parser) next() item {
	it := p.lx.nextItem()
	if it.typ == itemError {
		p.panicf("%s", it.val)
	}
	return it
}
|
||||
|
||||
// bug panics with a plain (non-parseError) message for internal invariant
// violations; parse() re-raises it rather than converting it to an error.
func (p *parser) bug(format string, v ...interface{}) {
	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
|
||||
|
||||
// expect consumes the next item and asserts it has the given type,
// panicking via bug() otherwise.
func (p *parser) expect(typ itemType) item {
	it := p.next()
	p.assertEqual(typ, it.typ)
	return it
}
|
||||
|
||||
// assertEqual panics via bug() if the expected and actual item types differ.
func (p *parser) assertEqual(expected, got itemType) {
	if expected != got {
		p.bug("Expected '%s' but got '%s'.", expected, got)
	}
}
|
||||
|
||||
// topLevel dispatches one top-level item: a comment, a table header, an
// array-of-tables header, or a key/value pair.
func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart:
		p.approxLine = item.line
		p.expect(itemText)
	case itemTableStart:
		// Collect the (possibly dotted) table name parts up to TableEnd.
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemTableEnd, kg.typ)

		p.establishContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart:
		// Same as above, but appends a new table to an array of tables.
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemArrayTableEnd, kg.typ)

		p.establishContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart:
		kname := p.next()
		p.approxLine = kname.line
		p.currentKey = p.keyString(kname)

		val, typ := p.value(p.next())
		p.setValue(p.currentKey, val)
		p.setType(p.currentKey, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}
|
||||
|
||||
// keyString gets a string for a key (or part of a key in a table name).
// Bare keys come through as itemText; quoted keys reuse the string value
// machinery (including escape processing).
func (p *parser) keyString(it item) string {
	switch it.typ {
	case itemText:
		return it.val
	case itemString, itemMultilineString,
		itemRawString, itemRawMultilineString:
		s, _ := p.value(it)
		return s.(string)
	default:
		p.bug("Unexpected key type: %s", it.typ)
		panic("unreachable")
	}
}
|
||||
|
||||
// value translates an expected value from the lexer into a Go value wrapped
|
||||
// as an empty interface.
|
||||
func (p *parser) value(it item) (interface{}, tomlType) {
|
||||
switch it.typ {
|
||||
case itemString:
|
||||
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
|
||||
case itemMultilineString:
|
||||
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
|
||||
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
|
||||
case itemRawString:
|
||||
return it.val, p.typeOfPrimitive(it)
|
||||
case itemRawMultilineString:
|
||||
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
|
||||
case itemBool:
|
||||
switch it.val {
|
||||
case "true":
|
||||
return true, p.typeOfPrimitive(it)
|
||||
case "false":
|
||||
return false, p.typeOfPrimitive(it)
|
||||
}
|
||||
p.bug("Expected boolean value, but got '%s'.", it.val)
|
||||
case itemInteger:
|
||||
if !numUnderscoresOK(it.val) {
|
||||
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
|
||||
it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
num, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
// Distinguish integer values. Normally, it'd be a bug if the lexer
|
||||
// provides an invalid integer, but it's possible that the number is
|
||||
// out of range of valid values (which the lexer cannot determine).
|
||||
// So mark the former as a bug but the latter as a legitimate user
|
||||
// error.
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Integer '%s' is out of the range of 64-bit "+
|
||||
"signed integers.", it.val)
|
||||
} else {
|
||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemFloat:
|
||||
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
||||
switch r {
|
||||
case '.', 'e', 'E':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
for _, part := range parts {
|
||||
if !numUnderscoresOK(part) {
|
||||
p.panicf("Invalid float %q: underscores must be "+
|
||||
"surrounded by digits", it.val)
|
||||
}
|
||||
}
|
||||
if !numPeriodsOK(it.val) {
|
||||
// As a special case, numbers like '123.' or '1.e2',
|
||||
// which are valid as far as Go/strconv are concerned,
|
||||
// must be rejected because TOML says that a fractional
|
||||
// part consists of '.' followed by 1+ digits.
|
||||
p.panicf("Invalid float %q: '.' must be followed "+
|
||||
"by one or more digits", it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
num, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Float '%s' is out of the range of 64-bit "+
|
||||
"IEEE-754 floating-point numbers.", it.val)
|
||||
} else {
|
||||
p.panicf("Invalid float value: %q", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemDatetime:
|
||||
var t time.Time
|
||||
var ok bool
|
||||
var err error
|
||||
for _, format := range []string{
|
||||
"2006-01-02T15:04:05Z07:00",
|
||||
"2006-01-02T15:04:05",
|
||||
"2006-01-02",
|
||||
} {
|
||||
t, err = time.ParseInLocation(format, it.val, time.Local)
|
||||
if err == nil {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
p.panicf("Invalid TOML Datetime: %q.", it.val)
|
||||
}
|
||||
return t, p.typeOfPrimitive(it)
|
||||
case itemArray:
|
||||
array := make([]interface{}, 0)
|
||||
types := make([]tomlType, 0)
|
||||
|
||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
val, typ := p.value(it)
|
||||
array = append(array, val)
|
||||
types = append(types, typ)
|
||||
}
|
||||
return array, p.typeOfArray(types)
|
||||
case itemInlineTableStart:
|
||||
var (
|
||||
hash = make(map[string]interface{})
|
||||
outerContext = p.context
|
||||
outerKey = p.currentKey
|
||||
)
|
||||
|
||||
p.context = append(p.context, p.currentKey)
|
||||
p.currentKey = ""
|
||||
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
|
||||
if it.typ != itemKeyStart {
|
||||
p.bug("Expected key start but instead found %q, around line %d",
|
||||
it.val, p.approxLine)
|
||||
}
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
// retrieve key
|
||||
k := p.next()
|
||||
p.approxLine = k.line
|
||||
kname := p.keyString(k)
|
||||
|
||||
// retrieve value
|
||||
p.currentKey = kname
|
||||
val, typ := p.value(p.next())
|
||||
// make sure we keep metadata up to date
|
||||
p.setType(kname, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
hash[kname] = val
|
||||
}
|
||||
p.context = outerContext
|
||||
p.currentKey = outerKey
|
||||
return hash, tomlHash
|
||||
}
|
||||
p.bug("Unexpected value type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores. The empty string and strings that
// begin or end with an underscore are rejected.
func numUnderscoresOK(s string) bool {
	prevOK := false
	for _, r := range s {
		if r == '_' {
			// An underscore must follow a non-underscore character.
			if !prevOK {
				return false
			}
			prevOK = false
			continue
		}
		prevOK = true
	}
	// Also rejects a trailing underscore (and the empty string).
	return prevOK
}
|
||||
|
||||
// numPeriodsOK checks whether every period in s is followed by a digit
// (so '123.' and '1.e2' are rejected, per the TOML float grammar).
func numPeriodsOK(s string) bool {
	afterDot := false
	for _, r := range s {
		if afterDot && (r < '0' || r > '9') {
			return false
		}
		afterDot = r == '.'
	}
	// A trailing '.' is also invalid.
	return !afterDot
}
|
||||
|
||||
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			// The intermediate key holds a primitive value: conflict.
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 5)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as "+
				"an array.", keyContext)
		}
	} else {
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	p.context = append(p.context, key[len(key)-1])
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var tmpHash interface{}
	var ok bool

	// Walk p.context from the top-level mapping down to the current hash.
	hash := p.mapping
	keyContext := make(Key, 0)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.bug("Expected hash to have type 'map[string]interface{}', but "+
				"it has '%T' instead.", tmpHash)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Typically, if the given key has already been set, then we have
		// to raise an error since duplicate keys are disallowed. However,
		// it's possible that a key was previously defined implicitly. In this
		// case, it is allowed to be redefined concretely. (See the
		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
		//
		// But we have to make sure to stop marking it as an implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}
	hash[key] = value
}
|
||||
|
||||
// setType sets the type of a particular value at a given key.
|
||||
// It should be called immediately AFTER setValue.
|
||||
//
|
||||
// Note that if `key` is empty, then the type given will be applied to the
|
||||
// current context (which is either a table or an array of tables).
|
||||
func (p *parser) setType(key string, typ tomlType) {
|
||||
keyContext := make(Key, 0, len(p.context)+1)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
}
|
||||
if len(key) > 0 { // allow type setting for hashes
|
||||
keyContext = append(keyContext, key)
|
||||
}
|
||||
p.types[keyContext.String()] = typ
|
||||
}
|
||||
|
||||
// addImplicit sets the given Key as having been created implicitly.
|
||||
func (p *parser) addImplicit(key Key) {
|
||||
p.implicits[key.String()] = true
|
||||
}
|
||||
|
||||
// removeImplicit stops tagging the given key as having been implicitly
|
||||
// created.
|
||||
func (p *parser) removeImplicit(key Key) {
|
||||
p.implicits[key.String()] = false
|
||||
}
|
||||
|
||||
// isImplicit returns true if the key group pointed to by the key was created
|
||||
// implicitly.
|
||||
func (p *parser) isImplicit(key Key) bool {
|
||||
return p.implicits[key.String()]
|
||||
}
|
||||
|
||||
// current returns the full key name of the current context.
|
||||
func (p *parser) current() string {
|
||||
if len(p.currentKey) == 0 {
|
||||
return p.context.String()
|
||||
}
|
||||
if len(p.context) == 0 {
|
||||
return p.currentKey
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
|
||||
}
|
||||
|
||||
func stripFirstNewline(s string) string {
|
||||
if len(s) == 0 || s[0] != '\n' {
|
||||
return s
|
||||
}
|
||||
return s[1:]
|
||||
}
|
||||
|
||||
func stripEscapedWhitespace(s string) string {
|
||||
esc := strings.Split(s, "\\\n")
|
||||
if len(esc) > 1 {
|
||||
for i := 1; i < len(esc); i++ {
|
||||
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
|
||||
}
|
||||
}
|
||||
return strings.Join(esc, "")
|
||||
}
|
||||
|
||||
func (p *parser) replaceEscapes(str string) string {
|
||||
var replaced []rune
|
||||
s := []byte(str)
|
||||
r := 0
|
||||
for r < len(s) {
|
||||
if s[r] != '\\' {
|
||||
c, size := utf8.DecodeRune(s[r:])
|
||||
r += size
|
||||
replaced = append(replaced, c)
|
||||
continue
|
||||
}
|
||||
r += 1
|
||||
if r >= len(s) {
|
||||
p.bug("Escape sequence at end of string.")
|
||||
return ""
|
||||
}
|
||||
switch s[r] {
|
||||
default:
|
||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
||||
return ""
|
||||
case 'b':
|
||||
replaced = append(replaced, rune(0x0008))
|
||||
r += 1
|
||||
case 't':
|
||||
replaced = append(replaced, rune(0x0009))
|
||||
r += 1
|
||||
case 'n':
|
||||
replaced = append(replaced, rune(0x000A))
|
||||
r += 1
|
||||
case 'f':
|
||||
replaced = append(replaced, rune(0x000C))
|
||||
r += 1
|
||||
case 'r':
|
||||
replaced = append(replaced, rune(0x000D))
|
||||
r += 1
|
||||
case '"':
|
||||
replaced = append(replaced, rune(0x0022))
|
||||
r += 1
|
||||
case '\\':
|
||||
replaced = append(replaced, rune(0x005C))
|
||||
r += 1
|
||||
case 'u':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 5
|
||||
case 'U':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+9). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 9
|
||||
}
|
||||
}
|
||||
return string(replaced)
|
||||
}
|
||||
|
||||
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
|
||||
s := string(bs)
|
||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||
if err != nil {
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
||||
"lexer claims it's OK: %s", s, err)
|
||||
}
|
||||
if !utf8.ValidRune(rune(hex)) {
|
||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
||||
}
|
||||
return rune(hex)
|
||||
}
|
||||
|
||||
func isStringType(ty itemType) bool {
|
||||
return ty == itemString || ty == itemMultilineString ||
|
||||
ty == itemRawString || ty == itemRawMultilineString
|
||||
}
|
||||
91
vendor/github.com/BurntSushi/toml/type_check.go
generated
vendored
Normal file
91
vendor/github.com/BurntSushi/toml/type_check.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
package toml
|
||||
|
||||
// tomlType represents any Go type that corresponds to a TOML type.
|
||||
// While the first draft of the TOML spec has a simplistic type system that
|
||||
// probably doesn't need this level of sophistication, we seem to be militating
|
||||
// toward adding real composite types.
|
||||
type tomlType interface {
|
||||
typeString() string
|
||||
}
|
||||
|
||||
// typeEqual accepts any two types and returns true if they are equal.
|
||||
func typeEqual(t1, t2 tomlType) bool {
|
||||
if t1 == nil || t2 == nil {
|
||||
return false
|
||||
}
|
||||
return t1.typeString() == t2.typeString()
|
||||
}
|
||||
|
||||
func typeIsHash(t tomlType) bool {
|
||||
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
|
||||
}
|
||||
|
||||
type tomlBaseType string
|
||||
|
||||
func (btype tomlBaseType) typeString() string {
|
||||
return string(btype)
|
||||
}
|
||||
|
||||
func (btype tomlBaseType) String() string {
|
||||
return btype.typeString()
|
||||
}
|
||||
|
||||
var (
|
||||
tomlInteger tomlBaseType = "Integer"
|
||||
tomlFloat tomlBaseType = "Float"
|
||||
tomlDatetime tomlBaseType = "Datetime"
|
||||
tomlString tomlBaseType = "String"
|
||||
tomlBool tomlBaseType = "Bool"
|
||||
tomlArray tomlBaseType = "Array"
|
||||
tomlHash tomlBaseType = "Hash"
|
||||
tomlArrayHash tomlBaseType = "ArrayHash"
|
||||
)
|
||||
|
||||
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
|
||||
// Primitive values are: Integer, Float, Datetime, String and Bool.
|
||||
//
|
||||
// Passing a lexer item other than the following will cause a BUG message
|
||||
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
|
||||
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
||||
switch lexItem.typ {
|
||||
case itemInteger:
|
||||
return tomlInteger
|
||||
case itemFloat:
|
||||
return tomlFloat
|
||||
case itemDatetime:
|
||||
return tomlDatetime
|
||||
case itemString:
|
||||
return tomlString
|
||||
case itemMultilineString:
|
||||
return tomlString
|
||||
case itemRawString:
|
||||
return tomlString
|
||||
case itemRawMultilineString:
|
||||
return tomlString
|
||||
case itemBool:
|
||||
return tomlBool
|
||||
}
|
||||
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// typeOfArray returns a tomlType for an array given a list of types of its
|
||||
// values.
|
||||
//
|
||||
// In the current spec, if an array is homogeneous, then its type is always
|
||||
// "Array". If the array is not homogeneous, an error is generated.
|
||||
func (p *parser) typeOfArray(types []tomlType) tomlType {
|
||||
// Empty arrays are cool.
|
||||
if len(types) == 0 {
|
||||
return tomlArray
|
||||
}
|
||||
|
||||
theType := types[0]
|
||||
for _, t := range types[1:] {
|
||||
if !typeEqual(theType, t) {
|
||||
p.panicf("Array contains values of type '%s' and '%s', but "+
|
||||
"arrays must be homogeneous.", theType, t)
|
||||
}
|
||||
}
|
||||
return tomlArray
|
||||
}
|
||||
242
vendor/github.com/BurntSushi/toml/type_fields.go
generated
vendored
Normal file
242
vendor/github.com/BurntSushi/toml/type_fields.go
generated
vendored
Normal file
@@ -0,0 +1,242 @@
|
||||
package toml
|
||||
|
||||
// Struct field handling is adapted from code in encoding/json:
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the Go distribution.
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A field represents a single field found in a struct.
|
||||
type field struct {
|
||||
name string // the name of the field (`toml` tag included)
|
||||
tag bool // whether field has a `toml` tag
|
||||
index []int // represents the depth of an anonymous field
|
||||
typ reflect.Type // the type of the field
|
||||
}
|
||||
|
||||
// byName sorts field by name, breaking ties with depth,
|
||||
// then breaking ties with "name came from toml tag", then
|
||||
// breaking ties with index sequence.
|
||||
type byName []field
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
|
||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byName) Less(i, j int) bool {
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
}
|
||||
if len(x[i].index) != len(x[j].index) {
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
if x[i].tag != x[j].tag {
|
||||
return x[i].tag
|
||||
}
|
||||
return byIndex(x).Less(i, j)
|
||||
}
|
||||
|
||||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].index[k] {
|
||||
return xik < x[j].index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
|
||||
// typeFields returns a list of fields that TOML should recognize for the given
|
||||
// type. The algorithm is breadth-first search over the set of structs to
|
||||
// include - the top struct and then any reachable anonymous structs.
|
||||
func typeFields(t reflect.Type) []field {
|
||||
// Anonymous fields to explore at the current level and the next.
|
||||
current := []field{}
|
||||
next := []field{{typ: t}}
|
||||
|
||||
// Count of queued names for current level and the next.
|
||||
count := map[reflect.Type]int{}
|
||||
nextCount := map[reflect.Type]int{}
|
||||
|
||||
// Types already visited at an earlier level.
|
||||
visited := map[reflect.Type]bool{}
|
||||
|
||||
// Fields found.
|
||||
var fields []field
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if visited[f.typ] {
|
||||
continue
|
||||
}
|
||||
visited[f.typ] = true
|
||||
|
||||
// Scan f.typ for fields to include.
|
||||
for i := 0; i < f.typ.NumField(); i++ {
|
||||
sf := f.typ.Field(i)
|
||||
if sf.PkgPath != "" && !sf.Anonymous { // unexported
|
||||
continue
|
||||
}
|
||||
opts := getOptions(sf.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
index := make([]int, len(f.index)+1)
|
||||
copy(index, f.index)
|
||||
index[len(f.index)] = i
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||
// Follow pointer.
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
// Record found field and index sequence.
|
||||
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
tagged := opts.name != ""
|
||||
name := opts.name
|
||||
if name == "" {
|
||||
name = sf.Name
|
||||
}
|
||||
fields = append(fields, field{name, tagged, index, ft})
|
||||
if count[f.typ] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so that the annihilation code will see a duplicate.
|
||||
// It only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, fields[len(fields)-1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Record new anonymous struct to explore in next round.
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
f := field{name: ft.Name(), index: index, typ: ft}
|
||||
next = append(next, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byName(fields))
|
||||
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with TOML tags are promoted.
|
||||
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with the name of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(byIndex(fields))
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to
|
||||
// have the same name, to find the single field that dominates the
|
||||
// others using Go's embedding rules, modified by the presence of
|
||||
// TOML tags. If there are multiple top-level fields, the boolean
|
||||
// will be false: This condition is an error in Go and we skip all
|
||||
// the fields.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order. The winner
|
||||
// must therefore be one with the shortest index length. Drop all
|
||||
// longer entries, which is easy: just truncate the slice.
|
||||
length := len(fields[0].index)
|
||||
tagged := -1 // Index of first tagged field.
|
||||
for i, f := range fields {
|
||||
if len(f.index) > length {
|
||||
fields = fields[:i]
|
||||
break
|
||||
}
|
||||
if f.tag {
|
||||
if tagged >= 0 {
|
||||
// Multiple tagged fields at the same level: conflict.
|
||||
// Return no field.
|
||||
return field{}, false
|
||||
}
|
||||
tagged = i
|
||||
}
|
||||
}
|
||||
if tagged >= 0 {
|
||||
return fields[tagged], true
|
||||
}
|
||||
// All remaining fields have the same length. If there's more than one,
|
||||
// we have a conflict (two fields named "X" at the same level) and we
|
||||
// return no field.
|
||||
if len(fields) > 1 {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
|
||||
|
||||
var fieldCache struct {
|
||||
sync.RWMutex
|
||||
m map[reflect.Type][]field
|
||||
}
|
||||
|
||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||
func cachedTypeFields(t reflect.Type) []field {
|
||||
fieldCache.RLock()
|
||||
f := fieldCache.m[t]
|
||||
fieldCache.RUnlock()
|
||||
if f != nil {
|
||||
return f
|
||||
}
|
||||
|
||||
// Compute fields without lock.
|
||||
// Might duplicate effort but won't hold other computations back.
|
||||
f = typeFields(t)
|
||||
if f == nil {
|
||||
f = []field{}
|
||||
}
|
||||
|
||||
fieldCache.Lock()
|
||||
if fieldCache.m == nil {
|
||||
fieldCache.m = map[reflect.Type][]field{}
|
||||
}
|
||||
fieldCache.m[t] = f
|
||||
fieldCache.Unlock()
|
||||
return f
|
||||
}
|
||||
7
vendor/github.com/lib/pq/LICENSE.md → vendor/github.com/a8m/mark/LICENSE
generated
vendored
7
vendor/github.com/lib/pq/LICENSE.md → vendor/github.com/a8m/mark/LICENSE
generated
vendored
@@ -1,8 +1,9 @@
|
||||
Copyright (c) 2011-2013, 'pq' Contributors
|
||||
Portions Copyright (C) 2011 Blake Mizerany
|
||||
The MIT License
|
||||
|
||||
Copyright (c) 2015 Ariel Mashraki
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
94
vendor/github.com/a8m/mark/cmd/mark/main.go
generated
vendored
Normal file
94
vendor/github.com/a8m/mark/cmd/mark/main.go
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
// mark command line tool. available at https://github.com/a8m/mark
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/a8m/mark"
|
||||
)
|
||||
|
||||
var (
|
||||
input = flag.String("i", "", "")
|
||||
output = flag.String("o", "", "")
|
||||
smarty = flag.Bool("smartypants", false, "")
|
||||
fractions = flag.Bool("fractions", false, "")
|
||||
)
|
||||
|
||||
var usage = `Usage: mark [options...] <input>
|
||||
|
||||
Options:
|
||||
-i Specify file input, otherwise use last argument as input file.
|
||||
If no input file is specified, read from stdin.
|
||||
-o Specify file output. If none is specified, write to stdout.
|
||||
|
||||
-smartypants Use "smart" typograhic punctuation for things like
|
||||
quotes and dashes.
|
||||
-fractions Traslate fraction like to suitable HTML elements
|
||||
`
|
||||
|
||||
func main() {
|
||||
flag.Usage = func() {
|
||||
fmt.Fprint(os.Stderr, fmt.Sprintf(usage))
|
||||
}
|
||||
flag.Parse()
|
||||
// read
|
||||
var reader *bufio.Reader
|
||||
if *input != "" {
|
||||
file, err := os.Open(*input)
|
||||
if err != nil {
|
||||
usageAndExit(fmt.Sprintf("Error to open file input: %s.", *input))
|
||||
}
|
||||
defer file.Close()
|
||||
reader = bufio.NewReader(file)
|
||||
} else {
|
||||
stat, err := os.Stdin.Stat()
|
||||
if err != nil || (stat.Mode()&os.ModeCharDevice) != 0 {
|
||||
usageAndExit("")
|
||||
}
|
||||
reader = bufio.NewReader(os.Stdin)
|
||||
}
|
||||
// collect data
|
||||
var data string
|
||||
for {
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
usageAndExit("failed to reading input.")
|
||||
}
|
||||
data += line
|
||||
}
|
||||
// write
|
||||
var (
|
||||
err error
|
||||
file = os.Stdout
|
||||
)
|
||||
if *output != "" {
|
||||
if file, err = os.Create(*output); err != nil {
|
||||
usageAndExit("error to create the wanted output file.")
|
||||
}
|
||||
}
|
||||
// mark rendering
|
||||
opts := mark.DefaultOptions()
|
||||
opts.Smartypants = *smarty
|
||||
opts.Fractions = *fractions
|
||||
m := mark.New(data, opts)
|
||||
if _, err := file.WriteString(m.Render()); err != nil {
|
||||
usageAndExit(fmt.Sprintf("error writing output to: %s.", file.Name()))
|
||||
}
|
||||
}
|
||||
|
||||
func usageAndExit(msg string) {
|
||||
if msg != "" {
|
||||
fmt.Fprintf(os.Stderr, msg)
|
||||
fmt.Fprintf(os.Stderr, "\n\n")
|
||||
}
|
||||
flag.Usage()
|
||||
fmt.Fprintf(os.Stderr, "\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
92
vendor/github.com/a8m/mark/grammar.go
generated
vendored
Normal file
92
vendor/github.com/a8m/mark/grammar.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
package mark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// Block Grammar
|
||||
var (
|
||||
reHr = regexp.MustCompile(`^(?:(?:\* *){3,}|(?:_ *){3,}|(?:- *){3,}) *(?:\n+|$)`)
|
||||
reHeading = regexp.MustCompile(`^ *(#{1,6})(?: +#*| +([^\n]*?)|)(?: +#*|) *(?:\n|$)`)
|
||||
reLHeading = regexp.MustCompile(`^([^\n]+?) *\n {0,3}(=|-){1,} *(?:\n+|$)`)
|
||||
reBlockQuote = regexp.MustCompile(`^ *>[^\n]*(\n[^\n]+)*\n*`)
|
||||
reDefLink = regexp.MustCompile(`(?s)^ *\[([^\]]+)\]: *\n? *<?([^\s>]+)>?(?: *\n? *["'(](.+?)['")])? *(?:\n+|$)`)
|
||||
reSpaceGen = func(i int) *regexp.Regexp {
|
||||
return regexp.MustCompile(fmt.Sprintf(`(?m)^ {1,%d}`, i))
|
||||
}
|
||||
)
|
||||
|
||||
var reList = struct {
|
||||
item, marker, loose *regexp.Regexp
|
||||
scanLine, scanNewLine func(src string) string
|
||||
}{
|
||||
regexp.MustCompile(`^( *)(?:[*+-]|\d{1,9}\.) (.*)(?:\n|)`),
|
||||
regexp.MustCompile(`^ *([*+-]|\d+\.) +`),
|
||||
regexp.MustCompile(`(?m)\n\n(.*)`),
|
||||
regexp.MustCompile(`^(.*)(?:\n|)`).FindString,
|
||||
regexp.MustCompile(`^\n{1,}`).FindString,
|
||||
}
|
||||
|
||||
var reCodeBlock = struct {
|
||||
*regexp.Regexp
|
||||
trim func(src, repl string) string
|
||||
}{
|
||||
regexp.MustCompile(`^( {4}[^\n]+(?: *\n)*)+`),
|
||||
regexp.MustCompile("(?m)^( {0,4})").ReplaceAllLiteralString,
|
||||
}
|
||||
|
||||
var reGfmCode = struct {
|
||||
*regexp.Regexp
|
||||
endGen func(end string, i int) *regexp.Regexp
|
||||
}{
|
||||
regexp.MustCompile("^( {0,3})([`~]{3,}) *(\\S*)?(?:.*)"),
|
||||
func(end string, i int) *regexp.Regexp {
|
||||
return regexp.MustCompile(fmt.Sprintf(`(?s)(.*?)(?:((?m)^ {0,3}%s{%d,} *$)|$)`, end, i))
|
||||
},
|
||||
}
|
||||
|
||||
var reTable = struct {
|
||||
item, itemLp *regexp.Regexp
|
||||
split func(s string, n int) []string
|
||||
trim func(src, repl string) string
|
||||
}{
|
||||
regexp.MustCompile(`^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*`),
|
||||
regexp.MustCompile(`(^ *\|.+)\n( *\| *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*`),
|
||||
regexp.MustCompile(` *\| *`).Split,
|
||||
regexp.MustCompile(`^ *\| *| *\| *$`).ReplaceAllString,
|
||||
}
|
||||
|
||||
var reHTML = struct {
|
||||
CDATA_OPEN, CDATA_CLOSE string
|
||||
item, comment, tag, span *regexp.Regexp
|
||||
endTagGen func(tag string) *regexp.Regexp
|
||||
}{
|
||||
`![CDATA[`,
|
||||
"?\\]\\]",
|
||||
regexp.MustCompile(`^<(\w+|!\[CDATA\[)(?:"[^"]*"|'[^']*'|[^'">])*?>`),
|
||||
regexp.MustCompile(`(?sm)<!--.*?-->`),
|
||||
regexp.MustCompile(`^<!--.*?-->|^<\/?\w+(?:"[^"]*"|'[^']*'|[^'">])*?>`),
|
||||
// TODO: Add all span-tags and move to config.
|
||||
regexp.MustCompile(`^(a|em|strong|small|s|q|data|time|code|sub|sup|i|b|u|span|br|del|img)$`),
|
||||
func(tag string) *regexp.Regexp {
|
||||
return regexp.MustCompile(fmt.Sprintf(`(?s)(.+?)<\/%s> *`, tag))
|
||||
},
|
||||
}
|
||||
|
||||
// Inline Grammar
|
||||
var (
|
||||
reBr = regexp.MustCompile(`^(?: {2,}|\\)\n`)
|
||||
reLinkText = `(?:\[[^\]]*\]|[^\[\]]|\])*`
|
||||
reLinkHref = `\s*<?(.*?)>?(?:\s+['"\(](.*?)['"\)])?\s*`
|
||||
reGfmLink = regexp.MustCompile(`^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])`)
|
||||
reLink = regexp.MustCompile(fmt.Sprintf(`(?s)^!?\[(%s)\]\(%s\)`, reLinkText, reLinkHref))
|
||||
reAutoLink = regexp.MustCompile(`^<([^ >]+(@|:\/)[^ >]+)>`)
|
||||
reRefLink = regexp.MustCompile(`^!?\[((?:\[[^\]]*\]|[^\[\]]|\])*)\](?:\s*\[([^\]]*)\])?`)
|
||||
reImage = regexp.MustCompile(fmt.Sprintf(`(?s)^!?\[(%s)\]\(%s\)`, reLinkText, reLinkHref))
|
||||
reCode = regexp.MustCompile("(?s)^`{1,2}\\s*(.*?[^`])\\s*`{1,2}")
|
||||
reStrike = regexp.MustCompile(`(?s)^~{2}(.+?)~{2}`)
|
||||
reEmphasise = `(?s)^_{%[1]d}(\S.*?_*)_{%[1]d}|^\*{%[1]d}(\S.*?\**)\*{%[1]d}`
|
||||
reItalic = regexp.MustCompile(fmt.Sprintf(reEmphasise, 1))
|
||||
reStrong = regexp.MustCompile(fmt.Sprintf(reEmphasise, 2))
|
||||
)
|
||||
568
vendor/github.com/a8m/mark/lexer.go
generated
vendored
Normal file
568
vendor/github.com/a8m/mark/lexer.go
generated
vendored
Normal file
@@ -0,0 +1,568 @@
|
||||
package mark
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// type position
|
||||
type Pos int
|
||||
|
||||
// itemType identifies the type of lex items.
|
||||
type itemType int
|
||||
|
||||
// Item represent a token or text string returned from the scanner
|
||||
type item struct {
|
||||
typ itemType // The type of this item.
|
||||
pos Pos // The starting position, in bytes, of this item in the input string.
|
||||
val string // The value of this item.
|
||||
}
|
||||
|
||||
const eof = -1 // Zero value so closed channel delivers EOF
|
||||
|
||||
const (
|
||||
itemError itemType = iota // Error occurred; value is text of error
|
||||
itemEOF
|
||||
itemNewLine
|
||||
itemHTML
|
||||
itemHeading
|
||||
itemLHeading
|
||||
itemBlockQuote
|
||||
itemList
|
||||
itemListItem
|
||||
itemLooseItem
|
||||
itemCodeBlock
|
||||
itemGfmCodeBlock
|
||||
itemHr
|
||||
itemTable
|
||||
itemLpTable
|
||||
itemTableRow
|
||||
itemTableCell
|
||||
itemStrong
|
||||
itemItalic
|
||||
itemStrike
|
||||
itemCode
|
||||
itemLink
|
||||
itemDefLink
|
||||
itemRefLink
|
||||
itemAutoLink
|
||||
itemGfmLink
|
||||
itemImage
|
||||
itemRefImage
|
||||
itemText
|
||||
itemBr
|
||||
itemPipe
|
||||
itemIndent
|
||||
)
|
||||
|
||||
// stateFn represents the state of the scanner as a function that returns the next state.
|
||||
type stateFn func(*lexer) stateFn
|
||||
|
||||
// Lexer interface, used to composed it inside the parser
|
||||
type Lexer interface {
|
||||
nextItem() item
|
||||
}
|
||||
|
||||
// lexer holds the state of the scanner.
|
||||
type lexer struct {
|
||||
input string // the string being scanned
|
||||
state stateFn // the next lexing function to enter
|
||||
pos Pos // current position in the input
|
||||
start Pos // start position of this item
|
||||
width Pos // width of last rune read from input
|
||||
lastPos Pos // position of most recent item returned by nextItem
|
||||
items chan item // channel of scanned items
|
||||
}
|
||||
|
||||
// lex creates a new lexer for the input string.
|
||||
func lex(input string) *lexer {
|
||||
l := &lexer{
|
||||
input: input,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.run()
|
||||
return l
|
||||
}
|
||||
|
||||
// lexInline create a new lexer for one phase lexing(inline blocks).
|
||||
func lexInline(input string) *lexer {
|
||||
l := &lexer{
|
||||
input: input,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.lexInline()
|
||||
return l
|
||||
}
|
||||
|
||||
// run runs the state machine for the lexer.
|
||||
func (l *lexer) run() {
|
||||
for l.state = lexAny; l.state != nil; {
|
||||
l.state = l.state(l)
|
||||
}
|
||||
close(l.items)
|
||||
}
|
||||
|
||||
// next return the next rune in the input
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.width = Pos(w)
|
||||
l.pos += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
// lexAny scanner is kind of forwarder, it get the current char in the text
|
||||
// and forward it to the appropriate scanner based on some conditions.
|
||||
func lexAny(l *lexer) stateFn {
|
||||
switch r := l.peek(); r {
|
||||
case '*', '-', '_':
|
||||
return lexHr
|
||||
case '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
return lexList
|
||||
case '<':
|
||||
return lexHTML
|
||||
case '>':
|
||||
return lexBlockQuote
|
||||
case '[':
|
||||
return lexDefLink
|
||||
case '#':
|
||||
return lexHeading
|
||||
case '`', '~':
|
||||
return lexGfmCode
|
||||
case ' ':
|
||||
if reCodeBlock.MatchString(l.input[l.pos:]) {
|
||||
return lexCode
|
||||
} else if reGfmCode.MatchString(l.input[l.pos:]) {
|
||||
return lexGfmCode
|
||||
}
|
||||
// Keep moving forward until we get all the indentation size
|
||||
for ; r == l.peek(); r = l.next() {
|
||||
}
|
||||
l.emit(itemIndent)
|
||||
return lexAny
|
||||
case '|':
|
||||
if m := reTable.itemLp.MatchString(l.input[l.pos:]); m {
|
||||
l.emit(itemLpTable)
|
||||
return lexTable
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
if m := reTable.item.MatchString(l.input[l.pos:]); m {
|
||||
l.emit(itemTable)
|
||||
return lexTable
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
}
|
||||
|
||||
// lexHeading test if the current text position is an heading item.
|
||||
// is so, it will emit an item and return back to lenAny function
|
||||
// else, lex it as a simple text value
|
||||
func lexHeading(l *lexer) stateFn {
|
||||
if m := reHeading.FindString(l.input[l.pos:]); m != "" {
|
||||
l.pos += Pos(len(m))
|
||||
l.emit(itemHeading)
|
||||
return lexAny
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexHr test if the current text position is an horizontal rules item.
|
||||
// is so, it will emit an horizontal rule item and return back to lenAny function
|
||||
// else, forward it to lexList function
|
||||
func lexHr(l *lexer) stateFn {
|
||||
if match := reHr.FindString(l.input[l.pos:]); match != "" {
|
||||
l.pos += Pos(len(match))
|
||||
l.emit(itemHr)
|
||||
return lexAny
|
||||
}
|
||||
return lexList
|
||||
}
|
||||
|
||||
// lexGfmCode test if the current text position is start of GFM code-block item.
|
||||
// if so, it will generate regexp based on the fence type[`~] and it length.
|
||||
// it scan until the end, and then emit the code-block item and return back to the
|
||||
// lenAny forwarder.
|
||||
// else, lex it as a simple inline text.
|
||||
func lexGfmCode(l *lexer) stateFn {
|
||||
if match := reGfmCode.FindStringSubmatch(l.input[l.pos:]); len(match) != 0 {
|
||||
l.pos += Pos(len(match[0]))
|
||||
fence := match[2]
|
||||
// Generate Regexp based on fence type[`~] and length
|
||||
reGfmEnd := reGfmCode.endGen(fence[0:1], len(fence))
|
||||
infoContainer := reGfmEnd.FindStringSubmatch(l.input[l.pos:])
|
||||
l.pos += Pos(len(infoContainer[0]))
|
||||
infoString := infoContainer[1]
|
||||
// Remove leading and trailing spaces
|
||||
if indent := len(match[1]); indent > 0 {
|
||||
reSpace := reSpaceGen(indent)
|
||||
infoString = reSpace.ReplaceAllString(infoString, "")
|
||||
}
|
||||
l.emit(itemGfmCodeBlock, match[0]+infoString)
|
||||
return lexAny
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexCode scans code block.
|
||||
func lexCode(l *lexer) stateFn {
|
||||
match := reCodeBlock.FindString(l.input[l.pos:])
|
||||
l.pos += Pos(len(match))
|
||||
l.emit(itemCodeBlock)
|
||||
return lexAny
|
||||
}
|
||||
|
||||
// lexText scans until end-of-line(\n)
|
||||
func lexText(l *lexer) stateFn {
|
||||
// Drain text before emitting
|
||||
emit := func(item itemType, pos Pos) {
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
l.pos += pos
|
||||
l.emit(item)
|
||||
}
|
||||
Loop:
|
||||
for {
|
||||
switch r := l.peek(); r {
|
||||
case eof:
|
||||
emit(itemEOF, Pos(0))
|
||||
break Loop
|
||||
case '\n':
|
||||
// CM 4.4: An indented code block cannot interrupt a paragraph.
|
||||
if l.pos > l.start && strings.HasPrefix(l.input[l.pos+1:], " ") {
|
||||
l.next()
|
||||
continue
|
||||
}
|
||||
emit(itemNewLine, l.width)
|
||||
break Loop
|
||||
default:
|
||||
// Test for Setext-style headers
|
||||
if m := reLHeading.FindString(l.input[l.pos:]); m != "" {
|
||||
emit(itemLHeading, Pos(len(m)))
|
||||
break Loop
|
||||
}
|
||||
l.next()
|
||||
}
|
||||
}
|
||||
return lexAny
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (l *lexer) backup() {
|
||||
l.pos -= l.width
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// emit passes an item back to the client.
|
||||
func (l *lexer) emit(t itemType, s ...string) {
|
||||
if len(s) == 0 {
|
||||
s = append(s, l.input[l.start:l.pos])
|
||||
}
|
||||
l.items <- item{t, l.start, s[0]}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// lexItem return the next item token, called by the parser.
|
||||
func (l *lexer) nextItem() item {
|
||||
item := <-l.items
|
||||
l.lastPos = l.pos
|
||||
return item
|
||||
}
|
||||
|
||||
// One phase lexing(inline reason)
|
||||
func (l *lexer) lexInline() {
|
||||
escape := regexp.MustCompile("^\\\\([\\`*{}\\[\\]()#+\\-.!_>~|])")
|
||||
// Drain text before emitting
|
||||
emit := func(item itemType, pos int) {
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
l.pos += Pos(pos)
|
||||
l.emit(item)
|
||||
}
|
||||
Loop:
|
||||
for {
|
||||
switch r := l.peek(); r {
|
||||
case eof:
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
break Loop
|
||||
// backslash escaping
|
||||
case '\\':
|
||||
if m := escape.FindStringSubmatch(l.input[l.pos:]); len(m) != 0 {
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
l.pos += Pos(len(m[0]))
|
||||
l.emit(itemText, m[1])
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case ' ':
|
||||
if m := reBr.FindString(l.input[l.pos:]); m != "" {
|
||||
// pos - length of new-line
|
||||
emit(itemBr, len(m))
|
||||
break
|
||||
}
|
||||
l.next()
|
||||
case '_', '*', '~', '`':
|
||||
input := l.input[l.pos:]
|
||||
// Strong
|
||||
if m := reStrong.FindString(input); m != "" {
|
||||
emit(itemStrong, len(m))
|
||||
break
|
||||
}
|
||||
// Italic
|
||||
if m := reItalic.FindString(input); m != "" {
|
||||
emit(itemItalic, len(m))
|
||||
break
|
||||
}
|
||||
// Strike
|
||||
if m := reStrike.FindString(input); m != "" {
|
||||
emit(itemStrike, len(m))
|
||||
break
|
||||
}
|
||||
// InlineCode
|
||||
if m := reCode.FindString(input); m != "" {
|
||||
emit(itemCode, len(m))
|
||||
break
|
||||
}
|
||||
l.next()
|
||||
// itemLink, itemImage, itemRefLink, itemRefImage
|
||||
case '[', '!':
|
||||
input := l.input[l.pos:]
|
||||
if m := reLink.FindString(input); m != "" {
|
||||
pos := len(m)
|
||||
if r == '[' {
|
||||
emit(itemLink, pos)
|
||||
} else {
|
||||
emit(itemImage, pos)
|
||||
}
|
||||
break
|
||||
}
|
||||
if m := reRefLink.FindString(input); m != "" {
|
||||
pos := len(m)
|
||||
if r == '[' {
|
||||
emit(itemRefLink, pos)
|
||||
} else {
|
||||
emit(itemRefImage, pos)
|
||||
}
|
||||
break
|
||||
}
|
||||
l.next()
|
||||
// itemAutoLink, htmlBlock
|
||||
case '<':
|
||||
if m := reAutoLink.FindString(l.input[l.pos:]); m != "" {
|
||||
emit(itemAutoLink, len(m))
|
||||
break
|
||||
}
|
||||
if match, res := l.matchHTML(l.input[l.pos:]); match {
|
||||
emit(itemHTML, len(res))
|
||||
break
|
||||
}
|
||||
l.next()
|
||||
default:
|
||||
if m := reGfmLink.FindString(l.input[l.pos:]); m != "" {
|
||||
emit(itemGfmLink, len(m))
|
||||
break
|
||||
}
|
||||
l.next()
|
||||
}
|
||||
}
|
||||
close(l.items)
|
||||
}
|
||||
|
||||
// lexHTML.
|
||||
func lexHTML(l *lexer) stateFn {
|
||||
if match, res := l.matchHTML(l.input[l.pos:]); match {
|
||||
l.pos += Pos(len(res))
|
||||
l.emit(itemHTML)
|
||||
return lexAny
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// Test if the given input is match the HTML pattern(blocks only)
|
||||
func (l *lexer) matchHTML(input string) (bool, string) {
|
||||
if m := reHTML.comment.FindString(input); m != "" {
|
||||
return true, m
|
||||
}
|
||||
if m := reHTML.item.FindStringSubmatch(input); len(m) != 0 {
|
||||
el, name := m[0], m[1]
|
||||
// if name is a span... is a text
|
||||
if reHTML.span.MatchString(name) {
|
||||
return false, ""
|
||||
}
|
||||
// if it's a self-closed html element, but not a itemAutoLink
|
||||
if strings.HasSuffix(el, "/>") && !reAutoLink.MatchString(el) {
|
||||
return true, el
|
||||
}
|
||||
if name == reHTML.CDATA_OPEN {
|
||||
name = reHTML.CDATA_CLOSE
|
||||
}
|
||||
reEndTag := reHTML.endTagGen(name)
|
||||
if m := reEndTag.FindString(input); m != "" {
|
||||
return true, m
|
||||
}
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// lexDefLink scans link definition
|
||||
func lexDefLink(l *lexer) stateFn {
|
||||
if m := reDefLink.FindString(l.input[l.pos:]); m != "" {
|
||||
l.pos += Pos(len(m))
|
||||
l.emit(itemDefLink)
|
||||
return lexAny
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexList scans ordered and unordered lists.
|
||||
func lexList(l *lexer) stateFn {
|
||||
match, items := l.matchList(l.input[l.pos:])
|
||||
if !match {
|
||||
return lexText
|
||||
}
|
||||
var space int
|
||||
var typ itemType
|
||||
for i, item := range items {
|
||||
// Emit itemList on the first loop
|
||||
if i == 0 {
|
||||
l.emit(itemList, reList.marker.FindStringSubmatch(item)[1])
|
||||
}
|
||||
// Initialize each loop
|
||||
typ = itemListItem
|
||||
space = len(item)
|
||||
l.pos += Pos(space)
|
||||
item = reList.marker.ReplaceAllString(item, "")
|
||||
// Indented
|
||||
if strings.Contains(item, "\n ") {
|
||||
space -= len(item)
|
||||
reSpace := reSpaceGen(space)
|
||||
item = reSpace.ReplaceAllString(item, "")
|
||||
}
|
||||
// If current is loose
|
||||
for _, l := range reList.loose.FindAllString(item, -1) {
|
||||
if len(strings.TrimSpace(l)) > 0 || i != len(items)-1 {
|
||||
typ = itemLooseItem
|
||||
break
|
||||
}
|
||||
}
|
||||
// or previous
|
||||
if typ != itemLooseItem && i > 0 && strings.HasSuffix(items[i-1], "\n\n") {
|
||||
typ = itemLooseItem
|
||||
}
|
||||
l.emit(typ, strings.TrimSpace(item))
|
||||
}
|
||||
return lexAny
|
||||
}
|
||||
|
||||
func (l *lexer) matchList(input string) (bool, []string) {
|
||||
var res []string
|
||||
reItem := reList.item
|
||||
if !reItem.MatchString(input) {
|
||||
return false, res
|
||||
}
|
||||
// First item
|
||||
m := reItem.FindStringSubmatch(input)
|
||||
item, depth := m[0], len(m[1])
|
||||
input = input[len(item):]
|
||||
// Loop over the input
|
||||
for len(input) > 0 {
|
||||
// Count new-lines('\n')
|
||||
if m := reList.scanNewLine(input); m != "" {
|
||||
item += m
|
||||
input = input[len(m):]
|
||||
if len(m) >= 2 || !reItem.MatchString(input) && !strings.HasPrefix(input, " ") {
|
||||
break
|
||||
}
|
||||
}
|
||||
// DefLink or hr
|
||||
if reDefLink.MatchString(input) || reHr.MatchString(input) {
|
||||
break
|
||||
}
|
||||
// It's list in the same depth
|
||||
if m := reItem.FindStringSubmatch(input); len(m) > 0 && len(m[1]) == depth {
|
||||
if item != "" {
|
||||
res = append(res, item)
|
||||
}
|
||||
item = m[0]
|
||||
input = input[len(item):]
|
||||
} else {
|
||||
m := reList.scanLine(input)
|
||||
item += m
|
||||
input = input[len(m):]
|
||||
}
|
||||
}
|
||||
// Drain res
|
||||
if item != "" {
|
||||
res = append(res, item)
|
||||
}
|
||||
return true, res
|
||||
}
|
||||
|
||||
// Test if the given input match blockquote
|
||||
func (l *lexer) matchBlockQuote(input string) (bool, string) {
|
||||
match := reBlockQuote.FindString(input)
|
||||
if match == "" {
|
||||
return false, match
|
||||
}
|
||||
lines := strings.Split(match, "\n")
|
||||
for i, line := range lines {
|
||||
// if line is a link-definition or horizontal role, we cut the match until this point
|
||||
if reDefLink.MatchString(line) || reHr.MatchString(line) {
|
||||
match = strings.Join(lines[0:i], "\n")
|
||||
break
|
||||
}
|
||||
}
|
||||
return true, match
|
||||
}
|
||||
|
||||
// lexBlockQuote
|
||||
func lexBlockQuote(l *lexer) stateFn {
|
||||
if match, res := l.matchBlockQuote(l.input[l.pos:]); match {
|
||||
l.pos += Pos(len(res))
|
||||
l.emit(itemBlockQuote)
|
||||
return lexAny
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexTable
|
||||
func lexTable(l *lexer) stateFn {
|
||||
re := reTable.item
|
||||
if l.peek() == '|' {
|
||||
re = reTable.itemLp
|
||||
}
|
||||
table := re.FindStringSubmatch(l.input[l.pos:])
|
||||
l.pos += Pos(len(table[0]))
|
||||
l.start = l.pos
|
||||
// Ignore the first match, and flat all rows(by splitting \n)
|
||||
rows := append(table[1:3], strings.Split(table[3], "\n")...)
|
||||
for _, row := range rows {
|
||||
if row == "" {
|
||||
continue
|
||||
}
|
||||
l.emit(itemTableRow)
|
||||
rawCells := reTable.trim(row, "")
|
||||
cells := reTable.split(rawCells, -1)
|
||||
// Emit cells in the current row
|
||||
for _, cell := range cells {
|
||||
l.emit(itemTableCell, cell)
|
||||
}
|
||||
}
|
||||
return lexAny
|
||||
}
|
||||
60
vendor/github.com/a8m/mark/mark.go
generated
vendored
Normal file
60
vendor/github.com/a8m/mark/mark.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
package mark
|
||||
|
||||
import "strings"
|
||||
|
||||
// Mark
|
||||
type Mark struct {
|
||||
*parse
|
||||
Input string
|
||||
}
|
||||
|
||||
// Mark options used to configure your Mark object
|
||||
// set `Smartypants` and `Fractions` to true to enable
|
||||
// smartypants and smartfractions rendering.
|
||||
type Options struct {
|
||||
Gfm bool
|
||||
Tables bool
|
||||
Smartypants bool
|
||||
Fractions bool
|
||||
}
|
||||
|
||||
// DefaultOptions return an options struct with default configuration
|
||||
// it's means that only Gfm, and Tables set to true.
|
||||
func DefaultOptions() *Options {
|
||||
return &Options{
|
||||
Gfm: true,
|
||||
Tables: true,
|
||||
}
|
||||
}
|
||||
|
||||
// New return a new Mark
|
||||
func New(input string, opts *Options) *Mark {
|
||||
// Preprocessing
|
||||
input = strings.Replace(input, "\t", " ", -1)
|
||||
if opts == nil {
|
||||
opts = DefaultOptions()
|
||||
}
|
||||
return &Mark{
|
||||
Input: input,
|
||||
parse: newParse(input, opts),
|
||||
}
|
||||
}
|
||||
|
||||
// parse and render input
|
||||
func (m *Mark) Render() string {
|
||||
m.parse.parse()
|
||||
m.render()
|
||||
return m.output
|
||||
}
|
||||
|
||||
// AddRenderFn let you pass NodeType, and RenderFn function
|
||||
// and override the default Node rendering
|
||||
func (m *Mark) AddRenderFn(typ NodeType, fn RenderFn) {
|
||||
m.renderFn[typ] = fn
|
||||
}
|
||||
|
||||
// Staic render function
|
||||
func Render(input string) string {
|
||||
m := New(input, nil)
|
||||
return m.Render()
|
||||
}
|
||||
614
vendor/github.com/a8m/mark/node.go
generated
vendored
Normal file
614
vendor/github.com/a8m/mark/node.go
generated
vendored
Normal file
@@ -0,0 +1,614 @@
|
||||
package mark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A Node is an element in the parse tree.
|
||||
type Node interface {
|
||||
Type() NodeType
|
||||
Render() string
|
||||
}
|
||||
|
||||
// NodeType identifies the type of a parse tree node.
|
||||
type NodeType int
|
||||
|
||||
// Type returns itself and provides an easy default implementation
|
||||
// for embedding in a Node. Embedded in all non-trivial Nodes.
|
||||
func (t NodeType) Type() NodeType {
|
||||
return t
|
||||
}
|
||||
|
||||
// Render function, used for overriding default rendering.
|
||||
type RenderFn func(Node) string
|
||||
|
||||
const (
|
||||
NodeText NodeType = iota // A plain text
|
||||
NodeParagraph // A Paragraph
|
||||
NodeEmphasis // An emphasis(strong, em, ...)
|
||||
NodeHeading // A heading (h1, h2, ...)
|
||||
NodeBr // A link break
|
||||
NodeHr // A horizontal rule
|
||||
NodeImage // An image
|
||||
NodeRefImage // A image reference
|
||||
NodeList // A list of ListItems
|
||||
NodeListItem // A list item node
|
||||
NodeLink // A link(href)
|
||||
NodeRefLink // A link reference
|
||||
NodeDefLink // A link definition
|
||||
NodeTable // A table of NodeRows
|
||||
NodeRow // A row of NodeCells
|
||||
NodeCell // A table-cell(td)
|
||||
NodeCode // A code block(wrapped with pre)
|
||||
NodeBlockQuote // A blockquote
|
||||
NodeHTML // An inline HTML
|
||||
NodeCheckbox // A checkbox
|
||||
)
|
||||
|
||||
// ParagraphNode hold simple paragraph node contains text
|
||||
// that may be emphasis.
|
||||
type ParagraphNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Render returns the html representation of ParagraphNode
|
||||
func (n *ParagraphNode) Render() (s string) {
|
||||
for _, node := range n.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
return wrap("p", s)
|
||||
}
|
||||
|
||||
func (p *parse) newParagraph(pos Pos) *ParagraphNode {
|
||||
return &ParagraphNode{NodeType: NodeParagraph, Pos: pos}
|
||||
}
|
||||
|
||||
// TextNode holds plain text.
|
||||
type TextNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Text string
|
||||
}
|
||||
|
||||
// Render returns the string representation of TexNode
|
||||
func (n *TextNode) Render() string {
|
||||
return n.Text
|
||||
}
|
||||
|
||||
func (p *parse) newText(pos Pos, text string) *TextNode {
|
||||
return &TextNode{NodeType: NodeText, Pos: pos, Text: p.text(text)}
|
||||
}
|
||||
|
||||
// HTMLNode holds the raw html source.
|
||||
type HTMLNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Src string
|
||||
}
|
||||
|
||||
// Render returns the src of the HTMLNode
|
||||
func (n *HTMLNode) Render() string {
|
||||
return n.Src
|
||||
}
|
||||
|
||||
func (p *parse) newHTML(pos Pos, src string) *HTMLNode {
|
||||
return &HTMLNode{NodeType: NodeHTML, Pos: pos, Src: src}
|
||||
}
|
||||
|
||||
// HrNode represents horizontal rule
|
||||
type HrNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
}
|
||||
|
||||
// Render returns the html representation of hr.
|
||||
func (n *HrNode) Render() string {
|
||||
return "<hr>"
|
||||
}
|
||||
|
||||
func (p *parse) newHr(pos Pos) *HrNode {
|
||||
return &HrNode{NodeType: NodeHr, Pos: pos}
|
||||
}
|
||||
|
||||
// BrNode represents a link-break element.
|
||||
type BrNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
}
|
||||
|
||||
// Render returns the html representation of line-break.
|
||||
func (n *BrNode) Render() string {
|
||||
return "<br>"
|
||||
}
|
||||
|
||||
func (p *parse) newBr(pos Pos) *BrNode {
|
||||
return &BrNode{NodeType: NodeBr, Pos: pos}
|
||||
}
|
||||
|
||||
// EmphasisNode holds plain-text wrapped with style.
|
||||
// (strong, em, del, code)
|
||||
type EmphasisNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Style itemType
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Tag return the tagName based on the Style field.
|
||||
func (n *EmphasisNode) Tag() (s string) {
|
||||
switch n.Style {
|
||||
case itemStrong:
|
||||
s = "strong"
|
||||
case itemItalic:
|
||||
s = "em"
|
||||
case itemStrike:
|
||||
s = "del"
|
||||
case itemCode:
|
||||
s = "code"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Return the html representation of emphasis text.
|
||||
func (n *EmphasisNode) Render() string {
|
||||
var s string
|
||||
for _, node := range n.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
return wrap(n.Tag(), s)
|
||||
}
|
||||
|
||||
func (p *parse) newEmphasis(pos Pos, style itemType) *EmphasisNode {
|
||||
return &EmphasisNode{NodeType: NodeEmphasis, Pos: pos, Style: style}
|
||||
}
|
||||
|
||||
// HeadingNode holds heaing element with specific level(1-6).
|
||||
type HeadingNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Level int
|
||||
Text string
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Render returns the html representation based on heading level.
|
||||
func (n *HeadingNode) Render() (s string) {
|
||||
for _, node := range n.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
re := regexp.MustCompile(`[^\w]+`)
|
||||
id := re.ReplaceAllString(n.Text, "-")
|
||||
// ToLowerCase
|
||||
id = strings.ToLower(id)
|
||||
return fmt.Sprintf("<%[1]s id=\"%s\">%s</%[1]s>", "h"+strconv.Itoa(n.Level), id, s)
|
||||
}
|
||||
|
||||
func (p *parse) newHeading(pos Pos, level int, text string) *HeadingNode {
|
||||
return &HeadingNode{NodeType: NodeHeading, Pos: pos, Level: level, Text: p.text(text)}
|
||||
}
|
||||
|
||||
// Code holds CodeBlock node with specific lang field.
|
||||
type CodeNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Lang, Text string
|
||||
}
|
||||
|
||||
// Return the html representation of codeBlock
|
||||
func (n *CodeNode) Render() string {
|
||||
var attr string
|
||||
if n.Lang != "" {
|
||||
attr = fmt.Sprintf(" class=\"lang-%s\"", n.Lang)
|
||||
}
|
||||
code := fmt.Sprintf("<%[1]s%s>%s</%[1]s>", "code", attr, n.Text)
|
||||
return wrap("pre", code)
|
||||
}
|
||||
|
||||
func (p *parse) newCode(pos Pos, lang, text string) *CodeNode {
|
||||
// DRY: see `escape()` below
|
||||
text = strings.NewReplacer("<", "<", ">", ">", "\"", """, "&", "&").Replace(text)
|
||||
return &CodeNode{NodeType: NodeCode, Pos: pos, Lang: lang, Text: text}
|
||||
}
|
||||
|
||||
// Link holds a tag with optional title
|
||||
type LinkNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Title, Href string
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Return the html representation of link node
|
||||
func (n *LinkNode) Render() (s string) {
|
||||
for _, node := range n.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
attrs := fmt.Sprintf("href=\"%s\"", n.Href)
|
||||
if n.Title != "" {
|
||||
attrs += fmt.Sprintf(" title=\"%s\"", n.Title)
|
||||
}
|
||||
return fmt.Sprintf("<a %s>%s</a>", attrs, s)
|
||||
}
|
||||
|
||||
func (p *parse) newLink(pos Pos, title, href string, nodes ...Node) *LinkNode {
|
||||
return &LinkNode{NodeType: NodeLink, Pos: pos, Title: p.text(title), Href: p.text(href), Nodes: nodes}
|
||||
}
|
||||
|
||||
// RefLink holds link with refrence to link definition
|
||||
type RefNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *parse
|
||||
Text, Ref, Raw string
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// rendering based type
|
||||
func (n *RefNode) Render() string {
|
||||
var node Node
|
||||
ref := strings.ToLower(n.Ref)
|
||||
if l, ok := n.tr.links[ref]; ok {
|
||||
if n.Type() == NodeRefLink {
|
||||
node = n.tr.newLink(n.Pos, l.Title, l.Href, n.Nodes...)
|
||||
} else {
|
||||
node = n.tr.newImage(n.Pos, l.Title, l.Href, n.Text)
|
||||
}
|
||||
} else {
|
||||
node = n.tr.newText(n.Pos, n.Raw)
|
||||
}
|
||||
return node.Render()
|
||||
}
|
||||
|
||||
// newRefLink create new RefLink that suitable for link
|
||||
func (p *parse) newRefLink(typ itemType, pos Pos, raw, ref string, text []Node) *RefNode {
|
||||
return &RefNode{NodeType: NodeRefLink, Pos: pos, tr: p.root(), Raw: raw, Ref: ref, Nodes: text}
|
||||
}
|
||||
|
||||
// newRefImage create new RefLink that suitable for image
|
||||
func (p *parse) newRefImage(typ itemType, pos Pos, raw, ref, text string) *RefNode {
|
||||
return &RefNode{NodeType: NodeRefImage, Pos: pos, tr: p.root(), Raw: raw, Ref: ref, Text: text}
|
||||
}
|
||||
|
||||
// DefLinkNode refresent single reference to link-definition
|
||||
type DefLinkNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Name, Href, Title string
|
||||
}
|
||||
|
||||
// Deflink have no representation(Transparent node)
|
||||
func (n *DefLinkNode) Render() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (p *parse) newDefLink(pos Pos, name, href, title string) *DefLinkNode {
|
||||
return &DefLinkNode{NodeType: NodeLink, Pos: pos, Name: name, Href: href, Title: title}
|
||||
}
|
||||
|
||||
// ImageNode represents an image element with optional alt and title attributes.
|
||||
type ImageNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Title, Src, Alt string
|
||||
}
|
||||
|
||||
// Render returns the html representation on image node
|
||||
func (n *ImageNode) Render() string {
|
||||
attrs := fmt.Sprintf("src=\"%s\" alt=\"%s\"", n.Src, n.Alt)
|
||||
if n.Title != "" {
|
||||
attrs += fmt.Sprintf(" title=\"%s\"", n.Title)
|
||||
}
|
||||
return fmt.Sprintf("<img %s>", attrs)
|
||||
}
|
||||
|
||||
func (p *parse) newImage(pos Pos, title, src, alt string) *ImageNode {
|
||||
return &ImageNode{NodeType: NodeImage, Pos: pos, Title: p.text(title), Src: p.text(src), Alt: p.text(alt)}
|
||||
}
|
||||
|
||||
// ListNode holds list items nodes in ordered or unordered states.
|
||||
type ListNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Ordered bool
|
||||
Items []*ListItemNode
|
||||
}
|
||||
|
||||
func (n *ListNode) append(item *ListItemNode) {
|
||||
n.Items = append(n.Items, item)
|
||||
}
|
||||
|
||||
// Render returns the html representation of orderd(ol) or unordered(ul) list.
|
||||
func (n *ListNode) Render() (s string) {
|
||||
tag := "ul"
|
||||
if n.Ordered {
|
||||
tag = "ol"
|
||||
}
|
||||
for _, item := range n.Items {
|
||||
s += "\n" + item.Render()
|
||||
}
|
||||
s += "\n"
|
||||
return wrap(tag, s)
|
||||
}
|
||||
|
||||
func (p *parse) newList(pos Pos, ordered bool) *ListNode {
|
||||
return &ListNode{NodeType: NodeList, Pos: pos, Ordered: ordered}
|
||||
}
|
||||
|
||||
// ListItem represents single item in ListNode that may contains nested nodes.
|
||||
type ListItemNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
func (l *ListItemNode) append(n Node) {
|
||||
l.Nodes = append(l.Nodes, n)
|
||||
}
|
||||
|
||||
// Render returns the html representation of list-item
|
||||
func (l *ListItemNode) Render() (s string) {
|
||||
for _, node := range l.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
return wrap("li", s)
|
||||
}
|
||||
|
||||
func (p *parse) newListItem(pos Pos) *ListItemNode {
|
||||
return &ListItemNode{NodeType: NodeListItem, Pos: pos}
|
||||
}
|
||||
|
||||
// TableNode represents table element contains head and body
|
||||
type TableNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Rows []*RowNode
|
||||
}
|
||||
|
||||
func (n *TableNode) append(row *RowNode) {
|
||||
n.Rows = append(n.Rows, row)
|
||||
}
|
||||
|
||||
// Render returns the html representation of a table
|
||||
func (n *TableNode) Render() string {
|
||||
var s string
|
||||
for i, row := range n.Rows {
|
||||
s += "\n"
|
||||
switch i {
|
||||
case 0:
|
||||
s += wrap("thead", "\n"+row.Render()+"\n")
|
||||
case 1:
|
||||
s += "<tbody>\n"
|
||||
fallthrough
|
||||
default:
|
||||
s += row.Render()
|
||||
}
|
||||
}
|
||||
s += "\n</tbody>\n"
|
||||
return wrap("table", s)
|
||||
}
|
||||
|
||||
func (p *parse) newTable(pos Pos) *TableNode {
|
||||
return &TableNode{NodeType: NodeTable, Pos: pos}
|
||||
}
|
||||
|
||||
// RowNode represnt tr that holds list of cell-nodes
|
||||
type RowNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Cells []*CellNode
|
||||
}
|
||||
|
||||
func (r *RowNode) append(cell *CellNode) {
|
||||
r.Cells = append(r.Cells, cell)
|
||||
}
|
||||
|
||||
// Render returns the html representation of table-row
|
||||
func (r *RowNode) Render() string {
|
||||
var s string
|
||||
for _, cell := range r.Cells {
|
||||
s += "\n" + cell.Render()
|
||||
}
|
||||
s += "\n"
|
||||
return wrap("tr", s)
|
||||
}
|
||||
|
||||
func (p *parse) newRow(pos Pos) *RowNode {
|
||||
return &RowNode{NodeType: NodeRow, Pos: pos}
|
||||
}
|
||||
|
||||
// AlignType identifies the aligment-type of specfic cell.
|
||||
type AlignType int
|
||||
|
||||
// Align returns itself and provides an easy default implementation
|
||||
// for embedding in a Node.
|
||||
func (t AlignType) Align() AlignType {
|
||||
return t
|
||||
}
|
||||
|
||||
// Alignment
|
||||
const (
|
||||
None AlignType = iota
|
||||
Right
|
||||
Left
|
||||
Center
|
||||
)
|
||||
|
||||
// Cell types
|
||||
const (
|
||||
Header = iota
|
||||
Data
|
||||
)
|
||||
|
||||
// CellNode represents table-data/cell that holds simple text(may be emphasis)
|
||||
// Note: the text in <th> elements are bold and centered by default.
|
||||
type CellNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
AlignType
|
||||
Kind int
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Render returns the html reprenestation of table-cell
|
||||
func (c *CellNode) Render() string {
|
||||
var s string
|
||||
tag := "td"
|
||||
if c.Kind == Header {
|
||||
tag = "th"
|
||||
}
|
||||
for _, node := range c.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
return fmt.Sprintf("<%[1]s%s>%s</%[1]s>", tag, c.Style(), s)
|
||||
}
|
||||
|
||||
// Style return the cell-style based on alignment field
|
||||
func (c *CellNode) Style() string {
|
||||
s := " style=\"text-align:"
|
||||
switch c.Align() {
|
||||
case Right:
|
||||
s += "right\""
|
||||
case Left:
|
||||
s += "left\""
|
||||
case Center:
|
||||
s += "center\""
|
||||
default:
|
||||
s = ""
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *parse) newCell(pos Pos, kind int, align AlignType) *CellNode {
|
||||
return &CellNode{NodeType: NodeCell, Pos: pos, Kind: kind, AlignType: align}
|
||||
}
|
||||
|
||||
// BlockQuote represents block-quote tag.
|
||||
type BlockQuoteNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Nodes []Node
|
||||
}
|
||||
|
||||
// Render returns the html representation of BlockQuote
|
||||
func (n *BlockQuoteNode) Render() string {
|
||||
var s string
|
||||
for _, node := range n.Nodes {
|
||||
s += node.Render()
|
||||
}
|
||||
return wrap("blockquote", s)
|
||||
}
|
||||
|
||||
func (p *parse) newBlockQuote(pos Pos) *BlockQuoteNode {
|
||||
return &BlockQuoteNode{NodeType: NodeBlockQuote, Pos: pos}
|
||||
}
|
||||
|
||||
// CheckboxNode represents checked and unchecked checkbox tag.
|
||||
// Used in task lists.
|
||||
type CheckboxNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
Checked bool
|
||||
}
|
||||
|
||||
// Render returns the html representation of checked and unchecked CheckBox.
|
||||
func (n *CheckboxNode) Render() string {
|
||||
s := "<input type=\"checkbox\""
|
||||
if n.Checked {
|
||||
s += " checked"
|
||||
}
|
||||
return s + ">"
|
||||
}
|
||||
|
||||
func (p *parse) newCheckbox(pos Pos, checked bool) *CheckboxNode {
|
||||
return &CheckboxNode{NodeType: NodeCheckbox, Pos: pos, Checked: checked}
|
||||
}
|
||||
|
||||
// Wrap text with specific tag.
|
||||
func wrap(tag, body string) string {
|
||||
return fmt.Sprintf("<%[1]s>%s</%[1]s>", tag, body)
|
||||
}
|
||||
|
||||
// Group all text configuration in one place(escaping, smartypants, etc..)
|
||||
func (p *parse) text(input string) string {
|
||||
opts := p.root().options
|
||||
if opts.Smartypants {
|
||||
input = smartypants(input)
|
||||
}
|
||||
if opts.Fractions {
|
||||
input = smartyfractions(input)
|
||||
}
|
||||
return escape(input)
|
||||
}
|
||||
|
||||
// Helper escaper
|
||||
func escape(str string) (cpy string) {
|
||||
emp := regexp.MustCompile(`&\w+;`)
|
||||
for i := 0; i < len(str); i++ {
|
||||
switch s := str[i]; s {
|
||||
case '>':
|
||||
cpy += ">"
|
||||
case '"':
|
||||
cpy += """
|
||||
case '\'':
|
||||
cpy += "'"
|
||||
case '<':
|
||||
if res := reHTML.tag.FindString(str[i:]); res != "" {
|
||||
cpy += res
|
||||
i += len(res) - 1
|
||||
} else {
|
||||
cpy += "<"
|
||||
}
|
||||
case '&':
|
||||
if res := emp.FindString(str[i:]); res != "" {
|
||||
cpy += res
|
||||
i += len(res) - 1
|
||||
} else {
|
||||
cpy += "&"
|
||||
}
|
||||
default:
|
||||
cpy += str[i : i+1]
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Smartypants transformation helper, translate from marked.js
|
||||
func smartypants(text string) string {
|
||||
// em-dashes, en-dashes, ellipses
|
||||
re := strings.NewReplacer("---", "\u2014", "--", "\u2013", "...", "\u2026")
|
||||
text = re.Replace(text)
|
||||
// opening singles
|
||||
text = regexp.MustCompile("(^|[-\u2014/(\\[{\"\\s])'").ReplaceAllString(text, "$1\u2018")
|
||||
// closing singles & apostrophes
|
||||
text = strings.Replace(text, "'", "\u2019", -1)
|
||||
// opening doubles
|
||||
text = regexp.MustCompile("(^|[-\u2014/(\\[{\u2018\\s])\"").ReplaceAllString(text, "$1\u201c")
|
||||
// closing doubles
|
||||
text = strings.Replace(text, "\"", "\u201d", -1)
|
||||
return text
|
||||
}
|
||||
|
||||
// Smartyfractions transformation helper.
|
||||
func smartyfractions(text string) string {
|
||||
re := regexp.MustCompile(`(\d+)(/\d+)(/\d+|)`)
|
||||
return re.ReplaceAllStringFunc(text, func(str string) string {
|
||||
var match []string
|
||||
// If it's date like
|
||||
if match = re.FindStringSubmatch(str); match[3] != "" {
|
||||
return str
|
||||
}
|
||||
switch n := match[1] + match[2]; n {
|
||||
case "1/2", "1/3", "2/3", "1/4", "3/4", "1/5", "2/5", "3/5", "4/5",
|
||||
"1/6", "5/6", "1/7", "1/8", "3/8", "5/8", "7/8":
|
||||
return fmt.Sprintf("&frac%s;", strings.Replace(n, "/", "", 1))
|
||||
default:
|
||||
return fmt.Sprintf("<sup>%s</sup>⁄<sub>%s</sub>",
|
||||
match[1], strings.Replace(match[2], "/", "", 1))
|
||||
}
|
||||
})
|
||||
}
|
||||
436
vendor/github.com/a8m/mark/parser.go
generated
vendored
Normal file
436
vendor/github.com/a8m/mark/parser.go
generated
vendored
Normal file
@@ -0,0 +1,436 @@
|
||||
package mark
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// parse holds the state of the parser.
|
||||
type parse struct {
|
||||
Nodes []Node
|
||||
lex Lexer
|
||||
options *Options
|
||||
tr *parse
|
||||
output string
|
||||
peekCount int
|
||||
token [3]item // three-token lookahead for parser
|
||||
links map[string]*DefLinkNode // Deflink parsing, used RefLinks
|
||||
renderFn map[NodeType]RenderFn // Custom overridden fns
|
||||
}
|
||||
|
||||
// Return new parser
|
||||
func newParse(input string, opts *Options) *parse {
|
||||
return &parse{
|
||||
lex: lex(input),
|
||||
options: opts,
|
||||
links: make(map[string]*DefLinkNode),
|
||||
renderFn: make(map[NodeType]RenderFn),
|
||||
}
|
||||
}
|
||||
|
||||
// parse convert the raw text to Nodeparse.
|
||||
func (p *parse) parse() {
|
||||
Loop:
|
||||
for {
|
||||
var n Node
|
||||
switch t := p.peek(); t.typ {
|
||||
case itemEOF, itemError:
|
||||
break Loop
|
||||
case itemNewLine:
|
||||
p.next()
|
||||
case itemHr:
|
||||
n = p.newHr(p.next().pos)
|
||||
case itemHTML:
|
||||
t = p.next()
|
||||
n = p.newHTML(t.pos, t.val)
|
||||
case itemDefLink:
|
||||
n = p.parseDefLink()
|
||||
case itemHeading, itemLHeading:
|
||||
n = p.parseHeading()
|
||||
case itemCodeBlock, itemGfmCodeBlock:
|
||||
n = p.parseCodeBlock()
|
||||
case itemList:
|
||||
n = p.parseList()
|
||||
case itemTable, itemLpTable:
|
||||
n = p.parseTable()
|
||||
case itemBlockQuote:
|
||||
n = p.parseBlockQuote()
|
||||
case itemIndent:
|
||||
space := p.next()
|
||||
// If it isn't followed by itemText
|
||||
if p.peek().typ != itemText {
|
||||
continue
|
||||
}
|
||||
p.backup2(space)
|
||||
fallthrough
|
||||
// itemText
|
||||
default:
|
||||
tmp := p.newParagraph(t.pos)
|
||||
tmp.Nodes = p.parseText(p.next().val + p.scanLines())
|
||||
n = tmp
|
||||
}
|
||||
if n != nil {
|
||||
p.append(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Root getter
|
||||
func (p *parse) root() *parse {
|
||||
if p.tr == nil {
|
||||
return p
|
||||
}
|
||||
return p.tr.root()
|
||||
}
|
||||
|
||||
// Render parse nodes to the wanted output
|
||||
func (p *parse) render() {
|
||||
var output string
|
||||
for i, node := range p.Nodes {
|
||||
// If there's a custom render function, use it instead.
|
||||
if fn, ok := p.renderFn[node.Type()]; ok {
|
||||
output = fn(node)
|
||||
} else {
|
||||
output = node.Render()
|
||||
}
|
||||
p.output += output
|
||||
if output != "" && i != len(p.Nodes)-1 {
|
||||
p.output += "\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// append adds a finished block-level node to the parser's node list.
func (p *parse) append(n Node) {
	p.Nodes = append(p.Nodes, n)
}
|
||||
|
||||
// next returns the next token, consuming it. Tokens pushed back with
// backup/backup2 are replayed from the lookahead buffer before new
// tokens are pulled from the lexer.
func (p *parse) next() item {
	if p.peekCount > 0 {
		p.peekCount--
	} else {
		p.token[0] = p.lex.nextItem()
	}
	return p.token[p.peekCount]
}
|
||||
|
||||
// peek returns but does not consume the next token. If the lookahead
// buffer is empty it pulls one token from the lexer and keeps it
// buffered for the next call to next.
func (p *parse) peek() item {
	if p.peekCount > 0 {
		return p.token[p.peekCount-1]
	}
	p.peekCount = 1
	p.token[0] = p.lex.nextItem()
	return p.token[0]
}
|
||||
|
||||
// backup backs the input stream up one token.
func (p *parse) backup() {
	p.peekCount++
}
|
||||
|
||||
// backup2 backs the input stream up two tokens.
// The zeroth token is already in the buffer; t1 becomes the one
// before it.
func (p *parse) backup2(t1 item) {
	p.token[1] = t1
	p.peekCount = 2
}
|
||||
|
||||
// parseText lexes an inline text fragment and converts the resulting
// tokens (line breaks, emphasis, links, images, raw HTML, plain text)
// into a slice of inline nodes.
func (p *parse) parseText(input string) (nodes []Node) {
	// Trim spaces at line boundaries, but keep runs of spaces that
	// form an explicit hard line-break (matched by reBr).
	// NOTE(review): this regexp is recompiled on every call; hoisting
	// it to a package-level variable would avoid the repeated cost.
	input = regexp.MustCompile(`(?m)^ +| +(\n|$)`).ReplaceAllStringFunc(input, func(s string) string {
		if reBr.MatchString(s) {
			return s
		}
		return strings.Replace(s, " ", "", -1)
	})
	l := lexInline(input)
	for token := range l.items {
		var node Node
		switch token.typ {
		case itemBr:
			node = p.newBr(token.pos)
		case itemStrong, itemItalic, itemStrike, itemCode:
			node = p.parseEmphasis(token.typ, token.pos, token.val)
		case itemLink, itemAutoLink, itemGfmLink:
			var title, href string
			var text []Node
			if token.typ == itemLink {
				// Inline link: [text](href "title")
				match := reLink.FindStringSubmatch(token.val)
				text = p.parseText(match[1])
				href, title = match[2], match[3]
			} else {
				var match []string
				if token.typ == itemGfmLink {
					match = reGfmLink.FindStringSubmatch(token.val)
				} else {
					match = reAutoLink.FindStringSubmatch(token.val)
				}
				// Auto/GFM links use the URL itself as link text.
				href = match[1]
				text = append(text, p.newText(token.pos, match[1]))
			}
			node = p.newLink(token.pos, title, href, text...)
		case itemImage:
			match := reImage.FindStringSubmatch(token.val)
			node = p.newImage(token.pos, match[3], match[2], match[1])
		case itemRefLink, itemRefImage:
			match := reRefLink.FindStringSubmatch(token.val)
			text, ref := match[1], match[2]
			// Collapsed reference style ("[text][]") refers to "text".
			if ref == "" {
				ref = text
			}
			if token.typ == itemRefLink {
				node = p.newRefLink(token.typ, token.pos, token.val, ref, p.parseText(text))
			} else {
				node = p.newRefImage(token.typ, token.pos, token.val, ref, text)
			}
		case itemHTML:
			node = p.newHTML(token.pos, token.val)
		default:
			node = p.newText(token.pos, token.val)
		}
		nodes = append(nodes, node)
	}
	return nodes
}
|
||||
|
||||
// parseEmphasis parses an inline emphasis token (strong, italic,
// strike or code span) into an EmphasisNode, recursively parsing the
// enclosed text.
// Callers only pass one of the four item types below; any other type
// would leave re nil and panic on FindStringSubmatch.
func (p *parse) parseEmphasis(typ itemType, pos Pos, val string) *EmphasisNode {
	var re *regexp.Regexp
	switch typ {
	case itemStrike:
		re = reStrike
	case itemStrong:
		re = reStrong
	case itemCode:
		re = reCode
	case itemItalic:
		re = reItalic
	}
	node := p.newEmphasis(pos, typ)
	match := re.FindStringSubmatch(val)
	// The inner text is in the last capture group; fall back to the
	// first group when the last is empty (alternate delimiter form).
	text := match[len(match)-1]
	if text == "" {
		text = match[1]
	}
	node.Nodes = p.parseText(text)
	return node
}
|
||||
|
||||
// parseHeading parses an ATX heading ("## text") or a setext heading
// (text underlined with "=" or "-") into a HeadingNode.
func (p *parse) parseHeading() (node *HeadingNode) {
	token := p.next()
	level := 1
	var text string
	if token.typ == itemHeading {
		// ATX: the number of leading '#' is the level.
		match := reHeading.FindStringSubmatch(token.val)
		level, text = len(match[1]), match[2]
	} else {
		match := reLHeading.FindStringSubmatch(token.val)
		// Setext: equal signs mean first-level, dashes second-level.
		text = match[1]
		if match[2] == "-" {
			level = 2
		}
	}
	node = p.newHeading(token.pos, level, text)
	node.Nodes = p.parseText(text)
	return
}
|
||||
|
||||
// parseDefLink parses a link definition ("[name]: href title") and
// registers it in the root parser's link table so reference links can
// resolve it later. The first definition of a name wins.
func (p *parse) parseDefLink() *DefLinkNode {
	token := p.next()
	match := reDefLink.FindStringSubmatch(token.val)
	// Link names are matched case-insensitively.
	name := strings.ToLower(match[1])
	// name(lowercase), href, title
	n := p.newDefLink(token.pos, name, match[2], match[3])
	// Store in the root's table so nested parsers share definitions.
	links := p.root().links
	if _, ok := links[name]; !ok {
		links[name] = n
	}
	return n
}
|
||||
|
||||
// parseCodeBlock parses an indented or GFM-fenced code block. Fenced
// blocks may carry a language tag; indented blocks have none.
func (p *parse) parseCodeBlock() *CodeNode {
	var lang, text string
	token := p.next()
	if token.typ == itemGfmCodeBlock {
		codeStart := reGfmCode.FindStringSubmatch(token.val)
		lang = codeStart[3]
		// Everything after the opening fence is the code body.
		text = token.val[len(codeStart[0]):]
	} else {
		// Strip the leading indentation from each line.
		text = reCodeBlock.trim(token.val, "")
	}
	return p.newCode(token.pos, lang, text)
}
|
||||
|
||||
// parseBlockQuote strips the "> " markers from a blockquote token and
// re-parses the inner text with a nested parser whose tree-parent is p.
// NOTE(review): the replacer regexp is recompiled on every call.
func (p *parse) parseBlockQuote() (n *BlockQuoteNode) {
	token := p.next()
	// replacer for the leading quote markers
	re := regexp.MustCompile(`(?m)^ *> ?`)
	raw := re.ReplaceAllString(token.val, "")
	// TODO(a8m): doesn't work right now with defLink(inside the blockQuote)
	tr := &parse{lex: lex(raw), tr: p}
	tr.parse()
	n = p.newBlockQuote(token.pos)
	n.Nodes = tr.Nodes
	return
}
|
||||
|
||||
// parseList parses a run of list items into a ListNode. The list is
// ordered when the first marker starts with a digit.
func (p *parse) parseList() *ListNode {
	token := p.next()
	list := p.newList(token.pos, isDigit(token.val))
Loop:
	for {
		switch token = p.peek(); token.typ {
		case itemLooseItem, itemListItem:
			list.append(p.parseListItem())
		default:
			// First non-item token ends the list.
			break Loop
		}
	}
	return list
}
|
||||
|
||||
// parseListItem parses a single list item. Task items ("[ ] ..." /
// "[x] ...") get a checkbox node; other items are re-parsed with a
// nested parser so they can contain arbitrary block content.
func (p *parse) parseListItem() *ListItemNode {
	token := p.next()
	item := p.newListItem(token.pos)
	token.val = strings.TrimSpace(token.val)
	if p.isTaskItem(token.val) {
		item.Nodes = p.parseTaskItem(token)
		return item
	}
	tr := &parse{lex: lex(token.val), tr: p}
	tr.parse()
	for _, node := range tr.Nodes {
		// Keep the wrapping paragraph only for loose items; tight
		// items (itemListItem) inline the paragraph's children.
		if n, ok := node.(*ParagraphNode); ok && token.typ == itemListItem {
			item.Nodes = append(item.Nodes, n.Nodes...)
		} else {
			item.append(node)
		}
	}
	return item
}
|
||||
|
||||
// parseTaskItem parses a list item as a task item: a checkbox node
// followed by the item's text. The caller has already validated the
// "[x]"/"[ ]" prefix via isTaskItem, so indexing val[1] is safe.
func (p *parse) parseTaskItem(token item) []Node {
	checkbox := p.newCheckbox(token.pos, token.val[1] == 'x')
	// Drop the three-character "[x]" marker before parsing the text.
	token.val = strings.TrimSpace(token.val[3:])
	return append([]Node{checkbox}, p.parseText(token.val)...)
}
|
||||
|
||||
// isTaskItem tests if the given string is list task item.
|
||||
func (p *parse) isTaskItem(s string) bool {
|
||||
if len(s) < 5 || s[0] != '[' || (s[1] != 'x' && s[1] != ' ') || s[2] != ']' {
|
||||
return false
|
||||
}
|
||||
return "" != strings.TrimSpace(s[3:])
|
||||
}
|
||||
|
||||
// parseTable parses a table block. It first collects raw row/cell
// tokens, then transforms them into header, alignment and data nodes.
func (p *parse) parseTable() *TableNode {
	table := p.newTable(p.next().pos)
	// Align [ None, Left, Right, ... ]
	// Header [ Cells: [ ... ] ]
	// Data: [ Rows: [ Cells: [ ... ] ] ]
	rows := struct {
		Align  []AlignType
		Header []item
		Cells  [][]item
	}{}
Loop:
	for i := 0; ; {
		switch token := p.next(); token.typ {
		case itemTableRow:
			// Row 1 is the header, row 2 the alignment spec; data
			// rows start at 3 and each gets a fresh cell slice.
			i++
			if i > 2 {
				rows.Cells = append(rows.Cells, []item{})
			}
		case itemTableCell:
			// Header
			if i == 1 {
				rows.Header = append(rows.Header, token)
				// Alignment
			} else if i == 2 {
				rows.Align = append(rows.Align, parseAlign(token.val))
				// Data
			} else {
				pos := i - 3
				rows.Cells[pos] = append(rows.Cells[pos], token)
			}
		default:
			// Not part of the table: push the token back and stop.
			p.backup()
			break Loop
		}
	}
	// Transform the collected tokens into nodes.
	table.append(p.parseCells(Header, rows.Header, rows.Align))
	// Table body
	for _, row := range rows.Cells {
		table.append(p.parseCells(Data, row, rows.Align))
	}
	return table
}
|
||||
|
||||
// parseCells converts one table row's cell tokens into a RowNode of
// the given kind (Header or Data), applying the per-column alignment.
// Returns nil when items is empty.
func (p *parse) parseCells(kind int, items []item, align []AlignType) *RowNode {
	var row *RowNode
	for i, item := range items {
		if i == 0 {
			row = p.newRow(item.pos)
		}
		cell := p.newCell(item.pos, kind, align[i])
		cell.Nodes = p.parseText(item.val)
		row.append(cell)
	}
	return row
}
|
||||
|
||||
// scanLines consumes consecutive text/indent tokens (and the newlines
// between them) that belong to a continuing paragraph, returning their
// concatenated text. Tokens that end the paragraph are pushed back.
func (p *parse) scanLines() (s string) {
	for {
		tkn := p.next()
		if tkn.typ == itemText || tkn.typ == itemIndent {
			s += tkn.val
		} else if tkn.typ == itemNewLine {
			// A newline continues the paragraph only when more
			// text/indent follows; otherwise push it back and stop.
			if t := p.peek().typ; t != itemText && t != itemIndent {
				p.backup2(tkn)
				break
			}
			s += tkn.val
		} else {
			p.backup()
			break
		}
	}
	return
}
|
||||
|
||||
// get align-string and return the align type of it
|
||||
func parseAlign(s string) (typ AlignType) {
|
||||
sfx, pfx := strings.HasSuffix(s, ":"), strings.HasPrefix(s, ":")
|
||||
switch {
|
||||
case sfx && pfx:
|
||||
typ = Center
|
||||
case sfx:
|
||||
typ = Right
|
||||
case pfx:
|
||||
typ = Left
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// isDigit reports whether the first rune of s is a Unicode digit.
// An empty or invalid-UTF-8 prefix yields false.
func isDigit(s string) bool {
	for _, r := range s {
		return unicode.IsDigit(r)
	}
	return false
}
|
||||
20
vendor/github.com/beorn7/perks/quantile/LICENSE
generated
vendored
Normal file
20
vendor/github.com/beorn7/perks/quantile/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
Copyright (C) 2013 Blake Mizerany
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
292
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
Normal file
292
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
Normal file
@@ -0,0 +1,292 @@
|
||||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface,
// ordering by Value ascending.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// invariant computes the maximum permitted error (width) at rank r for
// the stream s; it parameterizes the biased-quantile algorithm.
type invariant func(s *stream, r float64) float64
|
||||
|
||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the lower ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewLowBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * r
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the higher ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewHighBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * (s.n - r)
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||
// space and computation time. The targets map maps the desired quantiles to
|
||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targets map[float64]float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for quantile, epsilon := range targets {
|
||||
if quantile*s.n <= r {
|
||||
f = (2 * epsilon * r) / quantile
|
||||
} else {
|
||||
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples // buffered, not-yet-merged samples
	sorted bool    // whether b is currently sorted
}

// newStream returns a Stream wrapping a fresh internal stream with the
// given invariant function and a 500-entry insert buffer.
func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}
|
||||
|
||||
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

// insert buffers the sample, flushing the buffer into the underlying
// stream when it reaches capacity.
func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}
|
||||
|
||||
// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		// Pick the sample at rank ceil(l*q), clamped to a valid index.
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}
|
||||
|
||||
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}
|
||||
|
||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}
|
||||
|
||||
// Samples returns stream samples held by s. Before the first flush the
// raw insert buffer is returned directly (not a copy); afterwards the
// buffer is flushed and a copy of the merged summary is returned.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}
|
||||
|
||||
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}
|
||||
|
||||
// flush sorts the insert buffer, merges it into the underlying stream
// summary, and empties the buffer (keeping its capacity).
func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

// maybeSort sorts the insert buffer unless it is already sorted.
func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

// flushed reports whether any samples have reached the merged summary.
func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}
|
||||
|
||||
// stream is the merged quantile summary: a sorted sample list l, the
// total observed weight n, and the error invariant ƒ.
type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

// reset clears the summary, reusing the sample slice's memory.
func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

// insert adds a single value with weight 1 to the summary.
func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}
|
||||
|
||||
// merge folds the (sorted) samples into the summary list, keeping l
// sorted by Value and assigning each inserted sample a Delta bounded
// by the invariant at its rank. Finishes with a compress pass.
func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64 // running rank (cumulative width) at position i
	i := 0
	for _, sample := range samples {
		// Advance until we find the first existing sample larger
		// than the incoming one; insert before it.
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		// Larger than everything seen so far: append at the end.
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}
|
||||
|
||||
// count returns the total merged weight as an integer sample count.
func (s *stream) count() int {
	return int(s.n)
}
|
||||
|
||||
// query returns the value whose rank range covers the target rank
// t = ceil(q*n) + ceil(ƒ(t)/2), per the biased-quantile algorithm.
// NOTE(review): indexes s.l[0] — callers must ensure the summary is
// non-empty (Stream.Query's fast path handles the empty case).
func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}
|
||||
|
||||
// compress walks the summary from the end and merges each adjacent
// pair whose combined width stays within the invariant's error bound,
// shrinking the list while preserving the accuracy guarantee.
func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	// r tracks the rank (cumulative width) just before sample x.
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			// Merge c into x and drop c from the list.
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}
|
||||
|
||||
// samples returns a copy of the merged summary list.
func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
|
||||
12
vendor/github.com/cheggaaa/pb/LICENSE
generated
vendored
Normal file
12
vendor/github.com/cheggaaa/pb/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
Copyright (c) 2012-2015, Sergey Cherepanov
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
118
vendor/github.com/cheggaaa/pb/format.go
generated
vendored
Normal file
118
vendor/github.com/cheggaaa/pb/format.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
package pb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Units selects how a formatter renders its numeric value.
type Units int

const (
	// U_NO are default units, they represent a simple value and are not formatted at all.
	U_NO Units = iota
	// U_BYTES units are formatted in a human readable way (B, KiB, MiB, ...)
	U_BYTES
	// U_BYTES_DEC units are like U_BYTES, but base 10 (B, KB, MB, ...)
	U_BYTES_DEC
	// U_DURATION units are formatted in a human readable way (3h14m15s)
	U_DURATION
)

// Size thresholds: binary (powers of 1024) and decimal (powers of 1000).
const (
	KiB = 1024
	MiB = 1048576
	GiB = 1073741824
	TiB = 1099511627776

	KB = 1e3
	MB = 1e6
	GB = 1e9
	TB = 1e12
)
|
||||
|
||||
// Format returns a formatter for i; chain To/Width/PerSec to configure
// it and String to render.
func Format(i int64) *formatter {
	return &formatter{n: i}
}

// formatter renders an int64 according to a unit, minimum width and an
// optional "/s" suffix.
type formatter struct {
	n      int64
	unit   Units
	width  int
	perSec bool
}

// To sets the unit used for rendering.
func (f *formatter) To(unit Units) *formatter {
	f.unit = unit
	return f
}

// Width sets the minimum field width (only used for U_NO values).
func (f *formatter) Width(width int) *formatter {
	f.width = width
	return f
}

// PerSec appends a "/s" suffix to the rendered value.
func (f *formatter) PerSec() *formatter {
	f.perSec = true
	return f
}
|
||||
|
||||
func (f *formatter) String() (out string) {
|
||||
switch f.unit {
|
||||
case U_BYTES:
|
||||
out = formatBytes(f.n)
|
||||
case U_BYTES_DEC:
|
||||
out = formatBytesDec(f.n)
|
||||
case U_DURATION:
|
||||
out = formatDuration(f.n)
|
||||
default:
|
||||
out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n)
|
||||
}
|
||||
if f.perSec {
|
||||
out += "/s"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// formatBytes converts a byte count to a human readable base-2 string,
// e.g. "2.00 MiB", "64.20 KiB", "52 B".
func formatBytes(i int64) string {
	// Binary thresholds (local so the function is self-contained).
	const (
		kib = 1 << 10
		mib = 1 << 20
		gib = 1 << 30
		tib = 1 << 40
	)
	switch {
	case i >= tib:
		return fmt.Sprintf("%.02f TiB", float64(i)/tib)
	case i >= gib:
		return fmt.Sprintf("%.02f GiB", float64(i)/gib)
	case i >= mib:
		return fmt.Sprintf("%.02f MiB", float64(i)/mib)
	case i >= kib:
		return fmt.Sprintf("%.02f KiB", float64(i)/kib)
	default:
		return fmt.Sprintf("%d B", i)
	}
}
|
||||
|
||||
// formatBytesDec converts a byte count to a human readable base-10
// string, e.g. "2.00 MB", "64.20 KB", "52 B".
func formatBytesDec(i int64) string {
	// Decimal thresholds (local so the function is self-contained).
	const (
		kb = 1e3
		mb = 1e6
		gb = 1e9
		tb = 1e12
	)
	switch {
	case i >= tb:
		return fmt.Sprintf("%.02f TB", float64(i)/tb)
	case i >= gb:
		return fmt.Sprintf("%.02f GB", float64(i)/gb)
	case i >= mb:
		return fmt.Sprintf("%.02f MB", float64(i)/mb)
	case i >= kb:
		return fmt.Sprintf("%.02f KB", float64(i)/kb)
	default:
		return fmt.Sprintf("%d B", i)
	}
}
|
||||
|
||||
// formatDuration renders n (nanoseconds) as a duration string, with a
// "Nd" day prefix once the duration exceeds 24 hours (e.g. "1d2h3m4s").
func formatDuration(n int64) string {
	d := time.Duration(n)
	prefix := ""
	if d > 24*time.Hour {
		days := d / (24 * time.Hour)
		prefix = fmt.Sprintf("%dd", days)
		d -= days * 24 * time.Hour
	}
	return prefix + d.String()
}
|
||||
469
vendor/github.com/cheggaaa/pb/pb.go
generated
vendored
Normal file
469
vendor/github.com/cheggaaa/pb/pb.go
generated
vendored
Normal file
@@ -0,0 +1,469 @@
|
||||
// Simple console progress bars
|
||||
package pb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Current version
const Version = "1.0.19"

const (
	// Default refresh rate - 200ms
	DEFAULT_REFRESH_RATE = time.Millisecond * 200
	// FORMAT is the default bar format: start, fill, tip, empty, end.
	FORMAT = "[=>-]"
)

// DEPRECATED
// variables for backward compatibility, from now do not work
// use pb.Format and pb.SetRefreshRate
var (
	DefaultRefreshRate                         = DEFAULT_REFRESH_RATE
	BarStart, BarEnd, Empty, Current, CurrentN string
)
|
||||
|
||||
// New creates a new progress bar object with the given total.
func New(total int) *ProgressBar {
	return New64(int64(total))
}
|
||||
|
||||
// New64 creates a new progress bar object using int64 as total, with
// default display settings and the default "[=>-]" format applied.
func New64(total int64) *ProgressBar {
	pb := &ProgressBar{
		Total:         total,
		RefreshRate:   DEFAULT_REFRESH_RATE,
		ShowPercent:   true,
		ShowCounters:  true,
		ShowBar:       true,
		ShowTimeLeft:  true,
		ShowFinalTime: true,
		Units:         U_NO,
		ManualUpdate:  false,
		finish:        make(chan struct{}),
	}
	return pb.Format(FORMAT)
}
|
||||
|
||||
// StartNew creates a new progress bar and starts it immediately.
func StartNew(total int) *ProgressBar {
	return New(total).Start()
}
|
||||
|
||||
// Callback for custom output
// For example:
// bar.Callback = func(s string) {
//     mySuperPrint(s)
// }
//
type Callback func(out string)

// ProgressBar holds the state and display settings of one console
// progress bar. The current value is manipulated atomically; the
// mutex guards lastPrint/isFinish.
type ProgressBar struct {
	current  int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278)
	previous int64

	Total                            int64
	RefreshRate                      time.Duration
	ShowPercent, ShowCounters        bool
	ShowSpeed, ShowTimeLeft, ShowBar bool
	ShowFinalTime                    bool
	Output                           io.Writer
	Callback                         Callback
	NotPrint                         bool
	Units                            Units
	Width                            int
	ForceWidth                       bool
	ManualUpdate                     bool
	AutoStat                         bool

	// Default width for the time box.
	UnitsWidth   int
	TimeBoxWidth int

	finishOnce sync.Once //Guards isFinish
	finish     chan struct{}
	isFinish   bool

	startTime  time.Time
	startValue int64

	changeTime time.Time

	prefix, postfix string

	mu        sync.Mutex
	lastPrint string

	// Bar format runes, set via Format.
	BarStart string
	BarEnd   string
	Empty    string
	Current  string
	CurrentN string

	AlwaysUpdate bool
}
|
||||
|
||||
// Start begins printing the bar and returns it for chaining. With an
// unknown (zero) total, percent/time-left displays are disabled. Unless
// ManualUpdate is set, a background refresher goroutine is started.
func (pb *ProgressBar) Start() *ProgressBar {
	pb.startTime = time.Now()
	pb.startValue = atomic.LoadInt64(&pb.current)
	if pb.Total == 0 {
		pb.ShowTimeLeft = false
		pb.ShowPercent = false
		pb.AutoStat = false
	}
	if !pb.ManualUpdate {
		pb.Update() // Initial printing of the bar before running the bar refresher.
		go pb.refresher()
	}
	return pb
}
|
||||
|
||||
// Increment adds one to the current value.
func (pb *ProgressBar) Increment() int {
	return pb.Add(1)
}

// Get returns the current value (atomically).
func (pb *ProgressBar) Get() int64 {
	c := atomic.LoadInt64(&pb.current)
	return c
}

// Set sets the current value.
func (pb *ProgressBar) Set(current int) *ProgressBar {
	return pb.Set64(int64(current))
}

// Set64 sets the current value as int64 (atomically).
func (pb *ProgressBar) Set64(current int64) *ProgressBar {
	atomic.StoreInt64(&pb.current, current)
	return pb
}

// Add adds the given amount to the current value.
func (pb *ProgressBar) Add(add int) int {
	return int(pb.Add64(int64(add)))
}

// Add64 atomically adds add and returns the new current value.
func (pb *ProgressBar) Add64(add int64) int64 {
	return atomic.AddInt64(&pb.current, add)
}
|
||||
|
||||
// Prefix sets a string printed before the bar.
func (pb *ProgressBar) Prefix(prefix string) *ProgressBar {
	pb.prefix = prefix
	return pb
}

// Postfix sets a string printed after the bar.
func (pb *ProgressBar) Postfix(postfix string) *ProgressBar {
	pb.postfix = postfix
	return pb
}
|
||||
|
||||
// Format sets a custom format for the bar: start, fill, tip, empty, end.
// Example: bar.Format("[=>_]")
// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter
// A five-rune string is split per rune; otherwise the string is split
// on NUL so multi-rune segments are possible. Malformed formats
// (not exactly five entries) are silently ignored.
func (pb *ProgressBar) Format(format string) *ProgressBar {
	var formatEntries []string
	if utf8.RuneCountInString(format) == 5 {
		formatEntries = strings.Split(format, "")
	} else {
		formatEntries = strings.Split(format, "\x00")
	}
	if len(formatEntries) == 5 {
		pb.BarStart = formatEntries[0]
		pb.BarEnd = formatEntries[4]
		pb.Empty = formatEntries[3]
		pb.Current = formatEntries[1]
		pb.CurrentN = formatEntries[2]
	}
	return pb
}
|
||||
|
||||
// SetRefreshRate sets the bar refresh rate.
func (pb *ProgressBar) SetRefreshRate(rate time.Duration) *ProgressBar {
	pb.RefreshRate = rate
	return pb
}

// SetUnits sets the value units:
// bar.SetUnits(U_NO) - by default
// bar.SetUnits(U_BYTES) - for Mb, Kb, etc
func (pb *ProgressBar) SetUnits(units Units) *ProgressBar {
	pb.Units = units
	return pb
}

// SetMaxWidth sets the max width; if width is bigger than terminal width, it will be ignored.
func (pb *ProgressBar) SetMaxWidth(width int) *ProgressBar {
	pb.Width = width
	pb.ForceWidth = false
	return pb
}

// SetWidth sets a fixed bar width, overriding the terminal width.
func (pb *ProgressBar) SetWidth(width int) *ProgressBar {
	pb.Width = width
	pb.ForceWidth = true
	return pb
}
|
||||
|
||||
// Finish stops the bar, prints the final state and a trailing newline.
// Safe to call multiple times; only the first call has any effect.
func (pb *ProgressBar) Finish() {
	//Protect multiple calls
	pb.finishOnce.Do(func() {
		// Stop the refresher goroutine, then render the final value.
		close(pb.finish)
		pb.write(atomic.LoadInt64(&pb.current))
		pb.mu.Lock()
		defer pb.mu.Unlock()
		switch {
		case pb.Output != nil:
			fmt.Fprintln(pb.Output)
		case !pb.NotPrint:
			fmt.Println()
		}
		pb.isFinish = true
	})
}

// IsFinished reports whether Finish has completed.
func (pb *ProgressBar) IsFinished() bool {
	pb.mu.Lock()
	defer pb.mu.Unlock()
	return pb.isFinish
}

// FinishPrint finishes the bar and then prints the string 'str'.
func (pb *ProgressBar) FinishPrint(str string) {
	pb.Finish()
	if pb.Output != nil {
		fmt.Fprintln(pb.Output, str)
	} else {
		fmt.Println(str)
	}
}
|
||||
|
||||
// implement io.Writer
|
||||
func (pb *ProgressBar) Write(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
pb.Add(n)
|
||||
return
|
||||
}
|
||||
|
||||
// implement io.Reader
|
||||
func (pb *ProgressBar) Read(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
pb.Add(n)
|
||||
return
|
||||
}
|
||||
|
||||
// Create new proxy reader over bar
|
||||
// Takes io.Reader or io.ReadCloser
|
||||
func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader {
|
||||
return &Reader{r, pb}
|
||||
}
|
||||
|
||||
func (pb *ProgressBar) write(current int64) {
|
||||
width := pb.GetWidth()
|
||||
|
||||
var percentBox, countersBox, timeLeftBox, speedBox, barBox, end, out string
|
||||
|
||||
// percents
|
||||
if pb.ShowPercent {
|
||||
var percent float64
|
||||
if pb.Total > 0 {
|
||||
percent = float64(current) / (float64(pb.Total) / float64(100))
|
||||
} else {
|
||||
percent = float64(current) / float64(100)
|
||||
}
|
||||
percentBox = fmt.Sprintf(" %6.02f%%", percent)
|
||||
}
|
||||
|
||||
// counters
|
||||
if pb.ShowCounters {
|
||||
current := Format(current).To(pb.Units).Width(pb.UnitsWidth)
|
||||
if pb.Total > 0 {
|
||||
total := Format(pb.Total).To(pb.Units).Width(pb.UnitsWidth)
|
||||
countersBox = fmt.Sprintf(" %s / %s ", current, total)
|
||||
} else {
|
||||
countersBox = fmt.Sprintf(" %s / ? ", current)
|
||||
}
|
||||
}
|
||||
|
||||
// time left
|
||||
pb.mu.Lock()
|
||||
currentFromStart := current - pb.startValue
|
||||
fromStart := time.Now().Sub(pb.startTime)
|
||||
lastChangeTime := pb.changeTime
|
||||
fromChange := lastChangeTime.Sub(pb.startTime)
|
||||
pb.mu.Unlock()
|
||||
select {
|
||||
case <-pb.finish:
|
||||
if pb.ShowFinalTime {
|
||||
var left time.Duration
|
||||
left = (fromStart / time.Second) * time.Second
|
||||
timeLeftBox = fmt.Sprintf(" %s", left.String())
|
||||
}
|
||||
default:
|
||||
if pb.ShowTimeLeft && currentFromStart > 0 {
|
||||
perEntry := fromChange / time.Duration(currentFromStart)
|
||||
var left time.Duration
|
||||
if pb.Total > 0 {
|
||||
left = time.Duration(pb.Total-currentFromStart) * perEntry
|
||||
left -= time.Since(lastChangeTime)
|
||||
left = (left / time.Second) * time.Second
|
||||
} else {
|
||||
left = time.Duration(currentFromStart) * perEntry
|
||||
left = (left / time.Second) * time.Second
|
||||
}
|
||||
if left > 0 {
|
||||
timeLeft := Format(int64(left)).To(U_DURATION).String()
|
||||
timeLeftBox = fmt.Sprintf(" %s", timeLeft)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(timeLeftBox) < pb.TimeBoxWidth {
|
||||
timeLeftBox = fmt.Sprintf("%s%s", strings.Repeat(" ", pb.TimeBoxWidth-len(timeLeftBox)), timeLeftBox)
|
||||
}
|
||||
|
||||
// speed
|
||||
if pb.ShowSpeed && currentFromStart > 0 {
|
||||
fromStart := time.Now().Sub(pb.startTime)
|
||||
speed := float64(currentFromStart) / (float64(fromStart) / float64(time.Second))
|
||||
speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String()
|
||||
}
|
||||
|
||||
barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix)
|
||||
// bar
|
||||
if pb.ShowBar {
|
||||
size := width - barWidth
|
||||
if size > 0 {
|
||||
if pb.Total > 0 {
|
||||
curSize := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size)))
|
||||
emptySize := size - curSize
|
||||
barBox = pb.BarStart
|
||||
if emptySize < 0 {
|
||||
emptySize = 0
|
||||
}
|
||||
if curSize > size {
|
||||
curSize = size
|
||||
}
|
||||
|
||||
cursorLen := escapeAwareRuneCountInString(pb.Current)
|
||||
if emptySize <= 0 {
|
||||
barBox += strings.Repeat(pb.Current, curSize/cursorLen)
|
||||
} else if curSize > 0 {
|
||||
cursorEndLen := escapeAwareRuneCountInString(pb.CurrentN)
|
||||
cursorRepetitions := (curSize - cursorEndLen) / cursorLen
|
||||
barBox += strings.Repeat(pb.Current, cursorRepetitions)
|
||||
barBox += pb.CurrentN
|
||||
}
|
||||
|
||||
emptyLen := escapeAwareRuneCountInString(pb.Empty)
|
||||
barBox += strings.Repeat(pb.Empty, emptySize/emptyLen)
|
||||
barBox += pb.BarEnd
|
||||
} else {
|
||||
pos := size - int(current)%int(size)
|
||||
barBox = pb.BarStart
|
||||
if pos-1 > 0 {
|
||||
barBox += strings.Repeat(pb.Empty, pos-1)
|
||||
}
|
||||
barBox += pb.Current
|
||||
if size-pos-1 > 0 {
|
||||
barBox += strings.Repeat(pb.Empty, size-pos-1)
|
||||
}
|
||||
barBox += pb.BarEnd
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check len
|
||||
out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix
|
||||
if cl := escapeAwareRuneCountInString(out); cl < width {
|
||||
end = strings.Repeat(" ", width-cl)
|
||||
}
|
||||
|
||||
// and print!
|
||||
pb.mu.Lock()
|
||||
pb.lastPrint = out + end
|
||||
isFinish := pb.isFinish
|
||||
pb.mu.Unlock()
|
||||
switch {
|
||||
case isFinish:
|
||||
return
|
||||
case pb.Output != nil:
|
||||
fmt.Fprint(pb.Output, "\r"+out+end)
|
||||
case pb.Callback != nil:
|
||||
pb.Callback(out + end)
|
||||
case !pb.NotPrint:
|
||||
fmt.Print("\r" + out + end)
|
||||
}
|
||||
}
|
||||
|
||||
// GetTerminalWidth - returns terminal width for all platforms.
|
||||
func GetTerminalWidth() (int, error) {
|
||||
return terminalWidth()
|
||||
}
|
||||
|
||||
func (pb *ProgressBar) GetWidth() int {
|
||||
if pb.ForceWidth {
|
||||
return pb.Width
|
||||
}
|
||||
|
||||
width := pb.Width
|
||||
termWidth, _ := terminalWidth()
|
||||
if width == 0 || termWidth <= width {
|
||||
width = termWidth
|
||||
}
|
||||
|
||||
return width
|
||||
}
|
||||
|
||||
// Write the current state of the progressbar
|
||||
func (pb *ProgressBar) Update() {
|
||||
c := atomic.LoadInt64(&pb.current)
|
||||
p := atomic.LoadInt64(&pb.previous)
|
||||
if p != c {
|
||||
pb.mu.Lock()
|
||||
pb.changeTime = time.Now()
|
||||
pb.mu.Unlock()
|
||||
atomic.StoreInt64(&pb.previous, c)
|
||||
}
|
||||
pb.write(c)
|
||||
if pb.AutoStat {
|
||||
if c == 0 {
|
||||
pb.startTime = time.Now()
|
||||
pb.startValue = 0
|
||||
} else if c >= pb.Total && pb.isFinish != true {
|
||||
pb.Finish()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// String return the last bar print
|
||||
func (pb *ProgressBar) String() string {
|
||||
pb.mu.Lock()
|
||||
defer pb.mu.Unlock()
|
||||
return pb.lastPrint
|
||||
}
|
||||
|
||||
// Internal loop for refreshing the progressbar
|
||||
func (pb *ProgressBar) refresher() {
|
||||
for {
|
||||
select {
|
||||
case <-pb.finish:
|
||||
return
|
||||
case <-time.After(pb.RefreshRate):
|
||||
pb.Update()
|
||||
}
|
||||
}
|
||||
}
|
||||
11
vendor/github.com/cheggaaa/pb/pb_appengine.go
generated
vendored
Normal file
11
vendor/github.com/cheggaaa/pb/pb_appengine.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// +build appengine
|
||||
|
||||
package pb
|
||||
|
||||
import "errors"
|
||||
|
||||
// terminalWidth returns width of the terminal, which is not supported
|
||||
// and should always failed on appengine classic which is a sandboxed PaaS.
|
||||
func terminalWidth() (int, error) {
|
||||
return 0, errors.New("Not supported")
|
||||
}
|
||||
141
vendor/github.com/cheggaaa/pb/pb_win.go
generated
vendored
Normal file
141
vendor/github.com/cheggaaa/pb/pb_win.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
// +build windows
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var tty = os.Stdin
|
||||
|
||||
var (
|
||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
|
||||
// GetConsoleScreenBufferInfo retrieves information about the
|
||||
// specified console screen buffer.
|
||||
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx
|
||||
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
|
||||
|
||||
// GetConsoleMode retrieves the current input mode of a console's
|
||||
// input buffer or the current output mode of a console screen buffer.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
|
||||
getConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||
|
||||
// SetConsoleMode sets the input mode of a console's input buffer
|
||||
// or the output mode of a console screen buffer.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
|
||||
setConsoleMode = kernel32.NewProc("SetConsoleMode")
|
||||
|
||||
// SetConsoleCursorPosition sets the cursor position in the
|
||||
// specified console screen buffer.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
|
||||
setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
|
||||
)
|
||||
|
||||
type (
|
||||
// Defines the coordinates of the upper left and lower right corners
|
||||
// of a rectangle.
|
||||
// See
|
||||
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx
|
||||
smallRect struct {
|
||||
Left, Top, Right, Bottom int16
|
||||
}
|
||||
|
||||
// Defines the coordinates of a character cell in a console screen
|
||||
// buffer. The origin of the coordinate system (0,0) is at the top, left cell
|
||||
// of the buffer.
|
||||
// See
|
||||
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx
|
||||
coordinates struct {
|
||||
X, Y int16
|
||||
}
|
||||
|
||||
word int16
|
||||
|
||||
// Contains information about a console screen buffer.
|
||||
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx
|
||||
consoleScreenBufferInfo struct {
|
||||
dwSize coordinates
|
||||
dwCursorPosition coordinates
|
||||
wAttributes word
|
||||
srWindow smallRect
|
||||
dwMaximumWindowSize coordinates
|
||||
}
|
||||
)
|
||||
|
||||
// terminalWidth returns width of the terminal.
|
||||
func terminalWidth() (width int, err error) {
|
||||
var info consoleScreenBufferInfo
|
||||
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
|
||||
if e != 0 {
|
||||
return 0, error(e)
|
||||
}
|
||||
return int(info.dwSize.X) - 1, nil
|
||||
}
|
||||
|
||||
func getCursorPos() (pos coordinates, err error) {
|
||||
var info consoleScreenBufferInfo
|
||||
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
|
||||
if e != 0 {
|
||||
return info.dwCursorPosition, error(e)
|
||||
}
|
||||
return info.dwCursorPosition, nil
|
||||
}
|
||||
|
||||
func setCursorPos(pos coordinates) error {
|
||||
_, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0)
|
||||
if e != 0 {
|
||||
return error(e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var ErrPoolWasStarted = errors.New("Bar pool was started")
|
||||
|
||||
var echoLocked bool
|
||||
var echoLockMutex sync.Mutex
|
||||
|
||||
var oldState word
|
||||
|
||||
func lockEcho() (quit chan int, err error) {
|
||||
echoLockMutex.Lock()
|
||||
defer echoLockMutex.Unlock()
|
||||
if echoLocked {
|
||||
err = ErrPoolWasStarted
|
||||
return
|
||||
}
|
||||
echoLocked = true
|
||||
|
||||
if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 {
|
||||
err = fmt.Errorf("Can't get terminal settings: %v", e)
|
||||
return
|
||||
}
|
||||
|
||||
newState := oldState
|
||||
const ENABLE_ECHO_INPUT = 0x0004
|
||||
const ENABLE_LINE_INPUT = 0x0002
|
||||
newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT))
|
||||
if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 {
|
||||
err = fmt.Errorf("Can't set terminal settings: %v", e)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func unlockEcho() (err error) {
|
||||
echoLockMutex.Lock()
|
||||
defer echoLockMutex.Unlock()
|
||||
if !echoLocked {
|
||||
return
|
||||
}
|
||||
echoLocked = false
|
||||
if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 {
|
||||
err = fmt.Errorf("Can't set terminal settings")
|
||||
}
|
||||
return
|
||||
}
|
||||
108
vendor/github.com/cheggaaa/pb/pb_x.go
generated
vendored
Normal file
108
vendor/github.com/cheggaaa/pb/pb_x.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
// +build linux darwin freebsd netbsd openbsd solaris dragonfly
|
||||
// +build !appengine
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var ErrPoolWasStarted = errors.New("Bar pool was started")
|
||||
|
||||
var (
|
||||
echoLockMutex sync.Mutex
|
||||
origTermStatePtr *unix.Termios
|
||||
tty *os.File
|
||||
)
|
||||
|
||||
func init() {
|
||||
echoLockMutex.Lock()
|
||||
defer echoLockMutex.Unlock()
|
||||
|
||||
var err error
|
||||
tty, err = os.Open("/dev/tty")
|
||||
if err != nil {
|
||||
tty = os.Stdin
|
||||
}
|
||||
}
|
||||
|
||||
// terminalWidth returns width of the terminal.
|
||||
func terminalWidth() (int, error) {
|
||||
echoLockMutex.Lock()
|
||||
defer echoLockMutex.Unlock()
|
||||
|
||||
fd := int(tty.Fd())
|
||||
|
||||
ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return int(ws.Col), nil
|
||||
}
|
||||
|
||||
func lockEcho() (quit chan int, err error) {
|
||||
echoLockMutex.Lock()
|
||||
defer echoLockMutex.Unlock()
|
||||
if origTermStatePtr != nil {
|
||||
return quit, ErrPoolWasStarted
|
||||
}
|
||||
|
||||
fd := int(tty.Fd())
|
||||
|
||||
oldTermStatePtr, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Can't get terminal settings: %v", err)
|
||||
}
|
||||
|
||||
oldTermios := *oldTermStatePtr
|
||||
newTermios := oldTermios
|
||||
newTermios.Lflag &^= syscall.ECHO
|
||||
newTermios.Lflag |= syscall.ICANON | syscall.ISIG
|
||||
newTermios.Iflag |= syscall.ICRNL
|
||||
if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newTermios); err != nil {
|
||||
return nil, fmt.Errorf("Can't set terminal settings: %v", err)
|
||||
}
|
||||
|
||||
quit = make(chan int, 1)
|
||||
go catchTerminate(quit)
|
||||
return
|
||||
}
|
||||
|
||||
func unlockEcho() error {
|
||||
echoLockMutex.Lock()
|
||||
defer echoLockMutex.Unlock()
|
||||
if origTermStatePtr == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
fd := int(tty.Fd())
|
||||
|
||||
if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, origTermStatePtr); err != nil {
|
||||
return fmt.Errorf("Can't set terminal settings: %v", err)
|
||||
}
|
||||
|
||||
origTermStatePtr = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// listen exit signals and restore terminal state
|
||||
func catchTerminate(quit chan int) {
|
||||
sig := make(chan os.Signal, 1)
|
||||
signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL)
|
||||
defer signal.Stop(sig)
|
||||
select {
|
||||
case <-quit:
|
||||
unlockEcho()
|
||||
case <-sig:
|
||||
unlockEcho()
|
||||
}
|
||||
}
|
||||
82
vendor/github.com/cheggaaa/pb/pool.go
generated
vendored
Normal file
82
vendor/github.com/cheggaaa/pb/pool.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
// +build linux darwin freebsd netbsd openbsd solaris dragonfly windows
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Create and start new pool with given bars
|
||||
// You need call pool.Stop() after work
|
||||
func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) {
|
||||
pool = new(Pool)
|
||||
if err = pool.start(); err != nil {
|
||||
return
|
||||
}
|
||||
pool.Add(pbs...)
|
||||
return
|
||||
}
|
||||
|
||||
type Pool struct {
|
||||
Output io.Writer
|
||||
RefreshRate time.Duration
|
||||
bars []*ProgressBar
|
||||
lastBarsCount int
|
||||
quit chan int
|
||||
m sync.Mutex
|
||||
finishOnce sync.Once
|
||||
}
|
||||
|
||||
// Add progress bars.
|
||||
func (p *Pool) Add(pbs ...*ProgressBar) {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
for _, bar := range pbs {
|
||||
bar.ManualUpdate = true
|
||||
bar.NotPrint = true
|
||||
bar.Start()
|
||||
p.bars = append(p.bars, bar)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pool) start() (err error) {
|
||||
p.RefreshRate = DefaultRefreshRate
|
||||
quit, err := lockEcho()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
p.quit = make(chan int)
|
||||
go p.writer(quit)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Pool) writer(finish chan int) {
|
||||
var first = true
|
||||
for {
|
||||
select {
|
||||
case <-time.After(p.RefreshRate):
|
||||
if p.print(first) {
|
||||
p.print(false)
|
||||
finish <- 1
|
||||
return
|
||||
}
|
||||
first = false
|
||||
case <-p.quit:
|
||||
finish <- 1
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Restore terminal state and close pool
|
||||
func (p *Pool) Stop() error {
|
||||
// Wait until one final refresh has passed.
|
||||
time.Sleep(p.RefreshRate)
|
||||
|
||||
p.finishOnce.Do(func() {
|
||||
close(p.quit)
|
||||
})
|
||||
return unlockEcho()
|
||||
}
|
||||
45
vendor/github.com/cheggaaa/pb/pool_win.go
generated
vendored
Normal file
45
vendor/github.com/cheggaaa/pb/pool_win.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
// +build windows
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
func (p *Pool) print(first bool) bool {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
var out string
|
||||
if !first {
|
||||
coords, err := getCursorPos()
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
coords.Y -= int16(p.lastBarsCount)
|
||||
if coords.Y < 0 {
|
||||
coords.Y = 0
|
||||
}
|
||||
coords.X = 0
|
||||
|
||||
err = setCursorPos(coords)
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
}
|
||||
isFinished := true
|
||||
for _, bar := range p.bars {
|
||||
if !bar.IsFinished() {
|
||||
isFinished = false
|
||||
}
|
||||
bar.Update()
|
||||
out += fmt.Sprintf("\r%s\n", bar.String())
|
||||
}
|
||||
if p.Output != nil {
|
||||
fmt.Fprint(p.Output, out)
|
||||
} else {
|
||||
fmt.Print(out)
|
||||
}
|
||||
p.lastBarsCount = len(p.bars)
|
||||
return isFinished
|
||||
}
|
||||
29
vendor/github.com/cheggaaa/pb/pool_x.go
generated
vendored
Normal file
29
vendor/github.com/cheggaaa/pb/pool_x.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
// +build linux darwin freebsd netbsd openbsd solaris dragonfly
|
||||
|
||||
package pb
|
||||
|
||||
import "fmt"
|
||||
|
||||
func (p *Pool) print(first bool) bool {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
var out string
|
||||
if !first {
|
||||
out = fmt.Sprintf("\033[%dA", p.lastBarsCount)
|
||||
}
|
||||
isFinished := true
|
||||
for _, bar := range p.bars {
|
||||
if !bar.IsFinished() {
|
||||
isFinished = false
|
||||
}
|
||||
bar.Update()
|
||||
out += fmt.Sprintf("\r%s\n", bar.String())
|
||||
}
|
||||
if p.Output != nil {
|
||||
fmt.Fprint(p.Output, out)
|
||||
} else {
|
||||
fmt.Print(out)
|
||||
}
|
||||
p.lastBarsCount = len(p.bars)
|
||||
return isFinished
|
||||
}
|
||||
25
vendor/github.com/cheggaaa/pb/reader.go
generated
vendored
Normal file
25
vendor/github.com/cheggaaa/pb/reader.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
package pb
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// It's proxy reader, implement io.Reader
|
||||
type Reader struct {
|
||||
io.Reader
|
||||
bar *ProgressBar
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (n int, err error) {
|
||||
n, err = r.Reader.Read(p)
|
||||
r.bar.Add(n)
|
||||
return
|
||||
}
|
||||
|
||||
// Close the reader when it implements io.Closer
|
||||
func (r *Reader) Close() (err error) {
|
||||
if closer, ok := r.Reader.(io.Closer); ok {
|
||||
return closer.Close()
|
||||
}
|
||||
return
|
||||
}
|
||||
17
vendor/github.com/cheggaaa/pb/runecount.go
generated
vendored
Normal file
17
vendor/github.com/cheggaaa/pb/runecount.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
package pb
|
||||
|
||||
import (
|
||||
"github.com/mattn/go-runewidth"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// Finds the control character sequences (like colors)
|
||||
var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")
|
||||
|
||||
func escapeAwareRuneCountInString(s string) int {
|
||||
n := runewidth.StringWidth(s)
|
||||
for _, sm := range ctrlFinder.FindAllString(s, -1) {
|
||||
n -= runewidth.StringWidth(sm)
|
||||
}
|
||||
return n
|
||||
}
|
||||
9
vendor/github.com/cheggaaa/pb/termios_bsd.go
generated
vendored
Normal file
9
vendor/github.com/cheggaaa/pb/termios_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// +build darwin freebsd netbsd openbsd dragonfly
|
||||
// +build !appengine
|
||||
|
||||
package pb
|
||||
|
||||
import "syscall"
|
||||
|
||||
const ioctlReadTermios = syscall.TIOCGETA
|
||||
const ioctlWriteTermios = syscall.TIOCSETA
|
||||
13
vendor/github.com/cheggaaa/pb/termios_sysv.go
generated
vendored
Normal file
13
vendor/github.com/cheggaaa/pb/termios_sysv.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux solaris
|
||||
// +build !appengine
|
||||
|
||||
package pb
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
const ioctlReadTermios = unix.TCGETS
|
||||
const ioctlWriteTermios = unix.TCSETS
|
||||
27
vendor/github.com/cznic/b/LICENSE
generated
vendored
27
vendor/github.com/cznic/b/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2014 The b Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
909
vendor/github.com/cznic/b/btree.go
generated
vendored
909
vendor/github.com/cznic/b/btree.go
generated
vendored
@@ -1,909 +0,0 @@
|
||||
// Copyright 2014 The b Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package b
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
kx = 32 //TODO benchmark tune this number if using custom key/value type(s).
|
||||
kd = 32 //TODO benchmark tune this number if using custom key/value type(s).
|
||||
)
|
||||
|
||||
func init() {
|
||||
if kd < 1 {
|
||||
panic(fmt.Errorf("kd %d: out of range", kd))
|
||||
}
|
||||
|
||||
if kx < 2 {
|
||||
panic(fmt.Errorf("kx %d: out of range", kx))
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
btDPool = sync.Pool{New: func() interface{} { return &d{} }}
|
||||
btEPool = btEpool{sync.Pool{New: func() interface{} { return &Enumerator{} }}}
|
||||
btTPool = btTpool{sync.Pool{New: func() interface{} { return &Tree{} }}}
|
||||
btXPool = sync.Pool{New: func() interface{} { return &x{} }}
|
||||
)
|
||||
|
||||
type btTpool struct{ sync.Pool }
|
||||
|
||||
func (p *btTpool) get(cmp Cmp) *Tree {
|
||||
x := p.Get().(*Tree)
|
||||
x.cmp = cmp
|
||||
return x
|
||||
}
|
||||
|
||||
type btEpool struct{ sync.Pool }
|
||||
|
||||
func (p *btEpool) get(err error, hit bool, i int, k interface{} /*K*/, q *d, t *Tree, ver int64) *Enumerator {
|
||||
x := p.Get().(*Enumerator)
|
||||
x.err, x.hit, x.i, x.k, x.q, x.t, x.ver = err, hit, i, k, q, t, ver
|
||||
return x
|
||||
}
|
||||
|
||||
type (
|
||||
// Cmp compares a and b. Return value is:
|
||||
//
|
||||
// < 0 if a < b
|
||||
// 0 if a == b
|
||||
// > 0 if a > b
|
||||
//
|
||||
Cmp func(a, b interface{} /*K*/) int
|
||||
|
||||
d struct { // data page
|
||||
c int
|
||||
d [2*kd + 1]de
|
||||
n *d
|
||||
p *d
|
||||
}
|
||||
|
||||
de struct { // d element
|
||||
k interface{} /*K*/
|
||||
v interface{} /*V*/
|
||||
}
|
||||
|
||||
// Enumerator captures the state of enumerating a tree. It is returned
|
||||
// from the Seek* methods. The enumerator is aware of any mutations
|
||||
// made to the tree in the process of enumerating it and automatically
|
||||
// resumes the enumeration at the proper key, if possible.
|
||||
//
|
||||
// However, once an Enumerator returns io.EOF to signal "no more
|
||||
// items", it does no more attempt to "resync" on tree mutation(s). In
|
||||
// other words, io.EOF from an Enumerator is "sticky" (idempotent).
|
||||
Enumerator struct {
|
||||
err error
|
||||
hit bool
|
||||
i int
|
||||
k interface{} /*K*/
|
||||
q *d
|
||||
t *Tree
|
||||
ver int64
|
||||
}
|
||||
|
||||
// Tree is a B+tree.
|
||||
Tree struct {
|
||||
c int
|
||||
cmp Cmp
|
||||
first *d
|
||||
last *d
|
||||
r interface{}
|
||||
ver int64
|
||||
}
|
||||
|
||||
xe struct { // x element
|
||||
ch interface{}
|
||||
k interface{} /*K*/
|
||||
}
|
||||
|
||||
x struct { // index page
|
||||
c int
|
||||
x [2*kx + 2]xe
|
||||
}
|
||||
)
|
||||
|
||||
var ( // R/O zero values
|
||||
zd d
|
||||
zde de
|
||||
ze Enumerator
|
||||
zk interface{} /*K*/
|
||||
zt Tree
|
||||
zx x
|
||||
zxe xe
|
||||
)
|
||||
|
||||
func clr(q interface{}) {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
for i := 0; i <= x.c; i++ { // Ch0 Sep0 ... Chn-1 Sepn-1 Chn
|
||||
clr(x.x[i].ch)
|
||||
}
|
||||
*x = zx
|
||||
btXPool.Put(x)
|
||||
case *d:
|
||||
*x = zd
|
||||
btDPool.Put(x)
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------- x
|
||||
|
||||
func newX(ch0 interface{}) *x {
|
||||
r := btXPool.Get().(*x)
|
||||
r.x[0].ch = ch0
|
||||
return r
|
||||
}
|
||||
|
||||
func (q *x) extract(i int) {
|
||||
q.c--
|
||||
if i < q.c {
|
||||
copy(q.x[i:], q.x[i+1:q.c+1])
|
||||
q.x[q.c].ch = q.x[q.c+1].ch
|
||||
q.x[q.c].k = zk // GC
|
||||
q.x[q.c+1] = zxe // GC
|
||||
}
|
||||
}
|
||||
|
||||
func (q *x) insert(i int, k interface{} /*K*/, ch interface{}) *x {
|
||||
c := q.c
|
||||
if i < c {
|
||||
q.x[c+1].ch = q.x[c].ch
|
||||
copy(q.x[i+2:], q.x[i+1:c])
|
||||
q.x[i+1].k = q.x[i].k
|
||||
}
|
||||
c++
|
||||
q.c = c
|
||||
q.x[i].k = k
|
||||
q.x[i+1].ch = ch
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *x) siblings(i int) (l, r *d) {
|
||||
if i >= 0 {
|
||||
if i > 0 {
|
||||
l = q.x[i-1].ch.(*d)
|
||||
}
|
||||
if i < q.c {
|
||||
r = q.x[i+1].ch.(*d)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------- d
|
||||
|
||||
func (l *d) mvL(r *d, c int) {
|
||||
copy(l.d[l.c:], r.d[:c])
|
||||
copy(r.d[:], r.d[c:r.c])
|
||||
l.c += c
|
||||
r.c -= c
|
||||
}
|
||||
|
||||
func (l *d) mvR(r *d, c int) {
|
||||
copy(r.d[c:], r.d[:r.c])
|
||||
copy(r.d[:c], l.d[l.c-c:])
|
||||
r.c += c
|
||||
l.c -= c
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------- Tree
|
||||
|
||||
// TreeNew returns a newly created, empty Tree. The compare function is used
|
||||
// for key collation.
|
||||
func TreeNew(cmp Cmp) *Tree {
|
||||
return btTPool.get(cmp)
|
||||
}
|
||||
|
||||
// Clear removes all K/V pairs from the tree.
|
||||
func (t *Tree) Clear() {
|
||||
if t.r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
clr(t.r)
|
||||
t.c, t.first, t.last, t.r = 0, nil, nil, nil
|
||||
t.ver++
|
||||
}
|
||||
|
||||
// Close performs Clear and recycles t to a pool for possible later reuse. No
|
||||
// references to t should exist or such references must not be used afterwards.
|
||||
func (t *Tree) Close() {
|
||||
t.Clear()
|
||||
*t = zt
|
||||
btTPool.Put(t)
|
||||
}
|
||||
|
||||
func (t *Tree) cat(p *x, q, r *d, pi int) {
|
||||
t.ver++
|
||||
q.mvL(r, r.c)
|
||||
if r.n != nil {
|
||||
r.n.p = q
|
||||
} else {
|
||||
t.last = q
|
||||
}
|
||||
q.n = r.n
|
||||
*r = zd
|
||||
btDPool.Put(r)
|
||||
if p.c > 1 {
|
||||
p.extract(pi)
|
||||
p.x[pi].ch = q
|
||||
return
|
||||
}
|
||||
|
||||
switch x := t.r.(type) {
|
||||
case *x:
|
||||
*x = zx
|
||||
btXPool.Put(x)
|
||||
case *d:
|
||||
*x = zd
|
||||
btDPool.Put(x)
|
||||
}
|
||||
t.r = q
|
||||
}
|
||||
|
||||
func (t *Tree) catX(p, q, r *x, pi int) {
|
||||
t.ver++
|
||||
q.x[q.c].k = p.x[pi].k
|
||||
copy(q.x[q.c+1:], r.x[:r.c])
|
||||
q.c += r.c + 1
|
||||
q.x[q.c].ch = r.x[r.c].ch
|
||||
*r = zx
|
||||
btXPool.Put(r)
|
||||
if p.c > 1 {
|
||||
p.c--
|
||||
pc := p.c
|
||||
if pi < pc {
|
||||
p.x[pi].k = p.x[pi+1].k
|
||||
copy(p.x[pi+1:], p.x[pi+2:pc+1])
|
||||
p.x[pc].ch = p.x[pc+1].ch
|
||||
p.x[pc].k = zk // GC
|
||||
p.x[pc+1].ch = nil // GC
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch x := t.r.(type) {
|
||||
case *x:
|
||||
*x = zx
|
||||
btXPool.Put(x)
|
||||
case *d:
|
||||
*x = zd
|
||||
btDPool.Put(x)
|
||||
}
|
||||
t.r = q
|
||||
}
|
||||
|
||||
// Delete removes the k's KV pair, if it exists, in which case Delete returns
|
||||
// true.
|
||||
func (t *Tree) Delete(k interface{} /*K*/) (ok bool) {
|
||||
pi := -1
|
||||
var p *x
|
||||
q := t.r
|
||||
if q == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
i, ok = t.find(q, k)
|
||||
if ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c < kx && q != t.r {
|
||||
x, i = t.underflowX(p, x, pi, i)
|
||||
}
|
||||
pi = i + 1
|
||||
p = x
|
||||
q = x.x[pi].ch
|
||||
continue
|
||||
case *d:
|
||||
t.extract(x, i)
|
||||
if x.c >= kd {
|
||||
return true
|
||||
}
|
||||
|
||||
if q != t.r {
|
||||
t.underflow(p, x, pi)
|
||||
} else if t.c == 0 {
|
||||
t.Clear()
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c < kx && q != t.r {
|
||||
x, i = t.underflowX(p, x, pi, i)
|
||||
}
|
||||
pi = i
|
||||
p = x
|
||||
q = x.x[i].ch
|
||||
case *d:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) extract(q *d, i int) { // (r interface{} /*V*/) {
|
||||
t.ver++
|
||||
//r = q.d[i].v // prepared for Extract
|
||||
q.c--
|
||||
if i < q.c {
|
||||
copy(q.d[i:], q.d[i+1:q.c+1])
|
||||
}
|
||||
q.d[q.c] = zde // GC
|
||||
t.c--
|
||||
return
|
||||
}
|
||||
|
||||
// find binary-searches node q — either an index page (*x) or a data page
// (*d) — for key k using the tree's compare function. It returns the match
// index and true, or, when k is absent, the insertion index (the first
// position whose key compares greater than k) and false.
func (t *Tree) find(q interface{}, k interface{} /*K*/) (i int, ok bool) {
	var mk interface{} /*K*/
	l := 0
	switch x := q.(type) {
	case *x:
		// Binary search over the separator keys of an index page.
		h := x.c - 1
		for l <= h {
			m := (l + h) >> 1
			mk = x.x[m].k
			switch cmp := t.cmp(k, mk); {
			case cmp > 0:
				l = m + 1
			case cmp == 0:
				return m, true
			default:
				h = m - 1
			}
		}
	case *d:
		// Binary search over the KV pairs of a data page.
		h := x.c - 1
		for l <= h {
			m := (l + h) >> 1
			mk = x.d[m].k
			switch cmp := t.cmp(k, mk); {
			case cmp > 0:
				l = m + 1
			case cmp == 0:
				return m, true
			default:
				h = m - 1
			}
		}
	}
	return l, false
}
|
||||
|
||||
// First returns the first item of the tree in the key collating order, or
|
||||
// (zero-value, zero-value) if the tree is empty.
|
||||
func (t *Tree) First() (k interface{} /*K*/, v interface{} /*V*/) {
|
||||
if q := t.first; q != nil {
|
||||
q := &q.d[0]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Get returns the value associated with k and true if it exists. Otherwise Get
// returns (zero-value, false).
func (t *Tree) Get(k interface{} /*K*/) (v interface{} /*V*/, ok bool) {
	q := t.r
	if q == nil {
		return
	}

	for {
		var i int
		if i, ok = t.find(q, k); ok {
			switch x := q.(type) {
			case *x:
				// k matched a separator key: the actual KV pair is stored
				// in the subtree to the right of the separator.
				q = x.x[i+1].ch
				continue
			case *d:
				return x.d[i].v, true
			}
		}
		switch x := q.(type) {
		case *x:
			// No match here: descend into the child covering k's range.
			q = x.x[i].ch
		default:
			// Reached a data page without a match: k is not in the tree.
			return
		}
	}
}
|
||||
|
||||
func (t *Tree) insert(q *d, i int, k interface{} /*K*/, v interface{} /*V*/) *d {
|
||||
t.ver++
|
||||
c := q.c
|
||||
if i < c {
|
||||
copy(q.d[i+1:], q.d[i:c])
|
||||
}
|
||||
c++
|
||||
q.c = c
|
||||
q.d[i].k, q.d[i].v = k, v
|
||||
t.c++
|
||||
return q
|
||||
}
|
||||
|
||||
// Last returns the last item of the tree in the key collating order, or
|
||||
// (zero-value, zero-value) if the tree is empty.
|
||||
func (t *Tree) Last() (k interface{} /*K*/, v interface{} /*V*/) {
|
||||
if q := t.last; q != nil {
|
||||
q := &q.d[q.c-1]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of items in the tree.
func (t *Tree) Len() int {
	// O(1): the count is maintained incrementally by insert and extract.
	return t.c
}
|
||||
|
||||
// overflow inserts (k, v) at position i of the full data page q (child pi of
// parent p) by first trying to shed one item into a non-full left or right
// sibling; when neither rotation applies, q is split.
func (t *Tree) overflow(p *x, q *d, pi, i int, k interface{} /*K*/, v interface{} /*V*/) {
	t.ver++
	l, r := p.siblings(pi)

	// Rotate left: move q's first item to l, then insert into the vacated
	// range. The i != 0 guard appears to keep the parent separator update
	// below consistent with q's new minimum — TODO confirm against tests.
	if l != nil && l.c < 2*kd && i != 0 {
		l.mvL(q, 1)
		t.insert(q, i-1, k, v)
		p.x[pi-1].k = q.d[0].k
		return
	}

	if r != nil && r.c < 2*kd {
		if i < 2*kd {
			// Rotate right: shift q's last item into r, then insert.
			q.mvR(r, 1)
			t.insert(q, i, k, v)
			p.x[pi].k = r.d[0].k
			return
		}

		// Insertion position is past q's end: the new item becomes r's
		// first element and also the new separator.
		t.insert(r, 0, k, v)
		p.x[pi].k = k
		return
	}

	// No sibling can absorb an item: split q.
	t.split(p, q, pi, i, k, v)
}
|
||||
|
||||
// Seek returns an Enumerator positioned on an item such that k >= item's key.
// ok reports if k == item.key The Enumerator's position is possibly after the
// last item in the tree.
func (t *Tree) Seek(k interface{} /*K*/) (e *Enumerator, ok bool) {
	q := t.r
	if q == nil {
		// Empty tree: the enumerator will report io.EOF on first use.
		e = btEPool.get(nil, false, 0, k, nil, t, t.ver)
		return
	}

	for {
		var i int
		if i, ok = t.find(q, k); ok {
			switch x := q.(type) {
			case *x:
				// Exact match on a separator key: descend right of it to
				// reach the data page actually holding k.
				q = x.x[i+1].ch
				continue
			case *d:
				return btEPool.get(nil, ok, i, k, x, t, t.ver), true
			}
		}

		switch x := q.(type) {
		case *x:
			q = x.x[i].ch
		case *d:
			// No exact match: position on the smallest key >= k, which may
			// be one past the last item of this page.
			return btEPool.get(nil, ok, i, k, x, t, t.ver), false
		}
	}
}
|
||||
|
||||
// SeekFirst returns an enumerator positioned on the first KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *Tree) SeekFirst() (e *Enumerator, err error) {
|
||||
q := t.first
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return btEPool.get(nil, true, 0, q.d[0].k, q, t, t.ver), nil
|
||||
}
|
||||
|
||||
// SeekLast returns an enumerator positioned on the last KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *Tree) SeekLast() (e *Enumerator, err error) {
|
||||
q := t.last
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return btEPool.get(nil, true, q.c-1, q.d[q.c-1].k, q, t, t.ver), nil
|
||||
}
|
||||
|
||||
// Set sets the value associated with k, inserting a new KV pair when k is
// not yet present. Full index pages are split preemptively on the way down
// so at most one split per level is ever needed.
func (t *Tree) Set(k interface{} /*K*/, v interface{} /*V*/) {
	//dbg("--- PRE Set(%v, %v)\n%s", k, v, t.dump())
	//defer func() {
	//	dbg("--- POST\n%s\n====\n", t.dump())
	//}()

	// pi is q's child index within its parent p; -1 while q is the root.
	pi := -1
	var p *x
	q := t.r
	if q == nil {
		// Empty tree: create the first data page holding just (k, v).
		z := t.insert(btDPool.Get().(*d), 0, k, v)
		t.r, t.first, t.last = z, z, z
		return
	}

	for {
		i, ok := t.find(q, k)
		if ok {
			switch x := q.(type) {
			case *x:
				// k matched a separator key: descend right of it; the KV
				// pair to overwrite lives in that subtree.
				i++
				if x.c > 2*kx {
					x, i = t.splitX(p, x, pi, i)
				}
				pi = i
				p = x
				q = x.x[i].ch
				continue
			case *d:
				// Existing pair: overwrite the value in place.
				x.d[i].v = v
			}
			return
		}

		switch x := q.(type) {
		case *x:
			if x.c > 2*kx {
				x, i = t.splitX(p, x, pi, i)
			}
			pi = i
			p = x
			q = x.x[i].ch
		case *d:
			// New pair: insert directly when the page has room, otherwise
			// rebalance/split via overflow.
			switch {
			case x.c < 2*kd:
				t.insert(x, i, k, v)
			default:
				t.overflow(p, x, pi, i, k, v)
			}
			return
		}
	}
}
|
||||
|
||||
// Put combines Get and Set in a more efficient way where the tree is walked
// only once. The upd(ater) receives (old-value, true) if a KV pair for k
// exists or (zero-value, false) otherwise. It can then return a (new-value,
// true) to create or overwrite the existing value in the KV pair, or
// (whatever, false) if it decides not to create or not to update the value of
// the KV pair.
//
//	tree.Set(k, v) call conceptually equals calling
//
//	tree.Put(k, func(interface{} /*K*/, bool){ return v, true })
//
// modulo the differing return values.
func (t *Tree) Put(k interface{} /*K*/, upd func(oldV interface{} /*V*/, exists bool) (newV interface{} /*V*/, write bool)) (oldV interface{} /*V*/, written bool) {
	// pi is q's child index within its parent p; -1 while q is the root.
	pi := -1
	var p *x
	q := t.r
	var newV interface{} /*V*/
	if q == nil {
		// new KV pair in empty tree
		newV, written = upd(newV, false)
		if !written {
			return
		}

		z := t.insert(btDPool.Get().(*d), 0, k, newV)
		t.r, t.first, t.last = z, z, z
		return
	}

	for {
		i, ok := t.find(q, k)
		if ok {
			switch x := q.(type) {
			case *x:
				// k matched a separator key: descend right of it; split
				// full index pages preemptively on the way down.
				i++
				if x.c > 2*kx {
					x, i = t.splitX(p, x, pi, i)
				}
				pi = i
				p = x
				q = x.x[i].ch
				continue
			case *d:
				// Existing pair: let the updater decide whether to write.
				oldV = x.d[i].v
				newV, written = upd(oldV, true)
				if !written {
					return
				}

				x.d[i].v = newV
			}
			return
		}

		switch x := q.(type) {
		case *x:
			if x.c > 2*kx {
				x, i = t.splitX(p, x, pi, i)
			}
			pi = i
			p = x
			q = x.x[i].ch
		case *d: // new KV pair
			newV, written = upd(newV, false)
			if !written {
				return
			}

			switch {
			case x.c < 2*kd:
				t.insert(x, i, k, newV)
			default:
				t.overflow(p, x, pi, i, k, newV)
			}
			return
		}
	}
}
|
||||
|
||||
// split splits the full data page q in half into a fresh right page r,
// links r after q in the doubly-linked page list, inserts the pending
// (k, v) into whichever half insertion index i falls in, and posts r's
// first key as a new separator into parent p (creating a new root index
// page when pi < 0, i.e. q was the root).
func (t *Tree) split(p *x, q *d, pi, i int, k interface{} /*K*/, v interface{} /*V*/) {
	t.ver++
	r := btDPool.Get().(*d)
	if q.n != nil {
		r.n = q.n
		r.n.p = r
	} else {
		// q was the last page; r becomes the new last page.
		t.last = r
	}
	q.n = r
	r.p = q

	// Move the upper half of q's items to r and zero the vacated slots.
	copy(r.d[:], q.d[kd:2*kd])
	for i := range q.d[kd:] {
		q.d[kd+i] = zde
	}
	q.c = kd
	r.c = kd
	var done bool
	if i > kd {
		// The new item belongs in the right half; insert before posting
		// the separator so r.d[0].k is final.
		done = true
		t.insert(r, i-kd, k, v)
	}
	if pi >= 0 {
		p.insert(pi, r.d[0].k, r)
	} else {
		t.r = newX(q).insert(0, r.d[0].k, r)
	}
	if done {
		return
	}

	t.insert(q, i, k, v)
}
|
||||
|
||||
// splitX splits the over-full index page q in two, promoting its middle key
// q.x[kx].k into parent p (or into a newly created root when pi < 0). It
// returns the page and index where the caller's descent at position i
// should continue.
func (t *Tree) splitX(p *x, q *x, pi int, i int) (*x, int) {
	t.ver++
	r := btXPool.Get().(*x)
	// Upper half of q's entries moves to the new right page r.
	copy(r.x[:], q.x[kx+1:])
	q.c = kx
	r.c = kx
	if pi >= 0 {
		p.insert(pi, q.x[kx].k, r)
	} else {
		t.r = newX(q).insert(0, q.x[kx].k, r)
	}

	// Zero the promoted key and the moved-out entries so q holds no stale
	// references.
	q.x[kx].k = zk
	for i := range q.x[kx+1:] {
		q.x[kx+i+1] = zxe
	}
	if i > kx {
		// The descent position falls in the new right page.
		q = r
		i -= kx + 1
	}

	return q, i
}
|
||||
|
||||
// underflow rebalances the under-full data page q (child pi of parent p):
// it borrows one item from a left or right sibling when the pair still
// holds enough items between them, otherwise it merges q with a sibling
// via cat.
func (t *Tree) underflow(p *x, q *d, pi int) {
	t.ver++
	l, r := p.siblings(pi)

	// Borrow from the left sibling and refresh the separator.
	if l != nil && l.c+q.c >= 2*kd {
		l.mvR(q, 1)
		p.x[pi-1].k = q.d[0].k
		return
	}

	// Borrow from the right sibling and refresh the separator.
	if r != nil && q.c+r.c >= 2*kd {
		q.mvL(r, 1)
		p.x[pi].k = r.d[0].k
		r.d[r.c] = zde // GC
		return
	}

	// Neither sibling can spare an item: merge into the left sibling when
	// one exists, otherwise merge the right sibling into q.
	if l != nil {
		t.cat(p, l, q, pi-1)
		return
	}

	t.cat(p, q, r, pi)
}
|
||||
|
||||
// underflowX rebalances the under-full index page q during a delete
// descent: it borrows one entry from a left or right index-page sibling
// when possible, otherwise it merges q with a sibling via catX. It returns
// the (possibly replaced) page and the descent index i adjusted for any
// entries shifted in front of it.
func (t *Tree) underflowX(p *x, q *x, pi int, i int) (*x, int) {
	t.ver++
	var l, r *x

	if pi >= 0 {
		if pi > 0 {
			l = p.x[pi-1].ch.(*x)
		}
		if pi < p.c {
			r = p.x[pi+1].ch.(*x)
		}
	}

	// Borrow from the left sibling: rotate l's last child plus the parent
	// separator into the front of q.
	if l != nil && l.c > kx {
		q.x[q.c+1].ch = q.x[q.c].ch
		copy(q.x[1:], q.x[:q.c])
		q.x[0].ch = l.x[l.c].ch
		q.x[0].k = p.x[pi-1].k
		q.c++
		i++
		l.c--
		p.x[pi-1].k = l.x[l.c].k
		return q, i
	}

	// Borrow from the right sibling: rotate the parent separator plus r's
	// first child onto the end of q, then compact r.
	if r != nil && r.c > kx {
		q.x[q.c].k = p.x[pi].k
		q.c++
		q.x[q.c].ch = r.x[0].ch
		p.x[pi].k = r.x[0].k
		copy(r.x[:], r.x[1:r.c])
		r.c--
		rc := r.c
		r.x[rc].ch = r.x[rc+1].ch
		r.x[rc].k = zk
		r.x[rc+1].ch = nil
		return q, i
	}

	// No borrowing possible: merge with the left sibling when one exists
	// (the descent continues in l, so i shifts past l's entries)...
	if l != nil {
		i += l.c + 1
		t.catX(p, l, q, pi-1)
		q = l
		return q, i
	}

	// ...otherwise merge the right sibling into q.
	t.catX(p, q, r, pi)
	return q, i
}
|
||||
|
||||
// ----------------------------------------------------------------- Enumerator
|
||||
|
||||
// Close recycles e to a pool for possible later reuse. No references to e
// should exist or such references must not be used afterwards.
func (e *Enumerator) Close() {
	// Zero every field first so the pooled enumerator retains no
	// references that would keep tree pages alive.
	*e = ze
	btEPool.Put(e)
}
|
||||
|
||||
// Next returns the currently enumerated item, if it exists and moves to the
// next item in the key collation order. If there is no item to return, err ==
// io.EOF is returned.
func (e *Enumerator) Next() (k interface{} /*K*/, v interface{} /*V*/, err error) {
	// io.EOF is sticky: once recorded in e.err, no resync is attempted.
	if err = e.err; err != nil {
		return
	}

	// The tree was mutated since this enumerator's position was taken:
	// re-seek to the remembered key to resume at the proper place.
	if e.ver != e.t.ver {
		f, _ := e.t.Seek(e.k)
		*e = *f
		f.Close()
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}

	// Position may point one past the page end (e.g. after a resync).
	if e.i >= e.q.c {
		if err = e.next(); err != nil {
			return
		}
	}

	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, true
	// The error from this advance is deliberately ignored: it is recorded
	// in e.err and surfaces on the following Next call.
	e.next()
	return
}
|
||||
|
||||
// next advances the enumerator one data item forward, following the data
// page links; falling off the end records (and returns) io.EOF in e.err.
func (e *Enumerator) next() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}

	switch {
	case e.i < e.q.c-1:
		e.i++
	default:
		// Step to position 0 of the following page; nil means end of tree.
		if e.q, e.i = e.q.n, 0; e.q == nil {
			e.err = io.EOF
		}
	}
	return e.err
}
|
||||
|
||||
// Prev returns the currently enumerated item, if it exists and moves to the
// previous item in the key collation order. If there is no item to return, err
// == io.EOF is returned.
func (e *Enumerator) Prev() (k interface{} /*K*/, v interface{} /*V*/, err error) {
	// io.EOF is sticky: once recorded in e.err, no resync is attempted.
	if err = e.err; err != nil {
		return
	}

	// The tree was mutated since this enumerator's position was taken:
	// re-seek to the remembered key to resume at the proper place.
	if e.ver != e.t.ver {
		f, _ := e.t.Seek(e.k)
		*e = *f
		f.Close()
	}
	if e.q == nil {
		e.err, err = io.EOF, io.EOF
		return
	}

	if !e.hit {
		// move to previous because Seek overshoots if there's no hit
		if err = e.prev(); err != nil {
			return
		}
	}

	// Position may point one past the page end (e.g. after a resync).
	if e.i >= e.q.c {
		if err = e.prev(); err != nil {
			return
		}
	}

	i := e.q.d[e.i]
	k, v = i.k, i.v
	e.k, e.hit = k, true
	// The error from this step-back is deliberately ignored: it is
	// recorded in e.err and surfaces on the following Prev call.
	e.prev()
	return
}
|
||||
|
||||
// prev moves the enumerator one data item backward, following the data page
// back-links; falling off the front records (and returns) io.EOF in e.err.
func (e *Enumerator) prev() error {
	if e.q == nil {
		e.err = io.EOF
		return io.EOF
	}

	switch {
	case e.i > 0:
		e.i--
	default:
		// Step to the last item of the preceding page; nil means the
		// beginning of the tree was passed.
		if e.q = e.q.p; e.q == nil {
			e.err = io.EOF
			break
		}

		e.i = e.q.c - 1
	}
	return e.err
}
|
||||
73
vendor/github.com/cznic/b/doc.go
generated
vendored
73
vendor/github.com/cznic/b/doc.go
generated
vendored
@@ -1,73 +0,0 @@
|
||||
// Copyright 2014 The b Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package b implements the B+tree flavor of a BTree.
|
||||
//
|
||||
// Changelog
|
||||
//
|
||||
// 2016-07-16: Update benchmark results to newer Go version. Add a note on
|
||||
// concurrency.
|
||||
//
|
||||
// 2014-06-26: Lower GC pressure by recycling things.
|
||||
//
|
||||
// 2014-04-18: Added new method Put.
|
||||
//
|
||||
// Concurrency considerations
|
||||
//
|
||||
// Tree.{Clear,Delete,Put,Set} mutate the tree. One can use eg. a
|
||||
// sync.Mutex.Lock/Unlock (or sync.RWMutex.Lock/Unlock) to wrap those calls if
|
||||
// they are to be invoked concurrently.
|
||||
//
|
||||
// Tree.{First,Get,Last,Len,Seek,SeekFirst,SeekLast} read but do not mutate the
|
||||
// tree. One can use eg. a sync.RWMutex.RLock/RUnlock to wrap those calls if
|
||||
// they are to be invoked concurrently with any of the tree mutating methods.
|
||||
//
|
||||
// Enumerator.{Next,Prev} mutate the enumerator and read but not mutate the
|
||||
// tree. One can use eg. a sync.RWMutex.RLock/RUnlock to wrap those calls if
|
||||
// they are to be invoked concurrently with any of the tree mutating methods. A
|
||||
// separate mutex for the enumerator, or the whole tree in a simplified
|
||||
// variant, is necessary if the enumerator's Next/Prev methods per se are to
|
||||
// be invoked concurrently.
|
||||
//
|
||||
// Generic types
|
||||
//
|
||||
// Keys and their associated values are interface{} typed, similar to all of
|
||||
// the containers in the standard library.
|
||||
//
|
||||
// Semiautomatic production of a type specific variant of this package is
|
||||
// supported via
|
||||
//
|
||||
// $ make generic
|
||||
//
|
||||
// This command will write to stdout a version of the btree.go file where every
|
||||
// key type occurrence is replaced by the word 'KEY' and every value type
|
||||
// occurrence is replaced by the word 'VALUE'. Then you have to replace these
|
||||
// tokens with your desired type(s), using any technique you're comfortable
|
||||
// with.
|
||||
//
|
||||
// This is how, for example, 'example/int.go' was created:
|
||||
//
|
||||
// $ mkdir example
|
||||
// $ make generic | sed -e 's/KEY/int/g' -e 's/VALUE/int/g' > example/int.go
|
||||
//
|
||||
// No other changes to int.go are necessary, it compiles just fine.
|
||||
//
|
||||
// Running the benchmarks for 1000 keys on a machine with Intel i5-4670 CPU @
|
||||
// 3.4GHz, Go 1.7rc1.
|
||||
//
|
||||
// $ go test -bench 1e3 example/all_test.go example/int.go
|
||||
// BenchmarkSetSeq1e3-4 20000 78265 ns/op
|
||||
// BenchmarkGetSeq1e3-4 20000 67980 ns/op
|
||||
// BenchmarkSetRnd1e3-4 10000 172720 ns/op
|
||||
// BenchmarkGetRnd1e3-4 20000 89539 ns/op
|
||||
// BenchmarkDelSeq1e3-4 20000 87863 ns/op
|
||||
// BenchmarkDelRnd1e3-4 10000 130891 ns/op
|
||||
// BenchmarkSeekSeq1e3-4 10000 100118 ns/op
|
||||
// BenchmarkSeekRnd1e3-4 10000 121684 ns/op
|
||||
// BenchmarkNext1e3-4 200000 6330 ns/op
|
||||
// BenchmarkPrev1e3-4 200000 9066 ns/op
|
||||
// PASS
|
||||
// ok command-line-arguments 42.531s
|
||||
// $
|
||||
package b
|
||||
910
vendor/github.com/cznic/b/example/int.go
generated
vendored
910
vendor/github.com/cznic/b/example/int.go
generated
vendored
@@ -1,910 +0,0 @@
|
||||
// Copyright 2014 The b Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package b
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
kx = 32 //TODO benchmark tune this number if using custom key/value type(s).
|
||||
kd = 32 //TODO benchmark tune this number if using custom key/value type(s).
|
||||
)
|
||||
|
||||
// init validates the compile-time page capacity constants: the B+tree code
// requires kd >= 1 (data page half-capacity) and kx >= 2 (index page
// half-capacity). A violation is a programmer error, hence panic.
func init() {
	if kd < 1 {
		panic(fmt.Errorf("kd %d: out of range", kd))
	}

	if kx < 2 {
		panic(fmt.Errorf("kx %d: out of range", kx))
	}
}
|
||||
|
||||
var (
|
||||
btDPool = sync.Pool{New: func() interface{} { return &d{} }}
|
||||
btEPool = btEpool{sync.Pool{New: func() interface{} { return &Enumerator{} }}}
|
||||
btTPool = btTpool{sync.Pool{New: func() interface{} { return &Tree{} }}}
|
||||
btXPool = sync.Pool{New: func() interface{} { return &x{} }}
|
||||
)
|
||||
|
||||
type btTpool struct{ sync.Pool }
|
||||
|
||||
// get returns a pooled (or freshly allocated) Tree with its compare
// function set; all other fields are the zero value.
func (p *btTpool) get(cmp Cmp) *Tree {
	x := p.Get().(*Tree)
	x.cmp = cmp
	return x
}
|
||||
|
||||
type btEpool struct{ sync.Pool }
|
||||
|
||||
// get returns a pooled (or freshly allocated) Enumerator with every field
// initialized from the arguments.
func (p *btEpool) get(err error, hit bool, i int, k int, q *d, t *Tree, ver int64) *Enumerator {
	x := p.Get().(*Enumerator)
	x.err, x.hit, x.i, x.k, x.q, x.t, x.ver = err, hit, i, k, q, t, ver
	return x
}
|
||||
|
||||
type (
|
||||
// Cmp compares a and b. Return value is:
|
||||
//
|
||||
// < 0 if a < b
|
||||
// 0 if a == b
|
||||
// > 0 if a > b
|
||||
//
|
||||
Cmp func(a, b int) int
|
||||
|
||||
d struct { // data page
|
||||
c int
|
||||
d [2*kd + 1]de
|
||||
n *d
|
||||
p *d
|
||||
}
|
||||
|
||||
de struct { // d element
|
||||
k int
|
||||
v int
|
||||
}
|
||||
|
||||
// Enumerator captures the state of enumerating a tree. It is returned
|
||||
// from the Seek* methods. The enumerator is aware of any mutations
|
||||
// made to the tree in the process of enumerating it and automatically
|
||||
// resumes the enumeration at the proper key, if possible.
|
||||
//
|
||||
// However, once an Enumerator returns io.EOF to signal "no more
|
||||
// items", it does no more attempt to "resync" on tree mutation(s). In
|
||||
// other words, io.EOF from an Enumerator is "sticky" (idempotent).
|
||||
Enumerator struct {
|
||||
err error
|
||||
hit bool
|
||||
i int
|
||||
k int
|
||||
q *d
|
||||
t *Tree
|
||||
ver int64
|
||||
}
|
||||
|
||||
// Tree is a B+tree.
|
||||
Tree struct {
|
||||
c int
|
||||
cmp Cmp
|
||||
first *d
|
||||
last *d
|
||||
r interface{}
|
||||
ver int64
|
||||
}
|
||||
|
||||
xe struct { // x element
|
||||
ch interface{}
|
||||
k int
|
||||
}
|
||||
|
||||
x struct { // index page
|
||||
c int
|
||||
x [2*kx + 2]xe
|
||||
}
|
||||
)
|
||||
|
||||
var ( // R/O zero values
|
||||
zd d
|
||||
zde de
|
||||
ze Enumerator
|
||||
zk int
|
||||
zt Tree
|
||||
zx x
|
||||
zxe xe
|
||||
)
|
||||
|
||||
// clr recursively returns every page of the subtree rooted at q to its
// pool, zeroing each page first so the pools hold no live references.
func clr(q interface{}) {
	switch x := q.(type) {
	case *x:
		for i := 0; i <= x.c; i++ { // Ch0 Sep0 ... Chn-1 Sepn-1 Chn
			clr(x.x[i].ch)
		}
		*x = zx
		btXPool.Put(x)
	case *d:
		*x = zd
		btDPool.Put(x)
	}
}
|
||||
|
||||
// -------------------------------------------------------------------------- x
|
||||
|
||||
// newX returns a pooled index page with ch0 as its only child and no keys.
func newX(ch0 interface{}) *x {
	r := btXPool.Get().(*x)
	r.x[0].ch = ch0
	return r
}
|
||||
|
||||
// extract removes the i'th key (and shifts the following children/keys
// left) from index page q, zeroing the vacated slots so no stale
// references remain.
func (q *x) extract(i int) {
	q.c--
	if i < q.c {
		copy(q.x[i:], q.x[i+1:q.c+1])
		q.x[q.c].ch = q.x[q.c+1].ch
		q.x[q.c].k = zk  // GC
		q.x[q.c+1] = zxe // GC
	}
}
|
||||
|
||||
// insert places key k with right child ch at position i of index page q,
// shifting the following entries right by one. The caller guarantees the
// page has room. Returns q for chaining.
func (q *x) insert(i int, k int, ch interface{}) *x {
	c := q.c
	if i < c {
		// Open a slot: move the trailing children and keys one step right.
		q.x[c+1].ch = q.x[c].ch
		copy(q.x[i+2:], q.x[i+1:c])
		q.x[i+1].k = q.x[i].k
	}
	c++
	q.c = c
	q.x[i].k = k
	q.x[i+1].ch = ch
	return q
}
|
||||
|
||||
// siblings returns the data-page neighbors of the i'th child of q; either
// may be nil at the edges. The type assertions require that q's children
// are data pages (*d), i.e. q is the lowest index level.
func (q *x) siblings(i int) (l, r *d) {
	if i >= 0 {
		if i > 0 {
			l = q.x[i-1].ch.(*d)
		}
		if i < q.c {
			r = q.x[i+1].ch.(*d)
		}
	}
	return
}
|
||||
|
||||
// -------------------------------------------------------------------------- d
|
||||
|
||||
// mvL moves the first c items of r onto the end of l and compacts r
// (left-rotation helper for rebalancing).
func (l *d) mvL(r *d, c int) {
	copy(l.d[l.c:], r.d[:c])
	copy(r.d[:], r.d[c:r.c])
	l.c += c
	r.c -= c
}
|
||||
|
||||
// mvR moves the last c items of l onto the front of r, shifting r's items
// right to make room (right-rotation helper for rebalancing).
func (l *d) mvR(r *d, c int) {
	copy(r.d[c:], r.d[:r.c])
	copy(r.d[:c], l.d[l.c-c:])
	r.c += c
	l.c -= c
}
|
||||
|
||||
// ----------------------------------------------------------------------- Tree
|
||||
|
||||
// TreeNew returns a newly created, empty Tree. The compare function is used
// for key collation. The Tree may come from a pool of recycled trees.
func TreeNew(cmp Cmp) *Tree {
	return btTPool.get(cmp)
}
|
||||
|
||||
// Clear removes all K/V pairs from the tree.
func (t *Tree) Clear() {
	if t.r == nil {
		return
	}

	// Recycle every page, reset the counters/links, and bump the version
	// so outstanding enumerators notice the mutation and resync.
	clr(t.r)
	t.c, t.first, t.last, t.r = 0, nil, nil, nil
	t.ver++
}
|
||||
|
||||
// Close performs Clear and recycles t to a pool for possible later reuse. No
// references to t should exist or such references must not be used afterwards.
func (t *Tree) Close() {
	t.Clear()
	// Zero the whole Tree so the pooled object carries no references.
	*t = zt
	btTPool.Put(t)
}
|
||||
|
||||
// cat concatenates data page r into its left sibling q, unlinks r from the
// doubly-linked page list and removes the separator at p.x[pi] from parent
// p. If the parent would become empty (p.c <= 1), q becomes the new tree
// root and the old root page is zeroed and recycled.
func (t *Tree) cat(p *x, q, r *d, pi int) {
	t.ver++
	q.mvL(r, r.c)
	if r.n != nil {
		r.n.p = q
	} else {
		// r was the last page; q now is.
		t.last = q
	}
	q.n = r.n
	*r = zd
	btDPool.Put(r)
	if p.c > 1 {
		// Parent keeps at least one key: drop the separator at pi and
		// repoint the child slot at the merged page.
		p.extract(pi)
		p.x[pi].ch = q
		return
	}

	// Parent emptied: recycle the old root page and make q the new root.
	switch x := t.r.(type) {
	case *x:
		*x = zx
		btXPool.Put(x)
	case *d:
		*x = zd
		btDPool.Put(x)
	}
	t.r = q
}
|
||||
|
||||
func (t *Tree) catX(p, q, r *x, pi int) {
|
||||
t.ver++
|
||||
q.x[q.c].k = p.x[pi].k
|
||||
copy(q.x[q.c+1:], r.x[:r.c])
|
||||
q.c += r.c + 1
|
||||
q.x[q.c].ch = r.x[r.c].ch
|
||||
*r = zx
|
||||
btXPool.Put(r)
|
||||
if p.c > 1 {
|
||||
p.c--
|
||||
pc := p.c
|
||||
if pi < pc {
|
||||
p.x[pi].k = p.x[pi+1].k
|
||||
copy(p.x[pi+1:], p.x[pi+2:pc+1])
|
||||
p.x[pc].ch = p.x[pc+1].ch
|
||||
p.x[pc].k = zk // GC
|
||||
p.x[pc+1].ch = nil // GC
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch x := t.r.(type) {
|
||||
case *x:
|
||||
*x = zx
|
||||
btXPool.Put(x)
|
||||
case *d:
|
||||
*x = zd
|
||||
btDPool.Put(x)
|
||||
}
|
||||
t.r = q
|
||||
}
|
||||
|
||||
// Delete removes the k's KV pair, if it exists, in which case Delete returns
|
||||
// true.
|
||||
func (t *Tree) Delete(k int) (ok bool) {
|
||||
pi := -1
|
||||
var p *x
|
||||
q := t.r
|
||||
if q == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
i, ok = t.find(q, k)
|
||||
if ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c < kx && q != t.r {
|
||||
x, i = t.underflowX(p, x, pi, i)
|
||||
}
|
||||
pi = i + 1
|
||||
p = x
|
||||
q = x.x[pi].ch
|
||||
ok = false
|
||||
continue
|
||||
case *d:
|
||||
t.extract(x, i)
|
||||
if x.c >= kd {
|
||||
return true
|
||||
}
|
||||
|
||||
if q != t.r {
|
||||
t.underflow(p, x, pi)
|
||||
} else if t.c == 0 {
|
||||
t.Clear()
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c < kx && q != t.r {
|
||||
x, i = t.underflowX(p, x, pi, i)
|
||||
}
|
||||
pi = i
|
||||
p = x
|
||||
q = x.x[i].ch
|
||||
case *d:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) extract(q *d, i int) { // (r int) {
|
||||
t.ver++
|
||||
//r = q.d[i].v // prepared for Extract
|
||||
q.c--
|
||||
if i < q.c {
|
||||
copy(q.d[i:], q.d[i+1:q.c+1])
|
||||
}
|
||||
q.d[q.c] = zde // GC
|
||||
t.c--
|
||||
return
|
||||
}
|
||||
|
||||
func (t *Tree) find(q interface{}, k int) (i int, ok bool) {
|
||||
var mk int
|
||||
l := 0
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
h := x.c - 1
|
||||
for l <= h {
|
||||
m := (l + h) >> 1
|
||||
mk = x.x[m].k
|
||||
switch cmp := t.cmp(k, mk); {
|
||||
case cmp > 0:
|
||||
l = m + 1
|
||||
case cmp == 0:
|
||||
return m, true
|
||||
default:
|
||||
h = m - 1
|
||||
}
|
||||
}
|
||||
case *d:
|
||||
h := x.c - 1
|
||||
for l <= h {
|
||||
m := (l + h) >> 1
|
||||
mk = x.d[m].k
|
||||
switch cmp := t.cmp(k, mk); {
|
||||
case cmp > 0:
|
||||
l = m + 1
|
||||
case cmp == 0:
|
||||
return m, true
|
||||
default:
|
||||
h = m - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
return l, false
|
||||
}
|
||||
|
||||
// First returns the first item of the tree in the key collating order, or
|
||||
// (zero-value, zero-value) if the tree is empty.
|
||||
func (t *Tree) First() (k int, v int) {
|
||||
if q := t.first; q != nil {
|
||||
q := &q.d[0]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Get returns the value associated with k and true if it exists. Otherwise Get
|
||||
// returns (zero-value, false).
|
||||
func (t *Tree) Get(k int) (v int, ok bool) {
|
||||
q := t.r
|
||||
if q == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
if i, ok = t.find(q, k); ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
q = x.x[i+1].ch
|
||||
continue
|
||||
case *d:
|
||||
return x.d[i].v, true
|
||||
}
|
||||
}
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
q = x.x[i].ch
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) insert(q *d, i int, k int, v int) *d {
|
||||
t.ver++
|
||||
c := q.c
|
||||
if i < c {
|
||||
copy(q.d[i+1:], q.d[i:c])
|
||||
}
|
||||
c++
|
||||
q.c = c
|
||||
q.d[i].k, q.d[i].v = k, v
|
||||
t.c++
|
||||
return q
|
||||
}
|
||||
|
||||
// Last returns the last item of the tree in the key collating order, or
|
||||
// (zero-value, zero-value) if the tree is empty.
|
||||
func (t *Tree) Last() (k int, v int) {
|
||||
if q := t.last; q != nil {
|
||||
q := &q.d[q.c-1]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of items in the tree.
|
||||
func (t *Tree) Len() int {
|
||||
return t.c
|
||||
}
|
||||
|
||||
func (t *Tree) overflow(p *x, q *d, pi, i int, k int, v int) {
|
||||
t.ver++
|
||||
l, r := p.siblings(pi)
|
||||
|
||||
if l != nil && l.c < 2*kd && i != 0 {
|
||||
l.mvL(q, 1)
|
||||
t.insert(q, i-1, k, v)
|
||||
p.x[pi-1].k = q.d[0].k
|
||||
return
|
||||
}
|
||||
|
||||
if r != nil && r.c < 2*kd {
|
||||
if i < 2*kd {
|
||||
q.mvR(r, 1)
|
||||
t.insert(q, i, k, v)
|
||||
p.x[pi].k = r.d[0].k
|
||||
return
|
||||
}
|
||||
|
||||
t.insert(r, 0, k, v)
|
||||
p.x[pi].k = k
|
||||
return
|
||||
}
|
||||
|
||||
t.split(p, q, pi, i, k, v)
|
||||
}
|
||||
|
||||
// Seek returns an Enumerator positioned on an item such that k >= item's key.
|
||||
// ok reports if k == item.key The Enumerator's position is possibly after the
|
||||
// last item in the tree.
|
||||
func (t *Tree) Seek(k int) (e *Enumerator, ok bool) {
|
||||
q := t.r
|
||||
if q == nil {
|
||||
e = btEPool.get(nil, false, 0, k, nil, t, t.ver)
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
if i, ok = t.find(q, k); ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
q = x.x[i+1].ch
|
||||
continue
|
||||
case *d:
|
||||
return btEPool.get(nil, ok, i, k, x, t, t.ver), true
|
||||
}
|
||||
}
|
||||
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
q = x.x[i].ch
|
||||
case *d:
|
||||
return btEPool.get(nil, ok, i, k, x, t, t.ver), false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SeekFirst returns an enumerator positioned on the first KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *Tree) SeekFirst() (e *Enumerator, err error) {
|
||||
q := t.first
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return btEPool.get(nil, true, 0, q.d[0].k, q, t, t.ver), nil
|
||||
}
|
||||
|
||||
// SeekLast returns an enumerator positioned on the last KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *Tree) SeekLast() (e *Enumerator, err error) {
|
||||
q := t.last
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return btEPool.get(nil, true, q.c-1, q.d[q.c-1].k, q, t, t.ver), nil
|
||||
}
|
||||
|
||||
// Set sets the value associated with k.
|
||||
func (t *Tree) Set(k int, v int) {
|
||||
//dbg("--- PRE Set(%v, %v)\n%s", k, v, t.dump())
|
||||
//defer func() {
|
||||
// dbg("--- POST\n%s\n====\n", t.dump())
|
||||
//}()
|
||||
|
||||
pi := -1
|
||||
var p *x
|
||||
q := t.r
|
||||
if q == nil {
|
||||
z := t.insert(btDPool.Get().(*d), 0, k, v)
|
||||
t.r, t.first, t.last = z, z, z
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
i, ok := t.find(q, k)
|
||||
if ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
i++
|
||||
if x.c > 2*kx {
|
||||
x, i = t.splitX(p, x, pi, i)
|
||||
}
|
||||
pi = i
|
||||
p = x
|
||||
q = x.x[i].ch
|
||||
continue
|
||||
case *d:
|
||||
x.d[i].v = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c > 2*kx {
|
||||
x, i = t.splitX(p, x, pi, i)
|
||||
}
|
||||
pi = i
|
||||
p = x
|
||||
q = x.x[i].ch
|
||||
case *d:
|
||||
switch {
|
||||
case x.c < 2*kd:
|
||||
t.insert(x, i, k, v)
|
||||
default:
|
||||
t.overflow(p, x, pi, i, k, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Put combines Get and Set in a more efficient way where the tree is walked
|
||||
// only once. The upd(ater) receives (old-value, true) if a KV pair for k
|
||||
// exists or (zero-value, false) otherwise. It can then return a (new-value,
|
||||
// true) to create or overwrite the existing value in the KV pair, or
|
||||
// (whatever, false) if it decides not to create or not to update the value of
|
||||
// the KV pair.
|
||||
//
|
||||
// tree.Set(k, v) call conceptually equals calling
|
||||
//
|
||||
// tree.Put(k, func(int, bool){ return v, true })
|
||||
//
|
||||
// modulo the differing return values.
|
||||
func (t *Tree) Put(k int, upd func(oldV int, exists bool) (newV int, write bool)) (oldV int, written bool) {
|
||||
pi := -1
|
||||
var p *x
|
||||
q := t.r
|
||||
var newV int
|
||||
if q == nil {
|
||||
// new KV pair in empty tree
|
||||
newV, written = upd(newV, false)
|
||||
if !written {
|
||||
return
|
||||
}
|
||||
|
||||
z := t.insert(btDPool.Get().(*d), 0, k, newV)
|
||||
t.r, t.first, t.last = z, z, z
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
i, ok := t.find(q, k)
|
||||
if ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
i++
|
||||
if x.c > 2*kx {
|
||||
x, i = t.splitX(p, x, pi, i)
|
||||
}
|
||||
pi = i
|
||||
p = x
|
||||
q = x.x[i].ch
|
||||
continue
|
||||
case *d:
|
||||
oldV = x.d[i].v
|
||||
newV, written = upd(oldV, true)
|
||||
if !written {
|
||||
return
|
||||
}
|
||||
|
||||
x.d[i].v = newV
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c > 2*kx {
|
||||
x, i = t.splitX(p, x, pi, i)
|
||||
}
|
||||
pi = i
|
||||
p = x
|
||||
q = x.x[i].ch
|
||||
case *d: // new KV pair
|
||||
newV, written = upd(newV, false)
|
||||
if !written {
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case x.c < 2*kd:
|
||||
t.insert(x, i, k, newV)
|
||||
default:
|
||||
t.overflow(p, x, pi, i, k, newV)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) split(p *x, q *d, pi, i int, k int, v int) {
|
||||
t.ver++
|
||||
r := btDPool.Get().(*d)
|
||||
if q.n != nil {
|
||||
r.n = q.n
|
||||
r.n.p = r
|
||||
} else {
|
||||
t.last = r
|
||||
}
|
||||
q.n = r
|
||||
r.p = q
|
||||
|
||||
copy(r.d[:], q.d[kd:2*kd])
|
||||
for i := range q.d[kd:] {
|
||||
q.d[kd+i] = zde
|
||||
}
|
||||
q.c = kd
|
||||
r.c = kd
|
||||
var done bool
|
||||
if i > kd {
|
||||
done = true
|
||||
t.insert(r, i-kd, k, v)
|
||||
}
|
||||
if pi >= 0 {
|
||||
p.insert(pi, r.d[0].k, r)
|
||||
} else {
|
||||
t.r = newX(q).insert(0, r.d[0].k, r)
|
||||
}
|
||||
if done {
|
||||
return
|
||||
}
|
||||
|
||||
t.insert(q, i, k, v)
|
||||
}
|
||||
|
||||
func (t *Tree) splitX(p *x, q *x, pi int, i int) (*x, int) {
|
||||
t.ver++
|
||||
r := btXPool.Get().(*x)
|
||||
copy(r.x[:], q.x[kx+1:])
|
||||
q.c = kx
|
||||
r.c = kx
|
||||
if pi >= 0 {
|
||||
p.insert(pi, q.x[kx].k, r)
|
||||
} else {
|
||||
t.r = newX(q).insert(0, q.x[kx].k, r)
|
||||
}
|
||||
|
||||
q.x[kx].k = zk
|
||||
for i := range q.x[kx+1:] {
|
||||
q.x[kx+i+1] = zxe
|
||||
}
|
||||
if i > kx {
|
||||
q = r
|
||||
i -= kx + 1
|
||||
}
|
||||
|
||||
return q, i
|
||||
}
|
||||
|
||||
func (t *Tree) underflow(p *x, q *d, pi int) {
|
||||
t.ver++
|
||||
l, r := p.siblings(pi)
|
||||
|
||||
if l != nil && l.c+q.c >= 2*kd {
|
||||
l.mvR(q, 1)
|
||||
p.x[pi-1].k = q.d[0].k
|
||||
return
|
||||
}
|
||||
|
||||
if r != nil && q.c+r.c >= 2*kd {
|
||||
q.mvL(r, 1)
|
||||
p.x[pi].k = r.d[0].k
|
||||
r.d[r.c] = zde // GC
|
||||
return
|
||||
}
|
||||
|
||||
if l != nil {
|
||||
t.cat(p, l, q, pi-1)
|
||||
return
|
||||
}
|
||||
|
||||
t.cat(p, q, r, pi)
|
||||
}
|
||||
|
||||
func (t *Tree) underflowX(p *x, q *x, pi int, i int) (*x, int) {
|
||||
t.ver++
|
||||
var l, r *x
|
||||
|
||||
if pi >= 0 {
|
||||
if pi > 0 {
|
||||
l = p.x[pi-1].ch.(*x)
|
||||
}
|
||||
if pi < p.c {
|
||||
r = p.x[pi+1].ch.(*x)
|
||||
}
|
||||
}
|
||||
|
||||
if l != nil && l.c > kx {
|
||||
q.x[q.c+1].ch = q.x[q.c].ch
|
||||
copy(q.x[1:], q.x[:q.c])
|
||||
q.x[0].ch = l.x[l.c].ch
|
||||
q.x[0].k = p.x[pi-1].k
|
||||
q.c++
|
||||
i++
|
||||
l.c--
|
||||
p.x[pi-1].k = l.x[l.c].k
|
||||
return q, i
|
||||
}
|
||||
|
||||
if r != nil && r.c > kx {
|
||||
q.x[q.c].k = p.x[pi].k
|
||||
q.c++
|
||||
q.x[q.c].ch = r.x[0].ch
|
||||
p.x[pi].k = r.x[0].k
|
||||
copy(r.x[:], r.x[1:r.c])
|
||||
r.c--
|
||||
rc := r.c
|
||||
r.x[rc].ch = r.x[rc+1].ch
|
||||
r.x[rc].k = zk
|
||||
r.x[rc+1].ch = nil
|
||||
return q, i
|
||||
}
|
||||
|
||||
if l != nil {
|
||||
i += l.c + 1
|
||||
t.catX(p, l, q, pi-1)
|
||||
q = l
|
||||
return q, i
|
||||
}
|
||||
|
||||
t.catX(p, q, r, pi)
|
||||
return q, i
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------- Enumerator
|
||||
|
||||
// Close recycles e to a pool for possible later reuse. No references to e
|
||||
// should exist or such references must not be used afterwards.
|
||||
func (e *Enumerator) Close() {
|
||||
*e = ze
|
||||
btEPool.Put(e)
|
||||
}
|
||||
|
||||
// Next returns the currently enumerated item, if it exists and moves to the
|
||||
// next item in the key collation order. If there is no item to return, err ==
|
||||
// io.EOF is returned.
|
||||
func (e *Enumerator) Next() (k int, v int, err error) {
|
||||
if err = e.err; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if e.ver != e.t.ver {
|
||||
f, _ := e.t.Seek(e.k)
|
||||
*e = *f
|
||||
f.Close()
|
||||
}
|
||||
if e.q == nil {
|
||||
e.err, err = io.EOF, io.EOF
|
||||
return
|
||||
}
|
||||
|
||||
if e.i >= e.q.c {
|
||||
if err = e.next(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
i := e.q.d[e.i]
|
||||
k, v = i.k, i.v
|
||||
e.k, e.hit = k, true
|
||||
e.next()
|
||||
return
|
||||
}
|
||||
|
||||
func (e *Enumerator) next() error {
|
||||
if e.q == nil {
|
||||
e.err = io.EOF
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
switch {
|
||||
case e.i < e.q.c-1:
|
||||
e.i++
|
||||
default:
|
||||
if e.q, e.i = e.q.n, 0; e.q == nil {
|
||||
e.err = io.EOF
|
||||
}
|
||||
}
|
||||
return e.err
|
||||
}
|
||||
|
||||
// Prev returns the currently enumerated item, if it exists and moves to the
|
||||
// previous item in the key collation order. If there is no item to return, err
|
||||
// == io.EOF is returned.
|
||||
func (e *Enumerator) Prev() (k int, v int, err error) {
|
||||
if err = e.err; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if e.ver != e.t.ver {
|
||||
f, _ := e.t.Seek(e.k)
|
||||
*e = *f
|
||||
f.Close()
|
||||
}
|
||||
if e.q == nil {
|
||||
e.err, err = io.EOF, io.EOF
|
||||
return
|
||||
}
|
||||
|
||||
if !e.hit {
|
||||
// move to previous because Seek overshoots if there's no hit
|
||||
if err = e.prev(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if e.i >= e.q.c {
|
||||
if err = e.prev(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
i := e.q.d[e.i]
|
||||
k, v = i.k, i.v
|
||||
e.k, e.hit = k, true
|
||||
e.prev()
|
||||
return
|
||||
}
|
||||
|
||||
func (e *Enumerator) prev() error {
|
||||
if e.q == nil {
|
||||
e.err = io.EOF
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
switch {
|
||||
case e.i > 0:
|
||||
e.i--
|
||||
default:
|
||||
if e.q = e.q.p; e.q == nil {
|
||||
e.err = io.EOF
|
||||
break
|
||||
}
|
||||
|
||||
e.i = e.q.c - 1
|
||||
}
|
||||
return e.err
|
||||
}
|
||||
27
vendor/github.com/cznic/fileutil/LICENSE
generated
vendored
27
vendor/github.com/cznic/fileutil/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
251
vendor/github.com/cznic/fileutil/falloc/docs.go
generated
vendored
251
vendor/github.com/cznic/fileutil/falloc/docs.go
generated
vendored
@@ -1,251 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
/*
|
||||
|
||||
WIP: Package falloc provides allocation/deallocation of space within a
|
||||
file/store (WIP, unstable API).
|
||||
|
||||
Overall structure:
|
||||
File == n blocks.
|
||||
Block == n atoms.
|
||||
Atom == 16 bytes.
|
||||
|
||||
x6..x0 == least significant 7 bytes of a 64 bit integer, highest (7th) byte is
|
||||
0 and is not stored in the file.
|
||||
|
||||
Block first byte
|
||||
|
||||
Aka block type tag.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFF: Free atom (free block of size 1).
|
||||
+------++---------++---------++------+
|
||||
| 0 || 1...7 || 8...14 || 15 |
|
||||
+------++---------++---------++------+
|
||||
| 0xFF || p6...p0 || n6...n0 || 0xFF |
|
||||
+------++---------++---------++------+
|
||||
|
||||
Link to the previous free block (atom addressed) is p6...p0, next dtto in
|
||||
n6...n0. Doubly linked lists of "compatible" free blocks allows for free space
|
||||
reclaiming and merging. "Compatible" == of size at least some K. Heads of all
|
||||
such lists are organized per K or intervals of Ks elsewhere.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFE: Free block, size == s6...s0 atoms.
|
||||
+------++---------++---------++---------++--
|
||||
| +0 || 1...7 || 8...14 || 15...21 || 22...16*size-1
|
||||
+------++---------++---------++---------++--
|
||||
| 0xFE || p6...p0 || n6...n0 || s6...s0 || ...
|
||||
+------++---------++---------++---------++--
|
||||
|
||||
Prev and next links as in the 0xFF first byte case. End of this block - see
|
||||
"Block last byte": 0xFE bellow. Data between == undefined.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFD: Relocated block.
|
||||
+------++---------++-----------++------+
|
||||
| 0 || 1...7 || 8...14 || 15 |
|
||||
+------++---------++-----------++------+
|
||||
| 0xFD || r6...r0 || undefined || 0x00 | // == used block
|
||||
+------++---------++-----------++------+
|
||||
|
||||
Relocation link is r6..r0 == atom address. Relocations MUST NOT chain and MUST
|
||||
point to a "content" block, i.e. one with the first byte in 0x00...0xFC.
|
||||
|
||||
Relocated block allows to permanently assign a handle/file pointer ("atom"
|
||||
address) to some content and resize the content anytime afterwards w/o having
|
||||
to update all the possible existing references to the original handle.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFC: Used long block.
|
||||
+------++---------++--------------------++---------+---+
|
||||
| 0 || 1...2 || 3...N+2 || | |
|
||||
+------++---------++--------------------++---------+---+
|
||||
| 0xFC || n1...n0 || N bytes of content || padding | Z |
|
||||
+------++---------++--------------------++---------+---+
|
||||
|
||||
This block type is used for content of length in N == 238...61680 bytes. N is
|
||||
encoded as a 2 byte unsigned integer n1..n0 in network byte order. Values
|
||||
bellow 238 are reserved, those content lengths are to be carried by the
|
||||
0x00..0xFB block types.
|
||||
|
||||
1. n in 0x00EE...0xF0F0 is used for content under the same rules
|
||||
as in the 0x01..0xED type.
|
||||
|
||||
2. If the last byte of the content is not the last byte of an atom then
|
||||
the last byte of the block is 0x00.
|
||||
|
||||
3. If the last byte of the content IS the last byte of an atom:
|
||||
|
||||
3.1 If the last byte of content is in 0x00..0xFD then everything is OK.
|
||||
|
||||
3.2 If the last byte of content is 0xFE or 0xFF then the escape
|
||||
via n > 0xF0F0 MUST be used AND the block's last byte is 0x00 or 0x01,
|
||||
meaning value 0xFE and 0xFF respectively.
|
||||
|
||||
4. n in 0xF0F1...0xFFFF is like the escaped 0xEE..0xFB block.
|
||||
N == 13 + 16(n - 0xF0F1).
|
||||
|
||||
Discussion of the padding and Z fields - see the 0x01..0xED block type.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xEE...0xFB: Used escaped short block.
|
||||
+---++----------------------++---+
|
||||
| 0 || 1...N-1 || |
|
||||
+---++----------------------++---+
|
||||
| X || N-1 bytes of content || Z |
|
||||
+---++----------------------++---+
|
||||
|
||||
N == 15 + 16(X - 0xEE). Z is the content last byte encoded as follows.
|
||||
|
||||
case Z == 0x00: The last byte of content is 0xFE
|
||||
|
||||
case Z == 0x01: The last byte of content is 0xFF
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0x01...0xED: Used short block.
|
||||
+---++--------------------++---------+---+
|
||||
| 0 || 1...N || | |
|
||||
+---++--------------------++---------+---+
|
||||
| N || N bytes of content || padding | Z |
|
||||
+---++--------------------++---------+---+
|
||||
|
||||
This block type is used for content of length in 1...237 bytes. The value of
|
||||
the "padding" field, if of non zero length, is undefined.
|
||||
|
||||
If the last byte of content is the last byte of an atom (== its file byte
|
||||
offset & 0xF == 0xF) then such last byte MUST be in 0x00...0xFD.
|
||||
|
||||
If the last byte of content is the last byte of an atom AND the last byte of
|
||||
content is 0xFE or 0xFF then the short escape block type (0xEE...0xFB) MUST be
|
||||
used.
|
||||
|
||||
If the last byte of content is not the last byte of an atom, then the last byte
|
||||
of such block, i.e. the Z field, which is also a last byte of some atom, MUST
|
||||
be 0x00 (i.e. the used block marker). Other "tail" values are reserved.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0x00: Used empty block.
|
||||
+------++-----------++------+
|
||||
| 0 || 1...14 || 15 |
|
||||
+------++-----------++------+
|
||||
| 0x00 || undefined || 0x00 | // == used block, other "tail" values reserved.
|
||||
+------++-----------++------+
|
||||
|
||||
All of the rules for 0x01..0xED applies. Depicted only for its different
|
||||
semantics (e.g. an allocated [existing] string but with length of zero).
|
||||
|
||||
==============================================================================
|
||||
|
||||
Block last byte
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFF: Free atom. Layout - see "Block first byte": FF.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0xFE: Free block, size n atoms. Preceding 7 bytes == size (s6...s0) of the free
|
||||
block in atoms, network byte order
|
||||
--++---------++------+
|
||||
|| -8...-2 || -1 |
|
||||
--++---------++------+
|
||||
... || s6...s0 || 0xFE | <- block's last byte
|
||||
--++---------++------+
|
||||
|
||||
Layout at start of this block - see "Block first byte": FE.
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
0x00...0xFD: Used (non free) block.
|
||||
|
||||
==============================================================================
|
||||
|
||||
Free lists table
|
||||
|
||||
The free lists table content is stored in the standard layout of a used block.
|
||||
|
||||
A table item is a 7 byte size field followed by a 7 byte atom address field
|
||||
(both in network byte order), thus every item is 14 contiguous bytes. The
|
||||
item's address field is pointing to a free block. The size field determines
|
||||
the minimal size (in atoms) of free blocks on that list.
|
||||
|
||||
The free list table is n above items, thus the content has 14n bytes. Note that
|
||||
the largest block content is 61680 bytes and as there are 14 bytes per table
|
||||
item, so the table is limited to at most 4405 entries.
|
||||
|
||||
Items in the table do not have to be sorted according to their size field values.
|
||||
|
||||
No two items can have the same value of the size field.
|
||||
|
||||
When freeing blocks, the block MUST be linked into an item list with the
|
||||
highest possible size field, which is less or equal to the number of atoms in
|
||||
the new free block.
|
||||
|
||||
When freeing a block, the block MUST be first merged with any adjacent free
|
||||
blocks (thus possibly creating a bigger free block) using information derived
|
||||
from the adjacent blocks first and last bytes. Such merged free blocks MUST be
|
||||
removed from their original doubly linked lists. Afterwards the new bigger free
|
||||
block is put to the free list table in the appropriate item.
|
||||
|
||||
Items with address field == 0 are legal. Such item is a placeholder for a empty
|
||||
list of free blocks of the item's size.
|
||||
|
||||
Items with size field == 0 are legal. Such item is a placeholder, used e.g. to
|
||||
avoid further reallocations/redirecting of the free lists table.
|
||||
|
||||
The largest possible allocation request (for content length 61680 bytes) is
|
||||
0xF10 (3856) atoms. All free blocks of this or bigger size are presumably put
|
||||
into a single table item with the size 3856. It may be useful to additionally
|
||||
have a free lists table item which links free blocks of some bigger size (say
|
||||
1M+) and then use the OS sparse file support (if present) to save the physical
|
||||
space used by such free blocks.
|
||||
|
||||
Smaller (<3856 atoms) free blocks can be organized exactly (every distinct size
|
||||
has its table item) or the sizes can run using other schema like e.g. "1, 2,
|
||||
4, 8, ..." (powers of 2) or "1, 2, 3, 5, 8, 13, ..." (the Fibonacci sequence)
|
||||
or they may be fine tuned to a specific usage pattern.
|
||||
|
||||
==============================================================================
|
||||
|
||||
Header
|
||||
|
||||
The first block of a file (atom address == file offset == 0) is the file header.
|
||||
The header block has the standard layout of a used short non escaped block.
|
||||
|
||||
Special conditions apply: The header block and its content MUST be like this:
|
||||
|
||||
+------+---------+---------+------+
|
||||
| 0 | 1...7 | 8...14 | 15 |
|
||||
+------+---------+---------+------+
|
||||
| 0x0F | m6...m0 | f6...f0 | FLTT |
|
||||
+------+---------+---------+------+
|
||||
|
||||
m6..m0 is a "magic" value 0xF1C1A1FE51B1E.
|
||||
|
||||
f6...f0 is the atom address of the free lists table (discussed elsewhere).
|
||||
If f6...f0 == 0x00 the there is no free lists table (yet).
|
||||
|
||||
FLTT describes the type of the Free List Table. Currently defined values:
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
FLTT == 0: Free List Table is fixed at atom address 2. It has a fixed size for 3856 entries
|
||||
for free list of size 1..3855 atoms and the last is for the list of free block >= 3856 atoms.
|
||||
*/
|
||||
package falloc
|
||||
|
||||
const (
|
||||
INVALID_HANDLE = Handle(-1)
|
||||
)
|
||||
130
vendor/github.com/cznic/fileutil/falloc/error.go
generated
vendored
130
vendor/github.com/cznic/fileutil/falloc/error.go
generated
vendored
@@ -1,130 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package falloc
|
||||
|
||||
import "fmt"
|
||||
|
||||
// EBadRequest is an error produced for invalid operation, e.g. for data of more than maximum allowed.
|
||||
type EBadRequest struct {
|
||||
Name string
|
||||
Size int
|
||||
}
|
||||
|
||||
func (e *EBadRequest) Error() string {
|
||||
return fmt.Sprintf("%s: size %d", e.Name, e.Size)
|
||||
}
|
||||
|
||||
// EClose is a file/store close error.
|
||||
type EClose struct {
|
||||
Name string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *EClose) Error() string {
|
||||
return fmt.Sprintf("%sx: %s", e.Name, e.Err)
|
||||
}
|
||||
|
||||
// ECorrupted is a file/store format error.
|
||||
type ECorrupted struct {
|
||||
Name string
|
||||
Ofs int64
|
||||
}
|
||||
|
||||
func (e *ECorrupted) Error() string {
|
||||
return fmt.Sprintf("%s: corrupted data @%#x", e.Name, e.Ofs)
|
||||
}
|
||||
|
||||
// ECreate is a file/store create error.
|
||||
type ECreate struct {
|
||||
Name string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ECreate) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Name, e.Err)
|
||||
}
|
||||
|
||||
// EFreeList is a file/store format error.
|
||||
type EFreeList struct {
|
||||
Name string
|
||||
Size int64
|
||||
Block int64
|
||||
}
|
||||
|
||||
func (e *EFreeList) Error() string {
|
||||
return fmt.Sprintf("%s: invalid free list item, size %#x, block %#x", e.Name, e.Size, e.Block)
|
||||
}
|
||||
|
||||
// EHandle is an error type reported for invalid Handles.
|
||||
type EHandle struct {
|
||||
Name string
|
||||
Handle Handle
|
||||
}
|
||||
|
||||
func (e EHandle) Error() string {
|
||||
return fmt.Sprintf("%s: invalid handle %#x", e.Name, e.Handle)
|
||||
}
|
||||
|
||||
// EHeader is a file/store format error.
|
||||
type EHeader struct {
|
||||
Name string
|
||||
Header []byte
|
||||
Expected []byte
|
||||
}
|
||||
|
||||
func (e *EHeader) Error() string {
|
||||
return fmt.Sprintf("%s: invalid header, got [% x], expected [% x]", e.Name, e.Header, e.Expected)
|
||||
}
|
||||
|
||||
// ENullHandle is a file/store access error via a null handle.
|
||||
type ENullHandle string
|
||||
|
||||
func (e ENullHandle) Error() string {
|
||||
return fmt.Sprintf("%s: access via null handle", e)
|
||||
}
|
||||
|
||||
// EOpen is a file/store open error.
|
||||
type EOpen struct {
|
||||
Name string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *EOpen) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Name, e.Err)
|
||||
}
|
||||
|
||||
// ERead is a file/store read error.
|
||||
type ERead struct {
|
||||
Name string
|
||||
Ofs int64
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ERead) Error() string {
|
||||
return fmt.Sprintf("%s, %#x: %s", e.Name, e.Ofs, e.Err)
|
||||
}
|
||||
|
||||
// ESize is a file/store size error.
|
||||
type ESize struct {
|
||||
Name string
|
||||
Size int64
|
||||
}
|
||||
|
||||
func (e *ESize) Error() string {
|
||||
return fmt.Sprintf("%s: invalid size %#x(%d), size %%16 != 0", e.Name, e.Size, e.Size)
|
||||
}
|
||||
|
||||
// EWrite is a file/store write error.
|
||||
type EWrite struct {
|
||||
Name string
|
||||
Ofs int64
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *EWrite) Error() string {
|
||||
return fmt.Sprintf("%s, %#x: %s", e.Name, e.Ofs, e.Err)
|
||||
}
|
||||
676
vendor/github.com/cznic/fileutil/falloc/falloc.go
generated
vendored
676
vendor/github.com/cznic/fileutil/falloc/falloc.go
generated
vendored
@@ -1,676 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
/*
|
||||
|
||||
This is an mostly (WIP) conforming implementation of the "specs" in docs.go.
|
||||
|
||||
The main incompletness is support for only one kind of FTL, though this table kind is still per "specs".
|
||||
|
||||
*/
|
||||
|
||||
package falloc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/cznic/fileutil/storage"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Handle is a reference to a block in a file/store.
|
||||
// Handle is an uint56 wrapped in an in64, i.e. the most significant byte must be always zero.
|
||||
type Handle int64
|
||||
|
||||
// Put puts the 7 least significant bytes of h into b. The MSB of h should be zero.
|
||||
func (h Handle) Put(b []byte) {
|
||||
for ofs := 6; ofs >= 0; ofs-- {
|
||||
b[ofs] = byte(h)
|
||||
h >>= 8
|
||||
}
|
||||
}
|
||||
|
||||
// Get gets the 7 least significant bytes of h from b. The MSB of h is zeroed.
|
||||
func (h *Handle) Get(b []byte) {
|
||||
var x Handle
|
||||
for ofs := 0; ofs <= 6; ofs++ {
|
||||
x = x<<8 | Handle(b[ofs])
|
||||
}
|
||||
*h = x
|
||||
}
|
||||
|
||||
// File is a file/store with space allocation/deallocation support.
|
||||
type File struct {
|
||||
f storage.Accessor
|
||||
atoms int64 // current file size in atom units
|
||||
canfree int64 // only blocks >= canfree can be subject to Free()
|
||||
freetab [3857]int64 // freetab[0] is unused, freetab[1] is size 1 ptr, freetab[2] is size 2 ptr, ...
|
||||
rwm sync.RWMutex
|
||||
}
|
||||
|
||||
func (f *File) read(b []byte, off int64) {
|
||||
if n, err := f.f.ReadAt(b, off); n != len(b) {
|
||||
panic(&ERead{f.f.Name(), off, err})
|
||||
}
|
||||
}
|
||||
|
||||
func (f *File) write(b []byte, off int64) {
|
||||
if n, err := f.f.WriteAt(b, off); n != len(b) {
|
||||
panic(&EWrite{f.f.Name(), off, err})
|
||||
}
|
||||
}
|
||||
|
||||
var ( // R/O
|
||||
hdr = []byte{0x0f, 0xf1, 0xc1, 0xa1, 0xfe, 0xa5, 0x1b, 0x1e, 0, 0, 0, 0, 0, 0, 2, 0} // free lists table @2
|
||||
empty = make([]byte, 16)
|
||||
zero = []byte{0}
|
||||
zero7 = make([]byte, 7)
|
||||
)
|
||||
|
||||
// New returns a new File backed by store or an error if any.
|
||||
// Any existing data in store are discarded.
|
||||
func New(store storage.Accessor) (f *File, err error) {
|
||||
f = &File{f: store}
|
||||
return f, storage.Mutate(store, func() (err error) {
|
||||
if err = f.f.Truncate(0); err != nil {
|
||||
return &ECreate{f.f.Name(), err}
|
||||
}
|
||||
|
||||
if _, err = f.Alloc(hdr[1:]); err != nil { //TODO internal panicking versions of the exported fns.
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = f.Alloc(nil); err != nil { // (empty) root @1
|
||||
return
|
||||
}
|
||||
|
||||
b := make([]byte, 3856*14)
|
||||
for i := 1; i <= 3856; i++ {
|
||||
Handle(i).Put(b[(i-1)*14:])
|
||||
}
|
||||
if _, err = f.Alloc(b); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
f.canfree = f.atoms
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// Open returns a new File backed by store or an error if any.
|
||||
// Store already has to be in a valid format.
|
||||
func Open(store storage.Accessor) (f *File, err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
f = nil
|
||||
err = e.(error)
|
||||
}
|
||||
}()
|
||||
|
||||
fi, err := store.Stat()
|
||||
if err != nil {
|
||||
panic(&EOpen{store.Name(), err})
|
||||
}
|
||||
|
||||
fs := fi.Size()
|
||||
if fs&0xf != 0 {
|
||||
panic(&ESize{store.Name(), fi.Size()})
|
||||
}
|
||||
|
||||
f = &File{f: store, atoms: fs >> 4}
|
||||
b := make([]byte, len(hdr))
|
||||
f.read(b, 0)
|
||||
if !bytes.Equal(b, hdr) {
|
||||
panic(&EHeader{store.Name(), b, append([]byte{}, hdr...)})
|
||||
}
|
||||
|
||||
var atoms int64
|
||||
b, atoms = f.readUsed(2)
|
||||
f.canfree = atoms + 2
|
||||
ofs := 0
|
||||
var size, p Handle
|
||||
for ofs < len(b) {
|
||||
size.Get(b[ofs:])
|
||||
ofs += 7
|
||||
p.Get(b[ofs:])
|
||||
ofs += 7
|
||||
if sz, pp := int64(size), int64(p); size == 0 || size > 3856 || (pp != 0 && pp < f.canfree) || pp<<4 > fs-16 {
|
||||
panic(&EFreeList{store.Name(), sz, pp})
|
||||
}
|
||||
|
||||
f.freetab[size] = int64(p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Accessor returns the File's underlying Accessor.
|
||||
func (f *File) Accessor() storage.Accessor {
|
||||
return f.f
|
||||
}
|
||||
|
||||
// Close closes f and returns an error if any.
|
||||
func (f *File) Close() (err error) {
|
||||
return storage.Mutate(f.Accessor(), func() (err error) {
|
||||
if err = f.f.Close(); err != nil {
|
||||
err = &EClose{f.f.Name(), err}
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// Root returns the handle of the DB root (top level directory, ...).
|
||||
func (f *File) Root() Handle {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (f *File) readUsed(atom int64) (content []byte, atoms int64) {
|
||||
b, redirected := make([]byte, 7), false
|
||||
redir:
|
||||
ofs := atom << 4
|
||||
f.read(b[:1], ofs)
|
||||
switch pre := b[0]; {
|
||||
default:
|
||||
panic(&ECorrupted{f.f.Name(), ofs})
|
||||
case pre == 0x00: // Empty block
|
||||
case pre >= 1 && pre <= 237: // Short
|
||||
content = make([]byte, pre)
|
||||
f.read(content, ofs+1)
|
||||
case pre >= 0xee && pre <= 0xfb: // Short esc
|
||||
content = make([]byte, 15+16*(pre-0xee))
|
||||
f.read(content, ofs+1)
|
||||
content[len(content)-1] += 0xfe
|
||||
case pre == 0xfc: // Long
|
||||
f.read(b[:2], ofs+1)
|
||||
n := int(b[0])<<8 + int(b[1])
|
||||
switch {
|
||||
default:
|
||||
panic(&ECorrupted{f.f.Name(), ofs + 1})
|
||||
case n >= 238 && n <= 61680: // Long non esc
|
||||
content = make([]byte, n)
|
||||
f.read(content, ofs+3)
|
||||
case n >= 61681: // Long esc
|
||||
content = make([]byte, 13+16*(n-0xf0f1))
|
||||
f.read(content, ofs+3)
|
||||
content[len(content)-1] += 0xfe
|
||||
}
|
||||
case pre == 0xfd: // redir
|
||||
if redirected {
|
||||
panic(&ECorrupted{f.f.Name(), ofs})
|
||||
}
|
||||
|
||||
f.read(b[:7], ofs+1)
|
||||
(*Handle)(&atom).Get(b)
|
||||
redirected = true
|
||||
goto redir
|
||||
}
|
||||
return content, rq2Atoms(len(content))
|
||||
}
|
||||
|
||||
func (f *File) writeUsed(b []byte, atom int64) {
|
||||
n := len(b)
|
||||
switch ofs, atoms, endmark := atom<<4, rq2Atoms(n), true; {
|
||||
default:
|
||||
panic("internal error")
|
||||
case n == 0:
|
||||
f.write(empty, ofs)
|
||||
case n <= 237:
|
||||
if (n+1)&0xf == 0 { // content end == atom end
|
||||
if v := b[n-1]; v >= 0xfe { // escape
|
||||
pre := []byte{byte((16*0xee + n - 15) >> 4)}
|
||||
f.write(pre, ofs)
|
||||
f.write(b[:n-1], ofs+1)
|
||||
f.write([]byte{v - 0xfe}, ofs+atoms<<4-1)
|
||||
return
|
||||
}
|
||||
endmark = false
|
||||
}
|
||||
// non esacpe
|
||||
pre := []byte{byte(n)}
|
||||
f.write(pre, ofs)
|
||||
f.write(b, ofs+1)
|
||||
if endmark {
|
||||
f.write(zero, ofs+atoms<<4-1) // last block byte <- used block
|
||||
}
|
||||
case n > 237 && n <= 61680:
|
||||
if (n+3)&0xf == 0 { // content end == atom end
|
||||
if v := b[n-1]; v >= 0xfe { // escape
|
||||
x := (16*0xf0f1 + n - 13) >> 4
|
||||
pre := []byte{0xFC, byte(x >> 8), byte(x)}
|
||||
f.write(pre, ofs)
|
||||
f.write(b[:n-1], ofs+3)
|
||||
f.write([]byte{v - 0xfe}, ofs+atoms<<4-1)
|
||||
return
|
||||
}
|
||||
endmark = false
|
||||
}
|
||||
// non esacpe
|
||||
pre := []byte{0xfc, byte(n >> 8), byte(n)}
|
||||
f.write(pre, ofs)
|
||||
f.write(b, ofs+3)
|
||||
if endmark {
|
||||
f.write(zero, ofs+atoms<<4-1) // last block byte <- used block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func rq2Atoms(rqbytes int) (rqatoms int64) {
|
||||
if rqbytes > 237 {
|
||||
rqbytes += 2
|
||||
}
|
||||
return int64(rqbytes>>4 + 1)
|
||||
}
|
||||
|
||||
func (f *File) extend(b []byte) (handle int64) {
|
||||
handle = f.atoms
|
||||
f.writeUsed(b, handle)
|
||||
f.atoms += rq2Atoms(len(b))
|
||||
return
|
||||
}
|
||||
|
||||
// Alloc stores b in a newly allocated space and returns its handle and an
// error if any. Allocation first searches the free-block table for the
// smallest free block of at least the required size (first fit per size
// class); only if none exists is the file extended.
func (f *File) Alloc(b []byte) (handle Handle, err error) {
	err = storage.Mutate(f.Accessor(), func() (err error) {
		rqAtoms := rq2Atoms(len(b))
		if rqAtoms > 3856 { // max single-block size in atoms
			return &EBadRequest{f.f.Name(), len(b)}
		}

		for foundsize, foundp := range f.freetab[rqAtoms:] {
			if foundp != 0 {
				// this works only for the current unique sizes list (except the last item!)
				size := int64(foundsize) + rqAtoms
				handle = Handle(foundp)
				if size == 3856 {
					// last size class holds blocks of arbitrary size;
					// read the true size from the block's own trailer
					buf := make([]byte, 7)
					f.read(buf, int64(handle)<<4+15)
					(*Handle)(&size).Get(buf)
				}
				f.delFree(int64(handle), size)
				if rqAtoms < size {
					// return the unused remainder to the free lists
					f.addFree(int64(handle)+rqAtoms, size-rqAtoms)
				}
				f.writeUsed(b, int64(handle))
				return
			}
		}

		// no suitable free block: grow the file
		handle = Handle(f.extend(b))
		return
	})
	return
}
|
||||
|
||||
// checkLeft returns the atom size of a free block left-adjacent to block
// @atom. If that block is not free the returned size is 0. The last byte of
// the left neighbour encodes its state: <= 0xfd used, 0xfe multi-atom free
// (size stored in the preceding 7 bytes), 0xff single-atom free.
func (f *File) checkLeft(atom int64) (size int64) {
	if atom <= f.canfree {
		// below canfree lie the file header/free table; nothing to coalesce
		return
	}

	b := make([]byte, 7)
	fp := atom << 4
	f.read(b[:1], fp-1)
	switch last := b[0]; {
	case last <= 0xfd:
		// used block
	case last == 0xfe:
		// multi-atom free block: its size trailer ends 8 bytes before fp
		f.read(b, fp-8)
		(*Handle)(&size).Get(b)
	case last == 0xff:
		size = 1
	}
	return
}
|
||||
|
||||
// getInfo returns the block @atom type (its first prefix byte) and its size
// in atoms, decoded from the on-disk prefix encoding. Panics (via ECorrupted)
// on an impossible long-prefix length.
func (f *File) getInfo(atom int64) (pref byte, size int64) {
	b := make([]byte, 7)
	fp := atom << 4
	f.read(b[:1], fp)
	switch pref = b[0]; {
	case pref == 0: // Empty used
		size = 1
	case pref >= 1 && pref <= 237: // Short
		size = rq2Atoms(int(pref))
	case pref >= 0xee && pref <= 0xfb: // Short esc
		size = rq2Atoms(15 + 16*int(pref-0xee))
	case pref == 0xfc: // Long
		f.read(b[:2], fp+1)
		n := int(b[0])<<8 + int(b[1])
		switch {
		default:
			panic(&ECorrupted{f.f.Name(), fp + 1})
		case n >= 238 && n <= 61680: // Long non esc
			size = rq2Atoms(n)
		case n >= 61681: // Long esc
			size = rq2Atoms(13 + 16*(n-0xf0f1))
		}
	case pref == 0xfd: // reloc
		size = 1
	case pref == 0xfe: // multi-atom free block; size stored at fp+15
		f.read(b, fp+15)
		(*Handle)(&size).Get(b)
	case pref == 0xff: // single-atom free block
		size = 1
	}
	return
}
|
||||
|
||||
// getSize returns the atom size of the block @atom and wheter it is free.
|
||||
func (f *File) getSize(atom int64) (size int64, isFree bool) {
|
||||
var typ byte
|
||||
typ, size = f.getInfo(atom)
|
||||
isFree = typ >= 0xfe
|
||||
return
|
||||
}
|
||||
|
||||
// checkRight returns the atom size of a free bleck right adjacent to block @atom,atoms.
|
||||
// If that block is not free the returned size is 0.
|
||||
func (f *File) checkRight(atom, atoms int64) (size int64) {
|
||||
if atom+atoms >= f.atoms {
|
||||
return
|
||||
}
|
||||
|
||||
if sz, free := f.getSize(atom + atoms); free {
|
||||
size = sz
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// delFree removes the atoms@atom free block from its doubly-linked free
// block list. Free list heads live in the file header at offset
// 32+3+7+(size-1)*14, one per size class; sizes beyond the table are
// clamped to the last class.
func (f *File) delFree(atom, atoms int64) {
	b := make([]byte, 15)
	size := int(atoms)
	if n := len(f.freetab); atoms >= int64(n) {
		size = n - 1 // clamp to the catch-all last size class
	}
	fp := atom << 4
	// read the block's prev and next links (7 bytes each, after the prefix)
	f.read(b[1:], fp+1)
	var prev, next Handle
	prev.Get(b[1:])
	next.Get(b[8:])

	switch {
	case prev == 0 && next != 0:
		// unlink list head: point the table head at next, clear next.prev
		next.Put(b)
		f.write(b[:7], int64(32+3+7+(size-1)*14))
		f.write(zero7, int64(next)<<4+1)
		f.freetab[size] = int64(next)
	case prev != 0 && next == 0:
		// unlink list tail: clear prev.next
		f.write(zero7, int64(prev)<<4+8)
	case prev != 0 && next != 0:
		// unlink middle node: prev.next = next, next.prev = prev
		prev.Put(b)
		f.write(b[:7], int64(next)<<4+1)
		next.Put(b)
		f.write(b[:7], int64(prev)<<4+8)
	default: // prev == 0 && next == 0:
		// sole member: empty the list
		f.write(zero7, int64(32+3+7+(size-1)*14))
		f.freetab[size] = 0
	}
}
|
||||
|
||||
// addFree adds atoms@atom to the free block lists and marks it free. The
// block is pushed at the head of the size class list (classes beyond the
// table are clamped to the last one).
func (f *File) addFree(atom, atoms int64) {
	b := make([]byte, 7)
	size := int(atoms)
	if n := len(f.freetab); atoms >= int64(n) {
		size = n - 1 // clamp to the catch-all last size class
	}
	head := f.freetab[size]
	if head == 0 { // empty list
		f.makeFree(0, atom, atoms, 0)
		Handle(atom).Put(b)
		f.write(b, int64(32+3+7+(size-1)*14)) // table head <- atom
		f.freetab[size] = atom
		return
	}

	Handle(atom).Put(b)
	f.write(b, head<<4+1)            // head.prev = atom
	f.makeFree(0, atom, atoms, head) // atom.next = head
	f.write(b, int64(32+3+7+(size-1)*14)) // table head <- atom
	f.freetab[size] = atom
}
|
||||
|
||||
// makeFree sets up the on-disk content of a free block atoms@atom, filling
// the prev and next links. A single-atom block is marked 0xff at both its
// first and last byte; a multi-atom block is marked 0xfe at both ends and
// stores its atom count just before the trailing marker.
func (f *File) makeFree(prev, atom, atoms, next int64) {
	b := make([]byte, 23)
	fp := atom << 4
	if atoms == 1 {
		b[0] = 0xff
		Handle(prev).Put(b[1:])
		Handle(next).Put(b[8:])
		b[15] = 0xff // trailing free marker for left-neighbour checks
		f.write(b[:16], fp)
		return
	}

	b[0] = 0xfe
	Handle(prev).Put(b[1:])
	Handle(next).Put(b[8:])
	Handle(atoms).Put(b[15:]) // size trailer
	f.write(b[:22], fp)
	b[22] = 0xfe // trailing free marker at the block's last byte
	f.write(b[15:], fp+atoms<<4-8)
}
|
||||
|
||||
// Read reads and returns the data associated with handle and an error if any.
// Passing an invalid handle to Read may return invalid data without error.
// It's like getting garbage via passing an invalid pointer to C.memcopy().
// Handle 0 is the null handle and handle 2 is reserved; both are rejected.
// Internal panics from the read path are converted to the returned error.
func (f *File) Read(handle Handle) (b []byte, err error) {
	defer func() {
		if e := recover(); e != nil {
			b = nil
			err = e.(error)
		}
	}()

	switch handle {
	case 0:
		panic(ENullHandle(f.f.Name()))
	case 2:
		panic(&EHandle{f.f.Name(), handle})
	default:
		b, _ = f.readUsed(int64(handle))
	}
	return
}
|
||||
|
||||
// Free frees space associated with handle and returns an error if any. Passing an invalid
// handle to Free or reusing handle afterwards will probably corrupt the database or provide
// invalid data on Read. It's like corrupting memory via passing an invalid pointer to C.free()
// or reusing that pointer. Freed space is coalesced with free neighbours;
// a freed tail block shrinks the file instead of entering the free lists.
func (f *File) Free(handle Handle) (err error) {
	return storage.Mutate(f.Accessor(), func() (err error) {
		atom := int64(handle)
		atoms, isFree := f.getSize(atom)
		if isFree || atom < f.canfree {
			// already free, or inside the protected header region
			return &EHandle{f.f.Name(), handle}
		}

		leftFree, rightFree := f.checkLeft(atom), f.checkRight(atom, atoms)
		switch {
		case leftFree != 0 && rightFree != 0:
			// merge with both neighbours into one free block
			f.delFree(atom-leftFree, leftFree)
			f.delFree(atom+atoms, rightFree)
			f.addFree(atom-leftFree, leftFree+atoms+rightFree)
		case leftFree != 0 && rightFree == 0:
			f.delFree(atom-leftFree, leftFree)
			if atom+atoms == f.atoms { // the left free neighbour and this block together are an empty tail
				f.atoms = atom - leftFree
				// NOTE(review): Truncate errors are silently ignored here — confirm intended.
				f.f.Truncate(f.atoms << 4)
				return
			}

			f.addFree(atom-leftFree, leftFree+atoms)
		case leftFree == 0 && rightFree != 0:
			f.delFree(atom+atoms, rightFree)
			f.addFree(atom, atoms+rightFree)
		default: // leftFree == 0 && rightFree == 0
			if atom+atoms < f.atoms { // isolated inner block
				f.addFree(atom, atoms)
				return
			}

			// NOTE(review): Truncate errors are silently ignored here — confirm intended.
			f.f.Truncate(atom << 4) // isolated tail block, shrink file
			f.atoms = atom
		}
		return
	})
}
|
||||
|
||||
// Realloc reallocates space associated with handle to accommodate b, returns the newhandle
// newly associated with b and an error if any. If keepHandle == true then Realloc guarantees
// newhandle == handle even if the new data are larger than the previous content associated
// with handle. If !keepHandle && newhandle != handle then reusing handle will probably corrupt
// the database.
// The above effects are like corrupting memory/data via passing an invalid pointer to C.realloc().
//
// Growth is attempted in place by absorbing a free right neighbour; when
// that fails and keepHandle is set, the data moves to a new block and the
// old block becomes a one-atom relocation stub (type 0xfd) pointing at it.
func (f *File) Realloc(handle Handle, b []byte, keepHandle bool) (newhandle Handle, err error) {
	err = storage.Mutate(f.Accessor(), func() (err error) {
		switch handle {
		case 0, 2:
			// null and reserved handles are not reallocatable
			return &EHandle{f.f.Name(), handle}
		case 1:
			// handle 1 is pinned; it must never move
			keepHandle = true
		}
		newhandle = handle
		atom, newatoms := int64(handle), rq2Atoms(len(b))
		if newatoms > 3856 { // max single-block size in atoms
			return &EBadRequest{f.f.Name(), len(b)}
		}

		typ, oldatoms := f.getInfo(atom)
		switch {
		default:
			return &ECorrupted{f.f.Name(), atom << 4}
		case typ <= 0xfc: // non relocated used block
			switch {
			case newatoms == oldatoms: // in place replace
				f.writeUsed(b, atom)
			case newatoms < oldatoms: // in place shrink
				rightFree := f.checkRight(atom, oldatoms)
				if rightFree > 0 { // right join
					f.delFree(atom+oldatoms, rightFree)
				}
				f.addFree(atom+newatoms, oldatoms+rightFree-newatoms)
				f.writeUsed(b, atom)
			case newatoms > oldatoms:
				// try to grow in place by absorbing a free right neighbour
				if rightFree := f.checkRight(atom, oldatoms); rightFree > 0 && newatoms <= oldatoms+rightFree {
					f.delFree(atom+oldatoms, rightFree)
					if newatoms < oldatoms+rightFree {
						f.addFree(atom+newatoms, oldatoms+rightFree-newatoms)
					}
					f.writeUsed(b, atom)
					return
				}

				if !keepHandle {
					// caller accepts a new handle: plain free + alloc
					f.Free(Handle(atom))
					newhandle, err = f.Alloc(b)
					return
				}

				// reloc: move data, turn the old block into a 0xfd stub
				newatom, e := f.Alloc(b)
				if e != nil {
					return e
				}

				buf := make([]byte, 16)
				buf[0] = 0xfd
				Handle(newatom).Put(buf[1:])
				// shrink the old block to one atom holding the target link
				f.Realloc(Handle(atom), buf[1:], true)
				f.write(buf[:1], atom<<4)
			}
		case typ == 0xfd: // reloc
			var target Handle
			buf := make([]byte, 7)
			f.read(buf, atom<<4+1)
			target.Get(buf)
			switch {
			case newatoms == 1:
				// new content fits in the stub itself: inline it, free the target
				f.writeUsed(b, atom)
				f.Free(target)
			default:
				// try to expand the stub in place over a free right neighbour
				if rightFree := f.checkRight(atom, 1); rightFree > 0 && newatoms <= 1+rightFree {
					f.delFree(atom+1, rightFree)
					if newatoms < 1+rightFree {
						f.addFree(atom+newatoms, 1+rightFree-newatoms)
					}
					f.writeUsed(b, atom)
					f.Free(target)
					return
				}

				// otherwise realloc the relocation target and update the link
				newtarget, e := f.Realloc(Handle(target), b, false)
				if e != nil {
					return e
				}

				if newtarget != target {
					Handle(newtarget).Put(buf)
					f.write(buf, atom<<4+1)
				}
			}
		}
		return
	})
	return
}
|
||||
|
||||
// Lock locks f for writing. If the lock is already locked for reading or writing,
// Lock blocks until the lock is available. To ensure that the lock eventually becomes available,
// a blocked Lock call excludes new readers from acquiring the lock.
func (f *File) Lock() {
	f.rwm.Lock()
}

// RLock locks f for reading. If the lock is already locked for writing or there is a writer
// already waiting to release the lock, RLock blocks until the writer has released the lock.
func (f *File) RLock() {
	f.rwm.RLock()
}

// Unlock unlocks f for writing. It is a run-time error if f is not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked RWMutex is not associated with a particular goroutine.
// One goroutine may RLock (Lock) f and then arrange for another goroutine to RUnlock (Unlock) it.
func (f *File) Unlock() {
	f.rwm.Unlock()
}

// RUnlock undoes a single RLock call; it does not affect other simultaneous readers.
// It is a run-time error if f is not locked for reading on entry to RUnlock.
func (f *File) RUnlock() {
	f.rwm.RUnlock()
}
|
||||
|
||||
// LockedAlloc wraps Alloc in a Lock/Unlock pair for concurrent use of f.
func (f *File) LockedAlloc(b []byte) (handle Handle, err error) {
	f.Lock()
	defer f.Unlock()
	return f.Alloc(b)
}

// LockedFree wraps Free in a Lock/Unlock pair for concurrent use of f.
func (f *File) LockedFree(handle Handle) (err error) {
	f.Lock()
	defer f.Unlock()
	return f.Free(handle)
}

// LockedRead wraps Read in a RLock/RUnlock pair; multiple LockedReads may
// proceed concurrently.
func (f *File) LockedRead(handle Handle) (b []byte, err error) {
	f.RLock()
	defer f.RUnlock()
	return f.Read(handle)
}

// LockedRealloc wraps Realloc in a Lock/Unlock pair for concurrent use of f.
func (f *File) LockedRealloc(handle Handle, b []byte, keepHandle bool) (newhandle Handle, err error) {
	f.Lock()
	defer f.Unlock()
	return f.Realloc(handle, b, keepHandle)
}
|
||||
15
vendor/github.com/cznic/fileutil/falloc/test_deps.go
generated
vendored
15
vendor/github.com/cznic/fileutil/falloc/test_deps.go
generated
vendored
@@ -1,15 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package falloc
|
||||
|
||||
// Pull test dependencies too.
|
||||
// Enables easy 'go test X' after 'go get X'
|
||||
import (
|
||||
_ "github.com/cznic/fileutil"
|
||||
_ "github.com/cznic/fileutil/storage"
|
||||
_ "github.com/cznic/mathutil"
|
||||
)
|
||||
225
vendor/github.com/cznic/fileutil/fileutil.go
generated
vendored
225
vendor/github.com/cznic/fileutil/fileutil.go
generated
vendored
@@ -1,225 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package fileutil collects some file utility functions.
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GoMFile is a concurrent access safe version of MFile.
type GoMFile struct {
	mfile *MFile     // the wrapped, non-thread-safe MFile
	mutex sync.Mutex // serializes all access to mfile
}
|
||||
|
||||
// NewGoMFile return a newly created GoMFile.
|
||||
func NewGoMFile(fname string, flag int, perm os.FileMode, delta_ns int64) (m *GoMFile, err error) {
|
||||
m = &GoMFile{}
|
||||
if m.mfile, err = NewMFile(fname, flag, perm, delta_ns); err != nil {
|
||||
m = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// File returns the wrapped *os.File (running the modification check) under
// the GoMFile mutex.
func (m *GoMFile) File() (file *os.File, err error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	return m.mfile.File()
}

// SetChanged forces the next File() to handle a modification, under the
// GoMFile mutex.
func (m *GoMFile) SetChanged() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.mfile.SetChanged()
}

// SetHandler sets the modification handler, under the GoMFile mutex.
func (m *GoMFile) SetHandler(h MFileHandler) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.mfile.SetHandler(h)
}
|
||||
|
||||
// MFileHandler resolves modifications of File.
// Possible File context is expected to be a part of the handler's closure.
type MFileHandler func(*os.File) error

// MFile represents an os.File with a guard/handler on change/modification.
// Example use case is an app with a configuration file which can be modified at any time
// and has to be reloaded in such an event prior to performing something configurable by that
// file. The checks are made only on access to the MFile file by
// File() and a time threshold/hysteresis value can be chosen on creating a new MFile.
type MFile struct {
	file    *os.File     // the guarded file
	handler MFileHandler // invoked when a modification is detected
	t0      int64        // unix-nano time of the last modification check
	delta   int64        // minimum ns between checks; 0 means check on every access
	ctime   int64        // last observed modification time (unix-nano); -1 forces handling
}
|
||||
|
||||
// NewMFile returns a newly created MFile or an error if any.
// The fname, flag and perm parameters have the same meaning as in os.Open.
// For the meaning of the delta_ns parameter please see the (m *MFile) File() docs.
// A finalizer closes the underlying file when the MFile is garbage collected.
func NewMFile(fname string, flag int, perm os.FileMode, delta_ns int64) (m *MFile, err error) {
	m = &MFile{}
	m.t0 = time.Now().UnixNano()
	if m.file, err = os.OpenFile(fname, flag, perm); err != nil {
		return
	}

	var fi os.FileInfo
	if fi, err = m.file.Stat(); err != nil {
		return
	}

	// record the current mtime so only future modifications trigger the handler
	m.ctime = fi.ModTime().UnixNano()
	m.delta = delta_ns
	runtime.SetFinalizer(m, func(m *MFile) {
		m.file.Close()
	})
	return
}
|
||||
|
||||
// SetChanged forces the next File() to unconditionally handle modification
// of the wrapped os.File (ctime -1 can never match a real mtime).
func (m *MFile) SetChanged() {
	m.ctime = -1
}

// SetHandler sets a function to be invoked when modification of MFile is to be processed.
func (m *MFile) SetHandler(h MFileHandler) {
	m.handler = h
}
|
||||
|
||||
// File returns an os.File from MFile. If time elapsed between the last invocation of this function
// and now is at least delta_ns ns (a parameter of NewMFile) then the file is checked for
// change/modification. For delta_ns == 0 the modification is checked w/o getting os.Time().
// If a change is detected a handler is invoked on the MFile file.
// Any of these steps can produce an error. If that happens the function returns nil, error.
func (m *MFile) File() (file *os.File, err error) {
	var now int64

	// delta == 0 means: check on every call, without reading the clock
	mustCheck := m.delta == 0
	if !mustCheck {
		now = time.Now().UnixNano()
		mustCheck = now-m.t0 > m.delta
	}

	if mustCheck { // check interval reached
		var fi os.FileInfo
		if fi, err = m.file.Stat(); err != nil {
			return
		}

		if fi.ModTime().UnixNano() != m.ctime { // modification detected
			if m.handler == nil {
				return nil, fmt.Errorf("no handler set for modified file %q", m.file.Name())
			}
			if err = m.handler(m.file); err != nil {
				return
			}

			// only record the new mtime after the handler succeeded
			m.ctime = fi.ModTime().UnixNano()
		}
		m.t0 = now
	}

	return m.file, nil
}
|
||||
|
||||
// Read reads buf from r. It will either fill the full buf or fail.
|
||||
// It wraps the functionality of an io.Reader which may return less bytes than requested,
|
||||
// but may block if not all data are ready for the io.Reader.
|
||||
func Read(r io.Reader, buf []byte) (err error) {
|
||||
have := 0
|
||||
remain := len(buf)
|
||||
got := 0
|
||||
for remain > 0 {
|
||||
if got, err = r.Read(buf[have:]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
remain -= got
|
||||
have += got
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// "os" and/or "syscall" extensions
|
||||
|
||||
// FadviseAdvice is used by Fadvise.
type FadviseAdvice int

// FadviseAdvice values, mirroring the POSIX_FADV_* constants.
const (
	// $ grep FADV /usr/include/bits/fcntl.h
	POSIX_FADV_NORMAL     FadviseAdvice = iota // No further special treatment.
	POSIX_FADV_RANDOM                          // Expect random page references.
	POSIX_FADV_SEQUENTIAL                      // Expect sequential page references.
	POSIX_FADV_WILLNEED                        // Will need these pages.
	POSIX_FADV_DONTNEED                        // Don't need these pages.
	POSIX_FADV_NOREUSE                         // Data will be accessed once.
)
|
||||
|
||||
// TempFile creates a new temporary file in the directory dir with a name
// ending with suffix, basename starting with prefix, opens the file for
// reading and writing, and returns the resulting *os.File. If dir is the
// empty string, TempFile uses the default directory for temporary files (see
// os.TempDir). Multiple programs calling TempFile simultaneously will not
// choose the same file. The caller can use f.Name() to find the pathname of
// the file. It is the caller's responsibility to remove the file when no
// longer needed.
//
// NOTE: This function differs from ioutil.TempFile in supporting a suffix.
func TempFile(dir, prefix, suffix string) (f *os.File, err error) {
	if dir == "" {
		dir = os.TempDir()
	}

	nconflict := 0
	for i := 0; i < 10000; i++ {
		name := filepath.Join(dir, prefix+nextInfix()+suffix)
		// O_EXCL guarantees we never reuse an existing file
		f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if os.IsExist(err) {
			// too many collisions: reseed the name generator
			if nconflict++; nconflict > 10 {
				randmu.Lock()
				rand = reseed()
				randmu.Unlock()
			}
			continue
		}
		break
	}
	return
}
|
||||
|
||||
// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet - keeps the number of tries in
// TempFile to a minimum.
var rand uint32       // LCG state; 0 means "not yet seeded"
var randmu sync.Mutex // guards rand

// reseed derives a fresh PRNG seed from the wall clock and the process id.
func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
|
||||
|
||||
func nextInfix() string {
|
||||
randmu.Lock()
|
||||
r := rand
|
||||
if r == 0 {
|
||||
r = reseed()
|
||||
}
|
||||
r = r*1664525 + 1013904223 // constants from Numerical Recipes
|
||||
rand = r
|
||||
randmu.Unlock()
|
||||
return strconv.Itoa(int(1e9 + r%1e9))[1:]
|
||||
}
|
||||
27
vendor/github.com/cznic/fileutil/fileutil_arm.go
generated
vendored
27
vendor/github.com/cznic/fileutil/fileutil_arm.go
generated
vendored
@@ -1,27 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// hasPunchHole reports whether PunchHole actually deallocates space on this
// platform; on ARM it is a no-op.
const hasPunchHole = false

// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Not supported on ARM.
func PunchHole(f *os.File, off, len int64) error {
	return nil
}

// Fadvise predeclares an access pattern for file data. See also 'man 2
// posix_fadvise'. Not supported on ARM.
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
	return nil
}

// IsEOF reports whether err is an EOF condition.
func IsEOF(err error) bool { return err == io.EOF }
|
||||
29
vendor/github.com/cznic/fileutil/fileutil_darwin.go
generated
vendored
29
vendor/github.com/cznic/fileutil/fileutil_darwin.go
generated
vendored
@@ -1,29 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !arm
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// hasPunchHole reports whether PunchHole actually deallocates space on this
// platform; on OSX it is a no-op.
const hasPunchHole = false

// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Not supported on OSX.
func PunchHole(f *os.File, off, len int64) error {
	return nil
}

// Fadvise predeclares an access pattern for file data. See also 'man 2
// posix_fadvise'. Not supported on OSX.
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
	return nil
}

// IsEOF reports whether err is an EOF condition.
func IsEOF(err error) bool { return err == io.EOF }
|
||||
29
vendor/github.com/cznic/fileutil/fileutil_freebsd.go
generated
vendored
29
vendor/github.com/cznic/fileutil/fileutil_freebsd.go
generated
vendored
@@ -1,29 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !arm
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// hasPunchHole reports whether PunchHole actually deallocates space on this
// platform; on FreeBSD it is a no-op.
const hasPunchHole = false

// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Unimplemented on FreeBSD.
func PunchHole(f *os.File, off, len int64) error {
	return nil
}

// Fadvise predeclares an access pattern for file data. See also 'man 2
// posix_fadvise'. Unimplemented on FreeBSD.
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
	return nil
}

// IsEOF reports whether err is an EOF condition.
func IsEOF(err error) bool { return err == io.EOF }
|
||||
98
vendor/github.com/cznic/fileutil/fileutil_linux.go
generated
vendored
98
vendor/github.com/cznic/fileutil/fileutil_linux.go
generated
vendored
@@ -1,98 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !arm
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// hasPunchHole reports that PunchHole is a real operation on Linux
// (via fallocate FALLOC_FL_PUNCH_HOLE on sufficiently new kernels).
const hasPunchHole = true

// n parses the leading decimal digits of s (e.g. a kernel version component
// such as "38-generic") and returns them as a byte; non-digit suffixes are
// ignored and parse failures yield 0. Overflow wraps, which is acceptable
// for kernel version components.
func n(s []byte) byte {
	digits := s
	for i, c := range s {
		if c < '0' || c > '9' {
			digits = s[:i]
			break
		}
	}
	v, _ := strconv.Atoi(string(digits))
	return byte(v)
}
|
||||
|
||||
// init inspects the running kernel version and disables hole punching (by
// swapping puncher for a no-op) when the kernel predates FALLOC_FL_PUNCH_HOLE
// support. Panics if /proc/sys/kernel/osrelease cannot be read.
func init() {
	b, err := ioutil.ReadFile("/proc/sys/kernel/osrelease")
	if err != nil {
		panic(err)
	}

	tokens := bytes.Split(b, []byte("."))
	if len(tokens) > 3 {
		tokens = tokens[:3] // only major.minor.patch matter
	}
	switch len(tokens) {
	case 3:
		// Supported since kernel 2.6.38
		if bytes.Compare([]byte{n(tokens[0]), n(tokens[1]), n(tokens[2])}, []byte{2, 6, 38}) < 0 {
			puncher = func(*os.File, int64, int64) error { return nil }
		}
	case 2:
		if bytes.Compare([]byte{n(tokens[0]), n(tokens[1])}, []byte{2, 7}) < 0 {
			puncher = func(*os.File, int64, int64) error { return nil }
		}
	default:
		// unparseable version string: be conservative, disable punching
		puncher = func(*os.File, int64, int64) error { return nil }
	}
}

// puncher performs the actual hole punch via the fallocate syscall with
// FALLOC_FL_KEEP_SIZE|FALLOC_FL_PUNCH_HOLE; init may replace it with a
// no-op on old kernels.
var puncher = func(f *os.File, off, len int64) error {
	const (
		/*
			/usr/include/linux$ grep FL_ falloc.h
		*/
		_FALLOC_FL_KEEP_SIZE  = 0x01 // default is extend size
		_FALLOC_FL_PUNCH_HOLE = 0x02 // de-allocates range
	)

	_, _, errno := syscall.Syscall6(
		syscall.SYS_FALLOCATE,
		uintptr(f.Fd()),
		uintptr(_FALLOC_FL_KEEP_SIZE|_FALLOC_FL_PUNCH_HOLE),
		uintptr(off),
		uintptr(len),
		0, 0)
	if errno != 0 {
		return os.NewSyscallError("SYS_FALLOCATE", errno)
	}
	return nil
}
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. No-op for kernels < 2.6.38 (or < 2.7).
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return puncher(f, off, len)
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. See also 'man 2
|
||||
// posix_fadvise'.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
_, _, errno := syscall.Syscall6(
|
||||
syscall.SYS_FADVISE64,
|
||||
uintptr(f.Fd()),
|
||||
uintptr(off),
|
||||
uintptr(len),
|
||||
uintptr(advice),
|
||||
0, 0)
|
||||
return os.NewSyscallError("SYS_FADVISE64", errno)
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool { return err == io.EOF }
|
||||
29
vendor/github.com/cznic/fileutil/fileutil_netbsd.go
generated
vendored
29
vendor/github.com/cznic/fileutil/fileutil_netbsd.go
generated
vendored
@@ -1,29 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !arm
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// hasPunchHole reports whether PunchHole actually deallocates space on this
// platform; on NetBSD it is a no-op.
const hasPunchHole = false

// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Similar to FreeBSD, this is
// unimplemented.
func PunchHole(f *os.File, off, len int64) error {
	return nil
}

// Fadvise predeclares an access pattern for file data. Unimplemented on NetBSD.
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
	return nil
}

// IsEOF reports whether err is an EOF condition.
func IsEOF(err error) bool { return err == io.EOF }
|
||||
27
vendor/github.com/cznic/fileutil/fileutil_openbsd.go
generated
vendored
27
vendor/github.com/cznic/fileutil/fileutil_openbsd.go
generated
vendored
@@ -1,27 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// hasPunchHole reports whether PunchHole actually deallocates space on this
// platform; on OpenBSD it is a no-op.
const hasPunchHole = false

// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Similar to FreeBSD, this is
// unimplemented.
func PunchHole(f *os.File, off, len int64) error {
	return nil
}

// Fadvise predeclares an access pattern for file data. Unimplemented on OpenBSD.
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
	return nil
}

// IsEOF reports whether err is an EOF condition.
func IsEOF(err error) bool { return err == io.EOF }
|
||||
27
vendor/github.com/cznic/fileutil/fileutil_plan9.go
generated
vendored
27
vendor/github.com/cznic/fileutil/fileutil_plan9.go
generated
vendored
@@ -1,27 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// hasPunchHole reports whether PunchHole actually deallocates space on this
// platform; on Plan 9 it is a no-op.
const hasPunchHole = false

// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Unimplemented on Plan 9.
func PunchHole(f *os.File, off, len int64) error {
	return nil
}

// Fadvise predeclares an access pattern for file data. See also 'man 2
// posix_fadvise'. Unimplemented on Plan 9.
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
	return nil
}

// IsEOF reports whether err is an EOF condition.
func IsEOF(err error) bool { return err == io.EOF }
|
||||
29
vendor/github.com/cznic/fileutil/fileutil_solaris.go
generated
vendored
29
vendor/github.com/cznic/fileutil/fileutil_solaris.go
generated
vendored
@@ -1,29 +0,0 @@
|
||||
// Copyright (c) 2013 jnml. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.3
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// hasPunchHole reports whether PunchHole actually deallocates space on this
// platform; on Solaris it is a no-op.
const hasPunchHole = false

// PunchHole deallocates space inside a file in the byte range starting at
// offset and continuing for len bytes. Not supported on Solaris.
func PunchHole(f *os.File, off, len int64) error {
	return nil
}

// Fadvise predeclares an access pattern for file data. See also 'man 2
// posix_fadvise'. Not supported on Solaris.
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
	return nil
}

// IsEOF reports whether err is an EOF condition.
func IsEOF(err error) bool { return err == io.EOF }
|
||||
185
vendor/github.com/cznic/fileutil/fileutil_windows.go
generated
vendored
185
vendor/github.com/cznic/fileutil/fileutil_windows.go
generated
vendored
@@ -1,185 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const hasPunchHole = true
|
||||
|
||||
// PunchHole deallocates space inside a file in the byte range starting at
|
||||
// offset and continuing for len bytes. Not supported on Windows.
|
||||
func PunchHole(f *os.File, off, len int64) error {
|
||||
return puncher(f, off, len)
|
||||
}
|
||||
|
||||
// Fadvise predeclares an access pattern for file data. See also 'man 2
|
||||
// posix_fadvise'. Not supported on Windows.
|
||||
func Fadvise(f *os.File, off, len int64, advice FadviseAdvice) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEOF reports whether err is an EOF condition.
|
||||
func IsEOF(err error) bool {
|
||||
if err == io.EOF {
|
||||
return true
|
||||
}
|
||||
|
||||
// http://social.technet.microsoft.com/Forums/windowsserver/en-US/1a16311b-c625-46cf-830b-6a26af488435/how-to-solve-error-38-0x26-errorhandleeof-using-fsctlgetretrievalpointers
|
||||
x, ok := err.(*os.PathError)
|
||||
return ok && x.Op == "read" && x.Err.(syscall.Errno) == 0x26
|
||||
}
|
||||
|
||||
var (
|
||||
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
|
||||
procDeviceIOControl = modkernel32.NewProc("DeviceIoControl")
|
||||
|
||||
sparseFilesMu sync.Mutex
|
||||
sparseFiles map[uintptr]struct{}
|
||||
)
|
||||
|
||||
func init() {
|
||||
// sparseFiles is an fd set for already "sparsed" files - according to
|
||||
// msdn.microsoft.com/en-us/library/windows/desktop/aa364225(v=vs.85).aspx
|
||||
// the file handles are unique per process.
|
||||
sparseFiles = make(map[uintptr]struct{})
|
||||
}
|
||||
|
||||
// puncHoleWindows punches a hole into the given file starting at offset,
|
||||
// measuring "size" bytes
|
||||
// (http://msdn.microsoft.com/en-us/library/windows/desktop/aa364597%28v=vs.85%29.aspx)
|
||||
func puncher(file *os.File, offset, size int64) error {
|
||||
if err := ensureFileSparse(file); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// http://msdn.microsoft.com/en-us/library/windows/desktop/aa364411%28v=vs.85%29.aspx
|
||||
// typedef struct _FILE_ZERO_DATA_INFORMATION {
|
||||
// LARGE_INTEGER FileOffset;
|
||||
// LARGE_INTEGER BeyondFinalZero;
|
||||
//} FILE_ZERO_DATA_INFORMATION, *PFILE_ZERO_DATA_INFORMATION;
|
||||
type fileZeroDataInformation struct {
|
||||
FileOffset, BeyondFinalZero int64
|
||||
}
|
||||
|
||||
lpInBuffer := fileZeroDataInformation{
|
||||
FileOffset: offset,
|
||||
BeyondFinalZero: offset + size}
|
||||
return deviceIOControl(false, file.Fd(), uintptr(unsafe.Pointer(&lpInBuffer)), 16)
|
||||
}
|
||||
|
||||
// // http://msdn.microsoft.com/en-us/library/windows/desktop/cc948908%28v=vs.85%29.aspx
|
||||
// type fileSetSparseBuffer struct {
|
||||
// SetSparse bool
|
||||
// }
|
||||
|
||||
func ensureFileSparse(file *os.File) (err error) {
|
||||
fd := file.Fd()
|
||||
sparseFilesMu.Lock()
|
||||
if _, ok := sparseFiles[fd]; ok {
|
||||
sparseFilesMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = deviceIOControl(true, fd, 0, 0); err == nil {
|
||||
sparseFiles[fd] = struct{}{}
|
||||
}
|
||||
sparseFilesMu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func deviceIOControl(setSparse bool, fd, inBuf, inBufLen uintptr) (err error) {
|
||||
const (
|
||||
//http://source.winehq.org/source/include/winnt.h#L4605
|
||||
file_read_data = 1
|
||||
file_write_data = 2
|
||||
|
||||
// METHOD_BUFFERED 0
|
||||
method_buffered = 0
|
||||
// FILE_ANY_ACCESS 0
|
||||
file_any_access = 0
|
||||
// FILE_DEVICE_FILE_SYSTEM 0x00000009
|
||||
file_device_file_system = 0x00000009
|
||||
// FILE_SPECIAL_ACCESS (FILE_ANY_ACCESS)
|
||||
file_special_access = file_any_access
|
||||
file_read_access = file_read_data
|
||||
file_write_access = file_write_data
|
||||
|
||||
// http://source.winehq.org/source/include/winioctl.h
|
||||
// #define CTL_CODE ( DeviceType,
|
||||
// Function,
|
||||
// Method,
|
||||
// Access )
|
||||
// ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method)
|
||||
|
||||
// FSCTL_SET_COMPRESSION CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 16, METHOD_BUFFERED, FILE_READ_DATA | FILE_WRITE_DATA)
|
||||
fsctl_set_compression = (file_device_file_system << 16) | ((file_read_access | file_write_access) << 14) | (16 << 2) | method_buffered
|
||||
// FSCTL_SET_SPARSE CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 49, METHOD_BUFFERED, FILE_SPECIAL_ACCESS)
|
||||
fsctl_set_sparse = (file_device_file_system << 16) | (file_special_access << 14) | (49 << 2) | method_buffered
|
||||
// FSCTL_SET_ZERO_DATA CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 50, METHOD_BUFFERED, FILE_WRITE_DATA)
|
||||
fsctl_set_zero_data = (file_device_file_system << 16) | (file_write_data << 14) | (50 << 2) | method_buffered
|
||||
)
|
||||
retPtr := uintptr(unsafe.Pointer(&(make([]byte, 8)[0])))
|
||||
var r1 uintptr
|
||||
var e1 syscall.Errno
|
||||
if setSparse {
|
||||
// BOOL
|
||||
// WINAPI
|
||||
// DeviceIoControl( (HANDLE) hDevice, // handle to a file
|
||||
// FSCTL_SET_SPARSE, // dwIoControlCode
|
||||
// (PFILE_SET_SPARSE_BUFFER) lpInBuffer, // input buffer
|
||||
// (DWORD) nInBufferSize, // size of input buffer
|
||||
// NULL, // lpOutBuffer
|
||||
// 0, // nOutBufferSize
|
||||
// (LPDWORD) lpBytesReturned, // number of bytes returned
|
||||
// (LPOVERLAPPED) lpOverlapped ); // OVERLAPPED structure
|
||||
r1, _, e1 = syscall.Syscall9(procDeviceIOControl.Addr(), 8,
|
||||
fd,
|
||||
uintptr(fsctl_set_sparse),
|
||||
// If the lpInBuffer parameter is NULL, the operation will behave the same as if the SetSparse member of the FILE_SET_SPARSE_BUFFER structure were TRUE. In other words, the operation sets the file to a sparse file.
|
||||
0, // uintptr(unsafe.Pointer(&lpInBuffer)),
|
||||
0, // 1,
|
||||
0,
|
||||
0,
|
||||
retPtr,
|
||||
0,
|
||||
0)
|
||||
} else {
|
||||
// BOOL
|
||||
// WINAPI
|
||||
// DeviceIoControl( (HANDLE) hDevice, // handle to a file
|
||||
// FSCTL_SET_ZERO_DATA, // dwIoControlCode
|
||||
// (LPVOID) lpInBuffer, // input buffer
|
||||
// (DWORD) nInBufferSize, // size of input buffer
|
||||
// NULL, // lpOutBuffer
|
||||
// 0, // nOutBufferSize
|
||||
// (LPDWORD) lpBytesReturned, // number of bytes returned
|
||||
// (LPOVERLAPPED) lpOverlapped ); // OVERLAPPED structure
|
||||
r1, _, e1 = syscall.Syscall9(procDeviceIOControl.Addr(), 8,
|
||||
fd,
|
||||
uintptr(fsctl_set_zero_data),
|
||||
inBuf,
|
||||
inBufLen,
|
||||
0,
|
||||
0,
|
||||
retPtr,
|
||||
0,
|
||||
0)
|
||||
}
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = error(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
153
vendor/github.com/cznic/fileutil/hdb/hdb.go
generated
vendored
153
vendor/github.com/cznic/fileutil/hdb/hdb.go
generated
vendored
@@ -1,153 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
/*
|
||||
WIP: Package hdb provides a "handle"/value DB like store, but actually it's
|
||||
closer to the model of a process's virtual memory and its alloc, free and move
|
||||
methods.
|
||||
|
||||
The hdb package is a thin layer around falloc.File providing stable-only
|
||||
handles and the basic synchronizing primitives. The central functionality of
|
||||
hdb are the New, Set, Get and Delete methods of Store.
|
||||
|
||||
Conceptual analogy:
|
||||
New alloc(sizeof(content)), return new "memory" pointer (a handle).
|
||||
|
||||
Get memmove() from "memory" "pointed to" by handle to the result content.
|
||||
Note: Handle "knows" the size of its content.
|
||||
|
||||
Set memmove() from content to "memory" pointed to by handle.
|
||||
In contrast to real memory, the new content may have different
|
||||
size than the previously stored one w/o additional handling
|
||||
and the "pointer" handle remains the same.
|
||||
|
||||
Delete free() the "memory" "pointed to" by handle.
|
||||
*/
|
||||
package hdb
|
||||
|
||||
import (
|
||||
"github.com/cznic/fileutil/falloc"
|
||||
"github.com/cznic/fileutil/storage"
|
||||
)
|
||||
|
||||
type Store struct {
|
||||
f *falloc.File
|
||||
}
|
||||
|
||||
// New returns a newly created Store backed by accessor, discarding its conents if any.
|
||||
// If successful, methods on the returned Store can be used for I/O.
|
||||
// It returns the Store and an error, if any.
|
||||
func New(accessor storage.Accessor) (store *Store, err error) {
|
||||
s := &Store{}
|
||||
if s.f, err = falloc.New(accessor); err == nil {
|
||||
store = s
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Open opens the Store from accessor.
|
||||
// If successful, methods on the returned Store can be used for data exchange.
|
||||
// It returns the Store and an error, if any.
|
||||
func Open(accessor storage.Accessor) (store *Store, err error) {
|
||||
s := &Store{}
|
||||
if s.f, err = falloc.Open(accessor); err == nil {
|
||||
store = s
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Close closes the store. Further access to the store has undefined behavior and may panic.
|
||||
// It returns an error, if any.
|
||||
func (s *Store) Close() (err error) {
|
||||
defer func() {
|
||||
s.f = nil
|
||||
}()
|
||||
|
||||
return s.f.Close()
|
||||
}
|
||||
|
||||
// Delete deletes the data associated with handle.
|
||||
// It returns an error if any.
|
||||
func (s *Store) Delete(handle falloc.Handle) (err error) {
|
||||
return s.f.Free(handle)
|
||||
}
|
||||
|
||||
// Get gets the data associated with handle.
|
||||
// It returns the data and an error, if any.
|
||||
func (s *Store) Get(handle falloc.Handle) (b []byte, err error) {
|
||||
return s.f.Read(handle)
|
||||
}
|
||||
|
||||
// New associates data with a new handle.
|
||||
// It returns the handle and an error, if any.
|
||||
func (s *Store) New(b []byte) (handle falloc.Handle, err error) {
|
||||
return s.f.Alloc(b)
|
||||
}
|
||||
|
||||
// Set associates data with an existing handle.
|
||||
// It returns an error, if any.
|
||||
func (s *Store) Set(handle falloc.Handle, b []byte) (err error) {
|
||||
_, err = s.f.Realloc(handle, b, true)
|
||||
return
|
||||
}
|
||||
|
||||
// Root returns the handle of the DB root (top level directory, ...).
|
||||
func (s *Store) Root() falloc.Handle {
|
||||
return s.f.Root()
|
||||
}
|
||||
|
||||
// File returns the underlying falloc.File of 's'.
|
||||
func (s *Store) File() *falloc.File {
|
||||
return s.f
|
||||
}
|
||||
|
||||
// Lock locks 's' for writing. If the lock is already locked for reading or writing,
|
||||
// Lock blocks until the lock is available. To ensure that the lock eventually becomes available,
|
||||
// a blocked Lock call excludes new readers from acquiring the lock.
|
||||
func (s *Store) Lock() {
|
||||
s.f.Lock()
|
||||
}
|
||||
|
||||
// RLock locks 's' for reading. If the lock is already locked for writing or there is a writer
|
||||
// already waiting to release the lock, RLock blocks until the writer has released the lock.
|
||||
func (s *Store) RLock() {
|
||||
s.f.RLock()
|
||||
}
|
||||
|
||||
// Unlock unlocks 's' for writing. It's a run-time error if 's' is not locked for writing on entry to Unlock.
|
||||
//
|
||||
// As with Mutexes, a locked RWMutex is not associated with a particular goroutine.
|
||||
// One goroutine may RLock (Lock) 's' and then arrange for another goroutine to RUnlock (Unlock) it.
|
||||
func (s *Store) Unlock() {
|
||||
s.f.Unlock()
|
||||
}
|
||||
|
||||
// RUnlock undoes a single RLock call; it does not affect other simultaneous readers.
|
||||
// It's a run-time error if 's' is not locked for reading on entry to RUnlock.
|
||||
func (s *Store) RUnlock() {
|
||||
s.f.RUnlock()
|
||||
}
|
||||
|
||||
// LockedNew wraps New in a Lock/Unlock pair.
|
||||
func (s *Store) LockedNew(b []byte) (handle falloc.Handle, err error) {
|
||||
return s.f.LockedAlloc(b)
|
||||
}
|
||||
|
||||
// LockedDelete wraps Delete in a Lock/Unlock pair.
|
||||
func (s *Store) LockedDelete(handle falloc.Handle) (err error) {
|
||||
return s.f.LockedFree(handle)
|
||||
}
|
||||
|
||||
// LockedGet wraps Get in a RLock/RUnlock pair.
|
||||
func (s *Store) LockedGet(handle falloc.Handle) (b []byte, err error) {
|
||||
return s.f.LockedRead(handle)
|
||||
}
|
||||
|
||||
// LockedSet wraps Set in a Lock/Unlock pair.
|
||||
func (s *Store) LockedSet(handle falloc.Handle, b []byte) (err error) {
|
||||
_, err = s.f.Realloc(handle, b, true)
|
||||
return
|
||||
}
|
||||
13
vendor/github.com/cznic/fileutil/hdb/test_deps.go
generated
vendored
13
vendor/github.com/cznic/fileutil/hdb/test_deps.go
generated
vendored
@@ -1,13 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package hdb
|
||||
|
||||
// Pull test dependencies too.
|
||||
// Enables easy 'go test X' after 'go get X'
|
||||
import (
|
||||
// nothing yet
|
||||
)
|
||||
322
vendor/github.com/cznic/fileutil/storage/cache.go
generated
vendored
322
vendor/github.com/cznic/fileutil/storage/cache.go
generated
vendored
@@ -1,322 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type cachepage struct {
|
||||
b [512]byte
|
||||
dirty bool
|
||||
lru *list.Element
|
||||
pi int64
|
||||
valid int // page content is b[:valid]
|
||||
}
|
||||
|
||||
func (p *cachepage) wr(b []byte, off int) (wasDirty bool) {
|
||||
copy(p.b[off:], b)
|
||||
if n := off + len(b); n > p.valid {
|
||||
p.valid = n
|
||||
}
|
||||
wasDirty = p.dirty
|
||||
p.dirty = true
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) rd(off int64, read bool) (p *cachepage, ok bool) {
|
||||
c.Rq++
|
||||
pi := off >> 9
|
||||
if p, ok = c.m[pi]; ok {
|
||||
c.lru.MoveToBack(p.lru)
|
||||
return
|
||||
}
|
||||
|
||||
if !read {
|
||||
return
|
||||
}
|
||||
|
||||
fp := off &^ 511
|
||||
if fp >= c.size {
|
||||
return
|
||||
}
|
||||
|
||||
rq := 512
|
||||
if fp+512 > c.size {
|
||||
rq = int(c.size - fp)
|
||||
}
|
||||
p = &cachepage{pi: pi, valid: rq}
|
||||
p.lru = c.lru.PushBack(p)
|
||||
if n, err := c.f.ReadAt(p.b[:p.valid], fp); n != rq {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
c.Load++
|
||||
if c.advise != nil {
|
||||
c.advise(fp, 512, false)
|
||||
}
|
||||
c.m[pi], ok = p, true
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) wr(off int64) (p *cachepage) {
|
||||
var ok bool
|
||||
if p, ok = c.rd(off, false); ok {
|
||||
return
|
||||
}
|
||||
|
||||
pi := off >> 9
|
||||
p = &cachepage{pi: pi}
|
||||
p.lru = c.lru.PushBack(p)
|
||||
c.m[pi] = p
|
||||
return
|
||||
}
|
||||
|
||||
// Cache provides caching support for another store Accessor.
|
||||
type Cache struct {
|
||||
advise func(int64, int, bool)
|
||||
clean chan bool
|
||||
cleaning int32
|
||||
close chan bool
|
||||
f Accessor
|
||||
fi *FileInfo
|
||||
lock sync.Mutex
|
||||
lru *list.List
|
||||
m map[int64]*cachepage
|
||||
maxpages int
|
||||
size int64
|
||||
sync chan bool
|
||||
wlist *list.List
|
||||
write chan bool
|
||||
writing int32
|
||||
Rq int64 // Pages requested from cache
|
||||
Load int64 // Pages loaded (cache miss)
|
||||
Purge int64 // Pages purged
|
||||
Top int // "High water" pages
|
||||
}
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (c *Cache) BeginUpdate() error { return nil }
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (c *Cache) EndUpdate() error { return nil }
|
||||
|
||||
// NewCache creates a caching Accessor from store with total of maxcache bytes.
|
||||
// NewCache returns the new Cache, implementing Accessor or an error if any.
|
||||
//
|
||||
// The LRU mechanism is used, so the cache tries to keep often accessed pages cached.
|
||||
//
|
||||
func NewCache(store Accessor, maxcache int64, advise func(int64, int, bool)) (c *Cache, err error) {
|
||||
var fi os.FileInfo
|
||||
if fi, err = store.Stat(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
x := maxcache >> 9
|
||||
if x > math.MaxInt32/2 {
|
||||
x = math.MaxInt32 / 2
|
||||
}
|
||||
c = &Cache{
|
||||
advise: advise,
|
||||
clean: make(chan bool, 1),
|
||||
close: make(chan bool),
|
||||
f: store,
|
||||
lru: list.New(), // front == oldest used, back == last recently used
|
||||
m: make(map[int64]*cachepage),
|
||||
maxpages: int(x),
|
||||
size: fi.Size(),
|
||||
sync: make(chan bool),
|
||||
wlist: list.New(),
|
||||
write: make(chan bool, 1),
|
||||
}
|
||||
c.fi = NewFileInfo(fi, c)
|
||||
go c.writer()
|
||||
go c.cleaner(int((int64(c.maxpages) * 95) / 100)) // hysteresis
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) Accessor() Accessor {
|
||||
return c.f
|
||||
}
|
||||
|
||||
func (c *Cache) Close() (err error) {
|
||||
close(c.write)
|
||||
<-c.close
|
||||
close(c.clean)
|
||||
<-c.close
|
||||
return c.f.Close()
|
||||
}
|
||||
|
||||
func (c *Cache) Name() (s string) {
|
||||
return c.f.Name()
|
||||
}
|
||||
|
||||
func (c *Cache) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
po := int(off) & 0x1ff
|
||||
bp := 0
|
||||
rem := len(b)
|
||||
m := 0
|
||||
for rem != 0 {
|
||||
c.lock.Lock() // X1+
|
||||
p, ok := c.rd(off, true)
|
||||
if !ok {
|
||||
c.lock.Unlock() // X1-
|
||||
return -1, io.EOF
|
||||
}
|
||||
|
||||
rq := rem
|
||||
if po+rq > 512 {
|
||||
rq = 512 - po
|
||||
}
|
||||
if n := copy(b[bp:bp+rq], p.b[po:p.valid]); n != rq {
|
||||
c.lock.Unlock() // X1-
|
||||
return -1, io.EOF
|
||||
}
|
||||
|
||||
m = len(c.m)
|
||||
c.lock.Unlock() // X1-
|
||||
po = 0
|
||||
bp += rq
|
||||
off += int64(rq)
|
||||
rem -= rq
|
||||
n += rq
|
||||
}
|
||||
if m > c.maxpages && atomic.CompareAndSwapInt32(&c.cleaning, 0, 1) {
|
||||
if m > c.Top {
|
||||
c.Top = m
|
||||
}
|
||||
c.clean <- true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) Stat() (fi os.FileInfo, err error) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
return c.fi, nil
|
||||
}
|
||||
|
||||
func (c *Cache) Sync() (err error) {
|
||||
c.write <- false
|
||||
<-c.sync
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) Truncate(size int64) (err error) {
|
||||
c.Sync() //TODO improve (discard pages, the writer goroutine should also be aware, ...)
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.size = size
|
||||
return c.f.Truncate(size)
|
||||
}
|
||||
|
||||
func (c *Cache) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
po := int(off) & 0x1ff
|
||||
bp := 0
|
||||
rem := len(b)
|
||||
m := 0
|
||||
for rem != 0 {
|
||||
c.lock.Lock() // X+
|
||||
p := c.wr(off)
|
||||
rq := rem
|
||||
if po+rq > 512 {
|
||||
rq = 512 - po
|
||||
}
|
||||
if wasDirty := p.wr(b[bp:bp+rq], po); !wasDirty {
|
||||
c.wlist.PushBack(p)
|
||||
}
|
||||
m = len(c.m)
|
||||
po = 0
|
||||
bp += rq
|
||||
off += int64(rq)
|
||||
if off > c.size {
|
||||
c.size = off
|
||||
}
|
||||
c.lock.Unlock() // X-
|
||||
rem -= rq
|
||||
n += rq
|
||||
}
|
||||
if atomic.CompareAndSwapInt32(&c.writing, 0, 1) {
|
||||
c.write <- true
|
||||
}
|
||||
if m > c.maxpages && atomic.CompareAndSwapInt32(&c.cleaning, 0, 1) {
|
||||
if m > c.Top {
|
||||
c.Top = m
|
||||
}
|
||||
c.clean <- true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cache) writer() {
|
||||
for ok := true; ok; {
|
||||
var wr bool
|
||||
var off int64
|
||||
wr, ok = <-c.write
|
||||
for {
|
||||
c.lock.Lock() // X1+
|
||||
item := c.wlist.Front()
|
||||
if item == nil {
|
||||
c.lock.Unlock() // X1-
|
||||
break
|
||||
}
|
||||
|
||||
p := item.Value.(*cachepage)
|
||||
off = p.pi << 9
|
||||
if n, err := c.f.WriteAt(p.b[:p.valid], off); n != p.valid {
|
||||
c.lock.Unlock() // X1-
|
||||
panic("TODO Cache.writer errchan") //TODO +errchan
|
||||
panic(err)
|
||||
}
|
||||
|
||||
p.dirty = false
|
||||
c.wlist.Remove(item)
|
||||
if c.advise != nil {
|
||||
c.advise(off, 512, true)
|
||||
}
|
||||
c.lock.Unlock() // X1-
|
||||
}
|
||||
switch {
|
||||
case wr:
|
||||
atomic.AddInt32(&c.writing, -1)
|
||||
case ok:
|
||||
c.sync <- true
|
||||
}
|
||||
}
|
||||
c.close <- true
|
||||
}
|
||||
|
||||
func (c *Cache) cleaner(limit int) {
|
||||
for _ = range c.clean {
|
||||
var item *list.Element
|
||||
for {
|
||||
c.lock.Lock() // X1+
|
||||
if len(c.m) < limit {
|
||||
c.lock.Unlock() // X1-
|
||||
break
|
||||
}
|
||||
|
||||
if item == nil {
|
||||
item = c.lru.Front()
|
||||
}
|
||||
if p := item.Value.(*cachepage); !p.dirty {
|
||||
delete(c.m, p.pi)
|
||||
c.lru.Remove(item)
|
||||
c.Purge++
|
||||
}
|
||||
item = item.Next()
|
||||
c.lock.Unlock() // X1-
|
||||
}
|
||||
atomic.AddInt32(&c.cleaning, -1)
|
||||
}
|
||||
c.close <- true
|
||||
}
|
||||
50
vendor/github.com/cznic/fileutil/storage/file.go
generated
vendored
50
vendor/github.com/cznic/fileutil/storage/file.go
generated
vendored
@@ -1,50 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// FileAccessor is the concrete type returned by NewFile and OpenFile.
|
||||
type FileAccessor struct {
|
||||
*os.File
|
||||
}
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (f *FileAccessor) BeginUpdate() error { return nil }
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (f *FileAccessor) EndUpdate() error { return nil }
|
||||
|
||||
// NewFile returns an Accessor backed by an os.File named name, It opens the
|
||||
// named file with specified flag (os.O_RDWR etc.) and perm, (0666 etc.) if
|
||||
// applicable. If successful, methods on the returned Accessor can be used for
|
||||
// I/O. It returns the Accessor and an Error, if any.
|
||||
//
|
||||
// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op.
|
||||
func NewFile(name string, flag int, perm os.FileMode) (store Accessor, err error) {
|
||||
var f FileAccessor
|
||||
if f.File, err = os.OpenFile(name, flag, perm); err == nil {
|
||||
store = &f
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OpenFile returns an Accessor backed by an existing os.File named name, It
|
||||
// opens the named file with specified flag (os.O_RDWR etc.) and perm, (0666
|
||||
// etc.) if applicable. If successful, methods on the returned Accessor can be
|
||||
// used for I/O. It returns the Accessor and an Error, if any.
|
||||
//
|
||||
// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op.
|
||||
func OpenFile(name string, flag int, perm os.FileMode) (store Accessor, err error) {
|
||||
var f FileAccessor
|
||||
if f.File, err = os.OpenFile(name, flag, perm); err == nil {
|
||||
store = &f
|
||||
}
|
||||
return
|
||||
}
|
||||
161
vendor/github.com/cznic/fileutil/storage/mem.go
generated
vendored
161
vendor/github.com/cznic/fileutil/storage/mem.go
generated
vendored
@@ -1,161 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
)
|
||||
|
||||
//TODO -> exported type w/ exported fields
|
||||
type memaccessor struct {
|
||||
f *os.File
|
||||
fi *FileInfo
|
||||
b []byte
|
||||
}
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (m *memaccessor) BeginUpdate() error { return nil }
|
||||
|
||||
// Implementation of Accessor.
|
||||
func (f *memaccessor) EndUpdate() error { return nil }
|
||||
|
||||
// NewMem returns a new Accessor backed by an os.File. The returned Accessor
|
||||
// keeps all of the store content in memory. The memory and file images are
|
||||
// synced only by Sync and Close. Recomended for small amounts of data only
|
||||
// and content which may be lost on process kill/crash. NewMem return the
|
||||
// Accessor or an error of any.
|
||||
//
|
||||
// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op.
|
||||
func NewMem(f *os.File) (store Accessor, err error) {
|
||||
a := &memaccessor{f: f}
|
||||
if err = f.Truncate(0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var fi os.FileInfo
|
||||
if fi, err = a.f.Stat(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
a.fi = NewFileInfo(fi, a)
|
||||
store = a
|
||||
return
|
||||
}
|
||||
|
||||
// OpenMem return a new Accessor backed by an os.File. The store content is
|
||||
// loaded from f. The returned Accessor keeps all of the store content in
|
||||
// memory. The memory and file images are synced only Sync and Close.
|
||||
// Recomended for small amounts of data only and content which may be lost on
|
||||
// process kill/crash. OpenMem return the Accessor or an error of any.
|
||||
//
|
||||
// NOTE: The returned Accessor implements BeginUpdate and EndUpdate as a no op.
|
||||
func OpenMem(f *os.File) (store Accessor, err error) {
|
||||
a := &memaccessor{f: f}
|
||||
if a.b, err = ioutil.ReadAll(a.f); err != nil {
|
||||
a.f.Close()
|
||||
return
|
||||
}
|
||||
|
||||
var fi os.FileInfo
|
||||
if fi, err = a.f.Stat(); err != nil {
|
||||
a.f.Close()
|
||||
return
|
||||
}
|
||||
|
||||
a.fi = NewFileInfo(fi, a)
|
||||
store = a
|
||||
return
|
||||
}
|
||||
|
||||
// Close implements Accessor. Specifically it synchronizes the memory and file images.
|
||||
func (a *memaccessor) Close() (err error) {
|
||||
defer func() {
|
||||
a.b = nil
|
||||
if a.f != nil {
|
||||
if e := a.f.Close(); e != nil && err == nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
a.f = nil
|
||||
}()
|
||||
|
||||
return a.Sync()
|
||||
}
|
||||
|
||||
func (a *memaccessor) Name() string {
|
||||
return a.f.Name()
|
||||
}
|
||||
|
||||
func (a *memaccessor) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 || off > math.MaxInt32 {
|
||||
return -1, fmt.Errorf("ReadAt: illegal offset %#x", off)
|
||||
}
|
||||
|
||||
rq, fp := len(b), int(off)
|
||||
if fp+rq > len(a.b) {
|
||||
return -1, fmt.Errorf("ReadAt: illegal rq %#x @ offset %#x, len %#x", rq, fp, len(a.b))
|
||||
}
|
||||
|
||||
copy(b, a.b[fp:])
|
||||
return
|
||||
}
|
||||
|
||||
func (a *memaccessor) Stat() (fi os.FileInfo, err error) {
|
||||
i := a.fi
|
||||
i.FSize = int64(len(a.b))
|
||||
fi = i
|
||||
return
|
||||
}
|
||||
|
||||
// Sync implements Accessor. Specifically it synchronizes the memory and file images.
|
||||
func (a *memaccessor) Sync() (err error) {
|
||||
var n int
|
||||
if n, err = a.f.WriteAt(a.b, 0); n != len(a.b) {
|
||||
return
|
||||
}
|
||||
|
||||
return a.f.Truncate(int64(len(a.b)))
|
||||
}
|
||||
|
||||
func (a *memaccessor) Truncate(size int64) (err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = e.(error)
|
||||
}
|
||||
}()
|
||||
|
||||
if size > math.MaxInt32 {
|
||||
panic(errors.New("truncate: illegal size"))
|
||||
}
|
||||
|
||||
a.b = a.b[:int(size)]
|
||||
return
|
||||
}
|
||||
|
||||
func (a *memaccessor) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 || off > math.MaxInt32 {
|
||||
return -1, errors.New("WriteAt: illegal offset")
|
||||
}
|
||||
|
||||
rq, fp, size := len(b), int(off), len(a.b)
|
||||
if need := rq + fp; need > size {
|
||||
if need <= cap(a.b) {
|
||||
a.b = a.b[:need]
|
||||
} else {
|
||||
nb := make([]byte, need, 2*need)
|
||||
copy(nb, a.b)
|
||||
a.b = nb
|
||||
}
|
||||
}
|
||||
|
||||
copy(a.b[int(off):], b)
|
||||
return
|
||||
}
|
||||
74
vendor/github.com/cznic/fileutil/storage/probe.go
generated
vendored
74
vendor/github.com/cznic/fileutil/storage/probe.go
generated
vendored
@@ -1,74 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
// Probe collects usage statistics of the embeded Accessor.
|
||||
// Probe itself IS an Accessor.
|
||||
type Probe struct {
|
||||
Accessor
|
||||
Chain *Probe
|
||||
OpsRd int64
|
||||
OpsWr int64
|
||||
BytesRd int64
|
||||
BytesWr int64
|
||||
SectorsRd int64 // Assuming 512 byte sector size
|
||||
SectorsWr int64
|
||||
}
|
||||
|
||||
// NewProbe returns a newly created probe which embedes the src Accessor.
|
||||
// The retuned *Probe satisfies Accessor. if chain != nil then Reset()
|
||||
// is cascaded down the chained Probes.
|
||||
func NewProbe(src Accessor, chain *Probe) *Probe {
|
||||
return &Probe{Accessor: src, Chain: chain}
|
||||
}
|
||||
|
||||
func reset(n *int64) {
|
||||
atomic.AddInt64(n, -atomic.AddInt64(n, 0))
|
||||
}
|
||||
|
||||
// Reset zeroes the collected statistics of p.
|
||||
func (p *Probe) Reset() {
|
||||
if p.Chain != nil {
|
||||
p.Chain.Reset()
|
||||
}
|
||||
reset(&p.OpsRd)
|
||||
reset(&p.OpsWr)
|
||||
reset(&p.BytesRd)
|
||||
reset(&p.BytesWr)
|
||||
reset(&p.SectorsRd)
|
||||
reset(&p.SectorsWr)
|
||||
}
|
||||
|
||||
func (p *Probe) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
n, err = p.Accessor.ReadAt(b, off)
|
||||
atomic.AddInt64(&p.OpsRd, 1)
|
||||
atomic.AddInt64(&p.BytesRd, int64(n))
|
||||
if n <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
sectorFirst := off >> 9
|
||||
sectorLast := (off + int64(n) - 1) >> 9
|
||||
atomic.AddInt64(&p.SectorsRd, sectorLast-sectorFirst+1)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Probe) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
n, err = p.Accessor.WriteAt(b, off)
|
||||
atomic.AddInt64(&p.OpsWr, 1)
|
||||
atomic.AddInt64(&p.BytesWr, int64(n))
|
||||
if n <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
sectorFirst := off >> 9
|
||||
sectorLast := (off + int64(n) - 1) >> 9
|
||||
atomic.AddInt64(&p.SectorsWr, sectorLast-sectorFirst+1)
|
||||
return
|
||||
}
|
||||
141
vendor/github.com/cznic/fileutil/storage/storage.go
generated
vendored
141
vendor/github.com/cznic/fileutil/storage/storage.go
generated
vendored
@@ -1,141 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
// WIP: Package storage defines and implements storage providers and store accessors.
|
||||
package storage
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileInfo is a type implementing os.FileInfo which has setable fields, like
|
||||
// the older os.FileInfo used to have. It is used wehere e.g. the Size is
|
||||
// needed to be faked (encapsulated/memory only file, file cache, etc.).
|
||||
type FileInfo struct {
|
||||
FName string // base name of the file
|
||||
FSize int64 // length in bytes
|
||||
FMode os.FileMode // file mode bits
|
||||
FModTime time.Time // modification time
|
||||
FIsDir bool // abbreviation for Mode().IsDir()
|
||||
sys interface{} // underlying data source (can be nil)
|
||||
}
|
||||
|
||||
// NewFileInfo creates FileInfo from os.FileInfo fi.
|
||||
func NewFileInfo(fi os.FileInfo, sys interface{}) *FileInfo {
|
||||
return &FileInfo{fi.Name(), fi.Size(), fi.Mode(), fi.ModTime(), fi.IsDir(), sys}
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) Name() string {
|
||||
return fi.FName
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) Size() int64 {
|
||||
return fi.FSize
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) Mode() os.FileMode {
|
||||
return fi.FMode
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) ModTime() time.Time {
|
||||
return fi.FModTime
|
||||
}
|
||||
|
||||
// Implementation of os.FileInfo
|
||||
func (fi *FileInfo) IsDir() bool {
|
||||
return fi.FIsDir
|
||||
}
|
||||
|
||||
func (fi *FileInfo) Sys() interface{} {
|
||||
return fi.sys
|
||||
}
|
||||
|
||||
// Accessor provides I/O methods to access a store.
|
||||
type Accessor interface {
|
||||
|
||||
// Close closes the store, rendering it unusable for I/O. It returns an
|
||||
// error, if any.
|
||||
Close() error
|
||||
|
||||
// Name returns the name of the file as presented to Open.
|
||||
Name() string
|
||||
|
||||
// ReadAt reads len(b) bytes from the store starting at byte offset off.
|
||||
// It returns the number of bytes read and the error, if any.
|
||||
// EOF is signaled by a zero count with err set to os.EOF.
|
||||
// ReadAt always returns a non-nil Error when n != len(b).
|
||||
ReadAt(b []byte, off int64) (n int, err error)
|
||||
|
||||
// Stat returns the FileInfo structure describing the store. It returns
|
||||
// the os.FileInfo and an error, if any.
|
||||
Stat() (fi os.FileInfo, err error)
|
||||
|
||||
// Sync commits the current contents of the store to stable storage.
|
||||
// Typically, this means flushing the file system's in-memory copy of
|
||||
// recently written data to disk.
|
||||
Sync() (err error)
|
||||
|
||||
// Truncate changes the size of the store. It does not change the I/O
|
||||
// offset.
|
||||
Truncate(size int64) error
|
||||
|
||||
// WriteAt writes len(b) bytes to the store starting at byte offset off.
|
||||
// It returns the number of bytes written and an error, if any.
|
||||
// WriteAt returns a non-nil Error when n != len(b).
|
||||
WriteAt(b []byte, off int64) (n int, err error)
|
||||
|
||||
// Before every [structural] change of a store the BeginUpdate is to be
|
||||
// called and paired with EndUpdate after the change makes the store's
|
||||
// state consistent again. Invocations of BeginUpdate may nest. On
|
||||
// invoking the last non nested EndUpdate an implicit "commit" should
|
||||
// be performed by the store/provider. The concrete mechanism is
|
||||
// unspecified. It could be for example a write-ahead log. Stores may
|
||||
// implement BeginUpdate and EndUpdate as a (documented) no op.
|
||||
BeginUpdate() error
|
||||
EndUpdate() error
|
||||
}
|
||||
|
||||
// Mutate is a helper/wrapper for executing f in between a.BeginUpdate and
|
||||
// a.EndUpdate. Any parameters and/or return values except an error should be
|
||||
// captured by a function literal passed as f. The returned err is either nil
|
||||
// or the first non nil error returned from the sequence of execution:
|
||||
// BeginUpdate, [f,] EndUpdate. The pair BeginUpdate/EndUpdate *is* invoked
|
||||
// always regardles of any possible errors produced. Mutate doesn't handle
|
||||
// panic, it should be used only with a function [literal] which doesn't panic.
|
||||
// Otherwise the pairing of BeginUpdate/EndUpdate is not guaranteed.
|
||||
//
|
||||
// NOTE: If BeginUpdate, which is invoked before f, returns a non-nil error,
|
||||
// then f is not invoked at all (but EndUpdate still is).
|
||||
func Mutate(a Accessor, f func() error) (err error) {
|
||||
defer func() {
|
||||
if e := a.EndUpdate(); e != nil && err == nil {
|
||||
err = e
|
||||
}
|
||||
}()
|
||||
|
||||
if err = a.BeginUpdate(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return f()
|
||||
}
|
||||
|
||||
// LockedMutate wraps Mutate in yet another layer consisting of a
|
||||
// l.Lock/l.Unlock pair. All other limitations apply as in Mutate, e.g. no
|
||||
// panics are allowed to happen - otherwise no guarantees can be made about
|
||||
// Unlock matching the Lock.
|
||||
func LockedMutate(a Accessor, l sync.Locker, f func() error) (err error) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
|
||||
return Mutate(a, f)
|
||||
}
|
||||
13
vendor/github.com/cznic/fileutil/storage/test_deps.go
generated
vendored
13
vendor/github.com/cznic/fileutil/storage/test_deps.go
generated
vendored
@@ -1,13 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package storage
|
||||
|
||||
// Pull test dependencies too.
|
||||
// Enables easy 'go test X' after 'go get X'
|
||||
import (
|
||||
// nothing yet
|
||||
)
|
||||
13
vendor/github.com/cznic/fileutil/test_deps.go
generated
vendored
13
vendor/github.com/cznic/fileutil/test_deps.go
generated
vendored
@@ -1,13 +0,0 @@
|
||||
// Copyright (c) 2014 The fileutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
package fileutil
|
||||
|
||||
// Pull test dependencies too.
|
||||
// Enables easy 'go test X' after 'go get X'
|
||||
import (
|
||||
// nothing yet
|
||||
)
|
||||
27
vendor/github.com/cznic/golex/lex/LICENSE
generated
vendored
27
vendor/github.com/cznic/golex/lex/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2014 The golex Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
410
vendor/github.com/cznic/golex/lex/api.go
generated
vendored
410
vendor/github.com/cznic/golex/lex/api.go
generated
vendored
@@ -1,410 +0,0 @@
|
||||
// Copyright (c) 2015 The golex Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lex
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// BOM handling modes which can be set by the BOMMode Option. Default is BOMIgnoreFirst.
|
||||
const (
|
||||
BOMError = iota // BOM is an error anywhere.
|
||||
BOMIgnoreFirst // Skip BOM if at beginning, report as error if anywhere else.
|
||||
BOMPassAll // No special handling of BOM.
|
||||
BOMPassFirst // No special handling of BOM if at beginning, report as error if anywhere else.
|
||||
)
|
||||
|
||||
const (
|
||||
NonASCII = 0x80 // DefaultRuneClass returns NonASCII for non ASCII runes.
|
||||
RuneEOF = -1 // Distinct from any valid Unicode rune value.
|
||||
)
|
||||
|
||||
// DefaultRuneClass returns the character class of r. If r is an ASCII code
|
||||
// then its class equals the ASCII code. Any other rune is of class NonASCII.
|
||||
//
|
||||
// DefaultRuneClass is the default implementation Lexer will use to convert
|
||||
// runes (21 bit entities) to scanner classes (8 bit entities).
|
||||
//
|
||||
// Non ASCII aware lexical analyzers will typically use their own
|
||||
// categorization function. To assign such custom function use the RuneClass
|
||||
// option.
|
||||
func DefaultRuneClass(r rune) int {
|
||||
if r >= 0 && r < 0x80 {
|
||||
return int(r)
|
||||
}
|
||||
|
||||
return NonASCII
|
||||
}
|
||||
|
||||
// Char represents a rune and its position.
|
||||
type Char struct {
|
||||
Rune rune
|
||||
pos int32
|
||||
}
|
||||
|
||||
// NewChar returns a new Char value.
|
||||
func NewChar(pos token.Pos, r rune) Char { return Char{pos: int32(pos), Rune: r} }
|
||||
|
||||
// IsValid reports whether c is not a zero Char.
|
||||
func (c Char) IsValid() bool { return c.Pos().IsValid() }
|
||||
|
||||
// Pos returns the token.Pos associated with c.
|
||||
func (c Char) Pos() token.Pos { return token.Pos(c.pos) }
|
||||
|
||||
// CharReader is a RuneReader providing additionally explicit position
|
||||
// information by returning a Char instead of a rune as its first result.
|
||||
type CharReader interface {
|
||||
ReadChar() (c Char, size int, err error)
|
||||
}
|
||||
|
||||
// Lexer suports golex[0] generated lexical analyzers.
|
||||
type Lexer struct {
|
||||
File *token.File // The *token.File passed to New.
|
||||
First Char // First remembers the lookahead char when Rule0 was invoked.
|
||||
Last Char // Last remembers the last Char returned by Next.
|
||||
Prev Char // Prev remembers the Char previous to Last.
|
||||
bomMode int // See the BOM* constants.
|
||||
bytesBuf bytes.Buffer // Used by TokenBytes.
|
||||
charSrc CharReader // Lexer alternative input.
|
||||
classf func(rune) int //
|
||||
errorf func(token.Pos, string) //
|
||||
lookahead Char // Lookahead if non zero.
|
||||
mark int // Longest match marker.
|
||||
off int // Used for File.AddLine.
|
||||
src io.RuneReader // Lexer input.
|
||||
tokenBuf []Char // Lexeme collector.
|
||||
ungetBuf []Char // Unget buffer.
|
||||
}
|
||||
|
||||
// New returns a new *Lexer. The result can be amended using opts.
|
||||
//
|
||||
// Non Unicode Input
|
||||
//
|
||||
// To consume sources in other encodings and still have exact position
|
||||
// information, pass an io.RuneReader which returns the next input character
|
||||
// reencoded as an Unicode rune but returns the size (number of bytes used to
|
||||
// encode it) of the original character, not the size of its UTF-8
|
||||
// representation after converted to an Unicode rune. Size is the second
|
||||
// returned value of io.RuneReader.ReadRune method[4].
|
||||
//
|
||||
// When src optionally implements CharReader its ReadChar method is used
|
||||
// instead of io.ReadRune.
|
||||
func New(file *token.File, src io.RuneReader, opts ...Option) (*Lexer, error) {
|
||||
r := &Lexer{
|
||||
File: file,
|
||||
bomMode: BOMIgnoreFirst,
|
||||
classf: DefaultRuneClass,
|
||||
src: src,
|
||||
}
|
||||
if x, ok := src.(CharReader); ok {
|
||||
r.charSrc = x
|
||||
}
|
||||
r.errorf = r.defaultErrorf
|
||||
for _, o := range opts {
|
||||
if err := o(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Abort handles the situation when the scanner does not successfully recognize
|
||||
// any token or when an attempt to find the longest match "overruns" from an
|
||||
// accepting state only to never reach an accepting state again. In the first
|
||||
// case the scanner was never in an accepting state since last call to Rule0
|
||||
// and then (true, previousLookahead rune) is returned, effectively consuming a
|
||||
// single Char token, avoiding scanner stall. Otherwise there was at least one
|
||||
// accepting scanner state marked using Mark. In this case Abort rollbacks the
|
||||
// lexer state to the marked state and returns (false, 0). The scanner must
|
||||
// then execute a prescribed goto statement. For example:
|
||||
//
|
||||
// %yyc c
|
||||
// %yyn c = l.Next()
|
||||
// %yym l.Mark()
|
||||
//
|
||||
// %{
|
||||
// package foo
|
||||
//
|
||||
// import (...)
|
||||
//
|
||||
// type lexer struct {
|
||||
// *lex.Lexer
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// func newLexer(...) *lexer {
|
||||
// return &lexer{
|
||||
// lex.NewLexer(...),
|
||||
// ...
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// func (l *lexer) scan() int {
|
||||
// c := l.Enter()
|
||||
// %}
|
||||
//
|
||||
// ... more lex defintions
|
||||
//
|
||||
// %%
|
||||
//
|
||||
// c = l.Rule0()
|
||||
//
|
||||
// ... lex rules
|
||||
//
|
||||
// %%
|
||||
//
|
||||
// if c, ok := l.Abort(); ok {
|
||||
// return c
|
||||
// }
|
||||
//
|
||||
// goto yyAction
|
||||
// }
|
||||
func (l *Lexer) Abort() (int, bool) {
|
||||
if l.mark >= 0 {
|
||||
if len(l.tokenBuf) > l.mark {
|
||||
l.Unget(l.lookahead)
|
||||
for i := len(l.tokenBuf) - 1; i >= l.mark; i-- {
|
||||
l.Unget(l.tokenBuf[i])
|
||||
}
|
||||
}
|
||||
l.tokenBuf = l.tokenBuf[:l.mark]
|
||||
return 0, false
|
||||
}
|
||||
|
||||
switch n := len(l.tokenBuf); n {
|
||||
case 0: // [] z
|
||||
c := l.lookahead
|
||||
l.Next()
|
||||
return int(c.Rune), true
|
||||
case 1: // [a] z
|
||||
return int(l.tokenBuf[0].Rune), true
|
||||
default: // [a, b, ...], z
|
||||
c := l.tokenBuf[0] // a
|
||||
l.Unget(l.lookahead) // z
|
||||
for i := n - 1; i > 1; i-- {
|
||||
l.Unget(l.tokenBuf[i]) // ...
|
||||
}
|
||||
l.lookahead = l.tokenBuf[1] // b
|
||||
l.tokenBuf = l.tokenBuf[:1]
|
||||
return int(c.Rune), true
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Lexer) class() int { return l.classf(l.lookahead.Rune) }
|
||||
|
||||
func (l *Lexer) defaultErrorf(pos token.Pos, msg string) {
|
||||
l.Error(fmt.Sprintf("%v: %v", l.File.Position(pos), msg))
|
||||
}
|
||||
|
||||
// Enter ensures the lexer has a valid lookahead Char and returns its class.
|
||||
// Typical use in an .l file
|
||||
//
|
||||
// func (l *lexer) scan() lex.Char {
|
||||
// c := l.Enter()
|
||||
// ...
|
||||
func (l *Lexer) Enter() int {
|
||||
if !l.lookahead.IsValid() {
|
||||
l.Next()
|
||||
}
|
||||
return l.class()
|
||||
}
|
||||
|
||||
// Error Implements yyLexer[2] by printing the msg to stderr.
|
||||
func (l *Lexer) Error(msg string) {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", msg)
|
||||
}
|
||||
|
||||
// Lookahead returns the current lookahead.
|
||||
func (l *Lexer) Lookahead() Char {
|
||||
if !l.lookahead.IsValid() {
|
||||
l.Next()
|
||||
}
|
||||
return l.lookahead
|
||||
}
|
||||
|
||||
// Mark records the current state of scanner as accepting. It implements the
|
||||
// golex macro %yym. Typical usage in an .l file:
|
||||
//
|
||||
// %yym l.Mark()
|
||||
func (l *Lexer) Mark() { l.mark = len(l.tokenBuf) }
|
||||
|
||||
func (l *Lexer) next() int {
|
||||
const bom = '\ufeff'
|
||||
|
||||
if c := l.lookahead; c.IsValid() {
|
||||
l.tokenBuf = append(l.tokenBuf, c)
|
||||
}
|
||||
if n := len(l.ungetBuf); n != 0 {
|
||||
l.lookahead = l.ungetBuf[n-1]
|
||||
l.ungetBuf = l.ungetBuf[:n-1]
|
||||
return l.class()
|
||||
}
|
||||
|
||||
if l.src == nil {
|
||||
return RuneEOF
|
||||
}
|
||||
|
||||
var r rune
|
||||
var sz int
|
||||
var err error
|
||||
var pos token.Pos
|
||||
var c Char
|
||||
again:
|
||||
off0 := l.off
|
||||
switch cs := l.charSrc; {
|
||||
case cs != nil:
|
||||
c, sz, err = cs.ReadChar()
|
||||
r = c.Rune
|
||||
pos = c.Pos()
|
||||
default:
|
||||
r, sz, err = l.src.ReadRune()
|
||||
pos = l.File.Pos(l.off)
|
||||
}
|
||||
l.off += sz
|
||||
if err != nil {
|
||||
l.src = nil
|
||||
r = RuneEOF
|
||||
if err != io.EOF {
|
||||
l.errorf(pos, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if r == bom {
|
||||
switch l.bomMode {
|
||||
default:
|
||||
fallthrough
|
||||
case BOMIgnoreFirst:
|
||||
if off0 != 0 {
|
||||
l.errorf(pos, "unicode (UTF-8) BOM in middle of file")
|
||||
}
|
||||
goto again
|
||||
case BOMPassAll:
|
||||
// nop
|
||||
case BOMPassFirst:
|
||||
if off0 != 0 {
|
||||
l.errorf(pos, "unicode (UTF-8) BOM in middle of file")
|
||||
goto again
|
||||
}
|
||||
case BOMError:
|
||||
switch {
|
||||
case off0 == 0:
|
||||
l.errorf(pos, "unicode (UTF-8) BOM at beginnig of file")
|
||||
default:
|
||||
l.errorf(pos, "unicode (UTF-8) BOM in middle of file")
|
||||
}
|
||||
goto again
|
||||
}
|
||||
}
|
||||
|
||||
l.lookahead = NewChar(pos, r)
|
||||
if r == '\n' {
|
||||
l.File.AddLine(l.off)
|
||||
}
|
||||
return l.class()
|
||||
}
|
||||
|
||||
// Next advances the scanner for one rune and returns the respective character
|
||||
// class of the new lookahead. Typical usage in an .l file:
|
||||
//
|
||||
// %yyn c = l.Next()
|
||||
func (l *Lexer) Next() int {
|
||||
l.Prev = l.Last
|
||||
r := l.next()
|
||||
l.Last = l.lookahead
|
||||
return r
|
||||
}
|
||||
|
||||
// Offset returns the current reading offset of the lexer's source.
|
||||
func (l *Lexer) Offset() int { return l.off }
|
||||
|
||||
// Rule0 initializes the scanner state before the attempt to recognize a token
|
||||
// starts. The token collecting buffer is cleared. Rule0 records the current
|
||||
// lookahead in l.First and returns its class. Typical usage in an .l file:
|
||||
//
|
||||
// ... lex definitions
|
||||
//
|
||||
// %%
|
||||
//
|
||||
// c := l.Rule0()
|
||||
//
|
||||
// first-pattern-regexp
|
||||
func (l *Lexer) Rule0() int {
|
||||
if !l.lookahead.IsValid() {
|
||||
l.Next()
|
||||
}
|
||||
l.First = l.lookahead
|
||||
l.mark = -1
|
||||
if len(l.tokenBuf) > 1<<18 { //DONE constant tuned
|
||||
l.tokenBuf = nil
|
||||
} else {
|
||||
l.tokenBuf = l.tokenBuf[:0]
|
||||
}
|
||||
return l.class()
|
||||
}
|
||||
|
||||
// Token returns the currently collected token chars. The result is R/O.
|
||||
func (l *Lexer) Token() []Char { return l.tokenBuf }
|
||||
|
||||
// TokenBytes returns the UTF-8 encoding of Token. If builder is not nil then
|
||||
// it's called instead to build the encoded token byte value into the buffer
|
||||
// passed to it.
|
||||
//
|
||||
// The Result is R/O.
|
||||
func (l *Lexer) TokenBytes(builder func(*bytes.Buffer)) []byte {
|
||||
if len(l.bytesBuf.Bytes()) < 1<<18 { //DONE constant tuned
|
||||
l.bytesBuf.Reset()
|
||||
} else {
|
||||
l.bytesBuf = bytes.Buffer{}
|
||||
}
|
||||
switch {
|
||||
case builder != nil:
|
||||
builder(&l.bytesBuf)
|
||||
default:
|
||||
for _, c := range l.Token() {
|
||||
l.bytesBuf.WriteRune(c.Rune)
|
||||
}
|
||||
}
|
||||
return l.bytesBuf.Bytes()
|
||||
}
|
||||
|
||||
// Unget unreads all chars in c.
|
||||
func (l *Lexer) Unget(c ...Char) {
|
||||
l.ungetBuf = append(l.ungetBuf, c...)
|
||||
l.lookahead = Char{} // Must invalidate lookahead.
|
||||
}
|
||||
|
||||
// Option is a function which can be passed as an optional argument to New.
|
||||
type Option func(*Lexer) error
|
||||
|
||||
// BOMMode option selects how the lexer handles BOMs. See the BOM* constants for details.
|
||||
func BOMMode(mode int) Option {
|
||||
return func(l *Lexer) error {
|
||||
l.bomMode = mode
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorFunc option sets a function called when an, for example I/O error,
|
||||
// occurs. The default is to call Error with the position and message already
|
||||
// formated as a string.
|
||||
func ErrorFunc(f func(token.Pos, string)) Option {
|
||||
return func(l *Lexer) error {
|
||||
l.errorf = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RuneClass option sets the function used to convert runes to character
|
||||
// classes.
|
||||
func RuneClass(f func(rune) int) Option {
|
||||
return func(l *Lexer) error {
|
||||
l.classf = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
40
vendor/github.com/cznic/golex/lex/doc.go
generated
vendored
40
vendor/github.com/cznic/golex/lex/doc.go
generated
vendored
@@ -1,40 +0,0 @@
|
||||
// Copyright (c) 2015 The golex Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package lex is a Unicode-friendly run time library for golex[0] generated
|
||||
// lexical analyzers[1].
|
||||
//
|
||||
// Changelog
|
||||
//
|
||||
// 2015-04-08: Initial release.
|
||||
//
|
||||
// Character classes
|
||||
//
|
||||
// Golex internally handles only 8 bit "characters". Many Unicode-aware
|
||||
// tokenizers do not actually need to recognize every Unicode rune, but only
|
||||
// some particular partitions/subsets. Like, for example, a particular Unicode
|
||||
// category, say upper case letters: Lu.
|
||||
//
|
||||
// The idea is to convert all runes in a particular set as a single 8 bit
|
||||
// character allocated outside the ASCII range of codes. The token value, a
|
||||
// string of runes and their exact positions is collected as usual (see the
|
||||
// Token and TokenBytes method), but the tokenizer DFA is simpler (and thus
|
||||
// smaller and perhaps also faster) when this technique is used. In the example
|
||||
// program (see below), recognizing (and skipping) white space, integer
|
||||
// literals, one keyword and Go identifiers requires only an 8 state DFA[5].
|
||||
//
|
||||
// To provide the conversion from runes to character classes, "install" your
|
||||
// converting function using the RuneClass option.
|
||||
//
|
||||
// References
|
||||
//
|
||||
// -
|
||||
//
|
||||
// [0]: http://godoc.org/github.com/cznic/golex
|
||||
// [1]: http://en.wikipedia.org/wiki/Lexical_analysis
|
||||
// [2]: http://golang.org/cmd/yacc/
|
||||
// [3]: https://github.com/cznic/golex/blob/master/lex/example.l
|
||||
// [4]: http://golang.org/pkg/io/#RuneReader
|
||||
// [5]: https://github.com/cznic/golex/blob/master/lex/dfa
|
||||
package lex
|
||||
27
vendor/github.com/cznic/internal/buffer/LICENSE
generated
vendored
27
vendor/github.com/cznic/internal/buffer/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2016 The Internal Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
146
vendor/github.com/cznic/internal/buffer/buffer.go
generated
vendored
146
vendor/github.com/cznic/internal/buffer/buffer.go
generated
vendored
@@ -1,146 +0,0 @@
|
||||
// Copyright 2016 The Internal Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package buffer implements a pool of pointers to byte slices.
|
||||
//
|
||||
// Example usage pattern
|
||||
//
|
||||
// p := buffer.Get(size)
|
||||
// b := *p // Now you can use b in any way you need.
|
||||
// ...
|
||||
// // When b will not be used anymore
|
||||
// buffer.Put(p)
|
||||
// ...
|
||||
// // If b or p are not going out of scope soon, optionally
|
||||
// b = nil
|
||||
// p = nil
|
||||
//
|
||||
// Otherwise the pool cannot release the buffer on garbage collection.
|
||||
//
|
||||
// Do not do
|
||||
//
|
||||
// p := buffer.Get(size)
|
||||
// b := *p
|
||||
// ...
|
||||
// buffer.Put(&b)
|
||||
//
|
||||
// or
|
||||
//
|
||||
// b := *buffer.Get(size)
|
||||
// ...
|
||||
// buffer.Put(&b)
|
||||
package buffer
|
||||
|
||||
import (
|
||||
"github.com/cznic/internal/slice"
|
||||
"io"
|
||||
)
|
||||
|
||||
// CGet returns a pointer to a byte slice of len size. The pointed to byte
|
||||
// slice is zeroed up to its cap. CGet panics for size < 0.
|
||||
//
|
||||
// CGet is safe for concurrent use by multiple goroutines.
|
||||
func CGet(size int) *[]byte { return slice.Bytes.CGet(size).(*[]byte) }
|
||||
|
||||
// Get returns a pointer to a byte slice of len size. The pointed to byte slice
|
||||
// is not zeroed. Get panics for size < 0.
|
||||
//
|
||||
// Get is safe for concurrent use by multiple goroutines.
|
||||
func Get(size int) *[]byte { return slice.Bytes.Get(size).(*[]byte) }
|
||||
|
||||
// Put puts a pointer to a byte slice into a pool for possible later reuse by
|
||||
// CGet or Get.
|
||||
//
|
||||
// Put is safe for concurrent use by multiple goroutines.
|
||||
func Put(p *[]byte) { slice.Bytes.Put(p) }
|
||||
|
||||
// Bytes is similar to bytes.Buffer but may generate less garbage when properly
|
||||
// Closed. Zero value is ready to use.
|
||||
type Bytes struct {
|
||||
p *[]byte
|
||||
}
|
||||
|
||||
// Bytes return the content of b. The result is R/O.
|
||||
func (b *Bytes) Bytes() []byte {
|
||||
if b.p != nil {
|
||||
return *b.p
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close will recycle the underlying storage, if any. After Close, b is again
|
||||
// the zero value.
|
||||
func (b *Bytes) Close() error {
|
||||
if b.p != nil {
|
||||
Put(b.p)
|
||||
b.p = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len returns the size of content in b.
|
||||
func (b *Bytes) Len() int {
|
||||
if b.p != nil {
|
||||
return len(*b.p)
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// Reset discard the content of Bytes while keeping the internal storage, if any.
|
||||
func (b *Bytes) Reset() {
|
||||
if b.p != nil {
|
||||
*b.p = (*b.p)[:0]
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes p into b and returns (len(p), nil).
|
||||
func (b *Bytes) Write(p []byte) (int, error) {
|
||||
n := b.Len()
|
||||
b.grow(n + len(p))
|
||||
copy((*b.p)[n:], p)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// WriteByte writes p into b and returns nil.
|
||||
func (b *Bytes) WriteByte(p byte) error {
|
||||
n := b.Len()
|
||||
b.grow(n + 1)
|
||||
(*b.p)[n] = p
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteTo writes b's content to w and returns the number of bytes written to w
|
||||
// and an error, if any.
|
||||
func (b *Bytes) WriteTo(w io.Writer) (int64, error) {
|
||||
n, err := w.Write(b.Bytes())
|
||||
return int64(n), err
|
||||
}
|
||||
|
||||
// WriteString writes s to b and returns (len(s), nil).
|
||||
func (b *Bytes) WriteString(s string) (int, error) {
|
||||
n := b.Len()
|
||||
b.grow(n + len(s))
|
||||
copy((*b.p)[n:], s)
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
func (b *Bytes) grow(n int) {
|
||||
if b.p != nil {
|
||||
if n <= cap(*b.p) {
|
||||
*b.p = (*b.p)[:n]
|
||||
return
|
||||
}
|
||||
|
||||
np := Get(2 * n)
|
||||
*np = (*np)[:n]
|
||||
copy(*np, *b.p)
|
||||
Put(b.p)
|
||||
b.p = np
|
||||
return
|
||||
}
|
||||
|
||||
b.p = Get(n)
|
||||
}
|
||||
27
vendor/github.com/cznic/internal/file/LICENSE
generated
vendored
27
vendor/github.com/cznic/internal/file/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2016 The Internal Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
466
vendor/github.com/cznic/internal/file/file.go
generated
vendored
466
vendor/github.com/cznic/internal/file/file.go
generated
vendored
@@ -1,466 +0,0 @@
|
||||
// Copyright 2016 The Internal Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package file provides an os.File-like interface of a memory mapped file.
|
||||
package file
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/internal/buffer"
|
||||
"github.com/cznic/mathutil"
|
||||
"github.com/edsrzf/mmap-go"
|
||||
)
|
||||
|
||||
const copyBufSize = 1 << 20 // 1 MB.
|
||||
|
||||
var (
|
||||
_ Interface = (*mem)(nil)
|
||||
_ Interface = (*file)(nil)
|
||||
|
||||
_ os.FileInfo = stat{}
|
||||
|
||||
sysPage = os.Getpagesize()
|
||||
)
|
||||
|
||||
// Interface is a os.File-like entity.
|
||||
type Interface interface {
|
||||
io.ReaderAt
|
||||
io.ReaderFrom
|
||||
io.WriterAt
|
||||
io.WriterTo
|
||||
|
||||
Close() error
|
||||
Stat() (os.FileInfo, error)
|
||||
Sync() error
|
||||
Truncate(int64) error
|
||||
}
|
||||
|
||||
// Open returns a new Interface backed by f, or an error, if any.
|
||||
func Open(f *os.File) (Interface, error) { return newFile(f, 1<<30, 20) }
|
||||
|
||||
// OpenMem returns a new Interface, or an error, if any. The Interface content
|
||||
// is volatile, it's backed only by process' memory.
|
||||
func OpenMem(name string) (Interface, error) { return newMem(name, 18), nil }
|
||||
|
||||
type memMap map[int64]*[]byte
|
||||
|
||||
type mem struct {
|
||||
m memMap
|
||||
modTime time.Time
|
||||
name string
|
||||
pgBits uint
|
||||
pgMask int
|
||||
pgSize int
|
||||
size int64
|
||||
}
|
||||
|
||||
func newMem(name string, pgBits uint) *mem {
|
||||
pgSize := 1 << pgBits
|
||||
return &mem{
|
||||
m: memMap{},
|
||||
modTime: time.Now(),
|
||||
name: name,
|
||||
pgBits: pgBits,
|
||||
pgMask: pgSize - 1,
|
||||
pgSize: pgSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *mem) IsDir() bool { return false }
|
||||
func (f *mem) Mode() os.FileMode { return os.ModeTemporary + 0600 }
|
||||
func (f *mem) ModTime() time.Time { return f.modTime }
|
||||
func (f *mem) Name() string { return f.name }
|
||||
func (f *mem) ReadFrom(r io.Reader) (n int64, err error) { return readFrom(f, r) }
|
||||
func (f *mem) Size() (n int64) { return f.size }
|
||||
func (f *mem) Stat() (os.FileInfo, error) { return f, nil }
|
||||
func (f *mem) Sync() error { return nil }
|
||||
func (f *mem) Sys() interface{} { return nil }
|
||||
func (f *mem) WriteTo(w io.Writer) (n int64, err error) { return writeTo(f, w) }
|
||||
|
||||
func (f *mem) Close() error {
|
||||
f.Truncate(0)
|
||||
f.m = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *mem) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
avail := f.size - off
|
||||
pi := off >> f.pgBits
|
||||
po := int(off) & f.pgMask
|
||||
rem := len(b)
|
||||
if int64(rem) >= avail {
|
||||
rem = int(avail)
|
||||
err = io.EOF
|
||||
}
|
||||
var zeroPage *[]byte
|
||||
for rem != 0 && avail > 0 {
|
||||
pg := f.m[pi]
|
||||
if pg == nil {
|
||||
if zeroPage == nil {
|
||||
zeroPage = buffer.CGet(f.pgSize)
|
||||
defer buffer.Put(zeroPage)
|
||||
}
|
||||
pg = zeroPage
|
||||
}
|
||||
nc := copy(b[:mathutil.Min(rem, f.pgSize)], (*pg)[po:])
|
||||
pi++
|
||||
po = 0
|
||||
rem -= nc
|
||||
n += nc
|
||||
b = b[nc:]
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (f *mem) Truncate(size int64) (err error) {
|
||||
if size < 0 {
|
||||
return fmt.Errorf("invalid truncate size: %d", size)
|
||||
}
|
||||
|
||||
first := size >> f.pgBits
|
||||
if po := size & int64(f.pgMask); po != 0 {
|
||||
if p := f.m[first]; p != nil {
|
||||
b := (*p)[po:]
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
}
|
||||
first++
|
||||
}
|
||||
last := f.size >> f.pgBits
|
||||
if po := f.size & int64(f.pgMask); po != 0 {
|
||||
if p := f.m[last]; p != nil {
|
||||
b := (*p)[po:]
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
}
|
||||
last++
|
||||
}
|
||||
for ; first <= last; first++ {
|
||||
if p := f.m[first]; p != nil {
|
||||
buffer.Put(p)
|
||||
}
|
||||
delete(f.m, first)
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *mem) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
pi := off >> f.pgBits
|
||||
po := int(off) & f.pgMask
|
||||
n = len(b)
|
||||
rem := n
|
||||
var nc int
|
||||
for rem != 0 {
|
||||
pg := f.m[pi]
|
||||
if pg == nil {
|
||||
pg = buffer.CGet(f.pgSize)
|
||||
f.m[pi] = pg
|
||||
}
|
||||
nc = copy((*pg)[po:], b)
|
||||
pi++
|
||||
po = 0
|
||||
rem -= nc
|
||||
b = b[nc:]
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, off+int64(n))
|
||||
return n, nil
|
||||
}
|
||||
|
||||
type stat struct {
|
||||
os.FileInfo
|
||||
size int64
|
||||
}
|
||||
|
||||
func (s stat) Size() int64 { return s.size }
|
||||
|
||||
type fileMap map[int64]mmap.MMap
|
||||
|
||||
type file struct {
|
||||
f *os.File
|
||||
m fileMap
|
||||
maxPages int
|
||||
pgBits uint
|
||||
pgMask int
|
||||
pgSize int
|
||||
size int64
|
||||
fsize int64
|
||||
}
|
||||
|
||||
func newFile(f *os.File, maxSize int64, pgBits uint) (*file, error) {
|
||||
if maxSize < 0 {
|
||||
panic("internal error")
|
||||
}
|
||||
|
||||
pgSize := 1 << pgBits
|
||||
switch {
|
||||
case sysPage > pgSize:
|
||||
pgBits = uint(mathutil.Log2Uint64(uint64(sysPage)))
|
||||
default:
|
||||
pgBits = uint(mathutil.Log2Uint64(uint64(pgSize / sysPage * sysPage)))
|
||||
}
|
||||
pgSize = 1 << pgBits
|
||||
fi := &file{
|
||||
f: f,
|
||||
m: fileMap{},
|
||||
maxPages: int(mathutil.MinInt64(
|
||||
1024,
|
||||
mathutil.MaxInt64(maxSize/int64(pgSize), 1)),
|
||||
),
|
||||
pgBits: pgBits,
|
||||
pgMask: pgSize - 1,
|
||||
pgSize: pgSize,
|
||||
}
|
||||
info, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = fi.Truncate(info.Size()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
func (f *file) ReadFrom(r io.Reader) (n int64, err error) { return readFrom(f, r) }
|
||||
func (f *file) Sync() (err error) { return f.f.Sync() }
|
||||
func (f *file) WriteTo(w io.Writer) (n int64, err error) { return writeTo(f, w) }
|
||||
|
||||
func (f *file) Close() (err error) {
|
||||
for _, p := range f.m {
|
||||
if err = p.Unmap(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = f.f.Truncate(f.size); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = f.f.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = f.f.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.m = nil
|
||||
f.f = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *file) page(index int64) (mmap.MMap, error) {
|
||||
if len(f.m) == f.maxPages {
|
||||
for i, p := range f.m {
|
||||
if err := p.Unmap(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
delete(f.m, i)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
off := index << f.pgBits
|
||||
fsize := off + int64(f.pgSize)
|
||||
if fsize > f.fsize {
|
||||
if err := f.f.Truncate(fsize); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f.fsize = fsize
|
||||
}
|
||||
p, err := mmap.MapRegion(f.f, f.pgSize, mmap.RDWR, 0, off)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f.m[index] = p
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (f *file) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
avail := f.size - off
|
||||
pi := off >> f.pgBits
|
||||
po := int(off) & f.pgMask
|
||||
rem := len(b)
|
||||
if int64(rem) >= avail {
|
||||
rem = int(avail)
|
||||
err = io.EOF
|
||||
}
|
||||
for rem != 0 && avail > 0 {
|
||||
pg := f.m[pi]
|
||||
if pg == nil {
|
||||
if pg, err = f.page(pi); err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
nc := copy(b[:mathutil.Min(rem, f.pgSize)], pg[po:])
|
||||
pi++
|
||||
po = 0
|
||||
rem -= nc
|
||||
n += nc
|
||||
b = b[nc:]
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (f *file) Stat() (os.FileInfo, error) {
|
||||
fi, err := f.f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return stat{fi, f.size}, nil
|
||||
}
|
||||
|
||||
func (f *file) Truncate(size int64) (err error) {
|
||||
if size < 0 {
|
||||
return fmt.Errorf("invalid truncate size: %d", size)
|
||||
}
|
||||
|
||||
first := size >> f.pgBits
|
||||
if po := size & int64(f.pgMask); po != 0 {
|
||||
if p := f.m[first]; p != nil {
|
||||
b := p[po:]
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
}
|
||||
first++
|
||||
}
|
||||
last := f.size >> f.pgBits
|
||||
if po := f.size & int64(f.pgMask); po != 0 {
|
||||
if p := f.m[last]; p != nil {
|
||||
b := p[po:]
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
}
|
||||
last++
|
||||
}
|
||||
for ; first <= last; first++ {
|
||||
if p := f.m[first]; p != nil {
|
||||
if err := p.Unmap(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
delete(f.m, first)
|
||||
}
|
||||
|
||||
f.size = size
|
||||
fsize := (size + int64(f.pgSize) - 1) &^ int64(f.pgMask)
|
||||
if fsize != f.fsize {
|
||||
if err := f.f.Truncate(fsize); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
f.fsize = fsize
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *file) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
pi := off >> f.pgBits
|
||||
po := int(off) & f.pgMask
|
||||
n = len(b)
|
||||
rem := n
|
||||
var nc int
|
||||
for rem != 0 {
|
||||
pg := f.m[pi]
|
||||
if pg == nil {
|
||||
pg, err = f.page(pi)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
nc = copy(pg[po:], b)
|
||||
pi++
|
||||
po = 0
|
||||
rem -= nc
|
||||
b = b[nc:]
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, off+int64(n))
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func readFrom(f Interface, r io.Reader) (n int64, err error) {
|
||||
f.Truncate(0)
|
||||
p := buffer.Get(copyBufSize)
|
||||
b := *p
|
||||
defer buffer.Put(p)
|
||||
|
||||
var off int64
|
||||
var werr error
|
||||
for {
|
||||
rn, rerr := r.Read(b)
|
||||
if rn != 0 {
|
||||
_, werr = f.WriteAt(b[:rn], off)
|
||||
n += int64(rn)
|
||||
off += int64(rn)
|
||||
}
|
||||
if rerr != nil {
|
||||
if !fileutil.IsEOF(rerr) {
|
||||
err = rerr
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if werr != nil {
|
||||
err = werr
|
||||
break
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func writeTo(f Interface, w io.Writer) (n int64, err error) {
|
||||
p := buffer.Get(copyBufSize)
|
||||
b := *p
|
||||
defer buffer.Put(p)
|
||||
|
||||
var off int64
|
||||
var werr error
|
||||
for {
|
||||
rn, rerr := f.ReadAt(b, off)
|
||||
if rn != 0 {
|
||||
_, werr = w.Write(b[:rn])
|
||||
n += int64(rn)
|
||||
off += int64(rn)
|
||||
}
|
||||
if rerr != nil {
|
||||
if !fileutil.IsEOF(rerr) {
|
||||
err = rerr
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if werr != nil {
|
||||
err = werr
|
||||
break
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
27
vendor/github.com/cznic/internal/slice/LICENSE
generated
vendored
27
vendor/github.com/cznic/internal/slice/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2016 The Internal Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
173
vendor/github.com/cznic/internal/slice/pool.go
generated
vendored
173
vendor/github.com/cznic/internal/slice/pool.go
generated
vendored
@@ -1,173 +0,0 @@
|
||||
// Copyright 2016 The Internal Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package slice implements pools of pointers to slices.
|
||||
package slice
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var (
|
||||
// Bytes is a ready to use *[]byte Pool.
|
||||
Bytes *Pool
|
||||
// Ints is a ready to use *[]int Pool.
|
||||
Ints *Pool
|
||||
)
|
||||
|
||||
func init() {
|
||||
Bytes = newBytes()
|
||||
Ints = NewPool(
|
||||
func(size int) interface{} { // create
|
||||
b := make([]int, size)
|
||||
return &b
|
||||
},
|
||||
func(s interface{}) { // clear
|
||||
b := *s.(*[]int)
|
||||
b = b[:cap(b)]
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
},
|
||||
func(s interface{}, size int) { // setSize
|
||||
p := s.(*[]int)
|
||||
*p = (*p)[:size]
|
||||
},
|
||||
func(s interface{}) int { return cap(*s.(*[]int)) }, // cap
|
||||
)
|
||||
}
|
||||
|
||||
func newBytes() *Pool {
|
||||
return NewPool(
|
||||
func(size int) interface{} { // create
|
||||
b := make([]byte, size)
|
||||
return &b
|
||||
},
|
||||
func(s interface{}) { // clear
|
||||
b := *s.(*[]byte)
|
||||
b = b[:cap(b)]
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
},
|
||||
func(s interface{}, size int) { // setSize
|
||||
p := s.(*[]byte)
|
||||
*p = (*p)[:size]
|
||||
},
|
||||
func(s interface{}) int { return cap(*s.(*[]byte)) }, // cap
|
||||
)
|
||||
}
|
||||
|
||||
// Pool implements a pool of pointers to slices.
|
||||
//
|
||||
// Example usage pattern (assuming pool is, for example, a *[]byte Pool)
|
||||
//
|
||||
// p := pool.Get(size).(*[]byte)
|
||||
// b := *p // Now you can use b in any way you need.
|
||||
// ...
|
||||
// // When b will not be used anymore
|
||||
// pool.Put(p)
|
||||
// ...
|
||||
// // If b or p are not going out of scope soon, optionally
|
||||
// b = nil
|
||||
// p = nil
|
||||
//
|
||||
// Otherwise the pool cannot release the slice on garbage collection.
|
||||
//
|
||||
// Do not do
|
||||
//
|
||||
// p := pool.Get(size).(*[]byte)
|
||||
// b := *p
|
||||
// ...
|
||||
// pool.Put(&b)
|
||||
//
|
||||
// or
|
||||
//
|
||||
// b := *pool.Get(size).(*[]byte)
|
||||
// ...
|
||||
// pool.Put(&b)
|
||||
type Pool struct {
|
||||
cap func(interface{}) int
|
||||
clear func(interface{})
|
||||
m [63]sync.Pool
|
||||
null interface{}
|
||||
setSize func(interface{}, int)
|
||||
}
|
||||
|
||||
// NewPool returns a newly created Pool. Assuming the desired slice type is
|
||||
// []T:
|
||||
//
|
||||
// The create function returns a *[]T of len == cap == size.
|
||||
//
|
||||
// The argument of clear is *[]T and the function sets all the slice elements
|
||||
// to the respective zero value.
|
||||
//
|
||||
// The setSize function gets a *[]T and sets its len to size.
|
||||
//
|
||||
// The cap function gets a *[]T and returns its capacity.
|
||||
func NewPool(
|
||||
create func(size int) interface{},
|
||||
clear func(interface{}),
|
||||
setSize func(p interface{}, size int),
|
||||
cap func(p interface{}) int,
|
||||
) *Pool {
|
||||
p := &Pool{clear: clear, setSize: setSize, cap: cap, null: create(0)}
|
||||
for i := range p.m {
|
||||
size := 1 << uint(i)
|
||||
p.m[i] = sync.Pool{New: func() interface{} {
|
||||
// 0: 1 - 1
|
||||
// 1: 10 - 10
|
||||
// 2: 11 - 100
|
||||
// 3: 101 - 1000
|
||||
// 4: 1001 - 10000
|
||||
// 5: 10001 - 100000
|
||||
return create(size)
|
||||
}}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// CGet returns a *[]T of len size. The pointed to slice is zeroed up to its
|
||||
// cap. CGet panics for size < 0.
|
||||
//
|
||||
// CGet is safe for concurrent use by multiple goroutines.
|
||||
func (p *Pool) CGet(size int) interface{} {
|
||||
s := p.Get(size)
|
||||
p.clear(s)
|
||||
return s
|
||||
}
|
||||
|
||||
// Get returns a *[]T of len size. The pointed to slice is not zeroed. Get
|
||||
// panics for size < 0.
|
||||
//
|
||||
// Get is safe for concurrent use by multiple goroutines.
|
||||
func (p *Pool) Get(size int) interface{} {
|
||||
var index int
|
||||
switch {
|
||||
case size < 0:
|
||||
panic("Pool.Get: negative size")
|
||||
case size == 0:
|
||||
return p.null
|
||||
case size > 1:
|
||||
index = mathutil.Log2Uint64(uint64(size-1)) + 1
|
||||
}
|
||||
s := p.m[index].Get()
|
||||
p.setSize(s, size)
|
||||
return s
|
||||
}
|
||||
|
||||
// Put puts a *[]T into a pool for possible later reuse by CGet or Get. Put
|
||||
// panics is its argument is not of type *[]T.
|
||||
//
|
||||
// Put is safe for concurrent use by multiple goroutines.
|
||||
func (p *Pool) Put(b interface{}) {
|
||||
size := p.cap(b)
|
||||
if size == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
p.m[mathutil.Log2Uint64(uint64(size))].Put(b)
|
||||
}
|
||||
400
vendor/github.com/cznic/lldb/2pc.go
generated
vendored
400
vendor/github.com/cznic/lldb/2pc.go
generated
vendored
@@ -1,400 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Two Phase Commit & Structural ACID
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var _ Filer = &ACIDFiler0{} // Ensure ACIDFiler0 is a Filer
|
||||
|
||||
type acidWrite struct {
|
||||
b []byte
|
||||
off int64
|
||||
}
|
||||
|
||||
type acidWriter0 ACIDFiler0
|
||||
|
||||
func (a *acidWriter0) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
f := (*ACIDFiler0)(a)
|
||||
if f.newEpoch {
|
||||
f.newEpoch = false
|
||||
f.data = f.data[:0]
|
||||
if err = a.writePacket([]interface{}{wpt00Header, walTypeACIDFiler0, ""}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err = a.writePacket([]interface{}{wpt00WriteData, b, off}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
f.data = append(f.data, acidWrite{b, off})
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (a *acidWriter0) writePacket(items []interface{}) (err error) {
|
||||
f := (*ACIDFiler0)(a)
|
||||
b, err := EncodeScalars(items...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var b4 [4]byte
|
||||
binary.BigEndian.PutUint32(b4[:], uint32(len(b)))
|
||||
if _, err = f.bwal.Write(b4[:]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = f.bwal.Write(b); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m := (4 + len(b)) % 16; m != 0 {
|
||||
var pad [15]byte
|
||||
_, err = f.bwal.Write(pad[:16-m])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WAL Packet Tags
|
||||
const (
|
||||
wpt00Header = iota
|
||||
wpt00WriteData
|
||||
wpt00Checkpoint
|
||||
wpt00Empty
|
||||
)
|
||||
|
||||
const (
|
||||
walTypeACIDFiler0 = iota
|
||||
)
|
||||
|
||||
// ACIDFiler0 is a very simple, synchronous implementation of 2PC. It uses a
|
||||
// single write ahead log file to provide the structural atomicity
|
||||
// (BeginUpdate/EndUpdate/Rollback) and durability (DB can be recovered from
|
||||
// WAL if a crash occurred).
|
||||
//
|
||||
// ACIDFiler0 is a Filer.
|
||||
//
|
||||
// NOTE: Durable synchronous 2PC involves three fsyncs in this implementation
|
||||
// (WAL, DB, zero truncated WAL). Where possible, it's recommended to collect
|
||||
// transactions for, say one second before performing the two phase commit as
|
||||
// the typical performance for rotational hard disks is about few tens of
|
||||
// fsyncs per second atmost. For an example of such collective transaction
|
||||
// approach please see the colecting FSM STT in Dbm's documentation[1].
|
||||
//
|
||||
// [1]: http://godoc.org/github.com/cznic/exp/dbm
|
||||
type ACIDFiler0 struct {
|
||||
*RollbackFiler
|
||||
bwal *bufio.Writer
|
||||
data []acidWrite
|
||||
newEpoch bool
|
||||
peakWal int64 // tracks WAL maximum used size
|
||||
testHook bool // keeps WAL untruncated (once)
|
||||
wal *os.File
|
||||
walOptions walOptions
|
||||
}
|
||||
|
||||
type walOptions struct {
|
||||
headroom int64 // Minimum WAL size.
|
||||
}
|
||||
|
||||
// WALOption amends WAL properties.
|
||||
type WALOption func(*walOptions) error
|
||||
|
||||
// MinWAL sets the minimum size a WAL file will have. The "extra" allocated
|
||||
// file space serves as a headroom. Commits that fit into the headroom should
|
||||
// not fail due to 'not enough space on the volume' errors.
|
||||
//
|
||||
// The min parameter is first rounded-up to a non negative multiple of the size
|
||||
// of the Allocator atom.
|
||||
//
|
||||
// Note: Setting minimum WAL size may render the DB non-recoverable when a
|
||||
// crash occurs and the DB is opened in an earlier version of LLDB that does
|
||||
// not support minimum WAL sizes.
|
||||
func MinWAL(min int64) WALOption {
|
||||
min = mathutil.MaxInt64(0, min)
|
||||
if r := min % 16; r != 0 {
|
||||
min += 16 - r
|
||||
}
|
||||
return func(o *walOptions) error {
|
||||
o.headroom = min
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewACIDFiler0 returns a newly created ACIDFiler0 with WAL in wal.
|
||||
//
|
||||
// If the WAL is zero sized then a previous clean shutdown of db is taken for
|
||||
// granted and no recovery procedure is taken.
|
||||
//
|
||||
// If the WAL is of non zero size then it is checked for having a
|
||||
// committed/fully finished transaction not yet been reflected in db. If such
|
||||
// transaction exists it's committed to db. If the recovery process finishes
|
||||
// successfully, the WAL is truncated to the minimum WAL size and fsync'ed
|
||||
// prior to return from NewACIDFiler0.
|
||||
//
|
||||
// opts allow to amend WAL properties.
|
||||
func NewACIDFiler(db Filer, wal *os.File, opts ...WALOption) (r *ACIDFiler0, err error) {
|
||||
fi, err := wal.Stat()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r = &ACIDFiler0{wal: wal}
|
||||
for _, o := range opts {
|
||||
if err := o(&r.walOptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if fi.Size() != 0 {
|
||||
if err = r.recoverDb(db); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.bwal = bufio.NewWriter(r.wal)
|
||||
r.newEpoch = true
|
||||
acidWriter := (*acidWriter0)(r)
|
||||
|
||||
if r.RollbackFiler, err = NewRollbackFiler(
|
||||
db,
|
||||
func(sz int64) (err error) {
|
||||
// Checkpoint
|
||||
if err = acidWriter.writePacket([]interface{}{wpt00Checkpoint, sz}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = r.bwal.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = r.wal.Sync(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var wfi os.FileInfo
|
||||
if wfi, err = r.wal.Stat(); err != nil {
|
||||
return
|
||||
}
|
||||
r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
|
||||
|
||||
// Phase 1 commit complete
|
||||
|
||||
for _, v := range r.data {
|
||||
n := len(v.b)
|
||||
if m := v.off + int64(n); m > sz {
|
||||
if n -= int(m - sz); n <= 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if _, err = db.WriteAt(v.b[:n], v.off); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = db.Truncate(sz); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = db.Sync(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Phase 2 commit complete
|
||||
|
||||
if !r.testHook {
|
||||
if err := r.emptyWAL(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
r.testHook = false
|
||||
r.bwal.Reset(r.wal)
|
||||
r.newEpoch = true
|
||||
return r.wal.Sync()
|
||||
|
||||
},
|
||||
acidWriter,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (a *ACIDFiler0) emptyWAL() error {
|
||||
if err := a.wal.Truncate(a.walOptions.headroom); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := a.wal.Seek(0, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if a.walOptions.headroom != 0 {
|
||||
a.bwal.Reset(a.wal)
|
||||
if err := (*acidWriter0)(a).writePacket([]interface{}{wpt00Empty}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := a.bwal.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := a.wal.Seek(0, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PeakWALSize reports the maximum size WAL has ever used.
|
||||
func (a ACIDFiler0) PeakWALSize() int64 {
|
||||
return a.peakWal
|
||||
}
|
||||
|
||||
func (a *ACIDFiler0) readPacket(f *bufio.Reader) (items []interface{}, err error) {
|
||||
var b4 [4]byte
|
||||
n, err := io.ReadAtLeast(f, b4[:], 4)
|
||||
if n != 4 {
|
||||
return
|
||||
}
|
||||
|
||||
ln := int(binary.BigEndian.Uint32(b4[:]))
|
||||
m := (4 + ln) % 16
|
||||
padd := (16 - m) % 16
|
||||
b := make([]byte, ln+padd)
|
||||
if n, err = io.ReadAtLeast(f, b, len(b)); n != len(b) {
|
||||
return
|
||||
}
|
||||
|
||||
return DecodeScalars(b[:ln])
|
||||
}
|
||||
|
||||
func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
|
||||
fi, err := a.wal.Stat()
|
||||
if err != nil {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: err}
|
||||
}
|
||||
|
||||
if sz := fi.Size(); sz%16 != 0 {
|
||||
return &ErrILSEQ{Type: ErrFileSize, Name: a.wal.Name(), Arg: sz}
|
||||
}
|
||||
|
||||
f := bufio.NewReader(a.wal)
|
||||
items, err := a.readPacket(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if items[0] == int64(wpt00Empty) {
|
||||
if len(items) != 1 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(items) != 3 || items[0] != int64(wpt00Header) || items[1] != int64(walTypeACIDFiler0) {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
|
||||
}
|
||||
|
||||
tr := NewBTree(nil)
|
||||
|
||||
for {
|
||||
items, err = a.readPacket(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(items) < 2 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("too few packet items %#v", items)}
|
||||
}
|
||||
|
||||
switch items[0] {
|
||||
case int64(wpt00WriteData):
|
||||
if len(items) != 3 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid data packet items %#v", items)}
|
||||
}
|
||||
|
||||
b, off := items[1].([]byte), items[2].(int64)
|
||||
var key [8]byte
|
||||
binary.BigEndian.PutUint64(key[:], uint64(off))
|
||||
if err = tr.Set(key[:], b); err != nil {
|
||||
return
|
||||
}
|
||||
case int64(wpt00Checkpoint):
|
||||
var b1 [1]byte
|
||||
if n, err := f.Read(b1[:]); n != 0 || err == nil {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint n %d, err %v", n, err)}
|
||||
}
|
||||
|
||||
if len(items) != 2 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint packet invalid items %#v", items)}
|
||||
}
|
||||
|
||||
sz := items[1].(int64)
|
||||
enum, err := tr.seekFirst()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
var k, v []byte
|
||||
k, v, err = enum.current()
|
||||
if err != nil {
|
||||
if fileutil.IsEOF(err) {
|
||||
break
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = db.WriteAt(v, int64(binary.BigEndian.Uint64(k))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = enum.next(); err != nil {
|
||||
if fileutil.IsEOF(err) {
|
||||
break
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = db.Truncate(sz); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = db.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Recovery complete
|
||||
|
||||
if err := a.emptyWAL(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.wal.Sync()
|
||||
default:
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("packet tag %v", items[0])}
|
||||
}
|
||||
}
|
||||
}
|
||||
48
vendor/github.com/cznic/lldb/2pc_docs.go
generated
vendored
48
vendor/github.com/cznic/lldb/2pc_docs.go
generated
vendored
@@ -1,48 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
|
||||
Anatomy of a WAL file
|
||||
|
||||
WAL file
|
||||
A sequence of packets
|
||||
|
||||
WAL packet, parts in slice notation
|
||||
[0:4], 4 bytes: N uint32 // network byte order
|
||||
[4:4+N], N bytes: payload []byte // gb encoded scalars
|
||||
|
||||
Packets, including the 4 byte 'size' prefix, MUST BE padded to size == 0 (mod
|
||||
16). The values of the padding bytes MUST BE zero.
|
||||
|
||||
Encoded scalars first item is a packet type number (packet tag). The meaning of
|
||||
any other item(s) of the payload depends on the packet tag.
|
||||
|
||||
Packet definitions
|
||||
|
||||
{wpt00Header int, typ int, s string}
|
||||
typ: Must be zero (ACIDFiler0 file).
|
||||
s: Any comment string, empty string is okay.
|
||||
|
||||
This packet must be present only once - as the first packet of
|
||||
a WAL file.
|
||||
|
||||
{wpt00WriteData int, b []byte, off int64}
|
||||
Write data (WriteAt(b, off)).
|
||||
|
||||
{wpt00Checkpoint int, sz int64}
|
||||
Checkpoint (Truncate(sz)).
|
||||
|
||||
This packet must be present only once - as the last packet of
|
||||
a WAL file.
|
||||
|
||||
{wpt00Empty int}
|
||||
The WAL size is of non-zero size due to configured headroom,
|
||||
but empty otherwise.
|
||||
|
||||
*/
|
||||
|
||||
package lldb
|
||||
|
||||
//TODO optimize bitfiler/wal/2pc data above final size
|
||||
27
vendor/github.com/cznic/lldb/LICENSE
generated
vendored
27
vendor/github.com/cznic/lldb/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2014 The lldb Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
2346
vendor/github.com/cznic/lldb/btree.go
generated
vendored
2346
vendor/github.com/cznic/lldb/btree.go
generated
vendored
File diff suppressed because it is too large
Load Diff
170
vendor/github.com/cznic/lldb/errors.go
generated
vendored
170
vendor/github.com/cznic/lldb/errors.go
generated
vendored
@@ -1,170 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Some errors returned by this package.
|
||||
//
|
||||
// Note that this package can return more errors than declared here, for
|
||||
// example io.EOF from Filer.ReadAt().
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrDecodeScalars is possibly returned from DecodeScalars
|
||||
type ErrDecodeScalars struct {
|
||||
B []byte // Data being decoded
|
||||
I int // offending offset
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrDecodeScalars) Error() string {
|
||||
return fmt.Sprintf("DecodeScalars: corrupted data @ %d/%d", e.I, len(e.B))
|
||||
}
|
||||
|
||||
// ErrINVAL reports invalid values passed as parameters, for example negative
|
||||
// offsets where only non-negative ones are allowed or read from the DB.
|
||||
type ErrINVAL struct {
|
||||
Src string
|
||||
Val interface{}
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrINVAL) Error() string {
|
||||
return fmt.Sprintf("%s: %+v", e.Src, e.Val)
|
||||
}
|
||||
|
||||
// ErrPERM is for example reported when a Filer is closed while BeginUpdate(s)
|
||||
// are not balanced with EndUpdate(s)/Rollback(s) or when EndUpdate or Rollback
|
||||
// is invoked which is not paired with a BeginUpdate.
|
||||
type ErrPERM struct {
|
||||
Src string
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrPERM) Error() string {
|
||||
return fmt.Sprintf("%s: Operation not permitted", e.Src)
|
||||
}
|
||||
|
||||
// ErrTag represents an ErrILSEQ kind.
|
||||
type ErrType int
|
||||
|
||||
// ErrILSEQ types
|
||||
const (
|
||||
ErrOther ErrType = iota
|
||||
|
||||
ErrAdjacentFree // Adjacent free blocks (.Off and .Arg)
|
||||
ErrDecompress // Used compressed block: corrupted compression
|
||||
ErrExpFreeTag // Expected a free block tag, got .Arg
|
||||
ErrExpUsedTag // Expected a used block tag, got .Arg
|
||||
ErrFLT // Free block is invalid or referenced multiple times
|
||||
ErrFLTLoad // FLT truncated to .Off, need size >= .Arg
|
||||
ErrFLTSize // Free block size (.Arg) doesn't belong to its list min size: .Arg2
|
||||
ErrFileSize // File .Name size (.Arg) != 0 (mod 16)
|
||||
ErrFreeChaining // Free block, .prev.next doesn't point back to this block
|
||||
ErrFreeTailBlock // Last block is free
|
||||
ErrHead // Head of a free block list has non zero Prev (.Arg)
|
||||
ErrInvalidRelocTarget // Reloc doesn't target (.Arg) a short or long used block
|
||||
ErrInvalidWAL // Corrupted write ahead log. .Name: file name, .More: more
|
||||
ErrLongFreeBlkTooLong // Long free block spans beyond EOF, size .Arg
|
||||
ErrLongFreeBlkTooShort // Long free block must have at least 2 atoms, got only .Arg
|
||||
ErrLongFreeNextBeyondEOF // Long free block .Next (.Arg) spans beyond EOF
|
||||
ErrLongFreePrevBeyondEOF // Long free block .Prev (.Arg) spans beyond EOF
|
||||
ErrLongFreeTailTag // Expected a long free block tail tag, got .Arg
|
||||
ErrLostFreeBlock // Free block is not in any FLT list
|
||||
ErrNullReloc // Used reloc block with nil target
|
||||
ErrRelocBeyondEOF // Used reloc points (.Arg) beyond EOF
|
||||
ErrShortFreeTailTag // Expected a short free block tail tag, got .Arg
|
||||
ErrSmall // Request for a free block (.Arg) returned a too small one (.Arg2) at .Off
|
||||
ErrTailTag // Block at .Off has invalid tail CC (compression code) tag, got .Arg
|
||||
ErrUnexpReloc // Unexpected reloc block referred to from reloc block .Arg
|
||||
ErrVerifyPadding // Used block has nonzero padding
|
||||
ErrVerifyTailSize // Long free block size .Arg but tail size .Arg2
|
||||
ErrVerifyUsedSpan // Used block size (.Arg) spans beyond EOF
|
||||
)
|
||||
|
||||
// ErrILSEQ reports a corrupted file format. Details in fields according to Type.
|
||||
type ErrILSEQ struct {
|
||||
Type ErrType
|
||||
Off int64
|
||||
Arg int64
|
||||
Arg2 int64
|
||||
Arg3 int64
|
||||
Name string
|
||||
More interface{}
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrILSEQ) Error() string {
|
||||
switch e.Type {
|
||||
case ErrAdjacentFree:
|
||||
return fmt.Sprintf("Adjacent free blocks at offset %#x and %#x", e.Off, e.Arg)
|
||||
case ErrDecompress:
|
||||
return fmt.Sprintf("Compressed block at offset %#x: Corrupted compressed content", e.Off)
|
||||
case ErrExpFreeTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected a free block tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrExpUsedTag:
|
||||
return fmt.Sprintf("Block at ofset %#x: Expected a used block tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrFLT:
|
||||
return fmt.Sprintf("Free block at offset %#x is invalid or referenced multiple times", e.Off)
|
||||
case ErrFLTLoad:
|
||||
return fmt.Sprintf("FLT truncated to size %d, expected at least %d", e.Off, e.Arg)
|
||||
case ErrFLTSize:
|
||||
return fmt.Sprintf("Free block at offset %#x has size (%#x) should be at least (%#x)", e.Off, e.Arg, e.Arg2)
|
||||
case ErrFileSize:
|
||||
return fmt.Sprintf("File %q size (%#x) != 0 (mod 16)", e.Name, e.Arg)
|
||||
case ErrFreeChaining:
|
||||
return fmt.Sprintf("Free block at offset %#x: .prev.next doesn point back here.", e.Off)
|
||||
case ErrFreeTailBlock:
|
||||
return fmt.Sprintf("Free block at offset %#x: Cannot be last file block", e.Off)
|
||||
case ErrHead:
|
||||
return fmt.Sprintf("Block at offset %#x: Head of free block list has non zero .prev %#x", e.Off, e.Arg)
|
||||
case ErrInvalidRelocTarget:
|
||||
return fmt.Sprintf("Used reloc block at offset %#x: Target (%#x) is not a short or long used block", e.Off, e.Arg)
|
||||
case ErrInvalidWAL:
|
||||
return fmt.Sprintf("Corrupted write ahead log file: %q %v", e.Name, e.More)
|
||||
case ErrLongFreeBlkTooLong:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Size (%#x) beyond EOF", e.Off, e.Arg)
|
||||
case ErrLongFreeBlkTooShort:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Size (%#x) too small", e.Off, e.Arg)
|
||||
case ErrLongFreeNextBeyondEOF:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Next (%#x) points beyond EOF", e.Off, e.Arg)
|
||||
case ErrLongFreePrevBeyondEOF:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Prev (%#x) points beyond EOF", e.Off, e.Arg)
|
||||
case ErrLongFreeTailTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected long free tail tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrLostFreeBlock:
|
||||
return fmt.Sprintf("Free block at offset %#x: not in any FLT list", e.Off)
|
||||
case ErrNullReloc:
|
||||
return fmt.Sprintf("Used reloc block at offset %#x: Nil target", e.Off)
|
||||
case ErrRelocBeyondEOF:
|
||||
return fmt.Sprintf("Used reloc block at offset %#x: Link (%#x) points beyond EOF", e.Off, e.Arg)
|
||||
case ErrShortFreeTailTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected short free tail tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrSmall:
|
||||
return fmt.Sprintf("Request for of free block of size %d returned a too small (%d) one at offset %#x", e.Arg, e.Arg2, e.Off)
|
||||
case ErrTailTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Invalid tail CC tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrUnexpReloc:
|
||||
return fmt.Sprintf("Block at offset %#x: Unexpected reloc block. Referred to from reloc block at offset %#x", e.Off, e.Arg)
|
||||
case ErrVerifyPadding:
|
||||
return fmt.Sprintf("Used block at offset %#x: Nonzero padding", e.Off)
|
||||
case ErrVerifyTailSize:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Size %#x, but tail size %#x", e.Off, e.Arg, e.Arg2)
|
||||
case ErrVerifyUsedSpan:
|
||||
return fmt.Sprintf("Used block at offset %#x: Size %#x spans beyond EOF", e.Off, e.Arg)
|
||||
}
|
||||
|
||||
more := ""
|
||||
if e.More != nil {
|
||||
more = fmt.Sprintf(", %v", e.More)
|
||||
}
|
||||
off := ""
|
||||
if e.Off != 0 {
|
||||
off = fmt.Sprintf(", off: %#x", e.Off)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Error%s%s", off, more)
|
||||
}
|
||||
1999
vendor/github.com/cznic/lldb/falloc.go
generated
vendored
1999
vendor/github.com/cznic/lldb/falloc.go
generated
vendored
File diff suppressed because it is too large
Load Diff
184
vendor/github.com/cznic/lldb/filer.go
generated
vendored
184
vendor/github.com/cznic/lldb/filer.go
generated
vendored
@@ -1,184 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// An abstraction of file like (persistent) storage with optional (abstracted)
|
||||
// support for structural integrity.
|
||||
|
||||
package lldb
|
||||
|
||||
import "github.com/cznic/mathutil"
|
||||
|
||||
// A Filer is a []byte-like model of a file or similar entity. It may
|
||||
// optionally implement support for structural transaction safety. In contrast
|
||||
// to a file stream, a Filer is not sequentially accessible. ReadAt and WriteAt
|
||||
// are always "addressed" by an offset and are assumed to perform atomically.
|
||||
// A Filer is not safe for concurrent access, it's designed for consumption by
|
||||
// the other objects in package, which should use a Filer from one goroutine
|
||||
// only or via a mutex. BeginUpdate, EndUpdate and Rollback must be either all
|
||||
// implemented by a Filer for structural integrity - or they should be all
|
||||
// no-ops; where/if that requirement is relaxed.
|
||||
//
|
||||
// If a Filer wraps another Filer implementation, it usually invokes the same
|
||||
// methods on the "inner" one, after some possible argument translations etc.
|
||||
// If a Filer implements the structural transactions handling methods
|
||||
// (BeginUpdate, EndUpdate and Rollback) as no-ops _and_ wraps another Filer:
|
||||
// it then still MUST invoke those methods on the inner Filer. This is
|
||||
// important for the case where a RollbackFiler exists somewhere down the
|
||||
// chain. It's also important for an Allocator - to know when it must
|
||||
// invalidate its FLT cache.
|
||||
type Filer interface {
|
||||
// BeginUpdate increments the "nesting" counter (initially zero). Every
|
||||
// call to BeginUpdate must be eventually "balanced" by exactly one of
|
||||
// EndUpdate or Rollback. Calls to BeginUpdate may nest.
|
||||
BeginUpdate() error
|
||||
|
||||
// Analogous to os.File.Close().
|
||||
Close() error
|
||||
|
||||
// EndUpdate decrements the "nesting" counter. If it's zero after that
|
||||
// then assume the "storage" has reached structural integrity (after a
|
||||
// batch of partial updates). If a Filer implements some support for
|
||||
// that (write ahead log, journal, etc.) then the appropriate actions
|
||||
// are to be taken for nesting == 0. Invocation of an unbalanced
|
||||
// EndUpdate is an error.
|
||||
EndUpdate() error
|
||||
|
||||
// Analogous to os.File.Name().
|
||||
Name() string
|
||||
|
||||
// PunchHole deallocates space inside a "file" in the byte range
|
||||
// starting at off and continuing for size bytes. The actual hole
|
||||
// created by PunchHole may be smaller than requested. The Filer size
|
||||
// (as reported by `Size()` does not change when hole punching, even
|
||||
// when punching the end of a file off. In contrast to the Linux
|
||||
// implementation of FALLOC_FL_PUNCH_HOLE in `fallocate`(2); a Filer is
|
||||
// free not only to ignore `PunchHole()` (implement it as a nop), but
|
||||
// additionally no guarantees about the content of the hole, when
|
||||
// eventually read back, are required, i.e. any data, not only zeros,
|
||||
// can be read from the "hole", including just anything what was left
|
||||
// there - with all of the possible security problems.
|
||||
PunchHole(off, size int64) error
|
||||
|
||||
// As os.File.ReadAt. Note: `off` is an absolute "file pointer"
|
||||
// address and cannot be negative even when a Filer is a InnerFiler.
|
||||
ReadAt(b []byte, off int64) (n int, err error)
|
||||
|
||||
// Rollback cancels and undoes the innermost pending update level.
|
||||
// Rollback decrements the "nesting" counter. If a Filer implements
|
||||
// some support for keeping structural integrity (write ahead log,
|
||||
// journal, etc.) then the appropriate actions are to be taken.
|
||||
// Invocation of an unbalanced Rollback is an error.
|
||||
Rollback() error
|
||||
|
||||
// Analogous to os.File.FileInfo().Size().
|
||||
Size() (int64, error)
|
||||
|
||||
// Analogous to os.Sync().
|
||||
Sync() (err error)
|
||||
|
||||
// Analogous to os.File.Truncate().
|
||||
Truncate(size int64) error
|
||||
|
||||
// Analogous to os.File.WriteAt(). Note: `off` is an absolute "file
|
||||
// pointer" address and cannot be negative even when a Filer is a
|
||||
// InnerFiler.
|
||||
WriteAt(b []byte, off int64) (n int, err error)
|
||||
}
|
||||
|
||||
var _ Filer = &InnerFiler{} // Ensure InnerFiler is a Filer.
|
||||
|
||||
// A InnerFiler is a Filer with added addressing/size translation.
|
||||
type InnerFiler struct {
|
||||
outer Filer
|
||||
off int64
|
||||
}
|
||||
|
||||
// NewInnerFiler returns a new InnerFiler wrapped by `outer` in a way which
|
||||
// adds `off` to every access.
|
||||
//
|
||||
// For example, considering:
|
||||
//
|
||||
// inner := NewInnerFiler(outer, 10)
|
||||
//
|
||||
// then
|
||||
//
|
||||
// inner.WriteAt([]byte{42}, 4)
|
||||
//
|
||||
// translates to
|
||||
//
|
||||
// outer.WriteAt([]byte{42}, 14)
|
||||
//
|
||||
// But an attempt to emulate
|
||||
//
|
||||
// outer.WriteAt([]byte{17}, 9)
|
||||
//
|
||||
// by
|
||||
//
|
||||
// inner.WriteAt([]byte{17}, -1)
|
||||
//
|
||||
// will fail as the `off` parameter can never be < 0. Also note that
|
||||
//
|
||||
// inner.Size() == outer.Size() - off,
|
||||
//
|
||||
// i.e. `inner` pretends no `outer` exists. Finally, after e.g.
|
||||
//
|
||||
// inner.Truncate(7)
|
||||
// outer.Size() == 17
|
||||
//
|
||||
// will be true.
|
||||
func NewInnerFiler(outer Filer, off int64) *InnerFiler { return &InnerFiler{outer, off} }
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *InnerFiler) BeginUpdate() error { return f.outer.BeginUpdate() }
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *InnerFiler) Close() (err error) { return f.outer.Close() }
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *InnerFiler) EndUpdate() error { return f.outer.EndUpdate() }
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *InnerFiler) Name() string { return f.outer.Name() }
|
||||
|
||||
// PunchHole implements Filer. `off`, `size` must be >= 0.
|
||||
func (f *InnerFiler) PunchHole(off, size int64) error { return f.outer.PunchHole(f.off+off, size) }
|
||||
|
||||
// ReadAt implements Filer. `off` must be >= 0.
|
||||
func (f *InnerFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 {
|
||||
return 0, &ErrINVAL{f.outer.Name() + ":ReadAt invalid off", off}
|
||||
}
|
||||
|
||||
return f.outer.ReadAt(b, f.off+off)
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *InnerFiler) Rollback() error { return f.outer.Rollback() }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *InnerFiler) Size() (int64, error) {
|
||||
sz, err := f.outer.Size()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return mathutil.MaxInt64(sz-f.off, 0), nil
|
||||
}
|
||||
|
||||
// Sync() implements Filer.
|
||||
func (f *InnerFiler) Sync() (err error) {
|
||||
return f.outer.Sync()
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *InnerFiler) Truncate(size int64) error { return f.outer.Truncate(size + f.off) }
|
||||
|
||||
// WriteAt implements Filer. `off` must be >= 0.
|
||||
func (f *InnerFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 {
|
||||
return 0, &ErrINVAL{f.outer.Name() + ":WriteAt invalid off", off}
|
||||
}
|
||||
|
||||
return f.outer.WriteAt(b, f.off+off)
|
||||
}
|
||||
812
vendor/github.com/cznic/lldb/gb.go
generated
vendored
812
vendor/github.com/cznic/lldb/gb.go
generated
vendored
@@ -1,812 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Utilities to encode/decode and collate Go predeclared scalar types (and the
|
||||
// typeless nil and []byte). The encoding format is a variation of the one
|
||||
// used by the "encoding/gob" package.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
const (
|
||||
gbNull = iota // 0x00
|
||||
gbFalse // 0x01
|
||||
gbTrue // 0x02
|
||||
gbFloat0 // 0x03
|
||||
gbFloat1 // 0x04
|
||||
gbFloat2 // 0x05
|
||||
gbFloat3 // 0x06
|
||||
gbFloat4 // 0x07
|
||||
gbFloat5 // 0x08
|
||||
gbFloat6 // 0x09
|
||||
gbFloat7 // 0x0a
|
||||
gbFloat8 // 0x0b
|
||||
gbComplex0 // 0x0c
|
||||
gbComplex1 // 0x0d
|
||||
gbComplex2 // 0x0e
|
||||
gbComplex3 // 0x0f
|
||||
gbComplex4 // 0x10
|
||||
gbComplex5 // 0x11
|
||||
gbComplex6 // 0x12
|
||||
gbComplex7 // 0x13
|
||||
gbComplex8 // 0x14
|
||||
gbBytes00 // 0x15
|
||||
gbBytes01 // 0x16
|
||||
gbBytes02 // 0x17
|
||||
gbBytes03 // 0x18
|
||||
gbBytes04 // 0x19
|
||||
gbBytes05 // 0x1a
|
||||
gbBytes06 // 0x1b
|
||||
gbBytes07 // 0x1c
|
||||
gbBytes08 // 0x1d
|
||||
gbBytes09 // 0x1e
|
||||
gbBytes10 // 0x1f
|
||||
gbBytes11 // 0x20
|
||||
gbBytes12 // 0x21
|
||||
gbBytes13 // 0x22
|
||||
gbBytes14 // 0x23
|
||||
gbBytes15 // 0x24
|
||||
gbBytes16 // 0x25
|
||||
gbBytes17 // Ox26
|
||||
gbBytes1 // 0x27
|
||||
gbBytes2 // 0x28: Offset by one to allow 64kB sized []byte.
|
||||
gbString00 // 0x29
|
||||
gbString01 // 0x2a
|
||||
gbString02 // 0x2b
|
||||
gbString03 // 0x2c
|
||||
gbString04 // 0x2d
|
||||
gbString05 // 0x2e
|
||||
gbString06 // 0x2f
|
||||
gbString07 // 0x30
|
||||
gbString08 // 0x31
|
||||
gbString09 // 0x32
|
||||
gbString10 // 0x33
|
||||
gbString11 // 0x34
|
||||
gbString12 // 0x35
|
||||
gbString13 // 0x36
|
||||
gbString14 // 0x37
|
||||
gbString15 // 0x38
|
||||
gbString16 // 0x39
|
||||
gbString17 // 0x3a
|
||||
gbString1 // 0x3b
|
||||
gbString2 // 0x3c
|
||||
gbUintP1 // 0x3d
|
||||
gbUintP2 // 0x3e
|
||||
gbUintP3 // 0x3f
|
||||
gbUintP4 // 0x40
|
||||
gbUintP5 // 0x41
|
||||
gbUintP6 // 0x42
|
||||
gbUintP7 // 0x43
|
||||
gbUintP8 // 0x44
|
||||
gbIntM8 // 0x45
|
||||
gbIntM7 // 0x46
|
||||
gbIntM6 // 0x47
|
||||
gbIntM5 // 0x48
|
||||
gbIntM4 // 0x49
|
||||
gbIntM3 // 0x4a
|
||||
gbIntM2 // 0x4b
|
||||
gbIntM1 // 0x4c
|
||||
gbIntP1 // 0x4d
|
||||
gbIntP2 // 0x4e
|
||||
gbIntP3 // 0x4f
|
||||
gbIntP4 // 0x50
|
||||
gbIntP5 // 0x51
|
||||
gbIntP6 // 0x52
|
||||
gbIntP7 // 0x53
|
||||
gbIntP8 // 0x54
|
||||
gbInt0 // 0x55
|
||||
|
||||
gbIntMax = 255 - gbInt0 // 0xff == 170
|
||||
)
|
||||
|
||||
// EncodeScalars encodes a vector of predeclared scalar type values to a
|
||||
// []byte, making it suitable to store it as a "record" in a DB or to use it as
|
||||
// a key of a BTree.
|
||||
func EncodeScalars(scalars ...interface{}) (b []byte, err error) {
|
||||
for _, scalar := range scalars {
|
||||
switch x := scalar.(type) {
|
||||
default:
|
||||
return nil, &ErrINVAL{"EncodeScalars: unsupported type", fmt.Sprintf("%T in `%#v`", x, scalars)}
|
||||
|
||||
case nil:
|
||||
b = append(b, gbNull)
|
||||
|
||||
case bool:
|
||||
switch x {
|
||||
case false:
|
||||
b = append(b, gbFalse)
|
||||
case true:
|
||||
b = append(b, gbTrue)
|
||||
}
|
||||
|
||||
case float32:
|
||||
encFloat(float64(x), &b)
|
||||
case float64:
|
||||
encFloat(x, &b)
|
||||
|
||||
case complex64:
|
||||
encComplex(complex128(x), &b)
|
||||
case complex128:
|
||||
encComplex(x, &b)
|
||||
|
||||
case string:
|
||||
n := len(x)
|
||||
if n <= 17 {
|
||||
b = append(b, byte(gbString00+n))
|
||||
b = append(b, []byte(x)...)
|
||||
break
|
||||
}
|
||||
|
||||
if n > 65535 {
|
||||
return nil, fmt.Errorf("EncodeScalars: cannot encode string of length %d (limit 65536)", n)
|
||||
}
|
||||
|
||||
pref := byte(gbString1)
|
||||
if n > 255 {
|
||||
pref++
|
||||
}
|
||||
b = append(b, pref)
|
||||
encUint0(uint64(n), &b)
|
||||
b = append(b, []byte(x)...)
|
||||
|
||||
case int8:
|
||||
encInt(int64(x), &b)
|
||||
case int16:
|
||||
encInt(int64(x), &b)
|
||||
case int32:
|
||||
encInt(int64(x), &b)
|
||||
case int64:
|
||||
encInt(x, &b)
|
||||
case int:
|
||||
encInt(int64(x), &b)
|
||||
|
||||
case uint8:
|
||||
encUint(uint64(x), &b)
|
||||
case uint16:
|
||||
encUint(uint64(x), &b)
|
||||
case uint32:
|
||||
encUint(uint64(x), &b)
|
||||
case uint64:
|
||||
encUint(x, &b)
|
||||
case uint:
|
||||
encUint(uint64(x), &b)
|
||||
case []byte:
|
||||
n := len(x)
|
||||
if n <= 17 {
|
||||
b = append(b, byte(gbBytes00+n))
|
||||
b = append(b, x...)
|
||||
break
|
||||
}
|
||||
|
||||
if n > 655356 {
|
||||
return nil, fmt.Errorf("EncodeScalars: cannot encode []byte of length %d (limit 65536)", n)
|
||||
}
|
||||
|
||||
pref := byte(gbBytes1)
|
||||
if n > 255 {
|
||||
pref++
|
||||
}
|
||||
b = append(b, pref)
|
||||
if n <= 255 {
|
||||
b = append(b, byte(n))
|
||||
} else {
|
||||
n--
|
||||
b = append(b, byte(n>>8), byte(n))
|
||||
}
|
||||
b = append(b, x...)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func encComplex(f complex128, b *[]byte) {
|
||||
encFloatPrefix(gbComplex0, real(f), b)
|
||||
encFloatPrefix(gbComplex0, imag(f), b)
|
||||
}
|
||||
|
||||
func encFloatPrefix(prefix byte, f float64, b *[]byte) {
|
||||
u := math.Float64bits(f)
|
||||
var n uint64
|
||||
for i := 0; i < 8; i++ {
|
||||
n <<= 8
|
||||
n |= u & 0xFF
|
||||
u >>= 8
|
||||
}
|
||||
bits := mathutil.BitLenUint64(n)
|
||||
if bits == 0 {
|
||||
*b = append(*b, prefix)
|
||||
return
|
||||
}
|
||||
|
||||
// 0 1 2 3 4 5 6 7 8 9
|
||||
// . 1 1 1 1 1 1 1 1 2
|
||||
encUintPrefix(prefix+1+byte((bits-1)>>3), n, b)
|
||||
}
|
||||
|
||||
func encFloat(f float64, b *[]byte) {
|
||||
encFloatPrefix(gbFloat0, f, b)
|
||||
}
|
||||
|
||||
func encUint0(n uint64, b *[]byte) {
|
||||
switch {
|
||||
case n <= 0xff:
|
||||
*b = append(*b, byte(n))
|
||||
case n <= 0xffff:
|
||||
*b = append(*b, byte(n>>8), byte(n))
|
||||
case n <= 0xffffff:
|
||||
*b = append(*b, byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffff:
|
||||
*b = append(*b, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffff:
|
||||
*b = append(*b, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffffff:
|
||||
*b = append(*b, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffffffff:
|
||||
*b = append(*b, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= math.MaxUint64:
|
||||
*b = append(*b, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
}
|
||||
}
|
||||
|
||||
func encUintPrefix(prefix byte, n uint64, b *[]byte) {
|
||||
*b = append(*b, prefix)
|
||||
encUint0(n, b)
|
||||
}
|
||||
|
||||
func encUint(n uint64, b *[]byte) {
|
||||
bits := mathutil.Max(1, mathutil.BitLenUint64(n))
|
||||
encUintPrefix(gbUintP1+byte((bits-1)>>3), n, b)
|
||||
}
|
||||
|
||||
func encInt(n int64, b *[]byte) {
|
||||
switch {
|
||||
case n < -0x100000000000000:
|
||||
*b = append(*b, byte(gbIntM8), byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x1000000000000:
|
||||
*b = append(*b, byte(gbIntM7), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x10000000000:
|
||||
*b = append(*b, byte(gbIntM6), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x100000000:
|
||||
*b = append(*b, byte(gbIntM5), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x1000000:
|
||||
*b = append(*b, byte(gbIntM4), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x10000:
|
||||
*b = append(*b, byte(gbIntM3), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n < -0x100:
|
||||
*b = append(*b, byte(gbIntM2), byte(n>>8), byte(n))
|
||||
case n < 0:
|
||||
*b = append(*b, byte(gbIntM1), byte(n))
|
||||
case n <= gbIntMax:
|
||||
*b = append(*b, byte(gbInt0+n))
|
||||
case n <= 0xff:
|
||||
*b = append(*b, gbIntP1, byte(n))
|
||||
case n <= 0xffff:
|
||||
*b = append(*b, gbIntP2, byte(n>>8), byte(n))
|
||||
case n <= 0xffffff:
|
||||
*b = append(*b, gbIntP3, byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffff:
|
||||
*b = append(*b, gbIntP4, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffff:
|
||||
*b = append(*b, gbIntP5, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffffff:
|
||||
*b = append(*b, gbIntP6, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0xffffffffffffff:
|
||||
*b = append(*b, gbIntP7, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
case n <= 0x7fffffffffffffff:
|
||||
*b = append(*b, gbIntP8, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
|
||||
}
|
||||
}
|
||||
|
||||
func decodeFloat(b []byte) float64 {
|
||||
var u uint64
|
||||
for i, v := range b {
|
||||
u |= uint64(v) << uint((i+8-len(b))*8)
|
||||
}
|
||||
return math.Float64frombits(u)
|
||||
}
|
||||
|
||||
// DecodeScalars decodes a []byte produced by EncodeScalars.
|
||||
func DecodeScalars(b []byte) (scalars []interface{}, err error) {
|
||||
b0 := b
|
||||
for len(b) != 0 {
|
||||
switch tag := b[0]; tag {
|
||||
//default:
|
||||
//return nil, fmt.Errorf("tag %d(%#x) not supported", b[0], b[0])
|
||||
case gbNull:
|
||||
scalars = append(scalars, nil)
|
||||
b = b[1:]
|
||||
case gbFalse:
|
||||
scalars = append(scalars, false)
|
||||
b = b[1:]
|
||||
case gbTrue:
|
||||
scalars = append(scalars, true)
|
||||
b = b[1:]
|
||||
case gbFloat0:
|
||||
scalars = append(scalars, 0.0)
|
||||
b = b[1:]
|
||||
case gbFloat1, gbFloat2, gbFloat3, gbFloat4, gbFloat5, gbFloat6, gbFloat7, gbFloat8:
|
||||
n := 1 + int(tag) - gbFloat0
|
||||
if len(b) < n-1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, decodeFloat(b[1:n]))
|
||||
b = b[n:]
|
||||
case gbComplex0, gbComplex1, gbComplex2, gbComplex3, gbComplex4, gbComplex5, gbComplex6, gbComplex7, gbComplex8:
|
||||
n := 1 + int(tag) - gbComplex0
|
||||
if len(b) < n-1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
re := decodeFloat(b[1:n])
|
||||
b = b[n:]
|
||||
|
||||
if len(b) == 0 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
tag = b[0]
|
||||
if tag < gbComplex0 || tag > gbComplex8 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n = 1 + int(tag) - gbComplex0
|
||||
if len(b) < n-1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, complex(re, decodeFloat(b[1:n])))
|
||||
b = b[n:]
|
||||
case gbBytes00, gbBytes01, gbBytes02, gbBytes03, gbBytes04,
|
||||
gbBytes05, gbBytes06, gbBytes07, gbBytes08, gbBytes09,
|
||||
gbBytes10, gbBytes11, gbBytes12, gbBytes13, gbBytes14,
|
||||
gbBytes15, gbBytes16, gbBytes17:
|
||||
n := int(tag - gbBytes00)
|
||||
if len(b) < n+1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, append([]byte(nil), b[1:n+1]...))
|
||||
b = b[n+1:]
|
||||
case gbBytes1:
|
||||
if len(b) < 2 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])
|
||||
b = b[2:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, append([]byte(nil), b[:n]...))
|
||||
b = b[n:]
|
||||
case gbBytes2:
|
||||
if len(b) < 3 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])<<8 | int(b[2]) + 1
|
||||
b = b[3:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, append([]byte(nil), b[:n]...))
|
||||
b = b[n:]
|
||||
case gbString00, gbString01, gbString02, gbString03, gbString04,
|
||||
gbString05, gbString06, gbString07, gbString08, gbString09,
|
||||
gbString10, gbString11, gbString12, gbString13, gbString14,
|
||||
gbString15, gbString16, gbString17:
|
||||
n := int(tag - gbString00)
|
||||
if len(b) < n+1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, string(b[1:n+1]))
|
||||
b = b[n+1:]
|
||||
case gbString1:
|
||||
if len(b) < 2 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])
|
||||
b = b[2:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, string(b[:n]))
|
||||
b = b[n:]
|
||||
case gbString2:
|
||||
if len(b) < 3 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])<<8 | int(b[2])
|
||||
b = b[3:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, string(b[:n]))
|
||||
b = b[n:]
|
||||
case gbUintP1, gbUintP2, gbUintP3, gbUintP4, gbUintP5, gbUintP6, gbUintP7, gbUintP8:
|
||||
b = b[1:]
|
||||
n := 1 + int(tag) - gbUintP1
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
var u uint64
|
||||
for _, v := range b[:n] {
|
||||
u = u<<8 | uint64(v)
|
||||
}
|
||||
scalars = append(scalars, u)
|
||||
b = b[n:]
|
||||
case gbIntM8, gbIntM7, gbIntM6, gbIntM5, gbIntM4, gbIntM3, gbIntM2, gbIntM1:
|
||||
b = b[1:]
|
||||
n := 8 - (int(tag) - gbIntM8)
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
u := uint64(math.MaxUint64)
|
||||
for _, v := range b[:n] {
|
||||
u = u<<8 | uint64(v)
|
||||
}
|
||||
scalars = append(scalars, int64(u))
|
||||
b = b[n:]
|
||||
case gbIntP1, gbIntP2, gbIntP3, gbIntP4, gbIntP5, gbIntP6, gbIntP7, gbIntP8:
|
||||
b = b[1:]
|
||||
n := 1 + int(tag) - gbIntP1
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
i := int64(0)
|
||||
for _, v := range b[:n] {
|
||||
i = i<<8 | int64(v)
|
||||
}
|
||||
scalars = append(scalars, i)
|
||||
b = b[n:]
|
||||
default:
|
||||
scalars = append(scalars, int64(b[0])-gbInt0)
|
||||
b = b[1:]
|
||||
}
|
||||
}
|
||||
return append([]interface{}(nil), scalars...), nil
|
||||
|
||||
corrupted:
|
||||
return nil, &ErrDecodeScalars{append([]byte(nil), b0...), len(b0) - len(b)}
|
||||
}
|
||||
|
||||
func collateComplex(x, y complex128) int {
|
||||
switch rx, ry := real(x), real(y); {
|
||||
case rx < ry:
|
||||
return -1
|
||||
case rx == ry:
|
||||
switch ix, iy := imag(x), imag(y); {
|
||||
case ix < iy:
|
||||
return -1
|
||||
case ix == iy:
|
||||
return 0
|
||||
case ix > iy:
|
||||
return 1
|
||||
}
|
||||
}
|
||||
//case rx > ry:
|
||||
return 1
|
||||
}
|
||||
|
||||
func collateFloat(x, y float64) int {
|
||||
switch {
|
||||
case x < y:
|
||||
return -1
|
||||
case x == y:
|
||||
return 0
|
||||
}
|
||||
//case x > y:
|
||||
return 1
|
||||
}
|
||||
|
||||
func collateInt(x, y int64) int {
|
||||
switch {
|
||||
case x < y:
|
||||
return -1
|
||||
case x == y:
|
||||
return 0
|
||||
}
|
||||
//case x > y:
|
||||
return 1
|
||||
}
|
||||
|
||||
func collateUint(x, y uint64) int {
|
||||
switch {
|
||||
case x < y:
|
||||
return -1
|
||||
case x == y:
|
||||
return 0
|
||||
}
|
||||
//case x > y:
|
||||
return 1
|
||||
}
|
||||
|
||||
func collateIntUint(x int64, y uint64) int {
|
||||
if y > math.MaxInt64 {
|
||||
return -1
|
||||
}
|
||||
|
||||
return collateInt(x, int64(y))
|
||||
}
|
||||
|
||||
func collateUintInt(x uint64, y int64) int {
|
||||
return -collateIntUint(y, x)
|
||||
}
|
||||
|
||||
func collateType(i interface{}) (r interface{}, err error) {
|
||||
switch x := i.(type) {
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid collate type %T", x)
|
||||
case nil:
|
||||
return i, nil
|
||||
case bool:
|
||||
return i, nil
|
||||
case int8:
|
||||
return int64(x), nil
|
||||
case int16:
|
||||
return int64(x), nil
|
||||
case int32:
|
||||
return int64(x), nil
|
||||
case int64:
|
||||
return i, nil
|
||||
case int:
|
||||
return int64(x), nil
|
||||
case uint8:
|
||||
return uint64(x), nil
|
||||
case uint16:
|
||||
return uint64(x), nil
|
||||
case uint32:
|
||||
return uint64(x), nil
|
||||
case uint64:
|
||||
return i, nil
|
||||
case uint:
|
||||
return uint64(x), nil
|
||||
case float32:
|
||||
return float64(x), nil
|
||||
case float64:
|
||||
return i, nil
|
||||
case complex64:
|
||||
return complex128(x), nil
|
||||
case complex128:
|
||||
return i, nil
|
||||
case []byte:
|
||||
return i, nil
|
||||
case string:
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Collate collates two arrays of Go predeclared scalar types (and the typeless
|
||||
// nil or []byte). If any other type appears in x or y, Collate will return a
|
||||
// non nil error. String items are collated using strCollate or lexically
|
||||
// byte-wise (as when using Go comparison operators) when strCollate is nil.
|
||||
// []byte items are collated using bytes.Compare.
|
||||
//
|
||||
// Collate returns:
|
||||
//
|
||||
// -1 if x < y
|
||||
// 0 if x == y
|
||||
// +1 if x > y
|
||||
//
|
||||
// The same value as defined above must be returned from strCollate.
|
||||
//
|
||||
// The "outer" ordering is: nil, bool, number, []byte, string. IOW, nil is
|
||||
// "smaller" than anything else except other nil, numbers collate before
|
||||
// []byte, []byte collate before strings, etc.
|
||||
//
|
||||
// Integers and real numbers collate as expected in math. However, complex
|
||||
// numbers are not ordered in Go. Here the ordering is defined: Complex numbers
|
||||
// are in comparison considered first only by their real part. Iff the result
|
||||
// is equality then the imaginary part is used to determine the ordering. In
|
||||
// this "second order" comparing, integers and real numbers are considered as
|
||||
// complex numbers with a zero imaginary part.
|
||||
func Collate(x, y []interface{}, strCollate func(string, string) int) (r int, err error) {
|
||||
nx, ny := len(x), len(y)
|
||||
|
||||
switch {
|
||||
case nx == 0 && ny != 0:
|
||||
return -1, nil
|
||||
case nx == 0 && ny == 0:
|
||||
return 0, nil
|
||||
case nx != 0 && ny == 0:
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
r = 1
|
||||
if nx > ny {
|
||||
x, y, r = y, x, -r
|
||||
}
|
||||
|
||||
var c int
|
||||
for i, xi0 := range x {
|
||||
yi0 := y[i]
|
||||
xi, err := collateType(xi0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
yi, err := collateType(yi0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch x := xi.(type) {
|
||||
default:
|
||||
panic(fmt.Errorf("internal error: %T", x))
|
||||
|
||||
case nil:
|
||||
switch yi.(type) {
|
||||
case nil:
|
||||
// nop
|
||||
default:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
case bool:
|
||||
switch y := yi.(type) {
|
||||
case nil:
|
||||
return r, nil
|
||||
case bool:
|
||||
switch {
|
||||
case !x && y:
|
||||
return -r, nil
|
||||
case x == y:
|
||||
// nop
|
||||
case x && !y:
|
||||
return r, nil
|
||||
}
|
||||
default:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
case int64:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool:
|
||||
return r, nil
|
||||
case int64:
|
||||
c = collateInt(x, y)
|
||||
case uint64:
|
||||
c = collateIntUint(x, y)
|
||||
case float64:
|
||||
c = collateFloat(float64(x), y)
|
||||
case complex128:
|
||||
c = collateComplex(complex(float64(x), 0), y)
|
||||
case []byte:
|
||||
return -r, nil
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case uint64:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool:
|
||||
return r, nil
|
||||
case int64:
|
||||
c = collateUintInt(x, y)
|
||||
case uint64:
|
||||
c = collateUint(x, y)
|
||||
case float64:
|
||||
c = collateFloat(float64(x), y)
|
||||
case complex128:
|
||||
c = collateComplex(complex(float64(x), 0), y)
|
||||
case []byte:
|
||||
return -r, nil
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case float64:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool:
|
||||
return r, nil
|
||||
case int64:
|
||||
c = collateFloat(x, float64(y))
|
||||
case uint64:
|
||||
c = collateFloat(x, float64(y))
|
||||
case float64:
|
||||
c = collateFloat(x, y)
|
||||
case complex128:
|
||||
c = collateComplex(complex(x, 0), y)
|
||||
case []byte:
|
||||
return -r, nil
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case complex128:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool:
|
||||
return r, nil
|
||||
case int64:
|
||||
c = collateComplex(x, complex(float64(y), 0))
|
||||
case uint64:
|
||||
c = collateComplex(x, complex(float64(y), 0))
|
||||
case float64:
|
||||
c = collateComplex(x, complex(y, 0))
|
||||
case complex128:
|
||||
c = collateComplex(x, y)
|
||||
case []byte:
|
||||
return -r, nil
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case []byte:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool, int64, uint64, float64, complex128:
|
||||
return r, nil
|
||||
case []byte:
|
||||
c = bytes.Compare(x, y)
|
||||
case string:
|
||||
return -r, nil
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
|
||||
case string:
|
||||
switch y := yi.(type) {
|
||||
case nil, bool, int64, uint64, float64, complex128:
|
||||
return r, nil
|
||||
case []byte:
|
||||
return r, nil
|
||||
case string:
|
||||
switch {
|
||||
case strCollate != nil:
|
||||
c = strCollate(x, y)
|
||||
case x < y:
|
||||
return -r, nil
|
||||
case x == y:
|
||||
c = 0
|
||||
case x > y:
|
||||
return r, nil
|
||||
}
|
||||
}
|
||||
|
||||
if c != 0 {
|
||||
return c * r, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if nx == ny {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return -r, nil
|
||||
}
|
||||
160
vendor/github.com/cznic/lldb/lldb.go
generated
vendored
160
vendor/github.com/cznic/lldb/lldb.go
generated
vendored
@@ -1,160 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package lldb implements a low level database engine. The database model used
|
||||
// could be considered a specific implementation of some small(est)
|
||||
// intersection of models listed in [1]. As a settled term is lacking, it'll be
|
||||
// called here a 'Virtual memory model' (VMM).
|
||||
//
|
||||
// Changelog
|
||||
//
|
||||
// 2016-07-24: v1.0.4 brings some performance improvements.
|
||||
//
|
||||
// 2016-07-22: v1.0.3 brings some small performance improvements.
|
||||
//
|
||||
// 2016-07-12: v1.0.2 now uses packages from cznic/internal.
|
||||
//
|
||||
// 2016-07-12: v1.0.1 adds a license for testdata/fortunes.txt.
|
||||
//
|
||||
// 2016-07-11: First standalone release v1.0.0 of the package previously
|
||||
// published as experimental (github.com/cznic/exp/lldb).
|
||||
//
|
||||
// Filers
|
||||
//
|
||||
// A Filer is an abstraction of storage. A Filer may be a part of some process'
|
||||
// virtual address space, an OS file, a networked, remote file etc. Persistence
|
||||
// of the storage is optional, opaque to VMM and it is specific to a concrete
|
||||
// Filer implementation.
|
||||
//
|
||||
// Space management
|
||||
//
|
||||
// Mechanism to allocate, reallocate (resize), deallocate (and later reclaim
|
||||
// the unused) contiguous parts of a Filer, called blocks. Blocks are
|
||||
// identified and referred to by a handle, an int64.
|
||||
//
|
||||
// BTrees
|
||||
//
|
||||
// In addition to the VMM like services, lldb provides volatile and
|
||||
// non-volatile BTrees. Keys and values of a BTree are limited in size to 64kB
|
||||
// each (a bit more actually). Support for larger keys/values, if desired, can
|
||||
// be built atop a BTree to certain limits.
|
||||
//
|
||||
// Handles vs pointers
|
||||
//
|
||||
// A handle is the abstracted storage counterpart of a memory address. There
|
||||
// is one fundamental difference, though. Resizing a block never results in a
|
||||
// change to the handle which refers to the resized block, so a handle is more
|
||||
// akin to an unique numeric id/key. Yet it shares one property of pointers -
|
||||
// handles can be associated again with blocks after the original handle block
|
||||
// was deallocated. In other words, a handle uniqueness domain is the state of
|
||||
// the database and is not something comparable to e.g. an ever growing
|
||||
// numbering sequence.
|
||||
//
|
||||
// Also, as with memory pointers, dangling handles can be created and blocks
|
||||
// overwritten when such handles are used. Using a zero handle to refer to a
|
||||
// block will not panic; however, the resulting error is effectively the same
|
||||
// exceptional situation as dereferencing a nil pointer.
|
||||
//
|
||||
// Blocks
|
||||
//
|
||||
// Allocated/used blocks, are limited in size to only a little bit more than
|
||||
// 64kB. Bigger semantic entities/structures must be built in lldb's client
|
||||
// code. The content of a block has no semantics attached, it's only a fully
|
||||
// opaque `[]byte`.
|
||||
//
|
||||
// Scalars
|
||||
//
|
||||
// Use of "scalars" applies to EncodeScalars, DecodeScalars and Collate. Those
|
||||
// first two "to bytes" and "from bytes" functions are suggested for handling
|
||||
// multi-valued Allocator content items and/or keys/values of BTrees (using
|
||||
// Collate for keys). Types called "scalar" are:
|
||||
//
|
||||
// nil (the typeless one)
|
||||
// bool
|
||||
// all integral types: [u]int8, [u]int16, [u]int32, [u]int, [u]int64
|
||||
// all floating point types: float32, float64
|
||||
// all complex types: complex64, complex128
|
||||
// []byte (64kB max)
|
||||
// string (64kb max)
|
||||
//
|
||||
// Specific implementations
|
||||
//
|
||||
// Included are concrete implementations of some of the VMM interfaces included
|
||||
// to ease serving simple client code or for testing and possibly as an
|
||||
// example. More details in the documentation of such implementations.
|
||||
//
|
||||
// [1]: http://en.wikipedia.org/wiki/Database_model
|
||||
package lldb
|
||||
|
||||
const (
|
||||
fltSz = 0x70 // size of the FLT
|
||||
maxShort = 251
|
||||
maxRq = 65787
|
||||
maxFLTRq = 4112
|
||||
maxHandle = 1<<56 - 1
|
||||
atomLen = 16
|
||||
tagUsedLong = 0xfc
|
||||
tagUsedRelocated = 0xfd
|
||||
tagFreeShort = 0xfe
|
||||
tagFreeLong = 0xff
|
||||
tagNotCompressed = 0
|
||||
tagCompressed = 1
|
||||
)
|
||||
|
||||
// Content size n -> blocksize in atoms.
|
||||
func n2atoms(n int) int {
|
||||
if n > maxShort {
|
||||
n += 2
|
||||
}
|
||||
return (n+1)/16 + 1
|
||||
}
|
||||
|
||||
// Content size n -> number of padding zeros.
|
||||
func n2padding(n int) int {
|
||||
if n > maxShort {
|
||||
n += 2
|
||||
}
|
||||
return 15 - (n+1)&15
|
||||
}
|
||||
|
||||
// Handle <-> offset
|
||||
func h2off(h int64) int64 { return (h + 6) * 16 }
|
||||
func off2h(off int64) int64 { return off/16 - 6 }
|
||||
|
||||
// Get a 7B int64 from b
|
||||
func b2h(b []byte) (h int64) {
|
||||
for _, v := range b[:7] {
|
||||
h = h<<8 | int64(v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Put a 7B int64 into b
|
||||
func h2b(b []byte, h int64) []byte {
|
||||
for i := range b[:7] {
|
||||
b[i], h = byte(h>>48), h<<8
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Content length N (must be in [252, 65787]) to long used block M field.
|
||||
func n2m(n int) (m int) {
|
||||
return n % 0x10000
|
||||
}
|
||||
|
||||
// Long used block M (must be in [0, 65535]) field to content length N.
|
||||
func m2n(m int) (n int) {
|
||||
if m <= maxShort {
|
||||
m += 0x10000
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func bpack(a []byte) []byte {
|
||||
if cap(a) > len(a) {
|
||||
return append([]byte(nil), a...)
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
101
vendor/github.com/cznic/lldb/memfiler.go
generated
vendored
101
vendor/github.com/cznic/lldb/memfiler.go
generated
vendored
@@ -1,101 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// A memory-only implementation of Filer.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/cznic/internal/file"
|
||||
)
|
||||
|
||||
var _ Filer = &MemFiler{}
|
||||
|
||||
// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and
|
||||
// Rollback as no-ops. MemFiler is not automatically persistent, but it has
|
||||
// ReadFrom and WriteTo methods.
|
||||
type MemFiler struct {
|
||||
fi file.Interface
|
||||
nest int
|
||||
}
|
||||
|
||||
// NewMemFiler returns a new MemFiler.
|
||||
func NewMemFiler() *MemFiler {
|
||||
fi, err := file.OpenMem("")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &MemFiler{fi: fi}
|
||||
}
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *MemFiler) BeginUpdate() error {
|
||||
f.nest++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *MemFiler) Close() (err error) {
|
||||
if f.nest != 0 {
|
||||
return &ErrPERM{(f.Name() + ":Close")}
|
||||
}
|
||||
|
||||
return f.fi.Close()
|
||||
}
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *MemFiler) EndUpdate() (err error) {
|
||||
if f.nest == 0 {
|
||||
return &ErrPERM{(f.Name() + ": EndUpdate")}
|
||||
}
|
||||
|
||||
f.nest--
|
||||
return
|
||||
}
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *MemFiler) Name() string { return fmt.Sprintf("%p.memfiler", f) }
|
||||
|
||||
// PunchHole implements Filer.
|
||||
func (f *MemFiler) PunchHole(off, size int64) (err error) { return nil }
|
||||
|
||||
// ReadAt implements Filer.
|
||||
func (f *MemFiler) ReadAt(b []byte, off int64) (n int, err error) { return f.fi.ReadAt(b, off) }
|
||||
|
||||
// ReadFrom is a helper to populate MemFiler's content from r. 'n' reports the
|
||||
// number of bytes read from 'r'.
|
||||
func (f *MemFiler) ReadFrom(r io.Reader) (n int64, err error) { return f.fi.ReadFrom(r) }
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *MemFiler) Rollback() (err error) { return nil }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *MemFiler) Size() (int64, error) {
|
||||
info, err := f.fi.Stat()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return info.Size(), nil
|
||||
}
|
||||
|
||||
// Sync implements Filer.
|
||||
func (f *MemFiler) Sync() error { return nil }
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *MemFiler) Truncate(size int64) (err error) { return f.fi.Truncate(size) }
|
||||
|
||||
// WriteAt implements Filer.
|
||||
func (f *MemFiler) WriteAt(b []byte, off int64) (n int, err error) { return f.fi.WriteAt(b, off) }
|
||||
|
||||
// WriteTo is a helper to copy/persist MemFiler's content to w. If w is also
|
||||
// an io.WriterAt then WriteTo may attempt to _not_ write any big, for some
|
||||
// value of big, runs of zeros, i.e. it will attempt to punch holes, where
|
||||
// possible, in `w` if that happens to be a freshly created or to zero length
|
||||
// truncated OS file. 'n' reports the number of bytes written to 'w'.
|
||||
func (f *MemFiler) WriteTo(w io.Writer) (n int64, err error) { return f.fi.WriteTo(w) }
|
||||
130
vendor/github.com/cznic/lldb/osfiler.go
generated
vendored
130
vendor/github.com/cznic/lldb/osfiler.go
generated
vendored
@@ -1,130 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var _ Filer = (*OSFiler)(nil)
|
||||
|
||||
// OSFile is an os.File like minimal set of methods allowing to construct a
|
||||
// Filer.
|
||||
type OSFile interface {
|
||||
Name() string
|
||||
Stat() (fi os.FileInfo, err error)
|
||||
Sync() (err error)
|
||||
Truncate(size int64) (err error)
|
||||
io.Closer
|
||||
io.Reader
|
||||
io.ReaderAt
|
||||
io.Seeker
|
||||
io.Writer
|
||||
io.WriterAt
|
||||
}
|
||||
|
||||
// OSFiler is like a SimpleFileFiler but based on an OSFile.
|
||||
type OSFiler struct {
|
||||
f OSFile
|
||||
nest int
|
||||
size int64 // not set if < 0
|
||||
}
|
||||
|
||||
// NewOSFiler returns a Filer from an OSFile. This Filer is like the
|
||||
// SimpleFileFiler, it does not implement the transaction related methods.
|
||||
func NewOSFiler(f OSFile) (r *OSFiler) {
|
||||
return &OSFiler{
|
||||
f: f,
|
||||
size: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *OSFiler) BeginUpdate() (err error) {
|
||||
f.nest++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *OSFiler) Close() (err error) {
|
||||
if f.nest != 0 {
|
||||
return &ErrPERM{(f.Name() + ":Close")}
|
||||
}
|
||||
|
||||
return f.f.Close()
|
||||
}
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *OSFiler) EndUpdate() (err error) {
|
||||
if f.nest == 0 {
|
||||
return &ErrPERM{(f.Name() + ":EndUpdate")}
|
||||
}
|
||||
|
||||
f.nest--
|
||||
return
|
||||
}
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *OSFiler) Name() string {
|
||||
return f.f.Name()
|
||||
}
|
||||
|
||||
// PunchHole implements Filer.
|
||||
func (f *OSFiler) PunchHole(off, size int64) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// ReadAt implements Filer.
|
||||
func (f *OSFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
return f.f.ReadAt(b, off)
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *OSFiler) Rollback() (err error) { return }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *OSFiler) Size() (n int64, err error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := f.f.Stat()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
return f.size, nil
|
||||
}
|
||||
|
||||
// Sync implements Filer.
|
||||
func (f *OSFiler) Sync() (err error) {
|
||||
return f.f.Sync()
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *OSFiler) Truncate(size int64) (err error) {
|
||||
if size < 0 {
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return f.f.Truncate(size)
|
||||
}
|
||||
|
||||
// WriteAt implements Filer.
|
||||
func (f *OSFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := os.Stat(f.f.Name())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, int64(len(b))+off)
|
||||
return f.f.WriteAt(b, off)
|
||||
}
|
||||
99
vendor/github.com/cznic/lldb/simplefilefiler.go
generated
vendored
99
vendor/github.com/cznic/lldb/simplefilefiler.go
generated
vendored
@@ -1,99 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// A basic os.File backed Filer.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/cznic/internal/file"
|
||||
)
|
||||
|
||||
var _ Filer = &SimpleFileFiler{}
|
||||
|
||||
// SimpleFileFiler is an os.File backed Filer intended for use where structural
|
||||
// consistency can be reached by other means (SimpleFileFiler is for example
|
||||
// wrapped in eg. an RollbackFiler or ACIDFiler0) or where persistence is not
|
||||
// required (temporary/working data sets).
|
||||
//
|
||||
// SimpleFileFiler is the most simple os.File backed Filer implementation as it
|
||||
// does not really implement BeginUpdate and EndUpdate/Rollback in any way
|
||||
// which would protect the structural integrity of data. If misused e.g. as a
|
||||
// real database storage w/o other measures, it can easily cause data loss
|
||||
// when, for example, a power outage occurs or the updating process terminates
|
||||
// abruptly.
|
||||
type SimpleFileFiler struct {
|
||||
fi file.Interface
|
||||
name string
|
||||
nest int
|
||||
}
|
||||
|
||||
// NewSimpleFileFiler returns a new SimpleFileFiler.
|
||||
func NewSimpleFileFiler(f *os.File) *SimpleFileFiler {
|
||||
fi, err := file.Open(f)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
sf := &SimpleFileFiler{fi: fi, name: f.Name()}
|
||||
return sf
|
||||
}
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *SimpleFileFiler) BeginUpdate() error {
|
||||
f.nest++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *SimpleFileFiler) Close() (err error) {
|
||||
if f.nest != 0 {
|
||||
return &ErrPERM{(f.Name() + ":Close")}
|
||||
}
|
||||
|
||||
return f.fi.Close()
|
||||
}
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *SimpleFileFiler) EndUpdate() (err error) {
|
||||
if f.nest == 0 {
|
||||
return &ErrPERM{(f.Name() + ":EndUpdate")}
|
||||
}
|
||||
|
||||
f.nest--
|
||||
return
|
||||
}
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *SimpleFileFiler) Name() string { return f.name }
|
||||
|
||||
// PunchHole implements Filer.
|
||||
func (f *SimpleFileFiler) PunchHole(off, size int64) (err error) { return nil }
|
||||
|
||||
// ReadAt implements Filer.
|
||||
func (f *SimpleFileFiler) ReadAt(b []byte, off int64) (n int, err error) { return f.fi.ReadAt(b, off) }
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *SimpleFileFiler) Rollback() (err error) { return nil }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *SimpleFileFiler) Size() (int64, error) {
|
||||
info, err := f.fi.Stat()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return info.Size(), nil
|
||||
}
|
||||
|
||||
// Sync implements Filer.
|
||||
func (f *SimpleFileFiler) Sync() error { return f.fi.Sync() }
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *SimpleFileFiler) Truncate(size int64) (err error) { return f.fi.Truncate(size) }
|
||||
|
||||
// WriteAt implements Filer.
|
||||
func (f *SimpleFileFiler) WriteAt(b []byte, off int64) (n int, err error) { return f.fi.WriteAt(b, off) }
|
||||
615
vendor/github.com/cznic/lldb/xact.go
generated
vendored
615
vendor/github.com/cznic/lldb/xact.go
generated
vendored
@@ -1,615 +0,0 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Structural transactions.
|
||||
|
||||
package lldb
|
||||
|
||||
//DONE+ TransactionalMemoryFiler
|
||||
// ----
|
||||
// Use NewRollbackFiler(myMemFiler, ...)
|
||||
|
||||
/*
|
||||
|
||||
bfBits: 3
|
||||
BenchmarkRollbackFiler 20000000 102 ns/op 9.73 MB/s
|
||||
|
||||
bfBits: 4
|
||||
BenchmarkRollbackFiler 50000000 55.7 ns/op 17.95 MB/s
|
||||
|
||||
bfBits: 5
|
||||
BenchmarkRollbackFiler 100000000 32.2 ns/op 31.06 MB/s
|
||||
|
||||
bfBits: 6
|
||||
BenchmarkRollbackFiler 100000000 20.6 ns/op 48.46 MB/s
|
||||
|
||||
bfBits: 7
|
||||
BenchmarkRollbackFiler 100000000 15.1 ns/op 66.12 MB/s
|
||||
|
||||
bfBits: 8
|
||||
BenchmarkRollbackFiler 100000000 10.5 ns/op 95.66 MB/s
|
||||
|
||||
bfBits: 9
|
||||
BenchmarkRollbackFiler 200000000 8.02 ns/op 124.74 MB/s
|
||||
|
||||
bfBits: 10
|
||||
BenchmarkRollbackFiler 200000000 9.25 ns/op 108.09 MB/s
|
||||
|
||||
bfBits: 11
|
||||
BenchmarkRollbackFiler 100000000 11.7 ns/op 85.47 MB/s
|
||||
|
||||
bfBits: 12
|
||||
BenchmarkRollbackFiler 100000000 17.2 ns/op 57.99 MB/s
|
||||
|
||||
bfBits: 13
|
||||
BenchmarkRollbackFiler 100000000 32.7 ns/op 30.58 MB/s
|
||||
|
||||
bfBits: 14
|
||||
BenchmarkRollbackFiler 50000000 39.6 ns/op 25.27 MB/s
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/internal/buffer"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var (
|
||||
_ Filer = &bitFiler{} // Ensure bitFiler is a Filer.
|
||||
_ Filer = &RollbackFiler{} // ditto
|
||||
)
|
||||
|
||||
const (
|
||||
bfBits = 12
|
||||
bfSize = 1 << bfBits
|
||||
bfMask = bfSize - 1
|
||||
)
|
||||
|
||||
type (
|
||||
bitPage struct {
|
||||
prev, next *bitPage
|
||||
pdata *[]byte
|
||||
data []byte
|
||||
dirty bool
|
||||
}
|
||||
|
||||
bitFilerMap map[int64]*bitPage
|
||||
|
||||
bitFiler struct {
|
||||
parent Filer
|
||||
m bitFilerMap
|
||||
size int64
|
||||
}
|
||||
)
|
||||
|
||||
func newBitFiler(parent Filer) (f *bitFiler, err error) {
|
||||
sz, err := parent.Size()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return &bitFiler{parent: parent, m: bitFilerMap{}, size: sz}, nil
|
||||
}
|
||||
|
||||
func (f *bitFiler) BeginUpdate() error { panic("internal error") }
|
||||
func (f *bitFiler) EndUpdate() error { panic("internal error") }
|
||||
func (f *bitFiler) Rollback() error { panic("internal error") }
|
||||
func (f *bitFiler) Sync() error { panic("internal error") }
|
||||
|
||||
func (f *bitFiler) Close() (err error) { return }
|
||||
func (f *bitFiler) Name() string { return fmt.Sprintf("%p.bitfiler", f) }
|
||||
func (f *bitFiler) Size() (int64, error) { return f.size, nil }
|
||||
|
||||
func (f *bitFiler) free() {
|
||||
for _, pg := range f.m {
|
||||
buffer.Put(pg.pdata)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *bitFiler) PunchHole(off, size int64) (err error) {
|
||||
first := off >> bfBits
|
||||
if off&bfMask != 0 {
|
||||
first++
|
||||
}
|
||||
off += size - 1
|
||||
last := off >> bfBits
|
||||
if off&bfMask != 0 {
|
||||
last--
|
||||
}
|
||||
if limit := f.size >> bfBits; last > limit {
|
||||
last = limit
|
||||
}
|
||||
for pgI := first; pgI <= last; pgI++ {
|
||||
pg := &bitPage{}
|
||||
pg.pdata = buffer.CGet(bfSize)
|
||||
pg.data = *pg.pdata
|
||||
pg.dirty = true
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
avail := f.size - off
|
||||
pgI := off >> bfBits
|
||||
pgO := int(off & bfMask)
|
||||
rem := len(b)
|
||||
if int64(rem) >= avail {
|
||||
rem = int(avail)
|
||||
err = io.EOF
|
||||
}
|
||||
for rem != 0 && avail > 0 {
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = &bitPage{}
|
||||
pg.pdata = buffer.CGet(bfSize)
|
||||
pg.data = *pg.pdata
|
||||
if f.parent != nil {
|
||||
_, err = f.parent.ReadAt(pg.data, off&^bfMask)
|
||||
if err != nil && !fileutil.IsEOF(err) {
|
||||
return
|
||||
}
|
||||
|
||||
err = nil
|
||||
}
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
nc := copy(b[:mathutil.Min(rem, bfSize)], pg.data[pgO:])
|
||||
pgI++
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
n += nc
|
||||
b = b[nc:]
|
||||
off += int64(nc)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) Truncate(size int64) (err error) {
|
||||
switch {
|
||||
case size < 0:
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
case size == 0:
|
||||
f.m = bitFilerMap{}
|
||||
f.size = 0
|
||||
return
|
||||
}
|
||||
|
||||
first := size >> bfBits
|
||||
if size&bfMask != 0 {
|
||||
first++
|
||||
}
|
||||
last := f.size >> bfBits
|
||||
if f.size&bfMask != 0 {
|
||||
last++
|
||||
}
|
||||
for ; first < last; first++ {
|
||||
if bp, ok := f.m[first]; ok {
|
||||
buffer.Put(bp.pdata)
|
||||
}
|
||||
delete(f.m, first)
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
off0 := off
|
||||
pgI := off >> bfBits
|
||||
pgO := int(off & bfMask)
|
||||
n = len(b)
|
||||
rem := n
|
||||
var nc int
|
||||
for rem != 0 {
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = &bitPage{}
|
||||
pg.pdata = buffer.CGet(bfSize)
|
||||
pg.data = *pg.pdata
|
||||
if f.parent != nil {
|
||||
_, err = f.parent.ReadAt(pg.data, off&^bfMask)
|
||||
if err != nil && !fileutil.IsEOF(err) {
|
||||
return
|
||||
}
|
||||
|
||||
err = nil
|
||||
}
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
nc = copy(pg.data[pgO:], b)
|
||||
pgI++
|
||||
pg.dirty = true
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
b = b[nc:]
|
||||
off += int64(nc)
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, off0+int64(n))
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) link() {
|
||||
for pgI, pg := range f.m {
|
||||
nx, ok := f.m[pgI+1]
|
||||
if !ok || !nx.dirty {
|
||||
continue
|
||||
}
|
||||
|
||||
nx.prev, pg.next = pg, nx
|
||||
}
|
||||
}
|
||||
|
||||
func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
|
||||
f.link()
|
||||
for pgI, pg := range f.m {
|
||||
if !pg.dirty {
|
||||
continue
|
||||
}
|
||||
|
||||
for pg.prev != nil && pg.prev.dirty {
|
||||
pg = pg.prev
|
||||
pgI--
|
||||
}
|
||||
|
||||
for pg != nil && pg.dirty {
|
||||
if _, err := w.WriteAt(pg.data, pgI<<bfBits); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
nwr++
|
||||
pg.dirty = false
|
||||
pg = pg.next
|
||||
pgI++
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// RollbackFiler is a Filer implementing structural transaction handling.
|
||||
// Structural transactions should be small and short lived because all non
|
||||
// committed data are held in memory until committed or discarded by a
|
||||
// Rollback.
|
||||
//
|
||||
// While using RollbackFiler, every intended update of the wrapped Filler, by
|
||||
// WriteAt, Truncate or PunchHole, _must_ be made within a transaction.
|
||||
// Attempts to do it outside of a transaction will return ErrPERM. OTOH,
|
||||
// invoking ReadAt outside of a transaction is not a problem.
|
||||
//
|
||||
// No nested transactions: All updates within a transaction are held in memory.
|
||||
// On a matching EndUpdate the updates held in memory are actually written to
|
||||
// the wrapped Filer.
|
||||
//
|
||||
// Nested transactions: Correct data will be seen from RollbackFiler when any
|
||||
// level of a nested transaction is rollbacked. The actual writing to the
|
||||
// wrapped Filer happens only when the outer most transaction nesting level is
|
||||
// closed.
|
||||
//
|
||||
// Invoking Rollback is an alternative to EndUpdate. It discards all changes
|
||||
// made at the current transaction level and returns the "state" (possibly not
|
||||
// yet persisted) of the Filer to what it was before the corresponding
|
||||
// BeginUpdate.
|
||||
//
|
||||
// During an open transaction, all reads (using ReadAt) are "dirty" reads,
|
||||
// seeing the uncommitted changes made to the Filer's data.
|
||||
//
|
||||
// Lldb databases should be based upon a RollbackFiler.
|
||||
//
|
||||
// With a wrapped MemFiler one gets transactional memory. With, for example a
|
||||
// wrapped disk based SimpleFileFiler it protects against at least some HW
|
||||
// errors - if Rollback is properly invoked on such failures and/or if there's
|
||||
// some WAL or 2PC or whatever other safe mechanism based recovery procedure
|
||||
// used by the client.
|
||||
//
|
||||
// The "real" writes to the wrapped Filer (or WAL instead) go through the
|
||||
// writerAt supplied to NewRollbackFiler.
|
||||
//
|
||||
// List of functions/methods which are recommended to be wrapped in a
|
||||
// BeginUpdate/EndUpdate structural transaction:
|
||||
//
|
||||
// Allocator.Alloc
|
||||
// Allocator.Free
|
||||
// Allocator.Realloc
|
||||
//
|
||||
// CreateBTree
|
||||
// RemoveBTree
|
||||
// BTree.Clear
|
||||
// BTree.Delete
|
||||
// BTree.DeleteAny
|
||||
// BTree.Clear
|
||||
// BTree.Extract
|
||||
// BTree.Get (it can mutate the DB)
|
||||
// BTree.Put
|
||||
// BTree.Set
|
||||
//
|
||||
// NOTE: RollbackFiler is a generic solution intended to wrap Filers provided
|
||||
// by this package which do not implement any of the transactional methods.
|
||||
// RollbackFiler thus _does not_ invoke any of the transactional methods of its
|
||||
// wrapped Filer.
|
||||
//
|
||||
// RollbackFiler is safe for concurrent use by multiple goroutines.
|
||||
type RollbackFiler struct {
|
||||
mu sync.RWMutex
|
||||
inCallbackMu sync.RWMutex
|
||||
bitFiler *bitFiler
|
||||
checkpoint func(int64) error
|
||||
f Filer
|
||||
writerAt io.WriterAt
|
||||
|
||||
// afterRollback, if not nil, is called after performing Rollback
|
||||
// without errros.
|
||||
afterRollback func() error
|
||||
tlevel int // transaction nesting level, 0 == not in transaction
|
||||
closed bool
|
||||
inCallback bool
|
||||
}
|
||||
|
||||
// NewRollbackFiler returns a RollbackFiler wrapping f.
|
||||
//
|
||||
// The checkpoint parameter
|
||||
//
|
||||
// The checkpoint function is called after closing (by EndUpdate) the upper
|
||||
// most level open transaction if all calls of writerAt were successful and the
|
||||
// DB (or eg. a WAL) is thus now in a consistent state (virtually, in the ideal
|
||||
// world with no write caches, no HW failures, no process crashes, ...).
|
||||
//
|
||||
// NOTE: In, for example, a 2PC it is necessary to reflect also the sz
|
||||
// parameter as the new file size (as in the parameter to Truncate). All
|
||||
// changes were successfully written already by writerAt before invoking
|
||||
// checkpoint.
|
||||
//
|
||||
// The writerAt parameter
|
||||
//
|
||||
// The writerAt interface is used to commit the updates of the wrapped Filer.
|
||||
// If any invocation of writerAt fails then a non nil error will be returned
|
||||
// from EndUpdate and checkpoint will _not_ ne called. Neither is necessary to
|
||||
// call Rollback. The rule of thumb: The [structural] transaction [level] is
|
||||
// closed by invoking exactly once one of EndUpdate _or_ Rollback.
|
||||
//
|
||||
// It is presumed that writerAt uses WAL or 2PC or whatever other safe
|
||||
// mechanism to physically commit the updates.
|
||||
//
|
||||
// Updates performed by invocations of writerAt are byte-precise, but not
|
||||
// necessarily maximum possible length precise. IOW, for example an update
|
||||
// crossing page boundaries may be performed by more than one writerAt
|
||||
// invocation. No offset sorting is performed. This may change if it proves
|
||||
// to be a problem. Such change would be considered backward compatible.
|
||||
//
|
||||
// NOTE: Using RollbackFiler, but failing to ever invoke a matching "closing"
|
||||
// EndUpdate after an "opening" BeginUpdate means neither writerAt or
|
||||
// checkpoint will ever get called - with all the possible data loss
|
||||
// consequences.
|
||||
func NewRollbackFiler(f Filer, checkpoint func(sz int64) error, writerAt io.WriterAt) (r *RollbackFiler, err error) {
|
||||
if f == nil || checkpoint == nil || writerAt == nil {
|
||||
return nil, &ErrINVAL{Src: "lldb.NewRollbackFiler, nil argument"}
|
||||
}
|
||||
|
||||
return &RollbackFiler{
|
||||
checkpoint: checkpoint,
|
||||
f: f,
|
||||
writerAt: writerAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) BeginUpdate() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
parent := r.f
|
||||
if r.tlevel != 0 {
|
||||
parent = r.bitFiler
|
||||
}
|
||||
r.bitFiler, err = newBitFiler(parent)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.tlevel++
|
||||
return
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
//
|
||||
// Close will return an error if not invoked at nesting level 0. However, to
|
||||
// allow emergency closing from eg. a signal handler; if Close is invoked
|
||||
// within an open transaction(s), it rollbacks any non committed open
|
||||
// transactions and performs the Close operation.
|
||||
//
|
||||
// IOW: Regardless of the transaction nesting level the Close is always
|
||||
// performed but any uncommitted transaction data are lost.
|
||||
func (r *RollbackFiler) Close() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.closed {
|
||||
return &ErrPERM{r.f.Name() + ": Already closed"}
|
||||
}
|
||||
|
||||
r.closed = true
|
||||
if err = r.f.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.tlevel != 0 {
|
||||
err = &ErrPERM{r.f.Name() + ": Close inside an open transaction"}
|
||||
}
|
||||
|
||||
if r.bitFiler != nil {
|
||||
r.bitFiler.free()
|
||||
r.bitFiler = nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) EndUpdate() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + " : EndUpdate outside of a transaction"}
|
||||
}
|
||||
|
||||
sz, err := r.size() // Cannot call .Size() -> deadlock
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.tlevel--
|
||||
bf := r.bitFiler
|
||||
parent := bf.parent
|
||||
w := r.writerAt
|
||||
if r.tlevel != 0 {
|
||||
w = parent
|
||||
}
|
||||
nwr, err := bf.dumpDirty(w)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case r.tlevel == 0:
|
||||
defer func() {
|
||||
r.bitFiler.free()
|
||||
r.bitFiler = nil
|
||||
}()
|
||||
|
||||
if nwr == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
return r.checkpoint(sz)
|
||||
default:
|
||||
r.bitFiler.free()
|
||||
r.bitFiler = parent.(*bitFiler)
|
||||
sz, _ := bf.Size() // bitFiler.Size() never returns err != nil
|
||||
return parent.Truncate(sz)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Name() string {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
return r.f.Name()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) PunchHole(off, size int64) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + ": PunchHole outside of a transaction"}
|
||||
}
|
||||
|
||||
if off < 0 {
|
||||
return &ErrINVAL{r.f.Name() + ": PunchHole off", off}
|
||||
}
|
||||
|
||||
if size < 0 || off+size > r.bitFiler.size {
|
||||
return &ErrINVAL{r.f.Name() + ": PunchHole size", size}
|
||||
}
|
||||
|
||||
return r.bitFiler.PunchHole(off, size)
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
r.inCallbackMu.RLock()
|
||||
defer r.inCallbackMu.RUnlock()
|
||||
if !r.inCallback {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
}
|
||||
if r.tlevel == 0 {
|
||||
return r.f.ReadAt(b, off)
|
||||
}
|
||||
|
||||
return r.bitFiler.ReadAt(b, off)
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Rollback() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + ": Rollback outside of a transaction"}
|
||||
}
|
||||
|
||||
if r.tlevel > 1 {
|
||||
r.bitFiler.free()
|
||||
r.bitFiler = r.bitFiler.parent.(*bitFiler)
|
||||
}
|
||||
r.tlevel--
|
||||
if f := r.afterRollback; f != nil {
|
||||
r.inCallbackMu.Lock()
|
||||
r.inCallback = true
|
||||
r.inCallbackMu.Unlock()
|
||||
defer func() {
|
||||
r.inCallbackMu.Lock()
|
||||
r.inCallback = false
|
||||
r.inCallbackMu.Unlock()
|
||||
}()
|
||||
return f()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *RollbackFiler) size() (sz int64, err error) {
|
||||
if r.tlevel == 0 {
|
||||
return r.f.Size()
|
||||
}
|
||||
|
||||
return r.bitFiler.Size()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Size() (sz int64, err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
return r.size()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Sync() error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
return r.f.Sync()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Truncate(size int64) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + ": Truncate outside of a transaction"}
|
||||
}
|
||||
|
||||
return r.bitFiler.Truncate(size)
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return 0, &ErrPERM{r.f.Name() + ": WriteAt outside of a transaction"}
|
||||
}
|
||||
|
||||
return r.bitFiler.WriteAt(b, off)
|
||||
}
|
||||
27
vendor/github.com/cznic/mathutil/LICENSE
generated
vendored
27
vendor/github.com/cznic/mathutil/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
207
vendor/github.com/cznic/mathutil/bits.go
generated
vendored
207
vendor/github.com/cznic/mathutil/bits.go
generated
vendored
@@ -1,207 +0,0 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// BitLenByte returns the bit width of the non zero part of n.
|
||||
func BitLenByte(n byte) int {
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLenUint16 returns the bit width of the non zero part of n.
|
||||
func BitLenUint16(n uint16) int {
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLenUint32 returns the bit width of the non zero part of n.
|
||||
func BitLenUint32(n uint32) int {
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLen returns the bit width of the non zero part of n.
|
||||
func BitLen(n int) int { // Should handle correctly [future] 64 bit Go ints
|
||||
if IntBits == 64 {
|
||||
return BitLenUint64(uint64(n))
|
||||
}
|
||||
|
||||
if b := byte(n >> 24); b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := byte(n >> 16); b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := byte(n >> 8); b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[byte(n)] + 1
|
||||
}
|
||||
|
||||
// BitLenUint returns the bit width of the non zero part of n.
|
||||
func BitLenUint(n uint) int { // Should handle correctly [future] 64 bit Go uints
|
||||
if IntBits == 64 {
|
||||
return BitLenUint64(uint64(n))
|
||||
}
|
||||
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLenUint64 returns the bit width of the non zero part of n.
|
||||
func BitLenUint64(n uint64) int {
|
||||
if b := n >> 56; b != 0 {
|
||||
return log2[b] + 56 + 1
|
||||
}
|
||||
|
||||
if b := n >> 48; b != 0 {
|
||||
return log2[b] + 48 + 1
|
||||
}
|
||||
|
||||
if b := n >> 40; b != 0 {
|
||||
return log2[b] + 40 + 1
|
||||
}
|
||||
|
||||
if b := n >> 32; b != 0 {
|
||||
return log2[b] + 32 + 1
|
||||
}
|
||||
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// BitLenUintptr returns the bit width of the non zero part of n.
|
||||
func BitLenUintptr(n uintptr) int {
|
||||
if b := n >> 56; b != 0 {
|
||||
return log2[b] + 56 + 1
|
||||
}
|
||||
|
||||
if b := n >> 48; b != 0 {
|
||||
return log2[b] + 48 + 1
|
||||
}
|
||||
|
||||
if b := n >> 40; b != 0 {
|
||||
return log2[b] + 40 + 1
|
||||
}
|
||||
|
||||
if b := n >> 32; b != 0 {
|
||||
return log2[b] + 32 + 1
|
||||
}
|
||||
|
||||
if b := n >> 24; b != 0 {
|
||||
return log2[b] + 24 + 1
|
||||
}
|
||||
|
||||
if b := n >> 16; b != 0 {
|
||||
return log2[b] + 16 + 1
|
||||
}
|
||||
|
||||
if b := n >> 8; b != 0 {
|
||||
return log2[b] + 8 + 1
|
||||
}
|
||||
|
||||
return log2[n] + 1
|
||||
}
|
||||
|
||||
// PopCountByte returns population count of n (number of bits set in n).
|
||||
func PopCountByte(n byte) int {
|
||||
return int(popcnt[n])
|
||||
}
|
||||
|
||||
// PopCountUint16 returns population count of n (number of bits set in n).
|
||||
func PopCountUint16(n uint16) int {
|
||||
return int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)])
|
||||
}
|
||||
|
||||
// PopCountUint32 returns population count of n (number of bits set in n).
|
||||
func PopCountUint32(n uint32) int {
|
||||
return int(popcnt[byte(n>>24)]) + int(popcnt[byte(n>>16)]) +
|
||||
int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)])
|
||||
}
|
||||
|
||||
// PopCount returns population count of n (number of bits set in n).
|
||||
func PopCount(n int) int { // Should handle correctly [future] 64 bit Go ints
|
||||
if IntBits == 64 {
|
||||
return PopCountUint64(uint64(n))
|
||||
}
|
||||
|
||||
return PopCountUint32(uint32(n))
|
||||
}
|
||||
|
||||
// PopCountUint returns population count of n (number of bits set in n).
|
||||
func PopCountUint(n uint) int { // Should handle correctly [future] 64 bit Go uints
|
||||
if IntBits == 64 {
|
||||
return PopCountUint64(uint64(n))
|
||||
}
|
||||
|
||||
return PopCountUint32(uint32(n))
|
||||
}
|
||||
|
||||
// PopCountUintptr returns population count of n (number of bits set in n).
|
||||
func PopCountUintptr(n uintptr) int {
|
||||
if UintPtrBits == 64 {
|
||||
return PopCountUint64(uint64(n))
|
||||
}
|
||||
|
||||
return PopCountUint32(uint32(n))
|
||||
}
|
||||
|
||||
// PopCountUint64 returns population count of n (number of bits set in n).
|
||||
func PopCountUint64(n uint64) int {
|
||||
return int(popcnt[byte(n>>56)]) + int(popcnt[byte(n>>48)]) +
|
||||
int(popcnt[byte(n>>40)]) + int(popcnt[byte(n>>32)]) +
|
||||
int(popcnt[byte(n>>24)]) + int(popcnt[byte(n>>16)]) +
|
||||
int(popcnt[byte(n>>8)]) + int(popcnt[byte(n)])
|
||||
}
|
||||
|
||||
// PopCountBigInt returns population count of |n| (number of bits set in |n|).
|
||||
func PopCountBigInt(n *big.Int) (r int) {
|
||||
for _, v := range n.Bits() {
|
||||
r += PopCountUintptr(uintptr(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
46
vendor/github.com/cznic/mathutil/envelope.go
generated
vendored
46
vendor/github.com/cznic/mathutil/envelope.go
generated
vendored
@@ -1,46 +0,0 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
// Approximation type determines approximation methods used by e.g. Envelope.
|
||||
type Approximation int
|
||||
|
||||
// Specific approximation method tags
|
||||
const (
|
||||
_ Approximation = iota
|
||||
Linear // As named
|
||||
Sinusoidal // Smooth for all derivations
|
||||
)
|
||||
|
||||
// Envelope is an utility for defining simple curves using a small (usually)
|
||||
// set of data points. Envelope returns a value defined by x, points and
|
||||
// approximation. The value of x must be in [0,1) otherwise the result is
|
||||
// undefined or the function may panic. Points are interpreted as dividing the
|
||||
// [0,1) interval in len(points)-1 sections, so len(points) must be > 1 or the
|
||||
// function may panic. According to the left and right points closing/adjacent
|
||||
// to the section the resulting value is interpolated using the chosen
|
||||
// approximation method. Unsupported values of approximation are silently
|
||||
// interpreted as 'Linear'.
|
||||
func Envelope(x float64, points []float64, approximation Approximation) float64 {
|
||||
step := 1 / float64(len(points)-1)
|
||||
fslot := math.Floor(x / step)
|
||||
mod := x - fslot*step
|
||||
slot := int(fslot)
|
||||
l, r := points[slot], points[slot+1]
|
||||
rmod := mod / step
|
||||
switch approximation {
|
||||
case Sinusoidal:
|
||||
k := (math.Sin(math.Pi*(rmod-0.5)) + 1) / 2
|
||||
return l + (r-l)*k
|
||||
case Linear:
|
||||
fallthrough
|
||||
default:
|
||||
return l + (r-l)*rmod
|
||||
}
|
||||
}
|
||||
48
vendor/github.com/cznic/mathutil/example/example.go
generated
vendored
48
vendor/github.com/cznic/mathutil/example/example.go
generated
vendored
@@ -1,48 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"github.com/cznic/mathutil"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
$ # Usage e.g.:
|
||||
$ go run example.go -max 1024 > mathutil.dat # generate 1kB of "random" data
|
||||
|
||||
*/
|
||||
func main() {
|
||||
r, err := mathutil.NewFC32(math.MinInt32, math.MaxInt32, true)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var mflag uint64
|
||||
flag.Uint64Var(&mflag, "max", 0, "limit output to max bytes")
|
||||
flag.Parse()
|
||||
stdout := bufio.NewWriter(os.Stdout)
|
||||
if mflag != 0 {
|
||||
for i := uint64(0); i < mflag; i++ {
|
||||
if err := stdout.WriteByte(byte(r.Next())); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
stdout.Flush()
|
||||
return
|
||||
}
|
||||
|
||||
for stdout.WriteByte(byte(r.Next())) == nil {
|
||||
}
|
||||
}
|
||||
66
vendor/github.com/cznic/mathutil/example2/example2.go
generated
vendored
66
vendor/github.com/cznic/mathutil/example2/example2.go
generated
vendored
@@ -1,66 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/cznic/mathutil"
|
||||
"image"
|
||||
"image/png"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"math/rand"
|
||||
)
|
||||
|
||||
// $ go run example2.go # view rand.png and rnd.png by your favorite pic viewer
|
||||
//
|
||||
// see http://www.boallen.com/random-numbers.html
|
||||
func main() {
|
||||
sqr := image.Rect(0, 0, 511, 511)
|
||||
r, err := mathutil.NewFC32(math.MinInt32, math.MaxInt32, true)
|
||||
if err != nil {
|
||||
log.Fatal("NewFC32", err)
|
||||
}
|
||||
|
||||
img := image.NewGray(sqr)
|
||||
for y := 0; y < 512; y++ {
|
||||
for x := 0; x < 512; x++ {
|
||||
if r.Next()&1 != 0 {
|
||||
img.Set(x, y, image.White)
|
||||
}
|
||||
}
|
||||
}
|
||||
buf := bytes.NewBuffer(nil)
|
||||
if err := png.Encode(buf, img); err != nil {
|
||||
log.Fatal("Encode rnd.png ", err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile("rnd.png", buf.Bytes(), 0666); err != nil {
|
||||
log.Fatal("ioutil.WriteFile/rnd.png ", err)
|
||||
}
|
||||
|
||||
r2 := rand.New(rand.NewSource(0))
|
||||
img = image.NewGray(sqr)
|
||||
for y := 0; y < 512; y++ {
|
||||
for x := 0; x < 512; x++ {
|
||||
if r2.Int()&1 != 0 {
|
||||
img.Set(x, y, image.White)
|
||||
}
|
||||
}
|
||||
}
|
||||
buf = bytes.NewBuffer(nil)
|
||||
if err := png.Encode(buf, img); err != nil {
|
||||
log.Fatal("Encode rand.png ", err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile("rand.png", buf.Bytes(), 0666); err != nil {
|
||||
log.Fatal("ioutil.WriteFile/rand.png ", err)
|
||||
}
|
||||
}
|
||||
43
vendor/github.com/cznic/mathutil/example3/example3.go
generated
vendored
43
vendor/github.com/cznic/mathutil/example3/example3.go
generated
vendored
@@ -1,43 +0,0 @@
|
||||
// Copyright (c) 2011 CZ.NIC z.s.p.o. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// blame: jnml, labs.nic.cz
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
$ # Usage e.g.:
|
||||
$ go run example3.go -max 1024 > rand.dat # generate 1kB of "random" data
|
||||
|
||||
*/
|
||||
func main() {
|
||||
r := rand.New(rand.NewSource(1))
|
||||
var mflag uint64
|
||||
flag.Uint64Var(&mflag, "max", 0, "limit output to max bytes")
|
||||
flag.Parse()
|
||||
stdout := bufio.NewWriter(os.Stdout)
|
||||
if mflag != 0 {
|
||||
for i := uint64(0); i < mflag; i++ {
|
||||
if err := stdout.WriteByte(byte(r.Int())); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
stdout.Flush()
|
||||
return
|
||||
}
|
||||
|
||||
for stdout.WriteByte(byte(r.Int())) == nil {
|
||||
}
|
||||
}
|
||||
90
vendor/github.com/cznic/mathutil/example4/main.go
generated
vendored
90
vendor/github.com/cznic/mathutil/example4/main.go
generated
vendored
@@ -1,90 +0,0 @@
|
||||
// Copyright (c) 2011 jnml. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Let QRN be the number of quadratic residues of N. Let Q be QRN/N. From a
|
||||
// sorted list of primorial products < 2^32 find "record breakers". "Record
|
||||
// breaker" is N with new lowest Q.
|
||||
//
|
||||
// There are only 49 "record breakers" < 2^32.
|
||||
//
|
||||
// To run the example $ go run main.go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
"github.com/cznic/sortutil"
|
||||
)
|
||||
|
||||
func main() {
|
||||
pp := mathutil.PrimorialProductsUint32(0, math.MaxUint32, 32)
|
||||
sort.Sort(sortutil.Uint32Slice(pp))
|
||||
var bestN, bestD uint32 = 1, 1
|
||||
order, checks := 0, 0
|
||||
var ixDirty uint32
|
||||
m := make([]byte, math.MaxUint32>>3)
|
||||
for _, n := range pp {
|
||||
for i := range m[:ixDirty+1] {
|
||||
m[i] = 0
|
||||
}
|
||||
ixDirty = 0
|
||||
checks++
|
||||
limit0 := mathutil.QScaleUint32(n, bestN, bestD)
|
||||
if limit0 > math.MaxUint32 {
|
||||
panic(0)
|
||||
}
|
||||
limit := uint32(limit0)
|
||||
n64 := uint64(n)
|
||||
hi := n64 >> 1
|
||||
hits := uint32(0)
|
||||
check := true
|
||||
fmt.Printf("\r%10d %d/%d", n, checks, len(pp))
|
||||
t0 := time.Now()
|
||||
for i := uint64(0); i < hi; i++ {
|
||||
sq := uint32(i * i % n64)
|
||||
ix := sq >> 3
|
||||
msk := byte(1 << (sq & 7))
|
||||
if m[ix]&msk == 0 {
|
||||
hits++
|
||||
if hits >= limit {
|
||||
check = false
|
||||
break
|
||||
}
|
||||
}
|
||||
m[ix] |= msk
|
||||
if ix > ixDirty {
|
||||
ixDirty = ix
|
||||
}
|
||||
}
|
||||
|
||||
adjPrime := ".." // Composite before
|
||||
if mathutil.IsPrime(n - 1) {
|
||||
adjPrime = "P." // Prime before
|
||||
}
|
||||
switch mathutil.IsPrime(n + 1) {
|
||||
case true:
|
||||
adjPrime += "P" // Prime after
|
||||
case false:
|
||||
adjPrime += "." // Composite after
|
||||
}
|
||||
|
||||
if check && mathutil.QCmpUint32(hits, n, bestN, bestD) < 0 {
|
||||
order++
|
||||
d := time.Since(t0)
|
||||
bestN, bestD = hits, n
|
||||
q := float64(hits) / float64(n)
|
||||
fmt.Printf(
|
||||
"\r%2s #%03d %d %d %.2f %.2E %s %s %v\n",
|
||||
adjPrime, order, n, hits,
|
||||
1/q, q, d, time.Now().Format("15:04:05"), mathutil.FactorInt(n),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
83
vendor/github.com/cznic/mathutil/ff/main.go
generated
vendored
83
vendor/github.com/cznic/mathutil/ff/main.go
generated
vendored
@@ -1,83 +0,0 @@
|
||||
// Copyright (c) jnml. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Factor Finder - searches for Mersenne number factors of one specific special
|
||||
// form.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
const (
|
||||
pp = 1
|
||||
pp2 = 10
|
||||
)
|
||||
|
||||
var (
|
||||
_1 = big.NewInt(1)
|
||||
_2 = big.NewInt(2)
|
||||
)
|
||||
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(2)
|
||||
oClass := flag.Uint64("c", 2, `factor "class" number`)
|
||||
oDuration := flag.Duration("d", time.Second, "duration to spend on one class")
|
||||
flag.Parse()
|
||||
class := *oClass
|
||||
for class&1 != 0 {
|
||||
class >>= 1
|
||||
}
|
||||
class = mathutil.MaxUint64(class, 2)
|
||||
|
||||
for {
|
||||
c := time.After(*oDuration)
|
||||
factor := big.NewInt(0)
|
||||
factor.SetUint64(class)
|
||||
exp := big.NewInt(0)
|
||||
oneClass:
|
||||
for {
|
||||
select {
|
||||
case <-c:
|
||||
break oneClass
|
||||
default:
|
||||
}
|
||||
|
||||
exp.Set(factor)
|
||||
factor.Lsh(factor, 1)
|
||||
factor.Add(factor, _1)
|
||||
if !factor.ProbablyPrime(pp) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !exp.ProbablyPrime(pp) {
|
||||
continue
|
||||
}
|
||||
|
||||
if mathutil.ModPowBigInt(_2, exp, factor).Cmp(_1) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if !factor.ProbablyPrime(pp2) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !exp.ProbablyPrime(pp2) {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Printf("%d: %s | M%s (%d bits)\n", class, factor, exp, factor.BitLen())
|
||||
}
|
||||
|
||||
class += 2
|
||||
}
|
||||
}
|
||||
1108
vendor/github.com/cznic/mathutil/mathutil.go
generated
vendored
1108
vendor/github.com/cznic/mathutil/mathutil.go
generated
vendored
File diff suppressed because it is too large
Load Diff
297
vendor/github.com/cznic/mathutil/mersenne/mersenne.go
generated
vendored
297
vendor/github.com/cznic/mathutil/mersenne/mersenne.go
generated
vendored
@@ -1,297 +0,0 @@
|
||||
// Copyright (c) 2014 The mersenne Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package mersenne collects utilities related to Mersenne numbers[1] and/or some
|
||||
of their properties.
|
||||
|
||||
Exponent
|
||||
|
||||
In this documentation the term 'exponent' refers to 'n' of a Mersenne number Mn
|
||||
equal to 2^n-1. This package supports only uint32 sized exponents. New()
|
||||
currently supports exponents only up to math.MaxInt32 (31 bits, up to 256 MB
|
||||
required to represent such Mn in memory as a big.Int).
|
||||
|
||||
Links
|
||||
|
||||
Referenced from above:
|
||||
[1] http://en.wikipedia.org/wiki/Mersenne_number
|
||||
*/
|
||||
package mersenne
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/big"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
"github.com/remyoudompheng/bigfft"
|
||||
)
|
||||
|
||||
var (
|
||||
_0 = big.NewInt(0)
|
||||
_1 = big.NewInt(1)
|
||||
_2 = big.NewInt(2)
|
||||
)
|
||||
|
||||
// Knowns list the exponent of currently (March 2012) known Mersenne primes
|
||||
// exponents in order. See also: http://oeis.org/A000043 for a partial list.
|
||||
var Knowns = []uint32{
|
||||
2, // #1
|
||||
3, // #2
|
||||
5, // #3
|
||||
7, // #4
|
||||
13, // #5
|
||||
17, // #6
|
||||
19, // #7
|
||||
31, // #8
|
||||
61, // #9
|
||||
89, // #10
|
||||
|
||||
107, // #11
|
||||
127, // #12
|
||||
521, // #13
|
||||
607, // #14
|
||||
1279, // #15
|
||||
2203, // #16
|
||||
2281, // #17
|
||||
3217, // #18
|
||||
4253, // #19
|
||||
4423, // #20
|
||||
|
||||
9689, // #21
|
||||
9941, // #22
|
||||
11213, // #23
|
||||
19937, // #24
|
||||
21701, // #25
|
||||
23209, // #26
|
||||
44497, // #27
|
||||
86243, // #28
|
||||
110503, // #29
|
||||
132049, // #30
|
||||
|
||||
216091, // #31
|
||||
756839, // #32
|
||||
859433, // #33
|
||||
1257787, // #34
|
||||
1398269, // #35
|
||||
2976221, // #36
|
||||
3021377, // #37
|
||||
6972593, // #38
|
||||
13466917, // #39
|
||||
20996011, // #40
|
||||
|
||||
24036583, // #41
|
||||
25964951, // #42
|
||||
30402457, // #43
|
||||
32582657, // #44
|
||||
37156667, // #45
|
||||
42643801, // #46
|
||||
43112609, // #47
|
||||
57885161, // #48
|
||||
74207281, // #49
|
||||
}
|
||||
|
||||
// Known maps the exponent of known Mersenne primes its ordinal number/rank.
|
||||
// Ranks > 41 are currently provisional.
|
||||
var Known map[uint32]int
|
||||
|
||||
func init() {
|
||||
Known = map[uint32]int{}
|
||||
for i, v := range Knowns {
|
||||
Known[v] = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
// New returns Mn == 2^n-1 for n <= math.MaxInt32 or nil otherwise.
|
||||
func New(n uint32) (m *big.Int) {
|
||||
if n > math.MaxInt32 {
|
||||
return
|
||||
}
|
||||
|
||||
m = big.NewInt(0)
|
||||
return m.Sub(m.SetBit(m, int(n), 1), _1)
|
||||
}
|
||||
|
||||
// HasFactorUint32 returns true if d | Mn. Typical run time for a 32 bit factor
|
||||
// and a 32 bit exponent is < 1 µs.
|
||||
func HasFactorUint32(d, n uint32) bool {
|
||||
return d == 1 || d&1 != 0 && mathutil.ModPowUint32(2, n, d) == 1
|
||||
}
|
||||
|
||||
// HasFactorUint64 returns true if d | Mn. Typical run time for a 64 bit factor
|
||||
// and a 32 bit exponent is < 30 µs.
|
||||
func HasFactorUint64(d uint64, n uint32) bool {
|
||||
return d == 1 || d&1 != 0 && mathutil.ModPowUint64(2, uint64(n), d) == 1
|
||||
}
|
||||
|
||||
// HasFactorBigInt returns true if d | Mn, d > 0. Typical run time for a 128
|
||||
// bit factor and a 32 bit exponent is < 75 µs.
|
||||
func HasFactorBigInt(d *big.Int, n uint32) bool {
|
||||
return d.Cmp(_1) == 0 || d.Sign() > 0 && d.Bit(0) == 1 &&
|
||||
mathutil.ModPowBigInt(_2, big.NewInt(int64(n)), d).Cmp(_1) == 0
|
||||
}
|
||||
|
||||
// HasFactorBigInt2 returns true if d | Mn, d > 0
|
||||
func HasFactorBigInt2(d, n *big.Int) bool {
|
||||
return d.Cmp(_1) == 0 || d.Sign() > 0 && d.Bit(0) == 1 &&
|
||||
mathutil.ModPowBigInt(_2, n, d).Cmp(_1) == 0
|
||||
}
|
||||
|
||||
/*
|
||||
FromFactorBigInt returns n such that d | Mn if n <= max and d is odd. In other
|
||||
cases zero is returned.
|
||||
|
||||
It is conjectured that every odd d ∊ N divides infinitely many Mersenne numbers.
|
||||
The returned n should be the exponent of smallest such Mn.
|
||||
|
||||
NOTE: The computation of n from a given d performs roughly in O(n). It is
|
||||
thus highly recommended to use the 'max' argument to limit the "searched"
|
||||
exponent upper bound as appropriate. Otherwise the computation can take a long
|
||||
time as a large factor can be a divisor of a Mn with exponent above the uint32
|
||||
limits.
|
||||
|
||||
The FromFactorBigInt function is a modification of the original Will
|
||||
Edgington's "reverse method", discussed here:
|
||||
http://tech.groups.yahoo.com/group/primenumbers/message/15061
|
||||
*/
|
||||
func FromFactorBigInt(d *big.Int, max uint32) (n uint32) {
|
||||
if d.Bit(0) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var m big.Int
|
||||
for n < max {
|
||||
m.Add(&m, d)
|
||||
i := 0
|
||||
for ; m.Bit(i) == 1; i++ {
|
||||
if n == math.MaxUint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
n++
|
||||
}
|
||||
m.Rsh(&m, uint(i))
|
||||
if m.Sign() == 0 {
|
||||
if n > max {
|
||||
n = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Mod sets mod to n % Mexp and returns mod. It panics for exp == 0 || exp >=
// math.MaxInt32 || n < 0.
func Mod(mod, n *big.Int, exp uint32) *big.Int {
	if exp == 0 || exp >= math.MaxInt32 || n.Sign() < 0 {
		panic(0)
	}

	// m is used below as an exp-bit mask, so New(exp) evidently builds
	// Mexp = 2^exp - 1 (TODO confirm against New's definition).
	m := New(exp)
	mod.Set(n)
	var x big.Int
	// Reduce using 2^exp ≡ 1 (mod Mexp): repeatedly split mod into its low
	// exp bits and the remaining high part and add the two; each pass shrinks
	// mod until it fits in exp bits.
	for mod.BitLen() > int(exp) {
		x.Set(mod)
		x.Rsh(&x, uint(exp))
		mod.And(mod, m)
		mod.Add(mod, &x)
	}
	// mod is now in [0, Mexp]; fold the single remaining boundary case
	// mod == Mexp down to 0.
	if mod.BitLen() == int(exp) && mod.Cmp(m) == 0 {
		mod.SetInt64(0)
	}
	return mod
}
|
||||
|
||||
// ModPow2 returns x such that 2^Me % Mm == 2^x. It panics for m < 2. Typical
|
||||
// run time is < 1 µs. Use instead of ModPow(2, e, m) wherever possible.
|
||||
func ModPow2(e, m uint32) (x uint32) {
|
||||
/*
|
||||
m < 2 -> panic
|
||||
e == 0 -> x == 0
|
||||
e == 1 -> x == 1
|
||||
|
||||
2^M1 % M2 == 2^1 % 3 == 2^1 10 // 2^1, 3, 5, 7 ... +2k
|
||||
2^M1 % M3 == 2^1 % 7 == 2^1 010 // 2^1, 4, 7, ... +3k
|
||||
2^M1 % M4 == 2^1 % 15 == 2^1 0010 // 2^1, 5, 9, 13... +4k
|
||||
2^M1 % M5 == 2^1 % 31 == 2^1 00010 // 2^1, 6, 11, 16... +5k
|
||||
|
||||
2^M2 % M2 == 2^3 % 3 == 2^1 10.. // 2^3, 5, 7, 9, 11, ... +2k
|
||||
2^M2 % M3 == 2^3 % 7 == 2^0 001... // 2^3, 6, 9, 12, 15, ... +3k
|
||||
2^M2 % M4 == 2^3 % 15 == 2^3 1000 // 2^3, 7, 11, 15, 19, ... +4k
|
||||
2^M2 % M5 == 2^3 % 31 == 2^3 01000 // 2^3, 8, 13, 18, 23, ... +5k
|
||||
|
||||
2^M3 % M2 == 2^7 % 3 == 2^1 10..--.. // 2^3, 5, 7... +2k
|
||||
2^M3 % M3 == 2^7 % 7 == 2^1 010...--- // 2^1, 4, 7... +3k
|
||||
2^M3 % M4 == 2^7 % 15 == 2^3 1000.... // +4k
|
||||
2^M3 % M5 == 2^7 % 31 == 2^2 00100..... // +5k
|
||||
2^M3 % M6 == 2^7 % 63 == 2^1 000010...... // +6k
|
||||
2^M3 % M7 == 2^7 % 127 == 2^0 0000001.......
|
||||
2^M3 % M8 == 2^7 % 255 == 2^7 10000000
|
||||
2^M3 % M9 == 2^7 % 511 == 2^7 010000000
|
||||
|
||||
2^M4 % M2 == 2^15 % 3 == 2^1 10..--..--..--..
|
||||
2^M4 % M3 == 2^15 % 7 == 2^0 1...---...---...
|
||||
2^M4 % M4 == 2^15 % 15 == 2^3 1000....----....
|
||||
2^M4 % M5 == 2^15 % 31 == 2^0 1.....-----.....
|
||||
2^M4 % M6 == 2^15 % 63 == 2^3 1000......------
|
||||
2^M4 % M7 == 2^15 % 127 == 2^1 10.......-------
|
||||
2^M4 % M8 == 2^15 % 255 == 2^7 10000000........
|
||||
2^M4 % M9 == 2^15 % 511 == 2^6 1000000.........
|
||||
*/
|
||||
switch {
|
||||
case m < 2:
|
||||
panic(0)
|
||||
case e < 2:
|
||||
return e
|
||||
}
|
||||
|
||||
if x = mathutil.ModPowUint32(2, e, m); x == 0 {
|
||||
return m - 1
|
||||
}
|
||||
|
||||
return x - 1
|
||||
}
|
||||
|
||||
// ModPow returns b^Me % Mm. Run time grows quickly with 'e' and/or 'm' when b
|
||||
// != 2 (then ModPow2 is used).
|
||||
func ModPow(b, e, m uint32) (r *big.Int) {
|
||||
if m == 1 {
|
||||
return big.NewInt(0)
|
||||
}
|
||||
|
||||
if b == 2 {
|
||||
x := ModPow2(e, m)
|
||||
r = big.NewInt(0)
|
||||
r.SetBit(r, int(x), 1)
|
||||
return
|
||||
}
|
||||
|
||||
bb := big.NewInt(int64(b))
|
||||
r = big.NewInt(1)
|
||||
for ; e != 0; e-- {
|
||||
r = bigfft.Mul(r, bb)
|
||||
Mod(r, r, m)
|
||||
bb = bigfft.Mul(bb, bb)
|
||||
Mod(bb, bb, m)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ProbablyPrime returns true if Mn is prime or is a pseudoprime to base a.
|
||||
// Note: Every Mp, prime p, is a prime or is a pseudoprime to base 2, actually
|
||||
// to every base 2^i, i ∊ [1, p). In contrast - it is conjectured (w/o any
|
||||
// known counterexamples) that no composite Mp, prime p, is a pseudoprime to
|
||||
// base 3.
|
||||
func ProbablyPrime(n, a uint32) bool {
|
||||
//TODO +test, +bench
|
||||
if a == 2 {
|
||||
return ModPow2(n-1, n) == 0
|
||||
}
|
||||
|
||||
nMinus1 := New(n)
|
||||
nMinus1.Sub(nMinus1, _1)
|
||||
x := ModPow(a, n-1, n)
|
||||
return x.Cmp(_1) == 0 || x.Cmp(nMinus1) == 0
|
||||
}
|
||||
39
vendor/github.com/cznic/mathutil/permute.go
generated
vendored
39
vendor/github.com/cznic/mathutil/permute.go
generated
vendored
@@ -1,39 +0,0 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// PermutationFirst generates the first permutation of data.
|
||||
func PermutationFirst(data sort.Interface) {
|
||||
sort.Sort(data)
|
||||
}
|
||||
|
||||
// PermutationNext generates the next permutation of data if possible and
|
||||
// return true. Return false if there is no more permutation left. Based on
|
||||
// the algorithm described here:
|
||||
// http://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
|
||||
func PermutationNext(data sort.Interface) bool {
|
||||
var k, l int
|
||||
for k = data.Len() - 2; ; k-- { // 1.
|
||||
if k < 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if data.Less(k, k+1) {
|
||||
break
|
||||
}
|
||||
}
|
||||
for l = data.Len() - 1; !data.Less(k, l); l-- { // 2.
|
||||
}
|
||||
data.Swap(k, l) // 3.
|
||||
for i, j := k+1, data.Len()-1; i < j; i++ { // 4.
|
||||
data.Swap(i, j)
|
||||
j--
|
||||
}
|
||||
return true
|
||||
}
|
||||
111
vendor/github.com/cznic/mathutil/poly.go
generated
vendored
111
vendor/github.com/cznic/mathutil/poly.go
generated
vendored
@@ -1,111 +0,0 @@
|
||||
// Copyright (c) 2016 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// abs returns |n| as a uint64.
func abs(n int) uint64 {
	if n < 0 {
		return uint64(-n)
	}
	return uint64(n)
}
|
||||
|
||||
// QuadPolyDiscriminant returns the discriminant of a quadratic polynomial in
|
||||
// one variable of the form a*x^2+b*x+c with integer coefficients a, b, c, or
|
||||
// an error on overflow.
|
||||
//
|
||||
// ds is the square of the discriminant. If |ds| is a square number, d is set
|
||||
// to sqrt(|ds|), otherwise d is < 0.
|
||||
func QuadPolyDiscriminant(a, b, c int) (ds, d int, _ error) {
|
||||
if 2*BitLenUint64(abs(b)) > IntBits-1 ||
|
||||
2+BitLenUint64(abs(a))+BitLenUint64(abs(c)) > IntBits-1 {
|
||||
return 0, 0, fmt.Errorf("overflow")
|
||||
}
|
||||
|
||||
ds = b*b - 4*a*c
|
||||
s := ds
|
||||
if s < 0 {
|
||||
s = -s
|
||||
}
|
||||
d64 := SqrtUint64(uint64(s))
|
||||
if d64*d64 != uint64(s) {
|
||||
return ds, -1, nil
|
||||
}
|
||||
|
||||
return ds, int(d64), nil
|
||||
}
|
||||
|
||||
// PolyFactor describes an irreducible factor of a polynomial in one variable
// with integer coefficients P, Q of the form P*x+Q.
type PolyFactor struct {
	P, Q int // the linear factor is P*x + Q
}
|
||||
|
||||
// QuadPolyFactors returns the content and the irreducible factors of the
|
||||
// primitive part of a quadratic polynomial in one variable with integer
|
||||
// coefficients a, b, c of the form a*x^2+b*x+c in integers, or an error on
|
||||
// overflow.
|
||||
//
|
||||
// If the factorization in integers does not exists, the return value is (nil,
|
||||
// nil).
|
||||
//
|
||||
// See also:
|
||||
// https://en.wikipedia.org/wiki/Factorization_of_polynomials#Primitive_part.E2.80.93content_factorization
|
||||
func QuadPolyFactors(a, b, c int) (content int, primitivePart []PolyFactor, _ error) {
|
||||
content = int(GCDUint64(abs(a), GCDUint64(abs(b), abs(c))))
|
||||
switch {
|
||||
case content == 0:
|
||||
content = 1
|
||||
case content > 0:
|
||||
if a < 0 || a == 0 && b < 0 {
|
||||
content = -content
|
||||
}
|
||||
}
|
||||
a /= content
|
||||
b /= content
|
||||
c /= content
|
||||
if a == 0 {
|
||||
if b == 0 {
|
||||
return content, []PolyFactor{{0, c}}, nil
|
||||
}
|
||||
|
||||
if b < 0 && c < 0 {
|
||||
b = -b
|
||||
c = -c
|
||||
}
|
||||
if b < 0 {
|
||||
b = -b
|
||||
c = -c
|
||||
}
|
||||
return content, []PolyFactor{{b, c}}, nil
|
||||
}
|
||||
|
||||
ds, d, err := QuadPolyDiscriminant(a, b, c)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
if ds < 0 || d < 0 {
|
||||
return 0, nil, nil
|
||||
}
|
||||
|
||||
x1num := -b + d
|
||||
x1denom := 2 * a
|
||||
gcd := int(GCDUint64(abs(x1num), abs(x1denom)))
|
||||
x1num /= gcd
|
||||
x1denom /= gcd
|
||||
|
||||
x2num := -b - d
|
||||
x2denom := 2 * a
|
||||
gcd = int(GCDUint64(abs(x2num), abs(x2denom)))
|
||||
x2num /= gcd
|
||||
x2denom /= gcd
|
||||
|
||||
return content, []PolyFactor{{x1denom, -x1num}, {x2denom, -x2num}}, nil
|
||||
}
|
||||
335
vendor/github.com/cznic/mathutil/primes.go
generated
vendored
335
vendor/github.com/cznic/mathutil/primes.go
generated
vendored
@@ -1,335 +0,0 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
// IsPrimeUint16 returns true if n is prime. Typical run time is few ns.
|
||||
func IsPrimeUint16(n uint16) bool {
|
||||
return n > 0 && primes16[n-1] == 1
|
||||
}
|
||||
|
||||
// NextPrimeUint16 returns first prime > n and true if successful or an
|
||||
// undefined value and false if there is no next prime in the uint16 limits.
|
||||
// Typical run time is few ns.
|
||||
func NextPrimeUint16(n uint16) (p uint16, ok bool) {
|
||||
return n + uint16(primes16[n]), n < 65521
|
||||
}
|
||||
|
||||
// IsPrime returns true if n is prime. Typical run time is about 100 ns.
|
||||
//
|
||||
//TODO rename to IsPrimeUint32
|
||||
func IsPrime(n uint32) bool {
|
||||
switch {
|
||||
case n&1 == 0:
|
||||
return n == 2
|
||||
case n%3 == 0:
|
||||
return n == 3
|
||||
case n%5 == 0:
|
||||
return n == 5
|
||||
case n%7 == 0:
|
||||
return n == 7
|
||||
case n%11 == 0:
|
||||
return n == 11
|
||||
case n%13 == 0:
|
||||
return n == 13
|
||||
case n%17 == 0:
|
||||
return n == 17
|
||||
case n%19 == 0:
|
||||
return n == 19
|
||||
case n%23 == 0:
|
||||
return n == 23
|
||||
case n%29 == 0:
|
||||
return n == 29
|
||||
case n%31 == 0:
|
||||
return n == 31
|
||||
case n%37 == 0:
|
||||
return n == 37
|
||||
case n%41 == 0:
|
||||
return n == 41
|
||||
case n%43 == 0:
|
||||
return n == 43
|
||||
case n%47 == 0:
|
||||
return n == 47
|
||||
case n%53 == 0:
|
||||
return n == 53 // Benchmarked optimum
|
||||
case n < 65536:
|
||||
// use table data
|
||||
return IsPrimeUint16(uint16(n))
|
||||
default:
|
||||
mod := ModPowUint32(2, (n+1)/2, n)
|
||||
if mod != 2 && mod != n-2 {
|
||||
return false
|
||||
}
|
||||
blk := &lohi[n>>24]
|
||||
lo, hi := blk.lo, blk.hi
|
||||
for lo <= hi {
|
||||
index := (lo + hi) >> 1
|
||||
liar := liars[index]
|
||||
switch {
|
||||
case n > liar:
|
||||
lo = index + 1
|
||||
case n < liar:
|
||||
hi = index - 1
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// IsPrimeUint64 returns true if n is prime. Typical run time is few tens of µs.
|
||||
//
|
||||
// SPRP bases: http://miller-rabin.appspot.com
|
||||
func IsPrimeUint64(n uint64) bool {
|
||||
switch {
|
||||
case n%2 == 0:
|
||||
return n == 2
|
||||
case n%3 == 0:
|
||||
return n == 3
|
||||
case n%5 == 0:
|
||||
return n == 5
|
||||
case n%7 == 0:
|
||||
return n == 7
|
||||
case n%11 == 0:
|
||||
return n == 11
|
||||
case n%13 == 0:
|
||||
return n == 13
|
||||
case n%17 == 0:
|
||||
return n == 17
|
||||
case n%19 == 0:
|
||||
return n == 19
|
||||
case n%23 == 0:
|
||||
return n == 23
|
||||
case n%29 == 0:
|
||||
return n == 29
|
||||
case n%31 == 0:
|
||||
return n == 31
|
||||
case n%37 == 0:
|
||||
return n == 37
|
||||
case n%41 == 0:
|
||||
return n == 41
|
||||
case n%43 == 0:
|
||||
return n == 43
|
||||
case n%47 == 0:
|
||||
return n == 47
|
||||
case n%53 == 0:
|
||||
return n == 53
|
||||
case n%59 == 0:
|
||||
return n == 59
|
||||
case n%61 == 0:
|
||||
return n == 61
|
||||
case n%67 == 0:
|
||||
return n == 67
|
||||
case n%71 == 0:
|
||||
return n == 71
|
||||
case n%73 == 0:
|
||||
return n == 73
|
||||
case n%79 == 0:
|
||||
return n == 79
|
||||
case n%83 == 0:
|
||||
return n == 83
|
||||
case n%89 == 0:
|
||||
return n == 89 // Benchmarked optimum
|
||||
case n <= math.MaxUint16:
|
||||
return IsPrimeUint16(uint16(n))
|
||||
case n <= math.MaxUint32:
|
||||
return ProbablyPrimeUint32(uint32(n), 11000544) &&
|
||||
ProbablyPrimeUint32(uint32(n), 31481107)
|
||||
case n < 105936894253:
|
||||
return ProbablyPrimeUint64_32(n, 2) &&
|
||||
ProbablyPrimeUint64_32(n, 1005905886) &&
|
||||
ProbablyPrimeUint64_32(n, 1340600841)
|
||||
case n < 31858317218647:
|
||||
return ProbablyPrimeUint64_32(n, 2) &&
|
||||
ProbablyPrimeUint64_32(n, 642735) &&
|
||||
ProbablyPrimeUint64_32(n, 553174392) &&
|
||||
ProbablyPrimeUint64_32(n, 3046413974)
|
||||
case n < 3071837692357849:
|
||||
return ProbablyPrimeUint64_32(n, 2) &&
|
||||
ProbablyPrimeUint64_32(n, 75088) &&
|
||||
ProbablyPrimeUint64_32(n, 642735) &&
|
||||
ProbablyPrimeUint64_32(n, 203659041) &&
|
||||
ProbablyPrimeUint64_32(n, 3613982119)
|
||||
default:
|
||||
return ProbablyPrimeUint64_32(n, 2) &&
|
||||
ProbablyPrimeUint64_32(n, 325) &&
|
||||
ProbablyPrimeUint64_32(n, 9375) &&
|
||||
ProbablyPrimeUint64_32(n, 28178) &&
|
||||
ProbablyPrimeUint64_32(n, 450775) &&
|
||||
ProbablyPrimeUint64_32(n, 9780504) &&
|
||||
ProbablyPrimeUint64_32(n, 1795265022)
|
||||
}
|
||||
}
|
||||
|
||||
// NextPrime returns first prime > n and true if successful or an undefined value and false if there
// is no next prime in the uint32 limits. Typical run time is about 2 µs.
//
//TODO rename to NextPrimeUint32
func NextPrime(n uint32) (p uint32, ok bool) {
	switch {
	case n < 65521:
		// 65521 is the largest uint16 prime; below it the uint16 distance
		// table answers directly.
		p16, _ := NextPrimeUint16(uint16(n))
		return uint32(p16), true
	case n >= math.MaxUint32-4:
		// math.MaxUint32-4 == 4294967291 == 2^32-5, the largest uint32 prime
		// — TODO confirm; there is no next prime in range.
		return
	}

	n++
	// Wheel mod 6: every prime > 3 is ≡ 1 or 5 (mod 6). d0 advances n to the
	// first such candidate; d then alternates between 2 and 4 (d ^= 6 flips
	// 2 <-> 4) to step through subsequent candidates.
	var d0, d uint32
	switch mod := n % 6; mod {
	case 0:
		d0, d = 1, 4
	case 1:
		d = 4
	case 2, 3, 4:
		d0, d = 5-mod, 2
	case 5:
		d = 2
	}

	p = n + d0
	if p < n { // overflow
		return
	}

	for {
		if IsPrime(p) {
			return p, true
		}

		p0 := p
		p += d
		if p < p0 { // overflow
			break
		}

		d ^= 6
	}
	return
}
|
||||
|
||||
// NextPrimeUint64 returns first prime > n and true if successful or an undefined value and false if there
// is no next prime in the uint64 limits. Typical run time is in hundreds of µs.
func NextPrimeUint64(n uint64) (p uint64, ok bool) {
	switch {
	case n < 65521:
		// 65521 is the largest uint16 prime; below it the uint16 distance
		// table answers directly.
		p16, _ := NextPrimeUint16(uint16(n))
		return uint64(p16), true
	case n >= 18446744073709551557: // last uint64 prime (== 2^64-59)
		return
	}

	n++
	// Wheel mod 6: every prime > 3 is ≡ 1 or 5 (mod 6). d0 advances n to the
	// first such candidate; d then alternates between 2 and 4 (d ^= 6 flips
	// 2 <-> 4) to step through subsequent candidates.
	var d0, d uint64
	switch mod := n % 6; mod {
	case 0:
		d0, d = 1, 4
	case 1:
		d = 4
	case 2, 3, 4:
		d0, d = 5-mod, 2
	case 5:
		d = 2
	}

	p = n + d0
	if p < n { // overflow
		return
	}

	for {
		if ok = IsPrimeUint64(p); ok {
			break
		}

		p0 := p
		p += d
		if p < p0 { // overflow
			break
		}

		d ^= 6
	}
	return
}
|
||||
|
||||
// FactorTerm is one term of an integer factorization.
type FactorTerm struct {
	Prime uint32 // The divisor
	Power uint32 // Term == Prime^Power
}

// FactorTerms represent a factorization of an integer. When produced by
// FactorInt, terms are ordered by ascending Prime.
type FactorTerms []FactorTerm
|
||||
|
||||
// FactorInt returns prime factorization of n > 1 or nil otherwise.
|
||||
// Resulting factors are ordered by Prime. Typical run time is few µs.
|
||||
func FactorInt(n uint32) (f FactorTerms) {
|
||||
switch {
|
||||
case n < 2:
|
||||
return
|
||||
case IsPrime(n):
|
||||
return []FactorTerm{{n, 1}}
|
||||
}
|
||||
|
||||
f, w := make([]FactorTerm, 9), 0
|
||||
for p := 2; p < len(primes16); p += int(primes16[p]) {
|
||||
if uint(p*p) > uint(n) {
|
||||
break
|
||||
}
|
||||
|
||||
power := uint32(0)
|
||||
for n%uint32(p) == 0 {
|
||||
n /= uint32(p)
|
||||
power++
|
||||
}
|
||||
if power != 0 {
|
||||
f[w] = FactorTerm{uint32(p), power}
|
||||
w++
|
||||
}
|
||||
if n == 1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if n != 1 {
|
||||
f[w] = FactorTerm{n, 1}
|
||||
w++
|
||||
}
|
||||
return f[:w]
|
||||
}
|
||||
|
||||
// PrimorialProductsUint32 returns a slice of numbers in [lo, hi] which are a
// product of max 'max' primorials. The slice is not sorted.
//
// See also: http://en.wikipedia.org/wiki/Primorial
func PrimorialProductsUint32(lo, hi, max uint32) (r []uint32) {
	lo64, hi64 := int64(lo), int64(hi)
	// Exponents above 31 cannot matter for a uint32 result.
	if max > 31 { // N/A
		max = 31
	}

	// f extends the running product n by successive powers of the prime p
	// (up to emax), and at each step also branches to the next prime with a
	// non-increasing exponent budget, enumerating primorial products.
	var f func(int64, int64, uint32)
	f = func(n, p int64, emax uint32) {
		e := uint32(1)
		for n <= hi64 && e <= emax {
			n *= p
			if n >= lo64 && n <= hi64 {
				r = append(r, uint32(n))
			}
			if n < hi64 {
				// NOTE(review): this p deliberately shadows the outer p only
				// for the recursive call; the loop keeps multiplying by the
				// original prime — appears intentional, confirm before
				// "fixing".
				p, _ := NextPrime(uint32(p))
				f(n, int64(p), e)
			}
			e++
		}
	}

	f(1, 2, max)
	return
}
|
||||
27
vendor/github.com/cznic/mathutil/rat.go
generated
vendored
27
vendor/github.com/cznic/mathutil/rat.go
generated
vendored
@@ -1,27 +0,0 @@
|
||||
// Copyright (c) 2014 The mathutil Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mathutil
|
||||
|
||||
// QCmpUint32 compares a/b and c/d and returns:
//
//	-1 if a/b < c/d
//	 0 if a/b == c/d
//	+1 if a/b > c/d
//
func QCmpUint32(a, b, c, d uint32) int {
	// Cross-multiply in uint64: a/b <=> c/d iff a*d <=> b*c, with no division
	// and no possibility of overflow.
	x := uint64(a) * uint64(d)
	y := uint64(b) * uint64(c)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
|
||||
|
||||
// QScaleUint32 returns a such that a/b >= c/d.
func QScaleUint32(b, c, d uint32) (a uint64) {
	// floor(b*c/d) + 1 always satisfies a/b >= c/d; widened to uint64 so the
	// product cannot overflow.
	return (uint64(b)*uint64(c))/uint64(d) + 1
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user