Update dependencies
93  cmd/strelaypoolsrv/Godeps/Godeps.json  (generated)

@@ -1,6 +1,9 @@
 {
   "ImportPath": "github.com/syncthing/relaypoolsrv",
   "GoVersion": "go1.5.1",
+  "Packages": [
+    "."
+  ],
   "Deps": [
     {
       "ImportPath": "github.com/bkaradzic/go-lz4",
@@ -16,115 +19,67 @@
     },
     {
       "ImportPath": "github.com/calmh/xdr",
-      "Rev": "5f7208e86762911861c94f1849eddbfc0a60cbf0"
+      "Rev": "9eb3e1a622d9364deb39c831f7e5f164393d7e37"
     },
     {
       "ImportPath": "github.com/golang/groupcache/lru",
       "Rev": "604ed5785183e59ae2789449d89e73f3a2a77987"
     },
-    {
-      "ImportPath": "github.com/golang/snappy",
-      "Rev": "723cc1e459b8eea2dea4583200fd60757d40097a"
-    },
     {
       "ImportPath": "github.com/juju/ratelimit",
-      "Rev": "77ed1c8a01217656d2080ad51981f6e99adaa177"
-    },
-    {
-      "ImportPath": "github.com/kardianos/osext",
-      "Rev": "efacde03154693404c65e7aa7d461ac9014acd0c"
-    },
-    {
-      "ImportPath": "github.com/rcrowley/go-metrics",
-      "Rev": "1ce93efbc8f9c568886b2ef85ce305b2217b3de3"
+      "Rev": "772f5c38e468398c4511514f4f6aa9a4185bc0a0"
     },
     {
       "ImportPath": "github.com/syncthing/syncthing/lib/dialer",
-      "Comment": "v0.12.4-15-g321ef98",
-      "Rev": "321ef9816c57f0f5de336c9d65c27b2480b2b191"
+      "Comment": "v0.12.6-6-g38e9b92",
+      "Rev": "38e9b92c42d827292cdddada39d7ddde646e1677"
     },
     {
       "ImportPath": "github.com/syncthing/syncthing/lib/logger",
-      "Comment": "v0.12.4-15-g321ef98",
-      "Rev": "321ef9816c57f0f5de336c9d65c27b2480b2b191"
+      "Comment": "v0.12.6-6-g38e9b92",
+      "Rev": "38e9b92c42d827292cdddada39d7ddde646e1677"
     },
     {
      "ImportPath": "github.com/syncthing/syncthing/lib/osutil",
-      "Comment": "v0.12.4-15-g321ef98",
-      "Rev": "321ef9816c57f0f5de336c9d65c27b2480b2b191"
+      "Comment": "v0.12.6-6-g38e9b92",
+      "Rev": "38e9b92c42d827292cdddada39d7ddde646e1677"
     },
     {
       "ImportPath": "github.com/syncthing/syncthing/lib/protocol",
-      "Comment": "v0.12.4-15-g321ef98",
-      "Rev": "321ef9816c57f0f5de336c9d65c27b2480b2b191"
+      "Comment": "v0.12.6-6-g38e9b92",
+      "Rev": "38e9b92c42d827292cdddada39d7ddde646e1677"
     },
     {
       "ImportPath": "github.com/syncthing/syncthing/lib/relay/client",
-      "Comment": "v0.12.4-15-g321ef98",
-      "Rev": "321ef9816c57f0f5de336c9d65c27b2480b2b191"
+      "Comment": "v0.12.6-6-g38e9b92",
+      "Rev": "38e9b92c42d827292cdddada39d7ddde646e1677"
     },
     {
       "ImportPath": "github.com/syncthing/syncthing/lib/relay/protocol",
-      "Comment": "v0.12.4-15-g321ef98",
-      "Rev": "321ef9816c57f0f5de336c9d65c27b2480b2b191"
+      "Comment": "v0.12.6-6-g38e9b92",
+      "Rev": "38e9b92c42d827292cdddada39d7ddde646e1677"
     },
     {
       "ImportPath": "github.com/syncthing/syncthing/lib/sync",
-      "Comment": "v0.12.4-15-g321ef98",
-      "Rev": "321ef9816c57f0f5de336c9d65c27b2480b2b191"
+      "Comment": "v0.12.6-6-g38e9b92",
+      "Rev": "38e9b92c42d827292cdddada39d7ddde646e1677"
     },
     {
       "ImportPath": "github.com/syncthing/syncthing/lib/tlsutil",
-      "Comment": "v0.12.4-15-g321ef98",
-      "Rev": "321ef9816c57f0f5de336c9d65c27b2480b2b191"
-    },
-    {
-      "ImportPath": "github.com/syndtr/goleveldb/leveldb",
-      "Rev": "87e4e645d80ae9c537e8f2dee52b28036a5dd75e"
-    },
-    {
-      "ImportPath": "github.com/thejerf/suture",
-      "Rev": "ff19fb384c3fe30f42717967eaa69da91e5f317c"
-    },
-    {
-      "ImportPath": "github.com/vitrun/qart/coding",
-      "Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
-    },
-    {
-      "ImportPath": "github.com/vitrun/qart/gf256",
-      "Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
-    },
-    {
-      "ImportPath": "github.com/vitrun/qart/qr",
-      "Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
-    },
-    {
-      "ImportPath": "golang.org/x/crypto/bcrypt",
-      "Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
-    },
-    {
-      "ImportPath": "golang.org/x/crypto/blowfish",
-      "Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
-    },
-    {
-      "ImportPath": "golang.org/x/net/internal/iana",
-      "Rev": "55cccaa02af1a99c69ba3213e33468628b61be4b"
-    },
-    {
-      "ImportPath": "golang.org/x/net/ipv6",
-      "Rev": "55cccaa02af1a99c69ba3213e33468628b61be4b"
+      "Comment": "v0.12.6-6-g38e9b92",
+      "Rev": "38e9b92c42d827292cdddada39d7ddde646e1677"
     },
     {
       "ImportPath": "golang.org/x/net/proxy",
-      "Rev": "55cccaa02af1a99c69ba3213e33468628b61be4b"
+      "Rev": "042ba42fa6633b34205efc66ba5719cd3afd8d38"
     },
     {
       "ImportPath": "golang.org/x/text/transform",
-      "Rev": "723492b65e225eafcba054e76ba18bb9c5ac1ea2"
+      "Rev": "5eb8d4684c4796dd36c74f6452f2c0fa6c79597e"
     },
     {
       "ImportPath": "golang.org/x/text/unicode/norm",
-      "Rev": "723492b65e225eafcba054e76ba18bb9c5ac1ea2"
+      "Rev": "5eb8d4684c4796dd36c74f6452f2c0fa6c79597e"
     }
   ]
 }
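For orientation, the manifest above follows godep's usual layout: a top-level import path and Go version, an optional "Packages" list, and a "Deps" array pinning each dependency to a commit ("Rev"), optionally annotated with a human-readable "Comment". A minimal sketch of reading such a file follows; the struct names here are illustrative, not godep's own types.

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// Illustrative types mirroring the fields seen in the manifest above.
type dependency struct {
    ImportPath string
    Comment    string `json:",omitempty"`
    Rev        string
}

type godepsFile struct {
    ImportPath string
    GoVersion  string
    Packages   []string `json:",omitempty"`
    Deps       []dependency
}

func main() {
    f, err := os.Open("cmd/strelaypoolsrv/Godeps/Godeps.json")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer f.Close()

    var g godepsFile
    if err := json.NewDecoder(f).Decode(&g); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    // Print each pinned dependency and its revision.
    for _, d := range g.Deps {
        fmt.Printf("%s @ %s\n", d.ImportPath, d.Rev)
    }
}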
63  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4_test.go  (generated, vendored)

@@ -1,63 +0,0 @@
-package lz4
-
-import (
-    "bytes"
-    "io/ioutil"
-    "testing"
-)
-
-var testfile, _ = ioutil.ReadFile("testdata/pg1661.txt")
-
-func roundtrip(t *testing.T, input []byte) {
-
-    dst, err := Encode(nil, input)
-    if err != nil {
-        t.Errorf("got error during compression: %s", err)
-    }
-
-    output, err := Decode(nil, dst)
-
-    if err != nil {
-        t.Errorf("got error during decompress: %s", err)
-    }
-
-    if !bytes.Equal(output, input) {
-        t.Errorf("roundtrip failed")
-    }
-}
-
-func TestEmpty(t *testing.T) {
-    roundtrip(t, nil)
-}
-
-func TestLengths(t *testing.T) {
-
-    for i := 0; i < 1024; i++ {
-        roundtrip(t, testfile[:i])
-    }
-
-    for i := 1024; i < 4096; i += 23 {
-        roundtrip(t, testfile[:i])
-    }
-}
-
-func TestWords(t *testing.T) {
-    roundtrip(t, testfile)
-}
-
-func BenchmarkLZ4Encode(b *testing.B) {
-    for i := 0; i < b.N; i++ {
-        Encode(nil, testfile)
-    }
-}
-
-func BenchmarkLZ4Decode(b *testing.B) {
-
-    var compressed, _ = Encode(nil, testfile)
-
-    b.ResetTimer()
-
-    for i := 0; i < b.N; i++ {
-        Decode(nil, compressed)
-    }
-}
13052  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/bkaradzic/go-lz4/testdata/pg1661.txt  (generated, vendored)

(File diff suppressed because it is too large.)
59  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/luhn/luhn_test.go  (generated, vendored)

@@ -1,59 +0,0 @@
-// Copyright (C) 2014 Jakob Borg
-
-package luhn_test
-
-import (
-    "testing"
-
-    "github.com/calmh/luhn"
-)
-
-func TestGenerate(t *testing.T) {
-    // Base 6 Luhn
-    a := luhn.Alphabet("abcdef")
-    c, err := a.Generate("abcdef")
-    if err != nil {
-        t.Fatal(err)
-    }
-    if c != 'e' {
-        t.Errorf("Incorrect check digit %c != e", c)
-    }
-
-    // Base 10 Luhn
-    a = luhn.Alphabet("0123456789")
-    c, err = a.Generate("7992739871")
-    if err != nil {
-        t.Fatal(err)
-    }
-    if c != '3' {
-        t.Errorf("Incorrect check digit %c != 3", c)
-    }
-}
-
-func TestInvalidString(t *testing.T) {
-    a := luhn.Alphabet("ABC")
-    _, err := a.Generate("7992739871")
-    t.Log(err)
-    if err == nil {
-        t.Error("Unexpected nil error")
-    }
-}
-
-func TestBadAlphabet(t *testing.T) {
-    a := luhn.Alphabet("01234566789")
-    _, err := a.Generate("7992739871")
-    t.Log(err)
-    if err == nil {
-        t.Error("Unexpected nil error")
-    }
-}
-
-func TestValidate(t *testing.T) {
-    a := luhn.Alphabet("abcdef")
-    if !a.Validate("abcdefe") {
-        t.Errorf("Incorrect validation response for abcdefe")
-    }
-    if a.Validate("abcdefd") {
-        t.Errorf("Incorrect validation response for abcdefd")
-    }
-}
2  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml  (generated, vendored)

@@ -4,7 +4,7 @@ go:
 
 install:
   - export PATH=$PATH:$HOME/gopath/bin
-  - go get code.google.com/p/go.tools/cmd/cover
+  - go get golang.org/x/tools/cover
  - go get github.com/mattn/goveralls
 
 script:
2  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/README.md  (generated, vendored)

@@ -1,7 +1,7 @@
 xdr
 ===
 
-[](https://travis-ci.org/calmh/xdr)
+[](https://circleci.com/gh/calmh/xdr)
 [](https://coveralls.io/r/calmh/xdr?branch=master)
 [](http://godoc.org/github.com/calmh/xdr)
 [](http://opensource.org/licenses/MIT)
117  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go  (generated, vendored)

@@ -1,117 +0,0 @@
-// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
-// is governed by an MIT-style license that can be found in the LICENSE file.
-
-package xdr_test
-
-import (
-    "io"
-    "io/ioutil"
-    "testing"
-
-    "github.com/calmh/xdr"
-)
-
-type XDRBenchStruct struct {
-    I1 uint64
-    I2 uint32
-    I3 uint16
-    I4 uint8
-    Bs0 []byte // max:128
-    Bs1 []byte
-    S0 string // max:128
-    S1 string
-}
-
-var res []byte // no to be optimized away
-var s = XDRBenchStruct{
-    I1: 42,
-    I2: 43,
-    I3: 44,
-    I4: 45,
-    Bs0: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
-    Bs1: []byte{11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
-    S0: "Hello World! String one.",
-    S1: "Hello World! String two.",
-}
-var e []byte
-
-func init() {
-    e, _ = s.MarshalXDR()
-}
-
-func BenchmarkThisMarshal(b *testing.B) {
-    for i := 0; i < b.N; i++ {
-        res, _ = s.MarshalXDR()
-    }
-}
-
-func BenchmarkThisUnmarshal(b *testing.B) {
-    var t XDRBenchStruct
-    for i := 0; i < b.N; i++ {
-        err := t.UnmarshalXDR(e)
-        if err != nil {
-            b.Fatal(err)
-        }
-    }
-}
-
-func BenchmarkThisEncode(b *testing.B) {
-    for i := 0; i < b.N; i++ {
-        _, err := s.EncodeXDR(ioutil.Discard)
-        if err != nil {
-            b.Fatal(err)
-        }
-    }
-}
-
-func BenchmarkThisEncoder(b *testing.B) {
-    w := xdr.NewWriter(ioutil.Discard)
-    for i := 0; i < b.N; i++ {
-        _, err := s.EncodeXDRInto(w)
-        if err != nil {
-            b.Fatal(err)
-        }
-    }
-}
-
-type repeatReader struct {
-    data []byte
-}
-
-func (r *repeatReader) Read(bs []byte) (n int, err error) {
-    if len(bs) > len(r.data) {
-        err = io.EOF
-    }
-    n = copy(bs, r.data)
-    r.data = r.data[n:]
-    return n, err
-}
-
-func (r *repeatReader) Reset(bs []byte) {
-    r.data = bs
-}
-
-func BenchmarkThisDecode(b *testing.B) {
-    rr := &repeatReader{e}
-    var t XDRBenchStruct
-    for i := 0; i < b.N; i++ {
-        err := t.DecodeXDR(rr)
-        if err != nil {
-            b.Fatal(err)
-        }
-        rr.Reset(e)
-    }
-}
-
-func BenchmarkThisDecoder(b *testing.B) {
-    rr := &repeatReader{e}
-    r := xdr.NewReader(rr)
-    var t XDRBenchStruct
-    for i := 0; i < b.N; i++ {
-        err := t.DecodeXDRFrom(r)
-        if err != nil {
-            b.Fatal(err)
-        }
-        rr.Reset(e)
-    }
-}
201  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go  (generated, vendored)

@@ -1,201 +0,0 @@
-// ************************************************************
-// This file is automatically generated by genxdr. Do not edit.
-// ************************************************************
-
-package xdr_test
-
-import (
-    "bytes"
-    "io"
-
-    "github.com/calmh/xdr"
-)
-
-/*
-
-XDRBenchStruct Structure:
-
-0 1 2 3
-0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| |
-+ I1 (64 bits) +
-| |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| I2 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| 0x0000 | I3 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ uint8 Structure \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Length of Bs0 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ Bs0 (variable length) \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Length of Bs1 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ Bs1 (variable length) \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Length of S0 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ S0 (variable length) \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Length of S1 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ S1 (variable length) \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-
-struct XDRBenchStruct {
-    unsigned hyper I1;
-    unsigned int I2;
-    unsigned int I3;
-    uint8 I4;
-    opaque Bs0<128>;
-    opaque Bs1<>;
-    string S0<128>;
-    string S1<>;
-}
-
-*/
-
-func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
-    var xw = xdr.NewWriter(w)
-    return o.EncodeXDRInto(xw)
-}
-
-func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
-    return o.AppendXDR(make([]byte, 0, 128))
-}
-
-func (o XDRBenchStruct) MustMarshalXDR() []byte {
-    bs, err := o.MarshalXDR()
-    if err != nil {
-        panic(err)
-    }
-    return bs
-}
-
-func (o XDRBenchStruct) AppendXDR(bs []byte) ([]byte, error) {
-    var aw = xdr.AppendWriter(bs)
-    var xw = xdr.NewWriter(&aw)
-    _, err := o.EncodeXDRInto(xw)
-    return []byte(aw), err
-}
-
-func (o XDRBenchStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
-    xw.WriteUint64(o.I1)
-    xw.WriteUint32(o.I2)
-    xw.WriteUint16(o.I3)
-    xw.WriteUint8(o.I4)
-    if l := len(o.Bs0); l > 128 {
-        return xw.Tot(), xdr.ElementSizeExceeded("Bs0", l, 128)
-    }
-    xw.WriteBytes(o.Bs0)
-    xw.WriteBytes(o.Bs1)
-    if l := len(o.S0); l > 128 {
-        return xw.Tot(), xdr.ElementSizeExceeded("S0", l, 128)
-    }
-    xw.WriteString(o.S0)
-    xw.WriteString(o.S1)
-    return xw.Tot(), xw.Error()
-}
-
-func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
-    xr := xdr.NewReader(r)
-    return o.DecodeXDRFrom(xr)
-}
-
-func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
-    var br = bytes.NewReader(bs)
-    var xr = xdr.NewReader(br)
-    return o.DecodeXDRFrom(xr)
-}
-
-func (o *XDRBenchStruct) DecodeXDRFrom(xr *xdr.Reader) error {
-    o.I1 = xr.ReadUint64()
-    o.I2 = xr.ReadUint32()
-    o.I3 = xr.ReadUint16()
-    o.I4 = xr.ReadUint8()
-    o.Bs0 = xr.ReadBytesMax(128)
-    o.Bs1 = xr.ReadBytes()
-    o.S0 = xr.ReadStringMax(128)
-    o.S1 = xr.ReadString()
-    return xr.Error()
-}
-
-/*
-
-repeatReader Structure:
-
-0 1 2 3
-0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Length of data |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ data (variable length) \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-
-struct repeatReader {
-    opaque data<>;
-}
-
-*/
-
-func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
-    var xw = xdr.NewWriter(w)
-    return o.EncodeXDRInto(xw)
-}
-
-func (o repeatReader) MarshalXDR() ([]byte, error) {
-    return o.AppendXDR(make([]byte, 0, 128))
-}
-
-func (o repeatReader) MustMarshalXDR() []byte {
-    bs, err := o.MarshalXDR()
-    if err != nil {
-        panic(err)
-    }
-    return bs
-}
-
-func (o repeatReader) AppendXDR(bs []byte) ([]byte, error) {
-    var aw = xdr.AppendWriter(bs)
-    var xw = xdr.NewWriter(&aw)
-    _, err := o.EncodeXDRInto(xw)
-    return []byte(aw), err
-}
-
-func (o repeatReader) EncodeXDRInto(xw *xdr.Writer) (int, error) {
-    xw.WriteBytes(o.data)
-    return xw.Tot(), xw.Error()
-}
-
-func (o *repeatReader) DecodeXDR(r io.Reader) error {
-    xr := xdr.NewReader(r)
-    return o.DecodeXDRFrom(xr)
-}
-
-func (o *repeatReader) UnmarshalXDR(bs []byte) error {
-    var br = bytes.NewReader(bs)
-    var xr = xdr.NewReader(br)
-    return o.DecodeXDRFrom(xr)
-}
-
-func (o *repeatReader) DecodeXDRFrom(xr *xdr.Reader) error {
-    o.data = xr.ReadBytes()
-    return xr.Error()
-}
3  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/circle.yml  (generated, vendored, new file)

@@ -0,0 +1,3 @@
+dependencies:
+  post:
+    - ./generate.sh
85  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go  (generated, vendored)

@@ -28,6 +28,7 @@ type fieldInfo struct {
 Encoder string // the encoder name, i.e. "Uint64" for Read/WriteUint64
 Convert string // what to convert to when encoding, i.e. "uint64"
 Max int // max size for slices and strings
+Submax int // max size for strings inside slices
 }
 
 type structInfo struct {
@@ -156,7 +157,11 @@ func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
 {{if ne $fieldInfo.Convert ""}}
 o.{{$fieldInfo.Name}}[i] = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
 {{else if $fieldInfo.IsBasic}}
+{{if ge $fieldInfo.Submax 1}}
+o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Submax}})
+{{else}}
 o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
+{{end}}
 {{else}}
 (&o.{{$fieldInfo.Name}}[i]).DecodeXDRFrom(xr)
 {{end}}
@@ -166,7 +171,40 @@ func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
 return xr.Error()
 }`))
 
-var maxRe = regexp.MustCompile(`\Wmax:(\d+)`)
+var emptyTypeTpl = template.Must(template.New("encoder").Parse(`
+func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
+return 0, nil
+}//+n
+
+func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
+return nil, nil
+}//+n
+
+func (o {{.TypeName}}) MustMarshalXDR() []byte {
+return nil
+}//+n
+
+func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
+return bs, nil
+}//+n
+
+func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
+return xw.Tot(), xw.Error()
+}//+n
+
+func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
+return nil
+}//+n
+
+func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
+return nil
+}//+n
+
+func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
+return xr.Error()
+}`))
+
+var maxRe = regexp.MustCompile(`(?:\Wmax:)(\d+)(?:\s*,\s*(\d+))?`)
 
 type typeSet struct {
 Type string
@@ -198,11 +236,15 @@ func handleStruct(t *ast.StructType) []fieldInfo {
 }
 
 fn := sf.Names[0].Name
-var max = 0
+var max1, max2 int
 if sf.Comment != nil {
 c := sf.Comment.List[0].Text
-if m := maxRe.FindStringSubmatch(c); m != nil {
-max, _ = strconv.Atoi(m[1])
+m := maxRe.FindStringSubmatch(c)
+if len(m) >= 2 {
+max1, _ = strconv.Atoi(m[1])
+}
+if len(m) >= 3 {
+max2, _ = strconv.Atoi(m[2])
 }
 if strings.Contains(c, "noencode") {
 continue
@@ -220,14 +262,16 @@ func handleStruct(t *ast.StructType) []fieldInfo {
 FieldType: tn,
 Encoder: enc.Encoder,
 Convert: enc.Type,
-Max: max,
+Max: max1,
+Submax: max2,
 }
 } else {
 f = fieldInfo{
 Name: fn,
 IsBasic: false,
 FieldType: tn,
-Max: max,
+Max: max1,
+Submax: max2,
 }
 }
 
@@ -245,7 +289,8 @@ func handleStruct(t *ast.StructType) []fieldInfo {
 FieldType: tn,
 Encoder: enc.Encoder,
 Convert: enc.Type,
-Max: max,
+Max: max1,
+Submax: max2,
 }
 } else if enc, ok := xdrEncoders[tn]; ok {
 f = fieldInfo{
@@ -255,14 +300,16 @@ func handleStruct(t *ast.StructType) []fieldInfo {
 FieldType: tn,
 Encoder: enc.Encoder,
 Convert: enc.Type,
-Max: max,
+Max: max1,
+Submax: max2,
 }
 } else {
 f = fieldInfo{
 Name: fn,
 IsSlice: true,
 FieldType: tn,
-Max: max,
+Max: max1,
+Submax: max2,
 }
 }
 
@@ -270,7 +317,8 @@ func handleStruct(t *ast.StructType) []fieldInfo {
 f = fieldInfo{
 Name: fn,
 FieldType: ft.Sel.Name,
-Max: max,
+Max: max1,
+Submax: max2,
 }
 }
 
@@ -285,7 +333,14 @@ func generateCode(output io.Writer, s structInfo) {
 fs := s.Fields
 
 var buf bytes.Buffer
-err := encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
+var err error
+if len(fs) == 0 {
+// This is an empty type. We can create a quite simple codec for it.
+err = emptyTypeTpl.Execute(&buf, map[string]interface{}{"TypeName": name})
+} else {
+// Generate with the default template.
+err = encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
+}
 if err != nil {
 panic(err)
 }
@@ -311,6 +366,14 @@ func generateDiagram(output io.Writer, s structInfo) {
 fs := s.Fields
 
 fmt.Fprintln(output, sn+" Structure:")
+
+if len(fs) == 0 {
+fmt.Fprintln(output, "(contains no fields)")
+fmt.Fprintln(output)
+fmt.Fprintln(output)
+return
+}
+
 fmt.Fprintln(output)
 fmt.Fprintln(output, " 0 1 2 3")
 fmt.Fprintln(output, " 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1")
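The widened maxRe pattern above accepts either "max:N" or "max:N,M" in a field comment, with the second number feeding the new Submax field (the cap applied to strings inside a slice field). A quick standalone check of how the two capture groups come out, as a minimal illustrative sketch rather than genxdr's own code:

package main

import (
    "fmt"
    "regexp"
)

// Same pattern as the updated maxRe in genxdr/main.go.
var maxRe = regexp.MustCompile(`(?:\Wmax:)(\d+)(?:\s*,\s*(\d+))?`)

func main() {
    for _, comment := range []string{"// max:128", "// max:1024,64"} {
        m := maxRe.FindStringSubmatch(comment)
        // m[1] is the overall max; m[2] is empty unless a submax was given.
        fmt.Printf("%q -> max=%q submax=%q\n", comment, m[1], m[2])
    }
}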
79  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go  (generated, vendored)

@@ -1,79 +0,0 @@
-// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
-// is governed by an MIT-style license that can be found in the LICENSE file.
-
-package xdr_test
-
-import (
-    "bytes"
-    "math/rand"
-    "reflect"
-    "testing"
-    "testing/quick"
-
-    "github.com/calmh/xdr"
-)
-
-// Contains all supported types
-type TestStruct struct {
-    I int
-    I8 int8
-    UI8 uint8
-    I16 int16
-    UI16 uint16
-    I32 int32
-    UI32 uint32
-    I64 int64
-    UI64 uint64
-    BS []byte // max:1024
-    S string // max:1024
-    C Opaque
-    SS []string // max:1024
-}
-
-type Opaque [32]byte
-
-func (u *Opaque) EncodeXDRInto(w *xdr.Writer) (int, error) {
-    return w.WriteRaw(u[:])
-}
-
-func (u *Opaque) DecodeXDRFrom(r *xdr.Reader) (int, error) {
-    return r.ReadRaw(u[:])
-}
-
-func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
-    var u Opaque
-    for i := range u[:] {
-        u[i] = byte(rand.Int())
-    }
-    return reflect.ValueOf(u)
-}
-
-func TestEncDec(t *testing.T) {
-    fn := func(t0 TestStruct) bool {
-        bs, err := t0.MarshalXDR()
-        if err != nil {
-            t.Fatal(err)
-        }
-        var t1 TestStruct
-        err = t1.UnmarshalXDR(bs)
-        if err != nil {
-            t.Fatal(err)
-        }
-
-        // Not comparing with DeepEqual since we'll unmarshal nil slices as empty
-        if t0.I != t1.I ||
-            t0.I16 != t1.I16 || t0.UI16 != t1.UI16 ||
-            t0.I32 != t1.I32 || t0.UI32 != t1.UI32 ||
-            t0.I64 != t1.I64 || t0.UI64 != t1.UI64 ||
-            bytes.Compare(t0.BS, t1.BS) != 0 ||
-            t0.S != t1.S || t0.C != t1.C {
-            t.Logf("%#v", t0)
-            t.Logf("%#v", t1)
-            return false
-        }
-        return true
-    }
-    if err := quick.Check(fn, nil); err != nil {
-        t.Error(err)
-    }
-}
185  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go  (generated, vendored)

@@ -1,185 +0,0 @@
-// ************************************************************
-// This file is automatically generated by genxdr. Do not edit.
-// ************************************************************
-
-package xdr_test
-
-import (
-    "bytes"
-    "io"
-
-    "github.com/calmh/xdr"
-)
-
-/*
-
-TestStruct Structure:
-
-0 1 2 3
-0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ int Structure \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ int8 Structure \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ uint8 Structure \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| 0x0000 | I16 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| 0x0000 | UI16 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| I32 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| UI32 |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| |
-+ I64 (64 bits) +
-| |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| |
-+ UI64 (64 bits) +
-| |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Length of BS |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ BS (variable length) \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Length of S |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ S (variable length) \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ Opaque Structure \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Number of SS |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Length of SS |
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-/ /
-\ SS (variable length) \
-/ /
-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-
-struct TestStruct {
-    int I;
-    int8 I8;
-    uint8 UI8;
-    int I16;
-    unsigned int UI16;
-    int I32;
-    unsigned int UI32;
-    hyper I64;
-    unsigned hyper UI64;
-    opaque BS<1024>;
-    string S<1024>;
-    Opaque C;
-    string SS<1024>;
-}
-
-*/
-
-func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
-    var xw = xdr.NewWriter(w)
-    return o.EncodeXDRInto(xw)
-}
-
-func (o TestStruct) MarshalXDR() ([]byte, error) {
-    return o.AppendXDR(make([]byte, 0, 128))
-}
-
-func (o TestStruct) MustMarshalXDR() []byte {
-    bs, err := o.MarshalXDR()
-    if err != nil {
-        panic(err)
-    }
-    return bs
-}
-
-func (o TestStruct) AppendXDR(bs []byte) ([]byte, error) {
-    var aw = xdr.AppendWriter(bs)
-    var xw = xdr.NewWriter(&aw)
-    _, err := o.EncodeXDRInto(xw)
-    return []byte(aw), err
-}
-
-func (o TestStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
-    xw.WriteUint64(uint64(o.I))
-    xw.WriteUint8(uint8(o.I8))
-    xw.WriteUint8(o.UI8)
-    xw.WriteUint16(uint16(o.I16))
-    xw.WriteUint16(o.UI16)
-    xw.WriteUint32(uint32(o.I32))
-    xw.WriteUint32(o.UI32)
-    xw.WriteUint64(uint64(o.I64))
-    xw.WriteUint64(o.UI64)
-    if l := len(o.BS); l > 1024 {
-        return xw.Tot(), xdr.ElementSizeExceeded("BS", l, 1024)
-    }
-    xw.WriteBytes(o.BS)
-    if l := len(o.S); l > 1024 {
-        return xw.Tot(), xdr.ElementSizeExceeded("S", l, 1024)
-    }
-    xw.WriteString(o.S)
-    _, err := o.C.EncodeXDRInto(xw)
-    if err != nil {
-        return xw.Tot(), err
-    }
-    if l := len(o.SS); l > 1024 {
-        return xw.Tot(), xdr.ElementSizeExceeded("SS", l, 1024)
-    }
-    xw.WriteUint32(uint32(len(o.SS)))
-    for i := range o.SS {
-        xw.WriteString(o.SS[i])
-    }
-    return xw.Tot(), xw.Error()
-}
-
-func (o *TestStruct) DecodeXDR(r io.Reader) error {
-    xr := xdr.NewReader(r)
-    return o.DecodeXDRFrom(xr)
-}
-
-func (o *TestStruct) UnmarshalXDR(bs []byte) error {
-    var br = bytes.NewReader(bs)
-    var xr = xdr.NewReader(br)
-    return o.DecodeXDRFrom(xr)
-}
-
-func (o *TestStruct) DecodeXDRFrom(xr *xdr.Reader) error {
-    o.I = int(xr.ReadUint64())
-    o.I8 = int8(xr.ReadUint8())
-    o.UI8 = xr.ReadUint8()
-    o.I16 = int16(xr.ReadUint16())
-    o.UI16 = xr.ReadUint16()
-    o.I32 = int32(xr.ReadUint32())
-    o.UI32 = xr.ReadUint32()
-    o.I64 = int64(xr.ReadUint64())
-    o.UI64 = xr.ReadUint64()
-    o.BS = xr.ReadBytesMax(1024)
-    o.S = xr.ReadStringMax(1024)
-    (&o.C).DecodeXDRFrom(xr)
-    _SSSize := int(xr.ReadUint32())
-    if _SSSize < 0 {
-        return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
-    }
-    if _SSSize > 1024 {
-        return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
-    }
-    o.SS = make([]string, _SSSize)
-    for i := range o.SS {
-        o.SS[i] = xr.ReadString()
-    }
-    return xr.Error()
-}
44  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/refl_test.go  (generated, vendored)

@@ -1,44 +0,0 @@
-// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
-// is governed by an MIT-style license that can be found in the LICENSE file.
-
-// +build refl
-
-package xdr_test
-
-import (
-    "bytes"
-    "testing"
-
-    refl "github.com/davecgh/go-xdr/xdr"
-)
-
-func TestCompareMarshals(t *testing.T) {
-    e0 := s.MarshalXDR()
-    e1, err := refl.Marshal(s)
-    if err != nil {
-        t.Fatal(err)
-    }
-    if bytes.Compare(e0, e1) != 0 {
-        t.Fatalf("Encoding mismatch;\n\t%x (this)\n\t%x (refl)", e0, e1)
-    }
-}
-
-func BenchmarkReflMarshal(b *testing.B) {
-    var err error
-    for i := 0; i < b.N; i++ {
-        res, err = refl.Marshal(s)
-        if err != nil {
-            b.Fatal(err)
-        }
-    }
-}
-
-func BenchmarkReflUnmarshal(b *testing.B) {
-    var t XDRBenchStruct
-    for i := 0; i < b.N; i++ {
-        _, err := refl.Unmarshal(e, &t)
-        if err != nil {
-            b.Fatal(err)
-        }
-    }
-}
93  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go  (generated, vendored)

@@ -1,93 +0,0 @@
-// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
-// is governed by an MIT-style license that can be found in the LICENSE file.
-
-package xdr
-
-import (
-    "bytes"
-    "strings"
-    "testing"
-    "testing/quick"
-)
-
-func TestBytesNil(t *testing.T) {
-    fn := func(bs []byte) bool {
-        var b = new(bytes.Buffer)
-        var w = NewWriter(b)
-        var r = NewReader(b)
-        w.WriteBytes(bs)
-        w.WriteBytes(bs)
-        r.ReadBytes()
-        res := r.ReadBytes()
-        return bytes.Compare(bs, res) == 0
-    }
-    if err := quick.Check(fn, nil); err != nil {
-        t.Error(err)
-    }
-}
-
-func TestBytesGiven(t *testing.T) {
-    fn := func(bs []byte) bool {
-        var b = new(bytes.Buffer)
-        var w = NewWriter(b)
-        var r = NewReader(b)
-        w.WriteBytes(bs)
-        w.WriteBytes(bs)
-        res := make([]byte, 12)
-        res = r.ReadBytesInto(res)
-        res = r.ReadBytesInto(res)
-        return bytes.Compare(bs, res) == 0
-    }
-    if err := quick.Check(fn, nil); err != nil {
-        t.Error(err)
-    }
-}
-
-func TestReadBytesMaxInto(t *testing.T) {
-    var max = 64
-    for tot := 32; tot < 128; tot++ {
-        for diff := -32; diff <= 32; diff++ {
-            var b = new(bytes.Buffer)
-            var r = NewReader(b)
-            var w = NewWriter(b)
-
-            var toWrite = make([]byte, tot)
-            w.WriteBytes(toWrite)
-
-            var buf = make([]byte, tot+diff)
-            var bs = r.ReadBytesMaxInto(max, buf)
-
-            if tot <= max {
-                if read := len(bs); read != tot {
-                    t.Errorf("Incorrect read bytes, wrote=%d, buf=%d, max=%d, read=%d", tot, tot+diff, max, read)
-                }
-            } else if !strings.Contains(r.err.Error(), "exceeds size") {
-                t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d: %v", tot, max, r.err)
-            }
-        }
-    }
-}
-
-func TestReadStringMax(t *testing.T) {
-    for tot := 42; tot < 72; tot++ {
-        for max := 0; max < 128; max++ {
-            var b = new(bytes.Buffer)
-            var r = NewReader(b)
-            var w = NewWriter(b)
-
-            var toWrite = make([]byte, tot)
-            w.WriteBytes(toWrite)
-
-            var str = r.ReadStringMax(max)
-            var read = len(str)
-
-            if max == 0 || tot <= max {
-                if read != tot {
-                    t.Errorf("Incorrect read bytes, wrote=%d, max=%d, read=%d", tot, max, read)
-                }
-            } else if !strings.Contains(r.err.Error(), "exceeds size") {
-                t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d, read=%d: %v", tot, max, read, r.err)
-            }
-        }
-    }
-}
73  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/golang/groupcache/lru/lru_test.go  (generated, vendored)

@@ -1,73 +0,0 @@
-/*
-Copyright 2013 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package lru
-
-import (
-    "testing"
-)
-
-type simpleStruct struct {
-    int
-    string
-}
-
-type complexStruct struct {
-    int
-    simpleStruct
-}
-
-var getTests = []struct {
-    name string
-    keyToAdd interface{}
-    keyToGet interface{}
-    expectedOk bool
-}{
-    {"string_hit", "myKey", "myKey", true},
-    {"string_miss", "myKey", "nonsense", false},
-    {"simple_struct_hit", simpleStruct{1, "two"}, simpleStruct{1, "two"}, true},
-    {"simeple_struct_miss", simpleStruct{1, "two"}, simpleStruct{0, "noway"}, false},
-    {"complex_struct_hit", complexStruct{1, simpleStruct{2, "three"}},
-        complexStruct{1, simpleStruct{2, "three"}}, true},
-}
-
-func TestGet(t *testing.T) {
-    for _, tt := range getTests {
-        lru := New(0)
-        lru.Add(tt.keyToAdd, 1234)
-        val, ok := lru.Get(tt.keyToGet)
-        if ok != tt.expectedOk {
-            t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok)
-        } else if ok && val != 1234 {
-            t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val)
-        }
-    }
-}
-
-func TestRemove(t *testing.T) {
-    lru := New(0)
-    lru.Add("myKey", 1234)
-    if val, ok := lru.Get("myKey"); !ok {
-        t.Fatal("TestRemove returned no match")
-    } else if val != 1234 {
-        t.Fatalf("TestRemove failed. Expected %d, got %v", 1234, val)
-    }
-
-    lru.Remove("myKey")
-    if _, ok := lru.Get("myKey"); ok {
-        t.Fatal("TestRemove returned a removed entry")
-    }
-}
14  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS  (generated, vendored)

@@ -1,14 +0,0 @@
-# This is the official list of Snappy-Go authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS files.
-# See the latter for an explanation.
-
-# Names should be added to this file as
-# Name or Organization <email address>
-# The email address is not required for organizations.
-
-# Please keep the list sorted.
-
-Damian Gryski <dgryski@gmail.com>
-Google Inc.
-Jan Mercl <0xjnml@gmail.com>
-Sebastien Binet <seb.binet@gmail.com>
36  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS  (generated, vendored)

@@ -1,36 +0,0 @@
-# This is the official list of people who can contribute
-# (and typically have contributed) code to the Snappy-Go repository.
-# The AUTHORS file lists the copyright holders; this file
-# lists people. For example, Google employees are listed here
-# but not in AUTHORS, because Google holds the copyright.
-#
-# The submission process automatically checks to make sure
-# that people submitting code are listed in this file (by email address).
-#
-# Names should be added to this file only after verifying that
-# the individual or the individual's organization has agreed to
-# the appropriate Contributor License Agreement, found here:
-#
-# http://code.google.com/legal/individual-cla-v1.0.html
-# http://code.google.com/legal/corporate-cla-v1.0.html
-#
-# The agreement for individuals can be filled out on the web.
-#
-# When adding J Random Contributor's name to this file,
-# either J's name or J's organization's name should be
-# added to the AUTHORS file, depending on whether the
-# individual or corporate CLA was used.
-
-# Names should be added to this file like so:
-# Name <email address>
-
-# Please keep the list sorted.
-
-Damian Gryski <dgryski@gmail.com>
-Jan Mercl <0xjnml@gmail.com>
-Kai Backman <kaib@golang.org>
-Marc-Antoine Ruel <maruel@chromium.org>
-Nigel Tao <nigeltao@golang.org>
-Rob Pike <r@golang.org>
-Russ Cox <rsc@golang.org>
-Sebastien Binet <seb.binet@gmail.com>
27  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/golang/snappy/LICENSE  (generated, vendored)

@@ -1,27 +0,0 @@
-Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/golang/snappy/README  (generated, vendored)

@@ -1,7 +0,0 @@
-The Snappy compression format in the Go programming language.
-
-To download and install from source:
-$ go get github.com/golang/snappy
-
-Unless otherwise noted, the Snappy-Go source files are distributed
-under the BSD-style license found in the LICENSE file.
294  cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/golang/snappy/decode.go  (generated, vendored)

@@ -1,294 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-import (
-    "encoding/binary"
-    "errors"
-    "io"
-)
-
-var (
-    // ErrCorrupt reports that the input is invalid.
-    ErrCorrupt = errors.New("snappy: corrupt input")
-    // ErrTooLarge reports that the uncompressed length is too large.
-    ErrTooLarge = errors.New("snappy: decoded block is too large")
-    // ErrUnsupported reports that the input isn't supported.
-    ErrUnsupported = errors.New("snappy: unsupported input")
-)
-
-// DecodedLen returns the length of the decoded block.
-func DecodedLen(src []byte) (int, error) {
-    v, _, err := decodedLen(src)
-    return v, err
-}
-
-// decodedLen returns the length of the decoded block and the number of bytes
-// that the length header occupied.
-func decodedLen(src []byte) (blockLen, headerLen int, err error) {
-    v, n := binary.Uvarint(src)
-    if n <= 0 || v > 0xffffffff {
-        return 0, 0, ErrCorrupt
-    }
-
-    const wordSize = 32 << (^uint(0) >> 32 & 1)
-    if wordSize == 32 && v > 0x7fffffff {
-        return 0, 0, ErrTooLarge
-    }
-    return int(v), n, nil
-}
-
-// Decode returns the decoded form of src. The returned slice may be a sub-
-// slice of dst if dst was large enough to hold the entire decoded block.
-// Otherwise, a newly allocated slice will be returned.
-// It is valid to pass a nil dst.
-func Decode(dst, src []byte) ([]byte, error) {
-    dLen, s, err := decodedLen(src)
-    if err != nil {
-        return nil, err
-    }
-    if len(dst) < dLen {
-        dst = make([]byte, dLen)
-    }
-
-    var d, offset, length int
-    for s < len(src) {
-        switch src[s] & 0x03 {
-        case tagLiteral:
-            x := uint(src[s] >> 2)
-            switch {
-            case x < 60:
-                s++
-            case x == 60:
-                s += 2
-                if s > len(src) {
-                    return nil, ErrCorrupt
-                }
-                x = uint(src[s-1])
-            case x == 61:
-                s += 3
-                if s > len(src) {
-                    return nil, ErrCorrupt
-                }
-                x = uint(src[s-2]) | uint(src[s-1])<<8
-            case x == 62:
-                s += 4
-                if s > len(src) {
-                    return nil, ErrCorrupt
-                }
-                x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
-            case x == 63:
-                s += 5
-                if s > len(src) {
-                    return nil, ErrCorrupt
-                }
-                x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
-            }
-            length = int(x + 1)
-            if length <= 0 {
-                return nil, errors.New("snappy: unsupported literal length")
-            }
-            if length > len(dst)-d || length > len(src)-s {
-                return nil, ErrCorrupt
-            }
-            copy(dst[d:], src[s:s+length])
-            d += length
-            s += length
-            continue
-
-        case tagCopy1:
-            s += 2
-            if s > len(src) {
-                return nil, ErrCorrupt
-            }
-            length = 4 + int(src[s-2])>>2&0x7
-            offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
-
-        case tagCopy2:
-            s += 3
-            if s > len(src) {
-                return nil, ErrCorrupt
-            }
-            length = 1 + int(src[s-3])>>2
-            offset = int(src[s-2]) | int(src[s-1])<<8
-
-        case tagCopy4:
-            return nil, errors.New("snappy: unsupported COPY_4 tag")
-        }
-
-        end := d + length
-        if offset > d || end > len(dst) {
-            return nil, ErrCorrupt
-        }
-        for ; d < end; d++ {
-            dst[d] = dst[d-offset]
-        }
-    }
-    if d != dLen {
-        return nil, ErrCorrupt
-    }
-    return dst[:d], nil
-}
-
-// NewReader returns a new Reader that decompresses from r, using the framing
-// format described at
-// https://github.com/google/snappy/blob/master/framing_format.txt
-func NewReader(r io.Reader) *Reader {
-    return &Reader{
-        r: r,
-        decoded: make([]byte, maxUncompressedChunkLen),
-        buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
-    }
-}
-
-// Reader is an io.Reader than can read Snappy-compressed bytes.
-type Reader struct {
-    r io.Reader
-    err error
-    decoded []byte
-    buf []byte
-    // decoded[i:j] contains decoded bytes that have not yet been passed on.
-    i, j int
-    readHeader bool
-}
-
-// Reset discards any buffered data, resets all state, and switches the Snappy
-// reader to read from r. This permits reusing a Reader rather than allocating
-// a new one.
-func (r *Reader) Reset(reader io.Reader) {
-    r.r = reader
-    r.err = nil
-    r.i = 0
-    r.j = 0
-    r.readHeader = false
-}
-
-func (r *Reader) readFull(p []byte) (ok bool) {
-    if _, r.err = io.ReadFull(r.r, p); r.err != nil {
-        if r.err == io.ErrUnexpectedEOF {
-            r.err = ErrCorrupt
-        }
-        return false
-    }
-    return true
-}
-
-// Read satisfies the io.Reader interface.
-func (r *Reader) Read(p []byte) (int, error) {
-    if r.err != nil {
-        return 0, r.err
-    }
-    for {
-        if r.i < r.j {
-            n := copy(p, r.decoded[r.i:r.j])
-            r.i += n
-            return n, nil
-        }
-        if !r.readFull(r.buf[:4]) {
-            return 0, r.err
-        }
-        chunkType := r.buf[0]
-        if !r.readHeader {
-            if chunkType != chunkTypeStreamIdentifier {
-                r.err = ErrCorrupt
-                return 0, r.err
-            }
-            r.readHeader = true
-        }
-        chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
-        if chunkLen > len(r.buf) {
-            r.err = ErrUnsupported
-            return 0, r.err
-        }
-
-        // The chunk types are specified at
-        // https://github.com/google/snappy/blob/master/framing_format.txt
-        switch chunkType {
-        case chunkTypeCompressedData:
-            // Section 4.2. Compressed data (chunk type 0x00).
-            if chunkLen < checksumSize {
-                r.err = ErrCorrupt
-                return 0, r.err
-            }
-            buf := r.buf[:chunkLen]
-            if !r.readFull(buf) {
-                return 0, r.err
-            }
-            checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
-            buf = buf[checksumSize:]
-
-            n, err := DecodedLen(buf)
-            if err != nil {
-                r.err = err
-                return 0, r.err
-            }
-            if n > len(r.decoded) {
-                r.err = ErrCorrupt
-                return 0, r.err
-            }
-            if _, err := Decode(r.decoded, buf); err != nil {
-                r.err = err
-                return 0, r.err
-            }
-            if crc(r.decoded[:n]) != checksum {
-                r.err = ErrCorrupt
-                return 0, r.err
-            }
-            r.i, r.j = 0, n
-            continue
-
-        case chunkTypeUncompressedData:
-            // Section 4.3. Uncompressed data (chunk type 0x01).
-            if chunkLen < checksumSize {
-                r.err = ErrCorrupt
-                return 0, r.err
-            }
-            buf := r.buf[:checksumSize]
-            if !r.readFull(buf) {
-                return 0, r.err
}
|
|
||||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
|
||||||
// Read directly into r.decoded instead of via r.buf.
|
|
||||||
n := chunkLen - checksumSize
|
|
||||||
if !r.readFull(r.decoded[:n]) {
|
|
||||||
return 0, r.err
|
|
||||||
}
|
|
||||||
if crc(r.decoded[:n]) != checksum {
|
|
||||||
r.err = ErrCorrupt
|
|
||||||
return 0, r.err
|
|
||||||
}
|
|
||||||
r.i, r.j = 0, n
|
|
||||||
continue
|
|
||||||
|
|
||||||
case chunkTypeStreamIdentifier:
|
|
||||||
// Section 4.1. Stream identifier (chunk type 0xff).
|
|
||||||
if chunkLen != len(magicBody) {
|
|
||||||
r.err = ErrCorrupt
|
|
||||||
return 0, r.err
|
|
||||||
}
|
|
||||||
if !r.readFull(r.buf[:len(magicBody)]) {
|
|
||||||
return 0, r.err
|
|
||||||
}
|
|
||||||
for i := 0; i < len(magicBody); i++ {
|
|
||||||
if r.buf[i] != magicBody[i] {
|
|
||||||
r.err = ErrCorrupt
|
|
||||||
return 0, r.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if chunkType <= 0x7f {
|
|
||||||
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
|
|
||||||
r.err = ErrUnsupported
|
|
||||||
return 0, r.err
|
|
||||||
}
|
|
||||||
// Section 4.4 Padding (chunk type 0xfe).
|
|
||||||
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
|
|
||||||
if !r.readFull(r.buf[:chunkLen]) {
|
|
||||||
return 0, r.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
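As an aside (not part of the diff): a minimal sketch of how a caller typically pairs DecodedLen with Decode from the block API removed above, to size the destination buffer up front. It assumes the upstream import path github.com/golang/snappy and made-up input data.

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	compressed := snappy.Encode(nil, []byte("hello hello hello hello"))

	// DecodedLen reads only the varint length header, so the caller can
	// preallocate (or reuse) a destination buffer of exactly the right size.
	n, err := snappy.DecodedLen(compressed)
	if err != nil {
		panic(err)
	}
	dst := make([]byte, n)

	// Decode returns dst[:n] when dst is large enough, avoiding an allocation.
	out, err := snappy.Decode(dst, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes: %s\n", len(out), out)
}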
254 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/golang/snappy/encode.go generated vendored
@@ -1,254 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
	"encoding/binary"
	"io"
)

// We limit how far copy back-references can go, the same as the C++ code.
const maxOffset = 1 << 15

// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
	i, n := 0, uint(len(lit)-1)
	switch {
	case n < 60:
		dst[0] = uint8(n)<<2 | tagLiteral
		i = 1
	case n < 1<<8:
		dst[0] = 60<<2 | tagLiteral
		dst[1] = uint8(n)
		i = 2
	case n < 1<<16:
		dst[0] = 61<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		i = 3
	case n < 1<<24:
		dst[0] = 62<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		dst[3] = uint8(n >> 16)
		i = 4
	case int64(n) < 1<<32:
		dst[0] = 63<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		dst[3] = uint8(n >> 16)
		dst[4] = uint8(n >> 24)
		i = 5
	default:
		panic("snappy: source buffer is too long")
	}
	if copy(dst[i:], lit) != len(lit) {
		panic("snappy: destination buffer is too short")
	}
	return i + len(lit)
}

// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int) int {
	i := 0
	for length > 0 {
		x := length - 4
		if 0 <= x && x < 1<<3 && offset < 1<<11 {
			dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
			dst[i+1] = uint8(offset)
			i += 2
			break
		}

		x = length
		if x > 1<<6 {
			x = 1 << 6
		}
		dst[i+0] = uint8(x-1)<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= x
	}
	return i
}

// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); len(dst) < n {
		dst = make([]byte, n)
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	// Return early if src is short.
	if len(src) <= 4 {
		if len(src) != 0 {
			d += emitLiteral(dst[d:], src)
		}
		return dst[:d]
	}

	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
	const maxTableSize = 1 << 14
	shift, tableSize := uint(32-8), 1<<8
	for tableSize < maxTableSize && tableSize < len(src) {
		shift--
		tableSize *= 2
	}
	var table [maxTableSize]int

	// Iterate over the source bytes.
	var (
		s   int // The iterator position.
		t   int // The last position with the same hash as s.
		lit int // The start position of any pending literal bytes.
	)
	for s+3 < len(src) {
		// Update the hash table.
		b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
		h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
		p := &table[(h*0x1e35a7bd)>>shift]
		// We need to store values in [-1, inf) in table. To save
		// some initialization time, (re)use the table's zero value
		// and shift the values against this zero: add 1 on writes,
		// subtract 1 on reads.
		t, *p = *p-1, s+1
		// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
		if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
			s++
			continue
		}
		// Otherwise, we have a match. First, emit any pending literal bytes.
		if lit != s {
			d += emitLiteral(dst[d:], src[lit:s])
		}
		// Extend the match to be as long as possible.
		s0 := s
		s, t = s+4, t+4
		for s < len(src) && src[s] == src[t] {
			s++
			t++
		}
		// Emit the copied bytes.
		d += emitCopy(dst[d:], s-t, s-s0)
		lit = s
	}

	// Emit any final pending literal bytes and return.
	if lit != len(src) {
		d += emitLiteral(dst[d:], src[lit:])
	}
	return dst[:d]
}

// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
	// Compressed data can be defined as:
	//    compressed := item* literal*
	//    item       := literal* copy
	//
	// The trailing literal sequence has a space blowup of at most 62/60
	// since a literal of length 60 needs one tag byte + one extra byte
	// for length information.
	//
	// Item blowup is trickier to measure. Suppose the "copy" op copies
	// 4 bytes of data. Because of a special check in the encoding code,
	// we produce a 4-byte copy only if the offset is < 65536. Therefore
	// the copy op takes 3 bytes to encode, and this type of item leads
	// to at most the 62/60 blowup for representing literals.
	//
	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
	// enough, it will take 5 bytes to encode the copy op. Therefore the
	// worst case here is a one-byte literal followed by a five-byte copy.
	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
	//
	// This last factor dominates the blowup, so the final estimate is:
	return 32 + srcLen + srcLen/6
}

// NewWriter returns a new Writer that compresses to w, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewWriter(w io.Writer) *Writer {
	return &Writer{
		w:   w,
		enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
	}
}

// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
	w           io.Writer
	err         error
	enc         []byte
	buf         [checksumSize + chunkHeaderSize]byte
	wroteHeader bool
}

// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
	w.w = writer
	w.err = nil
	w.wroteHeader = false
}

// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (n int, errRet error) {
	if w.err != nil {
		return 0, w.err
	}
	if !w.wroteHeader {
		copy(w.enc, magicChunk)
		if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
			w.err = err
			return n, err
		}
		w.wroteHeader = true
	}
	for len(p) > 0 {
		var uncompressed []byte
		if len(p) > maxUncompressedChunkLen {
			uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
		} else {
			uncompressed, p = p, nil
		}
		checksum := crc(uncompressed)

		// Compress the buffer, discarding the result if the improvement
		// isn't at least 12.5%.
		chunkType := uint8(chunkTypeCompressedData)
		chunkBody := Encode(w.enc, uncompressed)
		if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
			chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
		}

		chunkLen := 4 + len(chunkBody)
		w.buf[0] = chunkType
		w.buf[1] = uint8(chunkLen >> 0)
		w.buf[2] = uint8(chunkLen >> 8)
		w.buf[3] = uint8(chunkLen >> 16)
		w.buf[4] = uint8(checksum >> 0)
		w.buf[5] = uint8(checksum >> 8)
		w.buf[6] = uint8(checksum >> 16)
		w.buf[7] = uint8(checksum >> 24)
		if _, err := w.w.Write(w.buf[:]); err != nil {
			w.err = err
			return n, err
		}
		if _, err := w.w.Write(chunkBody); err != nil {
			w.err = err
			return n, err
		}
		n += len(uncompressed)
	}
	return n, nil
}
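For reference only (not part of this commit): a minimal sketch of the framing-format API deleted above, pairing NewWriter and NewReader for streaming compression. It assumes the upstream import path github.com/golang/snappy; the payload is made up.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// Writer splits the input into chunks of at most 64 KiB, each written
	// with a chunk header and CRC, as implemented in the file above.
	w := snappy.NewWriter(&buf)
	if _, err := w.Write(bytes.Repeat([]byte("syncthing relay pool "), 1000)); err != nil {
		panic(err)
	}

	// Reader validates the stream identifier and per-chunk checksums.
	out, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Println("decompressed bytes:", len(out))
}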
68 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/golang/snappy/snappy.go generated vendored
@@ -1,68 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
package snappy // import "github.com/golang/snappy"

import (
	"hash/crc32"
)

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer supported.
*/
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

const (
	checksumSize    = 4
	chunkHeaderSize = 4
	magicChunk      = "\xff\x06\x00\x00" + magicBody
	magicBody       = "sNaPpY"
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
	maxUncompressedChunkLen = 65536
)

const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}
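To make the block-format comment above concrete, here is a tiny illustrative helper (not part of the vendored code; the sample byte is made up) that splits a chunk's first byte into the tag l and the value m exactly as described.

package main

import "fmt"

// describeTag splits the first byte of a snappy block chunk into its 2-bit
// tag (l) and 6-bit value (m), mirroring the format description above.
func describeTag(b byte) (l, m uint8) {
	return b & 0x03, b >> 2
}

func main() {
	// 0x54 = 0b01010100: l == 0 (literal tag), m == 21, so 1+m == 22 literal
	// bytes follow this tag byte.
	l, m := describeTag(0x54)
	fmt.Printf("tag=%d value=%d\n", l, m)
}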
377 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go generated vendored
@@ -1,377 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

var (
	download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
	testdata = flag.String("testdata", "testdata", "Directory containing the test data")
)

func roundtrip(b, ebuf, dbuf []byte) error {
	d, err := Decode(dbuf, Encode(ebuf, b))
	if err != nil {
		return fmt.Errorf("decoding error: %v", err)
	}
	if !bytes.Equal(b, d) {
		return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d)
	}
	return nil
}

func TestEmpty(t *testing.T) {
	if err := roundtrip(nil, nil, nil); err != nil {
		t.Fatal(err)
	}
}

func TestSmallCopy(t *testing.T) {
	for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
		for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
			for i := 0; i < 32; i++ {
				s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
				if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
					t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
				}
			}
		}
	}
}

func TestSmallRand(t *testing.T) {
	rng := rand.New(rand.NewSource(27354294))
	for n := 1; n < 20000; n += 23 {
		b := make([]byte, n)
		for i := range b {
			b[i] = uint8(rng.Uint32())
		}
		if err := roundtrip(b, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}

func TestSmallRegular(t *testing.T) {
	for n := 1; n < 20000; n += 23 {
		b := make([]byte, n)
		for i := range b {
			b[i] = uint8(i%10 + 'a')
		}
		if err := roundtrip(b, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}

func TestInvalidVarint(t *testing.T) {
	data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00")
	if _, err := DecodedLen(data); err != ErrCorrupt {
		t.Errorf("DecodedLen: got %v, want ErrCorrupt", err)
	}
	if _, err := Decode(nil, data); err != ErrCorrupt {
		t.Errorf("Decode: got %v, want ErrCorrupt", err)
	}

	// The encoded varint overflows 32 bits
	data = []byte("\xff\xff\xff\xff\xff\x00")

	if _, err := DecodedLen(data); err != ErrCorrupt {
		t.Errorf("DecodedLen: got %v, want ErrCorrupt", err)
	}
	if _, err := Decode(nil, data); err != ErrCorrupt {
		t.Errorf("Decode: got %v, want ErrCorrupt", err)
	}
}

func cmp(a, b []byte) error {
	if len(a) != len(b) {
		return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
	}
	for i := range a {
		if a[i] != b[i] {
			return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
		}
	}
	return nil
}

func TestFramingFormat(t *testing.T) {
	// src is comprised of alternating 1e5-sized sequences of random
	// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
	// because it is larger than maxUncompressedChunkLen (64k).
	src := make([]byte, 1e6)
	rng := rand.New(rand.NewSource(1))
	for i := 0; i < 10; i++ {
		if i%2 == 0 {
			for j := 0; j < 1e5; j++ {
				src[1e5*i+j] = uint8(rng.Intn(256))
			}
		} else {
			for j := 0; j < 1e5; j++ {
				src[1e5*i+j] = uint8(i)
			}
		}
	}

	buf := new(bytes.Buffer)
	if _, err := NewWriter(buf).Write(src); err != nil {
		t.Fatalf("Write: encoding: %v", err)
	}
	dst, err := ioutil.ReadAll(NewReader(buf))
	if err != nil {
		t.Fatalf("ReadAll: decoding: %v", err)
	}
	if err := cmp(dst, src); err != nil {
		t.Fatal(err)
	}
}

func TestReaderReset(t *testing.T) {
	gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
	buf := new(bytes.Buffer)
	if _, err := NewWriter(buf).Write(gold); err != nil {
		t.Fatalf("Write: %v", err)
	}
	encoded, invalid, partial := buf.String(), "invalid", "partial"
	r := NewReader(nil)
	for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
		if s == partial {
			r.Reset(strings.NewReader(encoded))
			if _, err := r.Read(make([]byte, 101)); err != nil {
				t.Errorf("#%d: %v", i, err)
				continue
			}
			continue
		}
		r.Reset(strings.NewReader(s))
		got, err := ioutil.ReadAll(r)
		switch s {
		case encoded:
			if err != nil {
				t.Errorf("#%d: %v", i, err)
				continue
			}
			if err := cmp(got, gold); err != nil {
				t.Errorf("#%d: %v", i, err)
				continue
			}
		case invalid:
			if err == nil {
				t.Errorf("#%d: got nil error, want non-nil", i)
				continue
			}
		}
	}
}

func TestWriterReset(t *testing.T) {
	gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
	var gots, wants [][]byte
	const n = 20
	w, failed := NewWriter(nil), false
	for i := 0; i <= n; i++ {
		buf := new(bytes.Buffer)
		w.Reset(buf)
		want := gold[:len(gold)*i/n]
		if _, err := w.Write(want); err != nil {
			t.Errorf("#%d: Write: %v", i, err)
			failed = true
			continue
		}
		got, err := ioutil.ReadAll(NewReader(buf))
		if err != nil {
			t.Errorf("#%d: ReadAll: %v", i, err)
			failed = true
			continue
		}
		gots = append(gots, got)
		wants = append(wants, want)
	}
	if failed {
		return
	}
	for i := range gots {
		if err := cmp(gots[i], wants[i]); err != nil {
			t.Errorf("#%d: %v", i, err)
		}
	}
}

func benchDecode(b *testing.B, src []byte) {
	encoded := Encode(nil, src)
	// Bandwidth is in amount of uncompressed data.
	b.SetBytes(int64(len(src)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Decode(src, encoded)
	}
}

func benchEncode(b *testing.B, src []byte) {
	// Bandwidth is in amount of uncompressed data.
	b.SetBytes(int64(len(src)))
	dst := make([]byte, MaxEncodedLen(len(src)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Encode(dst, src)
	}
}

func readFile(b testing.TB, filename string) []byte {
	src, err := ioutil.ReadFile(filename)
	if err != nil {
		b.Skipf("skipping benchmark: %v", err)
	}
	if len(src) == 0 {
		b.Fatalf("%s has zero length", filename)
	}
	return src
}

// expand returns a slice of length n containing repeated copies of src.
func expand(src []byte, n int) []byte {
	dst := make([]byte, n)
	for x := dst; len(x) > 0; {
		i := copy(x, src)
		x = x[i:]
	}
	return dst
}

func benchWords(b *testing.B, n int, decode bool) {
	// Note: the file is OS-language dependent so the resulting values are not
	// directly comparable for non-US-English OS installations.
	data := expand(readFile(b, "/usr/share/dict/words"), n)
	if decode {
		benchDecode(b, data)
	} else {
		benchEncode(b, data)
	}
}

func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }

// testFiles' values are copied directly from
// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
// The label field is unused in snappy-go.
var testFiles = []struct {
	label    string
	filename string
}{
	{"html", "html"},
	{"urls", "urls.10K"},
	{"jpg", "fireworks.jpeg"},
	{"jpg_200", "fireworks.jpeg"},
	{"pdf", "paper-100k.pdf"},
	{"html4", "html_x_4"},
	{"txt1", "alice29.txt"},
	{"txt2", "asyoulik.txt"},
	{"txt3", "lcet10.txt"},
	{"txt4", "plrabn12.txt"},
	{"pb", "geo.protodata"},
	{"gaviota", "kppkn.gtb"},
}

// The test data files are present at this canonical URL.
const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"

func downloadTestdata(b *testing.B, basename string) (errRet error) {
	filename := filepath.Join(*testdata, basename)
	if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
		return nil
	}

	if !*download {
		b.Skipf("test data not found; skipping benchmark without the -download flag")
	}
	// Download the official snappy C++ implementation reference test data
	// files for benchmarking.
	if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create testdata: %s", err)
	}

	f, err := os.Create(filename)
	if err != nil {
		return fmt.Errorf("failed to create %s: %s", filename, err)
	}
	defer f.Close()
	defer func() {
		if errRet != nil {
			os.Remove(filename)
		}
	}()
	url := baseURL + basename
	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("failed to download %s: %s", url, err)
	}
	defer resp.Body.Close()
	if s := resp.StatusCode; s != http.StatusOK {
		return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
	}
	_, err = io.Copy(f, resp.Body)
	if err != nil {
		return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
	}
	return nil
}

func benchFile(b *testing.B, n int, decode bool) {
	if err := downloadTestdata(b, testFiles[n].filename); err != nil {
		b.Fatalf("failed to download testdata: %s", err)
	}
	data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
	if decode {
		benchDecode(b, data)
	} else {
		benchEncode(b, data)
	}
}

// Naming convention is kept similar to what snappy's C++ implementation uses.
func Benchmark_UFlat0(b *testing.B)  { benchFile(b, 0, true) }
func Benchmark_UFlat1(b *testing.B)  { benchFile(b, 1, true) }
func Benchmark_UFlat2(b *testing.B)  { benchFile(b, 2, true) }
func Benchmark_UFlat3(b *testing.B)  { benchFile(b, 3, true) }
func Benchmark_UFlat4(b *testing.B)  { benchFile(b, 4, true) }
func Benchmark_UFlat5(b *testing.B)  { benchFile(b, 5, true) }
func Benchmark_UFlat6(b *testing.B)  { benchFile(b, 6, true) }
func Benchmark_UFlat7(b *testing.B)  { benchFile(b, 7, true) }
func Benchmark_UFlat8(b *testing.B)  { benchFile(b, 8, true) }
func Benchmark_UFlat9(b *testing.B)  { benchFile(b, 9, true) }
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
func Benchmark_ZFlat0(b *testing.B)  { benchFile(b, 0, false) }
func Benchmark_ZFlat1(b *testing.B)  { benchFile(b, 1, false) }
func Benchmark_ZFlat2(b *testing.B)  { benchFile(b, 2, false) }
func Benchmark_ZFlat3(b *testing.B)  { benchFile(b, 3, false) }
func Benchmark_ZFlat4(b *testing.B)  { benchFile(b, 4, false) }
func Benchmark_ZFlat5(b *testing.B)  { benchFile(b, 5, false) }
func Benchmark_ZFlat6(b *testing.B)  { benchFile(b, 6, false) }
func Benchmark_ZFlat7(b *testing.B)  { benchFile(b, 7, false) }
func Benchmark_ZFlat8(b *testing.B)  { benchFile(b, 8, false) }
func Benchmark_ZFlat9(b *testing.B)  { benchFile(b, 9, false) }
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
26 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit.go generated vendored
@@ -8,10 +8,10 @@
package ratelimit

import (
-	"math"
	"strconv"
	"sync"
	"time"
+	"math"
)

// Bucket represents a token bucket that fills at a predetermined rate.
@@ -171,30 +171,6 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
	return count
}

-// Available returns the number of available tokens. It will be negative
-// when there are consumers waiting for tokens. Note that if this
-// returns greater than zero, it does not guarantee that calls that take
-// tokens from the buffer will succeed, as the number of available
-// tokens could have changed in the meantime. This method is intended
-// primarily for metrics reporting and debugging.
-func (tb *Bucket) Available() int64 {
-	return tb.available(time.Now())
-}
-
-// available is the internal version of available - it takes the current time as
-// an argument to enable easy testing.
-func (tb *Bucket) available(now time.Time) int64 {
-	tb.mu.Lock()
-	defer tb.mu.Unlock()
-	tb.adjust(now)
-	return tb.avail
-}
-
-// Capacity returns the capacity that the bucket was created with.
-func (tb *Bucket) Capacity() int64 {
-	return tb.capacity
-}
-
// Rate returns the fill rate of the bucket, in tokens per second.
func (tb *Bucket) Rate() float64 {
	return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
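For reference only (not part of this commit): a minimal sketch of using the updated juju/ratelimit token bucket. NewBucket and Wait are the calls exercised by the package's own tests below; the fill interval and capacity here are made-up values.

package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// A bucket that adds one token every 100ms and holds at most 10 tokens.
	bucket := ratelimit.NewBucket(100*time.Millisecond, 10)

	start := time.Now()
	for i := 0; i < 15; i++ {
		bucket.Wait(1) // blocks once the initial burst of 10 tokens is spent
	}
	fmt.Println("15 tokens took", time.Since(start))
}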
389 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit_test.go generated vendored
@@ -1,389 +0,0 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.

package ratelimit

import (
	"math"
	"testing"
	"time"

	gc "gopkg.in/check.v1"
)

func TestPackage(t *testing.T) {
	gc.TestingT(t)
}

type rateLimitSuite struct{}

var _ = gc.Suite(rateLimitSuite{})

type takeReq struct {
	time       time.Duration
	count      int64
	expectWait time.Duration
}

var takeTests = []struct {
	about        string
	fillInterval time.Duration
	capacity     int64
	reqs         []takeReq
}{{
	about:        "serial requests",
	fillInterval: 250 * time.Millisecond,
	capacity:     10,
	reqs: []takeReq{{
		time:       0,
		count:      0,
		expectWait: 0,
	}, {
		time:       0,
		count:      10,
		expectWait: 0,
	}, {
		time:       0,
		count:      1,
		expectWait: 250 * time.Millisecond,
	}, {
		time:       250 * time.Millisecond,
		count:      1,
		expectWait: 250 * time.Millisecond,
	}},
}, {
	about:        "concurrent requests",
	fillInterval: 250 * time.Millisecond,
	capacity:     10,
	reqs: []takeReq{{
		time:       0,
		count:      10,
		expectWait: 0,
	}, {
		time:       0,
		count:      2,
		expectWait: 500 * time.Millisecond,
	}, {
		time:       0,
		count:      2,
		expectWait: 1000 * time.Millisecond,
	}, {
		time:       0,
		count:      1,
		expectWait: 1250 * time.Millisecond,
	}},
}, {
	about:        "more than capacity",
	fillInterval: 1 * time.Millisecond,
	capacity:     10,
	reqs: []takeReq{{
		time:       0,
		count:      10,
		expectWait: 0,
	}, {
		time:       20 * time.Millisecond,
		count:      15,
		expectWait: 5 * time.Millisecond,
	}},
}, {
	about:        "sub-quantum time",
	fillInterval: 10 * time.Millisecond,
	capacity:     10,
	reqs: []takeReq{{
		time:       0,
		count:      10,
		expectWait: 0,
	}, {
		time:       7 * time.Millisecond,
		count:      1,
		expectWait: 3 * time.Millisecond,
	}, {
		time:       8 * time.Millisecond,
		count:      1,
		expectWait: 12 * time.Millisecond,
	}},
}, {
	about:        "within capacity",
	fillInterval: 10 * time.Millisecond,
	capacity:     5,
	reqs: []takeReq{{
		time:       0,
		count:      5,
		expectWait: 0,
	}, {
		time:       60 * time.Millisecond,
		count:      5,
		expectWait: 0,
	}, {
		time:       60 * time.Millisecond,
		count:      1,
		expectWait: 10 * time.Millisecond,
	}, {
		time:       80 * time.Millisecond,
		count:      2,
		expectWait: 10 * time.Millisecond,
	}},
}}

var availTests = []struct {
	about        string
	capacity     int64
	fillInterval time.Duration
	take         int64
	sleep        time.Duration

	expectCountAfterTake  int64
	expectCountAfterSleep int64
}{{
	about:                 "should fill tokens after interval",
	capacity:              5,
	fillInterval:          time.Second,
	take:                  5,
	sleep:                 time.Second,
	expectCountAfterTake:  0,
	expectCountAfterSleep: 1,
}, {
	about:                 "should fill tokens plus existing count",
	capacity:              2,
	fillInterval:          time.Second,
	take:                  1,
	sleep:                 time.Second,
	expectCountAfterTake:  1,
	expectCountAfterSleep: 2,
}, {
	about:                 "shouldn't fill before interval",
	capacity:              2,
	fillInterval:          2 * time.Second,
	take:                  1,
	sleep:                 time.Second,
	expectCountAfterTake:  1,
	expectCountAfterSleep: 1,
}, {
	about:                 "should fill only once after 1*interval before 2*interval",
	capacity:              2,
	fillInterval:          2 * time.Second,
	take:                  1,
	sleep:                 3 * time.Second,
	expectCountAfterTake:  1,
	expectCountAfterSleep: 2,
}}

func (rateLimitSuite) TestTake(c *gc.C) {
	for i, test := range takeTests {
		tb := NewBucket(test.fillInterval, test.capacity)
		for j, req := range test.reqs {
			d, ok := tb.take(tb.startTime.Add(req.time), req.count, infinityDuration)
			c.Assert(ok, gc.Equals, true)
			if d != req.expectWait {
				c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
			}
		}
	}
}

func (rateLimitSuite) TestTakeMaxDuration(c *gc.C) {
	for i, test := range takeTests {
		tb := NewBucket(test.fillInterval, test.capacity)
		for j, req := range test.reqs {
			if req.expectWait > 0 {
				d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait-1)
				c.Assert(ok, gc.Equals, false)
				c.Assert(d, gc.Equals, time.Duration(0))
			}
			d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait)
			c.Assert(ok, gc.Equals, true)
			if d != req.expectWait {
				c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
			}
		}
	}
}

type takeAvailableReq struct {
	time   time.Duration
	count  int64
	expect int64
}

var takeAvailableTests = []struct {
	about        string
	fillInterval time.Duration
	capacity     int64
	reqs         []takeAvailableReq
}{{
	about:        "serial requests",
	fillInterval: 250 * time.Millisecond,
	capacity:     10,
	reqs: []takeAvailableReq{{
		time:   0,
		count:  0,
		expect: 0,
	}, {
		time:   0,
		count:  10,
		expect: 10,
	}, {
		time:   0,
		count:  1,
		expect: 0,
	}, {
		time:   250 * time.Millisecond,
		count:  1,
		expect: 1,
	}},
}, {
	about:        "concurrent requests",
	fillInterval: 250 * time.Millisecond,
	capacity:     10,
	reqs: []takeAvailableReq{{
		time:   0,
		count:  5,
		expect: 5,
	}, {
		time:   0,
		count:  2,
		expect: 2,
	}, {
		time:   0,
		count:  5,
		expect: 3,
	}, {
		time:   0,
		count:  1,
		expect: 0,
	}},
}, {
	about:        "more than capacity",
	fillInterval: 1 * time.Millisecond,
	capacity:     10,
	reqs: []takeAvailableReq{{
		time:   0,
		count:  10,
		expect: 10,
	}, {
		time:   20 * time.Millisecond,
		count:  15,
		expect: 10,
	}},
}, {
	about:        "within capacity",
	fillInterval: 10 * time.Millisecond,
	capacity:     5,
	reqs: []takeAvailableReq{{
		time:   0,
		count:  5,
		expect: 5,
	}, {
		time:   60 * time.Millisecond,
		count:  5,
		expect: 5,
	}, {
		time:   70 * time.Millisecond,
		count:  1,
		expect: 1,
	}},
}}

func (rateLimitSuite) TestTakeAvailable(c *gc.C) {
	for i, test := range takeAvailableTests {
		tb := NewBucket(test.fillInterval, test.capacity)
		for j, req := range test.reqs {
			d := tb.takeAvailable(tb.startTime.Add(req.time), req.count)
			if d != req.expect {
				c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expect)
			}
		}
	}
}

func (rateLimitSuite) TestPanics(c *gc.C) {
	c.Assert(func() { NewBucket(0, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
	c.Assert(func() { NewBucket(-2, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
	c.Assert(func() { NewBucket(1, 0) }, gc.PanicMatches, "token bucket capacity is not > 0")
	c.Assert(func() { NewBucket(1, -2) }, gc.PanicMatches, "token bucket capacity is not > 0")
}

func isCloseTo(x, y, tolerance float64) bool {
	return math.Abs(x-y)/y < tolerance
}

func (rateLimitSuite) TestRate(c *gc.C) {
	tb := NewBucket(1, 1)
	if !isCloseTo(tb.Rate(), 1e9, 0.00001) {
		c.Fatalf("got %v want 1e9", tb.Rate())
	}
	tb = NewBucket(2*time.Second, 1)
	if !isCloseTo(tb.Rate(), 0.5, 0.00001) {
		c.Fatalf("got %v want 0.5", tb.Rate())
	}
	tb = NewBucketWithQuantum(100*time.Millisecond, 1, 5)
	if !isCloseTo(tb.Rate(), 50, 0.00001) {
		c.Fatalf("got %v want 50", tb.Rate())
	}
}

func checkRate(c *gc.C, rate float64) {
	tb := NewBucketWithRate(rate, 1<<62)
	if !isCloseTo(tb.Rate(), rate, rateMargin) {
		c.Fatalf("got %g want %v", tb.Rate(), rate)
	}
	d, ok := tb.take(tb.startTime, 1<<62, infinityDuration)
	c.Assert(ok, gc.Equals, true)
	c.Assert(d, gc.Equals, time.Duration(0))

	// Check that the actual rate is as expected by
	// asking for a not-quite multiple of the bucket's
	// quantum and checking that the wait time is
	// correct.
	d, ok = tb.take(tb.startTime, tb.quantum*2-tb.quantum/2, infinityDuration)
	c.Assert(ok, gc.Equals, true)
	expectTime := 1e9 * float64(tb.quantum) * 2 / rate
	if !isCloseTo(float64(d), expectTime, rateMargin) {
		c.Fatalf("rate %g: got %g want %v", rate, float64(d), expectTime)
	}
}

func (rateLimitSuite) TestNewWithRate(c *gc.C) {
	for rate := float64(1); rate < 1e6; rate += 7 {
		checkRate(c, rate)
	}
	for _, rate := range []float64{
		1024 * 1024 * 1024,
		1e-5,
		0.9e-5,
		0.5,
		0.9,
		0.9e8,
		3e12,
		4e18,
	} {
		checkRate(c, rate)
		checkRate(c, rate/3)
		checkRate(c, rate*1.3)
	}
}

func TestAvailable(t *testing.T) {
	for i, tt := range availTests {
		tb := NewBucket(tt.fillInterval, tt.capacity)
		if c := tb.takeAvailable(tb.startTime, tt.take); c != tt.take {
			t.Fatalf("#%d: %s, take = %d, want = %d", i, tt.about, c, tt.take)
		}
		if c := tb.available(tb.startTime); c != tt.expectCountAfterTake {
			t.Fatalf("#%d: %s, after take, available = %d, want = %d", i, tt.about, c, tt.expectCountAfterTake)
		}
		if c := tb.available(tb.startTime.Add(tt.sleep)); c != tt.expectCountAfterSleep {
			t.Fatalf("#%d: %s, after some time it should fill in new tokens, available = %d, want = %d",
				i, tt.about, c, tt.expectCountAfterSleep)
		}
	}

}

func BenchmarkWait(b *testing.B) {
	tb := NewBucket(1, 16*1024)
	for i := b.N - 1; i >= 0; i-- {
		tb.Wait(1)
	}
}
27 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/kardianos/osext/LICENSE generated vendored
@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/kardianos/osext/README.md generated vendored
@@ -1,14 +0,0 @@
### Extensions to the "os" package.

## Find the current Executable and ExecutableFolder.

There is sometimes utility in finding the current executable file
that is running. This can be used for upgrading the current executable
or finding resources located relative to the executable file.

Multi-platform and supports:
* Linux
* OS X
* Windows
* Plan 9
* BSDs.
27 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/kardianos/osext/osext.go generated vendored
@@ -1,27 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Extensions to the standard "os" package.
package osext // import "github.com/kardianos/osext"

import "path/filepath"

// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
	p, err := executable()
	return filepath.Clean(p), err
}

// ExecutableFolder returns the same path as Executable, but just the
// folder path. It excludes the executable name.
func ExecutableFolder() (string, error) {
	p, err := Executable()
	if err != nil {
		return "", err
	}
	folder, _ := filepath.Split(p)
	return folder, nil
}
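A short illustrative use of the two exported osext functions above (again, not part of the commit; output paths depend on where the binary runs):

package main

import (
	"fmt"

	"github.com/kardianos/osext"
)

func main() {
	exe, err := osext.Executable()
	if err != nil {
		panic(err)
	}
	dir, err := osext.ExecutableFolder()
	if err != nil {
		panic(err)
	}
	// Useful for locating assets shipped next to the binary.
	fmt.Println("binary:", exe)
	fmt.Println("folder:", dir)
}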
20 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/kardianos/osext/osext_plan9.go generated vendored
@@ -1,20 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package osext

import (
	"os"
	"strconv"
	"syscall"
)

func executable() (string, error) {
	f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
	if err != nil {
		return "", err
	}
	defer f.Close()
	return syscall.Fd2path(int(f.Fd()))
}
34 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go generated vendored
@@ -1,34 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux netbsd openbsd solaris dragonfly

package osext

import (
	"errors"
	"fmt"
	"os"
	"runtime"
	"strings"
)

func executable() (string, error) {
	switch runtime.GOOS {
	case "linux":
		const deletedSuffix = " (deleted)"
		execpath, err := os.Readlink("/proc/self/exe")
		if err != nil {
			return execpath, err
		}
		return strings.TrimSuffix(execpath, deletedSuffix), nil
	case "netbsd":
		return os.Readlink("/proc/curproc/exe")
	case "openbsd", "dragonfly":
		return os.Readlink("/proc/curproc/file")
	case "solaris":
		return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
	}
	return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
}
79 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go generated vendored
@@ -1,79 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin freebsd

package osext

import (
	"os"
	"path/filepath"
	"runtime"
	"syscall"
	"unsafe"
)

var initCwd, initCwdErr = os.Getwd()

func executable() (string, error) {
	var mib [4]int32
	switch runtime.GOOS {
	case "freebsd":
		mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
	case "darwin":
		mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
	}

	n := uintptr(0)
	// Get length.
	_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
	if errNum != 0 {
		return "", errNum
	}
	if n == 0 { // This shouldn't happen.
		return "", nil
	}
	buf := make([]byte, n)
	_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
	if errNum != 0 {
		return "", errNum
	}
	if n == 0 { // This shouldn't happen.
		return "", nil
	}
	for i, v := range buf {
		if v == 0 {
			buf = buf[:i]
			break
		}
	}
	var err error
	execPath := string(buf)
	// execPath will not be empty due to above checks.
	// Try to get the absolute path if the execPath is not rooted.
	if execPath[0] != '/' {
		execPath, err = getAbs(execPath)
		if err != nil {
			return execPath, err
		}
	}
	// For darwin KERN_PROCARGS may return the path to a symlink rather than the
	// actual executable.
	if runtime.GOOS == "darwin" {
		if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
			return execPath, err
		}
	}
	return execPath, nil
}

func getAbs(execPath string) (string, error) {
	if initCwdErr != nil {
		return execPath, initCwdErr
	}
	// The execPath may begin with a "../" or a "./" so clean it first.
	// Join the two paths, trailing and starting slashes undetermined, so use
	// the generic Join function.
	return filepath.Join(initCwd, filepath.Clean(execPath)), nil
}
180
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go
generated
vendored
@@ -1,180 +0,0 @@
|
|||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build darwin linux freebsd netbsd windows
|
|
||||||
|
|
||||||
package osext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
|
|
||||||
|
|
||||||
executableEnvValueMatch = "match"
|
|
||||||
executableEnvValueDelete = "delete"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestExecutableMatch(t *testing.T) {
|
|
||||||
ep, err := Executable()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Executable failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fullpath to be of the form "dir/prog".
|
|
||||||
dir := filepath.Dir(filepath.Dir(ep))
|
|
||||||
fullpath, err := filepath.Rel(dir, ep)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("filepath.Rel: %v", err)
|
|
||||||
}
|
|
||||||
// Make child start with a relative program path.
|
|
||||||
// Alter argv[0] for child to verify getting real path without argv[0].
|
|
||||||
cmd := &exec.Cmd{
|
|
||||||
Dir: dir,
|
|
||||||
Path: fullpath,
|
|
||||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
|
|
||||||
}
|
|
||||||
out, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("exec(self) failed: %v", err)
|
|
||||||
}
|
|
||||||
outs := string(out)
|
|
||||||
if !filepath.IsAbs(outs) {
|
|
||||||
t.Fatalf("Child returned %q, want an absolute path", out)
|
|
||||||
}
|
|
||||||
if !sameFile(outs, ep) {
|
|
||||||
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExecutableDelete(t *testing.T) {
|
|
||||||
if runtime.GOOS != "linux" {
|
|
||||||
t.Skip()
|
|
||||||
}
|
|
||||||
fpath, err := Executable()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Executable failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
r, w := io.Pipe()
|
|
||||||
stderrBuff := &bytes.Buffer{}
|
|
||||||
stdoutBuff := &bytes.Buffer{}
|
|
||||||
cmd := &exec.Cmd{
|
|
||||||
Path: fpath,
|
|
||||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
|
|
||||||
Stdin: r,
|
|
||||||
Stderr: stderrBuff,
|
|
||||||
Stdout: stdoutBuff,
|
|
||||||
}
|
|
||||||
err = cmd.Start()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("exec(self) start failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
tempPath := fpath + "_copy"
|
|
||||||
_ = os.Remove(tempPath)
|
|
||||||
|
|
||||||
err = copyFile(tempPath, fpath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("copy file failed: %v", err)
|
|
||||||
}
|
|
||||||
err = os.Remove(fpath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("remove running test file failed: %v", err)
|
|
||||||
}
|
|
||||||
err = os.Rename(tempPath, fpath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("rename copy to previous name failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w.Write([]byte{0})
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
err = cmd.Wait()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("exec wait failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
childPath := stderrBuff.String()
|
|
||||||
if !filepath.IsAbs(childPath) {
|
|
||||||
t.Fatalf("Child returned %q, want an absolute path", childPath)
|
|
||||||
}
|
|
||||||
if !sameFile(childPath, fpath) {
|
|
||||||
t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sameFile(fn1, fn2 string) bool {
|
|
||||||
fi1, err := os.Stat(fn1)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
fi2, err := os.Stat(fn2)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return os.SameFile(fi1, fi2)
|
|
||||||
}
|
|
||||||
func copyFile(dest, src string) error {
|
|
||||||
df, err := os.Create(dest)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer df.Close()
|
|
||||||
|
|
||||||
sf, err := os.Open(src)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer sf.Close()
|
|
||||||
|
|
||||||
_, err = io.Copy(df, sf)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
|
||||||
env := os.Getenv(executableEnvVar)
|
|
||||||
switch env {
|
|
||||||
case "":
|
|
||||||
os.Exit(m.Run())
|
|
||||||
case executableEnvValueMatch:
|
|
||||||
// First chdir to another path.
|
|
||||||
dir := "/"
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
dir = filepath.VolumeName(".")
|
|
||||||
}
|
|
||||||
os.Chdir(dir)
|
|
||||||
if ep, err := Executable(); err != nil {
|
|
||||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
|
||||||
} else {
|
|
||||||
fmt.Fprint(os.Stderr, ep)
|
|
||||||
}
|
|
||||||
case executableEnvValueDelete:
|
|
||||||
bb := make([]byte, 1)
|
|
||||||
var err error
|
|
||||||
n, err := os.Stdin.Read(bb)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
|
||||||
os.Exit(2)
|
|
||||||
}
|
|
||||||
if n != 1 {
|
|
||||||
fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
|
|
||||||
os.Exit(2)
|
|
||||||
}
|
|
||||||
if ep, err := Executable(); err != nil {
|
|
||||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
|
||||||
} else {
|
|
||||||
fmt.Fprint(os.Stderr, ep)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
34
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/kardianos/osext/osext_windows.go
generated
vendored
@@ -1,34 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package osext

import (
	"syscall"
	"unicode/utf16"
	"unsafe"
)

var (
	kernel                = syscall.MustLoadDLL("kernel32.dll")
	getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
)

// GetModuleFileName() with hModule = NULL
func executable() (exePath string, err error) {
	return getModuleFileName()
}

func getModuleFileName() (string, error) {
	var n uint32
	b := make([]uint16, syscall.MAX_PATH)
	size := uint32(len(b))

	r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
	n = uint32(r0)
	if n == 0 {
		return "", e1
	}
	return string(utf16.Decode(b[0:n])), nil
}
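The three platform files above (procfs, sysctl, Windows) all back one exported entry point. A minimal caller-side sketch, assuming only the `osext.Executable()` function exercised by the package's tests; the import path is the upstream `github.com/kardianos/osext` that this vendored copy mirrors:

```go
package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/kardianos/osext"
)

func main() {
	// Executable() wraps the per-OS executable() implementations removed in this diff.
	exe, err := osext.Executable()
	if err != nil {
		log.Fatalf("cannot locate executable: %v", err)
	}
	fmt.Println("binary:", exe)
	fmt.Println("directory:", filepath.Dir(exe))
}
```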
9
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/.gitignore
generated
vendored
@@ -1,9 +0,0 @@
*.[68]
*.a
*.out
*.swp
_obj
_testmain.go
cmd/metrics-bench/metrics-bench
cmd/metrics-example/metrics-example
cmd/never-read/never-read
13
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/.travis.yml
generated
vendored
@@ -1,13 +0,0 @@
language: go

go:
  - 1.2
  - 1.3
  - 1.4

script:
  - ./validate.sh

# this should give us faster builds according to
# http://docs.travis-ci.com/user/migrating-from-legacy/
sudo: false
29
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/LICENSE
generated
vendored
@@ -1,29 +0,0 @@
Copyright 2012 Richard Crowley. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following
   disclaimer in the documentation and/or other materials provided
   with the distribution.

THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.

The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of Richard Crowley.
126
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/README.md
generated
vendored
@@ -1,126 +0,0 @@
go-metrics
==========



Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.

Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.

Usage
-----

Create and update metrics:

```go
c := metrics.NewCounter()
metrics.Register("foo", c)
c.Inc(47)

g := metrics.NewGauge()
metrics.Register("bar", g)
g.Update(47)

s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
h := metrics.NewHistogram(s)
metrics.Register("baz", h)
h.Update(47)

m := metrics.NewMeter()
metrics.Register("quux", m)
m.Mark(47)

t := metrics.NewTimer()
metrics.Register("bang", t)
t.Time(func() {})
t.Update(47)
```

Periodically log every metric in human-readable form to standard error:

```go
go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
```

Periodically log every metric in slightly-more-parseable form to syslog:

```go
w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
```

Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):

```go

import "github.com/cyberdelia/go-metrics-graphite"

addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
```

Periodically emit every metric into InfluxDB:

**NOTE:** this has been pulled out of the library due to constant fluctuations
in the InfluxDB API. In fact, all client libraries are on their way out. see
issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.

```go
import "github.com/rcrowley/go-metrics/influxdb"

go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
    Host:     "127.0.0.1:8086",
    Database: "metrics",
    Username: "test",
    Password: "test",
})
```

Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):

**Note**: the client included with this repository under the `librato` package
has been deprecated and moved to the repository linked above.

```go
import "github.com/mihasya/go-metrics-librato"

go librato.Librato(metrics.DefaultRegistry,
    10e9,                  // interval
    "example@example.com", // account owner email address
    "token",               // Librato API token
    "hostname",            // source
    []float64{0.95},       // percentiles to send
    time.Millisecond,      // time unit
)
```

Periodically emit every metric to StatHat:

```go
import "github.com/rcrowley/go-metrics/stathat"

go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
```

Installation
------------

```sh
go get github.com/rcrowley/go-metrics
```

StatHat support additionally requires their Go client:

```sh
go get github.com/stathat/go
```

Publishing Metrics
------------------

Clients are available for the following destinations:

* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato)
* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite)
* InfluxDB - [https://github.com/vrischmann/go-metrics-influxdb](https://github.com/vrischmann/go-metrics-influxdb)
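The README registers metrics and starts exporters in separate snippets. A minimal end-to-end sketch combining them, using only the `NewCounter`, `Register`, and `Log` calls shown above; the interval is written as an explicit `time.Duration` instead of the README's `60e9` shorthand:

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// Register a counter in the default registry and bump it.
	c := metrics.NewCounter()
	metrics.Register("requests", c)

	// Dump every registered metric to stderr once a minute, as in the README.
	go metrics.Log(metrics.DefaultRegistry, 60*time.Second,
		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))

	for i := 0; i < 10; i++ {
		c.Inc(1)
		time.Sleep(time.Second)
	}
}
```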
@@ -1,20 +0,0 @@
package main

import (
	"fmt"
	"github.com/rcrowley/go-metrics"
	"time"
)

func main() {
	r := metrics.NewRegistry()
	for i := 0; i < 10000; i++ {
		r.Register(fmt.Sprintf("counter-%d", i), metrics.NewCounter())
		r.Register(fmt.Sprintf("gauge-%d", i), metrics.NewGauge())
		r.Register(fmt.Sprintf("gaugefloat64-%d", i), metrics.NewGaugeFloat64())
		r.Register(fmt.Sprintf("histogram-uniform-%d", i), metrics.NewHistogram(metrics.NewUniformSample(1028)))
		r.Register(fmt.Sprintf("histogram-exp-%d", i), metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015)))
		r.Register(fmt.Sprintf("meter-%d", i), metrics.NewMeter())
	}
	time.Sleep(600e9)
}
@@ -1,154 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"github.com/rcrowley/go-metrics"
|
|
||||||
// "github.com/rcrowley/go-metrics/stathat"
|
|
||||||
"log"
|
|
||||||
"math/rand"
|
|
||||||
"os"
|
|
||||||
// "syslog"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const fanout = 10
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
|
|
||||||
r := metrics.NewRegistry()
|
|
||||||
|
|
||||||
c := metrics.NewCounter()
|
|
||||||
r.Register("foo", c)
|
|
||||||
for i := 0; i < fanout; i++ {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
c.Dec(19)
|
|
||||||
time.Sleep(300e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
c.Inc(47)
|
|
||||||
time.Sleep(400e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
g := metrics.NewGauge()
|
|
||||||
r.Register("bar", g)
|
|
||||||
for i := 0; i < fanout; i++ {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
g.Update(19)
|
|
||||||
time.Sleep(300e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
g.Update(47)
|
|
||||||
time.Sleep(400e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
gf := metrics.NewGaugeFloat64()
|
|
||||||
r.Register("barfloat64", gf)
|
|
||||||
for i := 0; i < fanout; i++ {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
g.Update(19.0)
|
|
||||||
time.Sleep(300e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
g.Update(47.0)
|
|
||||||
time.Sleep(400e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
|
|
||||||
if 0 < rand.Intn(2) {
|
|
||||||
h.Healthy()
|
|
||||||
} else {
|
|
||||||
h.Unhealthy(errors.New("baz"))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
r.Register("baz", hc)
|
|
||||||
|
|
||||||
s := metrics.NewExpDecaySample(1028, 0.015)
|
|
||||||
//s := metrics.NewUniformSample(1028)
|
|
||||||
h := metrics.NewHistogram(s)
|
|
||||||
r.Register("bang", h)
|
|
||||||
for i := 0; i < fanout; i++ {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
h.Update(19)
|
|
||||||
time.Sleep(300e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
h.Update(47)
|
|
||||||
time.Sleep(400e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
m := metrics.NewMeter()
|
|
||||||
r.Register("quux", m)
|
|
||||||
for i := 0; i < fanout; i++ {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
m.Mark(19)
|
|
||||||
time.Sleep(300e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
m.Mark(47)
|
|
||||||
time.Sleep(400e6)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
t := metrics.NewTimer()
|
|
||||||
r.Register("hooah", t)
|
|
||||||
for i := 0; i < fanout; i++ {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
t.Time(func() { time.Sleep(300e6) })
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
t.Time(func() { time.Sleep(400e6) })
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
metrics.RegisterDebugGCStats(r)
|
|
||||||
go metrics.CaptureDebugGCStats(r, 5e9)
|
|
||||||
|
|
||||||
metrics.RegisterRuntimeMemStats(r)
|
|
||||||
go metrics.CaptureRuntimeMemStats(r, 5e9)
|
|
||||||
|
|
||||||
metrics.Log(r, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
|
|
||||||
|
|
||||||
/*
|
|
||||||
w, err := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
|
|
||||||
if nil != err { log.Fatalln(err) }
|
|
||||||
metrics.Syslog(r, 60e9, w)
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
|
|
||||||
metrics.Graphite(r, 10e9, "metrics", addr)
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
stathat.Stathat(r, 10e9, "example@example.com")
|
|
||||||
*/
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -1,22 +0,0 @@
package main

import (
	"log"
	"net"
)

func main() {
	addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
	l, err := net.ListenTCP("tcp", addr)
	if nil != err {
		log.Fatalln(err)
	}
	log.Println("listening", l.Addr())
	for {
		c, err := l.AcceptTCP()
		if nil != err {
			log.Fatalln(err)
		}
		log.Println("accepted", c.RemoteAddr())
	}
}
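never-read is a throwaway TCP sink: it accepts connections on 127.0.0.1:2003 and never reads from them, so the Graphite exporter has somewhere to connect during local testing. A hedged sketch of the counterpart process, using the deprecated in-repo `metrics.Graphite` exporter whose signature appears in graphite.go further down in this diff:

```go
package main

import (
	"net"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// Put something in the default registry so there is data to flush.
	metrics.GetOrRegisterCounter("pings", nil).Inc(1)

	// Point the exporter at the never-read listener shown above.
	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
	if err != nil {
		panic(err)
	}
	// Blocks forever, flushing the registry every 10 seconds.
	metrics.Graphite(metrics.DefaultRegistry, 10*time.Second, "metrics", addr)
}
```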
112
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/counter.go
generated
vendored
@@ -1,112 +0,0 @@
package metrics

import "sync/atomic"

// Counters hold an int64 value that can be incremented and decremented.
type Counter interface {
	Clear()
	Count() int64
	Dec(int64)
	Inc(int64)
	Snapshot() Counter
}

// GetOrRegisterCounter returns an existing Counter or constructs and registers
// a new StandardCounter.
func GetOrRegisterCounter(name string, r Registry) Counter {
	if nil == r {
		r = DefaultRegistry
	}
	return r.GetOrRegister(name, NewCounter).(Counter)
}

// NewCounter constructs a new StandardCounter.
func NewCounter() Counter {
	if UseNilMetrics {
		return NilCounter{}
	}
	return &StandardCounter{0}
}

// NewRegisteredCounter constructs and registers a new StandardCounter.
func NewRegisteredCounter(name string, r Registry) Counter {
	c := NewCounter()
	if nil == r {
		r = DefaultRegistry
	}
	r.Register(name, c)
	return c
}

// CounterSnapshot is a read-only copy of another Counter.
type CounterSnapshot int64

// Clear panics.
func (CounterSnapshot) Clear() {
	panic("Clear called on a CounterSnapshot")
}

// Count returns the count at the time the snapshot was taken.
func (c CounterSnapshot) Count() int64 { return int64(c) }

// Dec panics.
func (CounterSnapshot) Dec(int64) {
	panic("Dec called on a CounterSnapshot")
}

// Inc panics.
func (CounterSnapshot) Inc(int64) {
	panic("Inc called on a CounterSnapshot")
}

// Snapshot returns the snapshot.
func (c CounterSnapshot) Snapshot() Counter { return c }

// NilCounter is a no-op Counter.
type NilCounter struct{}

// Clear is a no-op.
func (NilCounter) Clear() {}

// Count is a no-op.
func (NilCounter) Count() int64 { return 0 }

// Dec is a no-op.
func (NilCounter) Dec(i int64) {}

// Inc is a no-op.
func (NilCounter) Inc(i int64) {}

// Snapshot is a no-op.
func (NilCounter) Snapshot() Counter { return NilCounter{} }

// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
type StandardCounter struct {
	count int64
}

// Clear sets the counter to zero.
func (c *StandardCounter) Clear() {
	atomic.StoreInt64(&c.count, 0)
}

// Count returns the current count.
func (c *StandardCounter) Count() int64 {
	return atomic.LoadInt64(&c.count)
}

// Dec decrements the counter by the given amount.
func (c *StandardCounter) Dec(i int64) {
	atomic.AddInt64(&c.count, -i)
}

// Inc increments the counter by the given amount.
func (c *StandardCounter) Inc(i int64) {
	atomic.AddInt64(&c.count, i)
}

// Snapshot returns a read-only copy of the counter.
func (c *StandardCounter) Snapshot() Counter {
	return CounterSnapshot(c.Count())
}
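A short usage sketch for the Counter API defined above, using only identifiers from counter.go and the registry constructor that appears in its tests:

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	// GetOrRegisterCounter lazily constructs and registers a StandardCounter.
	c := metrics.GetOrRegisterCounter("jobs.completed", r)
	c.Inc(3)
	c.Dec(1)

	// Snapshot returns a read-only copy; calling Inc/Dec/Clear on it panics.
	snap := c.Snapshot()
	fmt.Println("count:", snap.Count()) // count: 2
}
```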
77
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/counter_test.go
generated
vendored
@@ -1,77 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
func BenchmarkCounter(b *testing.B) {
|
|
||||||
c := NewCounter()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
c.Inc(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCounterClear(t *testing.T) {
|
|
||||||
c := NewCounter()
|
|
||||||
c.Inc(1)
|
|
||||||
c.Clear()
|
|
||||||
if count := c.Count(); 0 != count {
|
|
||||||
t.Errorf("c.Count(): 0 != %v\n", count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCounterDec1(t *testing.T) {
|
|
||||||
c := NewCounter()
|
|
||||||
c.Dec(1)
|
|
||||||
if count := c.Count(); -1 != count {
|
|
||||||
t.Errorf("c.Count(): -1 != %v\n", count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCounterDec2(t *testing.T) {
|
|
||||||
c := NewCounter()
|
|
||||||
c.Dec(2)
|
|
||||||
if count := c.Count(); -2 != count {
|
|
||||||
t.Errorf("c.Count(): -2 != %v\n", count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCounterInc1(t *testing.T) {
|
|
||||||
c := NewCounter()
|
|
||||||
c.Inc(1)
|
|
||||||
if count := c.Count(); 1 != count {
|
|
||||||
t.Errorf("c.Count(): 1 != %v\n", count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCounterInc2(t *testing.T) {
|
|
||||||
c := NewCounter()
|
|
||||||
c.Inc(2)
|
|
||||||
if count := c.Count(); 2 != count {
|
|
||||||
t.Errorf("c.Count(): 2 != %v\n", count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCounterSnapshot(t *testing.T) {
|
|
||||||
c := NewCounter()
|
|
||||||
c.Inc(1)
|
|
||||||
snapshot := c.Snapshot()
|
|
||||||
c.Inc(1)
|
|
||||||
if count := snapshot.Count(); 1 != count {
|
|
||||||
t.Errorf("c.Count(): 1 != %v\n", count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCounterZero(t *testing.T) {
|
|
||||||
c := NewCounter()
|
|
||||||
if count := c.Count(); 0 != count {
|
|
||||||
t.Errorf("c.Count(): 0 != %v\n", count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetOrRegisterCounter(t *testing.T) {
|
|
||||||
r := NewRegistry()
|
|
||||||
NewRegisteredCounter("foo", r).Inc(47)
|
|
||||||
if c := GetOrRegisterCounter("foo", r); 47 != c.Count() {
|
|
||||||
t.Fatal(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
76
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/debug.go
generated
vendored
@@ -1,76 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime/debug"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
debugMetrics struct {
|
|
||||||
GCStats struct {
|
|
||||||
LastGC Gauge
|
|
||||||
NumGC Gauge
|
|
||||||
Pause Histogram
|
|
||||||
//PauseQuantiles Histogram
|
|
||||||
PauseTotal Gauge
|
|
||||||
}
|
|
||||||
ReadGCStats Timer
|
|
||||||
}
|
|
||||||
gcStats debug.GCStats
|
|
||||||
)
|
|
||||||
|
|
||||||
// Capture new values for the Go garbage collector statistics exported in
|
|
||||||
// debug.GCStats. This is designed to be called as a goroutine.
|
|
||||||
func CaptureDebugGCStats(r Registry, d time.Duration) {
|
|
||||||
for _ = range time.Tick(d) {
|
|
||||||
CaptureDebugGCStatsOnce(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Capture new values for the Go garbage collector statistics exported in
|
|
||||||
// debug.GCStats. This is designed to be called in a background goroutine.
|
|
||||||
// Giving a registry which has not been given to RegisterDebugGCStats will
|
|
||||||
// panic.
|
|
||||||
//
|
|
||||||
// Be careful (but much less so) with this because debug.ReadGCStats calls
|
|
||||||
// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
|
|
||||||
// operation, isn't something you want to be doing all the time.
|
|
||||||
func CaptureDebugGCStatsOnce(r Registry) {
|
|
||||||
lastGC := gcStats.LastGC
|
|
||||||
t := time.Now()
|
|
||||||
debug.ReadGCStats(&gcStats)
|
|
||||||
debugMetrics.ReadGCStats.UpdateSince(t)
|
|
||||||
|
|
||||||
debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
|
|
||||||
debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
|
|
||||||
if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
|
|
||||||
debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
|
|
||||||
}
|
|
||||||
//debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
|
|
||||||
debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register metrics for the Go garbage collector statistics exported in
|
|
||||||
// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
|
|
||||||
// i.e. debug.GCStats.PauseTotal.
|
|
||||||
func RegisterDebugGCStats(r Registry) {
|
|
||||||
debugMetrics.GCStats.LastGC = NewGauge()
|
|
||||||
debugMetrics.GCStats.NumGC = NewGauge()
|
|
||||||
debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
|
|
||||||
//debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
|
|
||||||
debugMetrics.GCStats.PauseTotal = NewGauge()
|
|
||||||
debugMetrics.ReadGCStats = NewTimer()
|
|
||||||
|
|
||||||
r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
|
|
||||||
r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
|
|
||||||
r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
|
|
||||||
//r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
|
|
||||||
r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
|
|
||||||
r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Allocate an initial slice for gcStats.Pause to avoid allocations during
|
|
||||||
// normal operation.
|
|
||||||
func init() {
|
|
||||||
gcStats.Pause = make([]time.Duration, 11)
|
|
||||||
}
|
|
||||||
48
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/debug_test.go
generated
vendored
@@ -1,48 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
"runtime/debug"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func BenchmarkDebugGCStats(b *testing.B) {
|
|
||||||
r := NewRegistry()
|
|
||||||
RegisterDebugGCStats(r)
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
CaptureDebugGCStatsOnce(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDebugGCStatsBlocking(t *testing.T) {
|
|
||||||
if g := runtime.GOMAXPROCS(0); g < 2 {
|
|
||||||
t.Skipf("skipping TestDebugGCMemStatsBlocking with GOMAXPROCS=%d\n", g)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ch := make(chan int)
|
|
||||||
go testDebugGCStatsBlocking(ch)
|
|
||||||
var gcStats debug.GCStats
|
|
||||||
t0 := time.Now()
|
|
||||||
debug.ReadGCStats(&gcStats)
|
|
||||||
t1 := time.Now()
|
|
||||||
t.Log("i++ during debug.ReadGCStats:", <-ch)
|
|
||||||
go testDebugGCStatsBlocking(ch)
|
|
||||||
d := t1.Sub(t0)
|
|
||||||
t.Log(d)
|
|
||||||
time.Sleep(d)
|
|
||||||
t.Log("i++ during time.Sleep:", <-ch)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testDebugGCStatsBlocking(ch chan int) {
|
|
||||||
i := 0
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case ch <- i:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
118
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/ewma.go
generated
vendored
@@ -1,118 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EWMAs continuously calculate an exponentially-weighted moving average
|
|
||||||
// based on an outside source of clock ticks.
|
|
||||||
type EWMA interface {
|
|
||||||
Rate() float64
|
|
||||||
Snapshot() EWMA
|
|
||||||
Tick()
|
|
||||||
Update(int64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEWMA constructs a new EWMA with the given alpha.
|
|
||||||
func NewEWMA(alpha float64) EWMA {
|
|
||||||
if UseNilMetrics {
|
|
||||||
return NilEWMA{}
|
|
||||||
}
|
|
||||||
return &StandardEWMA{alpha: alpha}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEWMA1 constructs a new EWMA for a one-minute moving average.
|
|
||||||
func NewEWMA1() EWMA {
|
|
||||||
return NewEWMA(1 - math.Exp(-5.0/60.0/1))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEWMA5 constructs a new EWMA for a five-minute moving average.
|
|
||||||
func NewEWMA5() EWMA {
|
|
||||||
return NewEWMA(1 - math.Exp(-5.0/60.0/5))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
|
|
||||||
func NewEWMA15() EWMA {
|
|
||||||
return NewEWMA(1 - math.Exp(-5.0/60.0/15))
|
|
||||||
}
|
|
||||||
|
|
||||||
// EWMASnapshot is a read-only copy of another EWMA.
|
|
||||||
type EWMASnapshot float64
|
|
||||||
|
|
||||||
// Rate returns the rate of events per second at the time the snapshot was
|
|
||||||
// taken.
|
|
||||||
func (a EWMASnapshot) Rate() float64 { return float64(a) }
|
|
||||||
|
|
||||||
// Snapshot returns the snapshot.
|
|
||||||
func (a EWMASnapshot) Snapshot() EWMA { return a }
|
|
||||||
|
|
||||||
// Tick panics.
|
|
||||||
func (EWMASnapshot) Tick() {
|
|
||||||
panic("Tick called on an EWMASnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update panics.
|
|
||||||
func (EWMASnapshot) Update(int64) {
|
|
||||||
panic("Update called on an EWMASnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// NilEWMA is a no-op EWMA.
|
|
||||||
type NilEWMA struct{}
|
|
||||||
|
|
||||||
// Rate is a no-op.
|
|
||||||
func (NilEWMA) Rate() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Snapshot is a no-op.
|
|
||||||
func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
|
|
||||||
|
|
||||||
// Tick is a no-op.
|
|
||||||
func (NilEWMA) Tick() {}
|
|
||||||
|
|
||||||
// Update is a no-op.
|
|
||||||
func (NilEWMA) Update(n int64) {}
|
|
||||||
|
|
||||||
// StandardEWMA is the standard implementation of an EWMA and tracks the number
|
|
||||||
// of uncounted events and processes them on each tick. It uses the
|
|
||||||
// sync/atomic package to manage uncounted events.
|
|
||||||
type StandardEWMA struct {
|
|
||||||
uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
|
|
||||||
alpha float64
|
|
||||||
rate float64
|
|
||||||
init bool
|
|
||||||
mutex sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate returns the moving average rate of events per second.
|
|
||||||
func (a *StandardEWMA) Rate() float64 {
|
|
||||||
a.mutex.Lock()
|
|
||||||
defer a.mutex.Unlock()
|
|
||||||
return a.rate * float64(1e9)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshot returns a read-only copy of the EWMA.
|
|
||||||
func (a *StandardEWMA) Snapshot() EWMA {
|
|
||||||
return EWMASnapshot(a.Rate())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tick ticks the clock to update the moving average. It assumes it is called
|
|
||||||
// every five seconds.
|
|
||||||
func (a *StandardEWMA) Tick() {
|
|
||||||
count := atomic.LoadInt64(&a.uncounted)
|
|
||||||
atomic.AddInt64(&a.uncounted, -count)
|
|
||||||
instantRate := float64(count) / float64(5e9)
|
|
||||||
a.mutex.Lock()
|
|
||||||
defer a.mutex.Unlock()
|
|
||||||
if a.init {
|
|
||||||
a.rate += a.alpha * (instantRate - a.rate)
|
|
||||||
} else {
|
|
||||||
a.init = true
|
|
||||||
a.rate = instantRate
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update adds n uncounted events.
|
|
||||||
func (a *StandardEWMA) Update(n int64) {
|
|
||||||
atomic.AddInt64(&a.uncounted, n)
|
|
||||||
}
|
|
||||||
225
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/ewma_test.go
generated
vendored
@@ -1,225 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
func BenchmarkEWMA(b *testing.B) {
|
|
||||||
a := NewEWMA1()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
a.Update(1)
|
|
||||||
a.Tick()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEWMA1(t *testing.T) {
|
|
||||||
a := NewEWMA1()
|
|
||||||
a.Update(3)
|
|
||||||
a.Tick()
|
|
||||||
if rate := a.Rate(); 0.6 != rate {
|
|
||||||
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.22072766470286553 != rate {
|
|
||||||
t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.08120116994196772 != rate {
|
|
||||||
t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.029872241020718428 != rate {
|
|
||||||
t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.01098938333324054 != rate {
|
|
||||||
t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.004042768199451294 != rate {
|
|
||||||
t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.0014872513059998212 != rate {
|
|
||||||
t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.0005471291793327122 != rate {
|
|
||||||
t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.00020127757674150815 != rate {
|
|
||||||
t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 7.404588245200814e-05 != rate {
|
|
||||||
t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 2.7239957857491083e-05 != rate {
|
|
||||||
t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 1.0021020474147462e-05 != rate {
|
|
||||||
t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 3.6865274119969525e-06 != rate {
|
|
||||||
t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 1.3561976441886433e-06 != rate {
|
|
||||||
t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 4.989172314621449e-07 != rate {
|
|
||||||
t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 1.8354139230109722e-07 != rate {
|
|
||||||
t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEWMA5(t *testing.T) {
|
|
||||||
a := NewEWMA5()
|
|
||||||
a.Update(3)
|
|
||||||
a.Tick()
|
|
||||||
if rate := a.Rate(); 0.6 != rate {
|
|
||||||
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.49123845184678905 != rate {
|
|
||||||
t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.4021920276213837 != rate {
|
|
||||||
t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.32928698165641596 != rate {
|
|
||||||
t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.269597378470333 != rate {
|
|
||||||
t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.2207276647028654 != rate {
|
|
||||||
t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.18071652714732128 != rate {
|
|
||||||
t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.14795817836496392 != rate {
|
|
||||||
t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.12113791079679326 != rate {
|
|
||||||
t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.09917933293295193 != rate {
|
|
||||||
t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.08120116994196763 != rate {
|
|
||||||
t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.06648189501740036 != rate {
|
|
||||||
t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.05443077197364752 != rate {
|
|
||||||
t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.04456414692860035 != rate {
|
|
||||||
t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.03648603757513079 != rate {
|
|
||||||
t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.0298722410207183831020718428 != rate {
|
|
||||||
t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEWMA15(t *testing.T) {
|
|
||||||
a := NewEWMA15()
|
|
||||||
a.Update(3)
|
|
||||||
a.Tick()
|
|
||||||
if rate := a.Rate(); 0.6 != rate {
|
|
||||||
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.5613041910189706 != rate {
|
|
||||||
t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.5251039914257684 != rate {
|
|
||||||
t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.4912384518467888184678905 != rate {
|
|
||||||
t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.459557003018789 != rate {
|
|
||||||
t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.4299187863442732 != rate {
|
|
||||||
t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.4021920276213831 != rate {
|
|
||||||
t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.37625345116383313 != rate {
|
|
||||||
t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.3519877317060185 != rate {
|
|
||||||
t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.3292869816564153165641596 != rate {
|
|
||||||
t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.3080502714195546 != rate {
|
|
||||||
t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.2881831806538789 != rate {
|
|
||||||
t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.26959737847033216 != rate {
|
|
||||||
t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.2522102307052083 != rate {
|
|
||||||
t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.23594443252115815 != rate {
|
|
||||||
t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate)
|
|
||||||
}
|
|
||||||
elapseMinute(a)
|
|
||||||
if rate := a.Rate(); 0.2207276647028646247028654470286553 != rate {
|
|
||||||
t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func elapseMinute(a EWMA) {
|
|
||||||
for i := 0; i < 12; i++ {
|
|
||||||
a.Tick()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
84
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/gauge.go
generated
vendored
@@ -1,84 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import "sync/atomic"
|
|
||||||
|
|
||||||
// Gauges hold an int64 value that can be set arbitrarily.
|
|
||||||
type Gauge interface {
|
|
||||||
Snapshot() Gauge
|
|
||||||
Update(int64)
|
|
||||||
Value() int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
|
|
||||||
// new StandardGauge.
|
|
||||||
func GetOrRegisterGauge(name string, r Registry) Gauge {
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
return r.GetOrRegister(name, NewGauge).(Gauge)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGauge constructs a new StandardGauge.
|
|
||||||
func NewGauge() Gauge {
|
|
||||||
if UseNilMetrics {
|
|
||||||
return NilGauge{}
|
|
||||||
}
|
|
||||||
return &StandardGauge{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRegisteredGauge constructs and registers a new StandardGauge.
|
|
||||||
func NewRegisteredGauge(name string, r Registry) Gauge {
|
|
||||||
c := NewGauge()
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
r.Register(name, c)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// GaugeSnapshot is a read-only copy of another Gauge.
|
|
||||||
type GaugeSnapshot int64
|
|
||||||
|
|
||||||
// Snapshot returns the snapshot.
|
|
||||||
func (g GaugeSnapshot) Snapshot() Gauge { return g }
|
|
||||||
|
|
||||||
// Update panics.
|
|
||||||
func (GaugeSnapshot) Update(int64) {
|
|
||||||
panic("Update called on a GaugeSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value at the time the snapshot was taken.
|
|
||||||
func (g GaugeSnapshot) Value() int64 { return int64(g) }
|
|
||||||
|
|
||||||
// NilGauge is a no-op Gauge.
|
|
||||||
type NilGauge struct{}
|
|
||||||
|
|
||||||
// Snapshot is a no-op.
|
|
||||||
func (NilGauge) Snapshot() Gauge { return NilGauge{} }
|
|
||||||
|
|
||||||
// Update is a no-op.
|
|
||||||
func (NilGauge) Update(v int64) {}
|
|
||||||
|
|
||||||
// Value is a no-op.
|
|
||||||
func (NilGauge) Value() int64 { return 0 }
|
|
||||||
|
|
||||||
// StandardGauge is the standard implementation of a Gauge and uses the
|
|
||||||
// sync/atomic package to manage a single int64 value.
|
|
||||||
type StandardGauge struct {
|
|
||||||
value int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshot returns a read-only copy of the gauge.
|
|
||||||
func (g *StandardGauge) Snapshot() Gauge {
|
|
||||||
return GaugeSnapshot(g.Value())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update updates the gauge's value.
|
|
||||||
func (g *StandardGauge) Update(v int64) {
|
|
||||||
atomic.StoreInt64(&g.value, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the gauge's current value.
|
|
||||||
func (g *StandardGauge) Value() int64 {
|
|
||||||
return atomic.LoadInt64(&g.value)
|
|
||||||
}
|
|
||||||
91
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/gauge_float64.go
generated
vendored
@@ -1,91 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import "sync"
|
|
||||||
|
|
||||||
// GaugeFloat64s hold a float64 value that can be set arbitrarily.
|
|
||||||
type GaugeFloat64 interface {
|
|
||||||
Snapshot() GaugeFloat64
|
|
||||||
Update(float64)
|
|
||||||
Value() float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
|
|
||||||
// new StandardGaugeFloat64.
|
|
||||||
func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
|
|
||||||
func NewGaugeFloat64() GaugeFloat64 {
|
|
||||||
if UseNilMetrics {
|
|
||||||
return NilGaugeFloat64{}
|
|
||||||
}
|
|
||||||
return &StandardGaugeFloat64{
|
|
||||||
value: 0.0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
|
|
||||||
func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
|
|
||||||
c := NewGaugeFloat64()
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
r.Register(name, c)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
|
|
||||||
type GaugeFloat64Snapshot float64
|
|
||||||
|
|
||||||
// Snapshot returns the snapshot.
|
|
||||||
func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
|
|
||||||
|
|
||||||
// Update panics.
|
|
||||||
func (GaugeFloat64Snapshot) Update(float64) {
|
|
||||||
panic("Update called on a GaugeFloat64Snapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value at the time the snapshot was taken.
|
|
||||||
func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
|
|
||||||
|
|
||||||
// NilGauge is a no-op Gauge.
|
|
||||||
type NilGaugeFloat64 struct{}
|
|
||||||
|
|
||||||
// Snapshot is a no-op.
|
|
||||||
func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
|
|
||||||
|
|
||||||
// Update is a no-op.
|
|
||||||
func (NilGaugeFloat64) Update(v float64) {}
|
|
||||||
|
|
||||||
// Value is a no-op.
|
|
||||||
func (NilGaugeFloat64) Value() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
|
|
||||||
// sync.Mutex to manage a single float64 value.
|
|
||||||
type StandardGaugeFloat64 struct {
|
|
||||||
mutex sync.Mutex
|
|
||||||
value float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshot returns a read-only copy of the gauge.
|
|
||||||
func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
|
|
||||||
return GaugeFloat64Snapshot(g.Value())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update updates the gauge's value.
|
|
||||||
func (g *StandardGaugeFloat64) Update(v float64) {
|
|
||||||
g.mutex.Lock()
|
|
||||||
defer g.mutex.Unlock()
|
|
||||||
g.value = v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the gauge's current value.
|
|
||||||
func (g *StandardGaugeFloat64) Value() float64 {
|
|
||||||
g.mutex.Lock()
|
|
||||||
defer g.mutex.Unlock()
|
|
||||||
return g.value
|
|
||||||
}
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
func BenchmarkGuageFloat64(b *testing.B) {
|
|
||||||
g := NewGaugeFloat64()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
g.Update(float64(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGaugeFloat64(t *testing.T) {
|
|
||||||
g := NewGaugeFloat64()
|
|
||||||
g.Update(float64(47.0))
|
|
||||||
if v := g.Value(); float64(47.0) != v {
|
|
||||||
t.Errorf("g.Value(): 47.0 != %v\n", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGaugeFloat64Snapshot(t *testing.T) {
|
|
||||||
g := NewGaugeFloat64()
|
|
||||||
g.Update(float64(47.0))
|
|
||||||
snapshot := g.Snapshot()
|
|
||||||
g.Update(float64(0))
|
|
||||||
if v := snapshot.Value(); float64(47.0) != v {
|
|
||||||
t.Errorf("g.Value(): 47.0 != %v\n", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetOrRegisterGaugeFloat64(t *testing.T) {
|
|
||||||
r := NewRegistry()
|
|
||||||
NewRegisteredGaugeFloat64("foo", r).Update(float64(47.0))
|
|
||||||
t.Logf("registry: %v", r)
|
|
||||||
if g := GetOrRegisterGaugeFloat64("foo", r); float64(47.0) != g.Value() {
|
|
||||||
t.Fatal(g)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
37
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/gauge_test.go
generated
vendored
@@ -1,37 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
func BenchmarkGuage(b *testing.B) {
|
|
||||||
g := NewGauge()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
g.Update(int64(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGauge(t *testing.T) {
|
|
||||||
g := NewGauge()
|
|
||||||
g.Update(int64(47))
|
|
||||||
if v := g.Value(); 47 != v {
|
|
||||||
t.Errorf("g.Value(): 47 != %v\n", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGaugeSnapshot(t *testing.T) {
|
|
||||||
g := NewGauge()
|
|
||||||
g.Update(int64(47))
|
|
||||||
snapshot := g.Snapshot()
|
|
||||||
g.Update(int64(0))
|
|
||||||
if v := snapshot.Value(); 47 != v {
|
|
||||||
t.Errorf("g.Value(): 47 != %v\n", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetOrRegisterGauge(t *testing.T) {
|
|
||||||
r := NewRegistry()
|
|
||||||
NewRegisteredGauge("foo", r).Update(47)
|
|
||||||
if g := GetOrRegisterGauge("foo", r); 47 != g.Value() {
|
|
||||||
t.Fatal(g)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
113
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/graphite.go
generated
vendored
@@ -1,113 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GraphiteConfig provides a container with configuration parameters for
|
|
||||||
// the Graphite exporter
|
|
||||||
type GraphiteConfig struct {
|
|
||||||
Addr *net.TCPAddr // Network address to connect to
|
|
||||||
Registry Registry // Registry to be exported
|
|
||||||
FlushInterval time.Duration // Flush interval
|
|
||||||
DurationUnit time.Duration // Time conversion unit for durations
|
|
||||||
Prefix string // Prefix to be prepended to metric names
|
|
||||||
Percentiles []float64 // Percentiles to export from timers and histograms
|
|
||||||
}
|
|
||||||
|
|
||||||
// Graphite is a blocking exporter function which reports metrics in r
|
|
||||||
// to a graphite server located at addr, flushing them every d duration
|
|
||||||
// and prepending metric names with prefix.
|
|
||||||
func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
|
|
||||||
GraphiteWithConfig(GraphiteConfig{
|
|
||||||
Addr: addr,
|
|
||||||
Registry: r,
|
|
||||||
FlushInterval: d,
|
|
||||||
DurationUnit: time.Nanosecond,
|
|
||||||
Prefix: prefix,
|
|
||||||
Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GraphiteWithConfig is a blocking exporter function just like Graphite,
|
|
||||||
// but it takes a GraphiteConfig instead.
|
|
||||||
func GraphiteWithConfig(c GraphiteConfig) {
|
|
||||||
log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
|
|
||||||
for _ = range time.Tick(c.FlushInterval) {
|
|
||||||
if err := graphite(&c); nil != err {
|
|
||||||
log.Println(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GraphiteOnce performs a single submission to Graphite, returning a
|
|
||||||
// non-nil error on failed connections. This can be used in a loop
|
|
||||||
// similar to GraphiteWithConfig for custom error handling.
|
|
||||||
func GraphiteOnce(c GraphiteConfig) error {
|
|
||||||
log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
|
|
||||||
return graphite(&c)
|
|
||||||
}
|
|
||||||
|
|
||||||
func graphite(c *GraphiteConfig) error {
|
|
||||||
now := time.Now().Unix()
|
|
||||||
du := float64(c.DurationUnit)
|
|
||||||
conn, err := net.DialTCP("tcp", nil, c.Addr)
|
|
||||||
if nil != err {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer conn.Close()
|
|
||||||
w := bufio.NewWriter(conn)
|
|
||||||
c.Registry.Each(func(name string, i interface{}) {
|
|
||||||
switch metric := i.(type) {
|
|
||||||
case Counter:
|
|
||||||
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
|
|
||||||
case Gauge:
|
|
||||||
fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
|
|
||||||
case GaugeFloat64:
|
|
||||||
fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
|
|
||||||
case Histogram:
|
|
||||||
h := metric.Snapshot()
|
|
||||||
ps := h.Percentiles(c.Percentiles)
|
|
||||||
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
|
|
||||||
for psIdx, psKey := range c.Percentiles {
|
|
||||||
key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
|
|
||||||
fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
|
|
||||||
}
|
|
||||||
case Meter:
|
|
||||||
m := metric.Snapshot()
|
|
||||||
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
|
|
||||||
case Timer:
|
|
||||||
t := metric.Snapshot()
|
|
||||||
ps := t.Percentiles(c.Percentiles)
|
|
||||||
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
|
|
||||||
for psIdx, psKey := range c.Percentiles {
|
|
||||||
key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
|
|
||||||
fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
|
|
||||||
fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
|
|
||||||
}
|
|
||||||
w.Flush()
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
22 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/graphite_test.go generated vendored
@@ -1,22 +0,0 @@
package metrics

import (
	"net"
	"time"
)

func ExampleGraphite() {
	addr, _ := net.ResolveTCPAddr("net", ":2003")
	go Graphite(DefaultRegistry, 1*time.Second, "some.prefix", addr)
}

func ExampleGraphiteWithConfig() {
	addr, _ := net.ResolveTCPAddr("net", ":2003")
	go GraphiteWithConfig(GraphiteConfig{
		Addr:          addr,
		Registry:      DefaultRegistry,
		FlushInterval: 1 * time.Second,
		DurationUnit:  time.Millisecond,
		Percentiles:   []float64{0.5, 0.75, 0.99, 0.999},
	})
}
61 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/healthcheck.go generated vendored
@@ -1,61 +0,0 @@
package metrics

// Healthchecks hold an error value describing an arbitrary up/down status.
type Healthcheck interface {
	Check()
	Error() error
	Healthy()
	Unhealthy(error)
}

// NewHealthcheck constructs a new Healthcheck which will use the given
// function to update its status.
func NewHealthcheck(f func(Healthcheck)) Healthcheck {
	if UseNilMetrics {
		return NilHealthcheck{}
	}
	return &StandardHealthcheck{nil, f}
}

// NilHealthcheck is a no-op.
type NilHealthcheck struct{}

// Check is a no-op.
func (NilHealthcheck) Check() {}

// Error is a no-op.
func (NilHealthcheck) Error() error { return nil }

// Healthy is a no-op.
func (NilHealthcheck) Healthy() {}

// Unhealthy is a no-op.
func (NilHealthcheck) Unhealthy(error) {}

// StandardHealthcheck is the standard implementation of a Healthcheck and
// stores the status and a function to call to update the status.
type StandardHealthcheck struct {
	err error
	f   func(Healthcheck)
}

// Check runs the healthcheck function to update the healthcheck's status.
func (h *StandardHealthcheck) Check() {
	h.f(h)
}

// Error returns the healthcheck's status, which will be nil if it is healthy.
func (h *StandardHealthcheck) Error() error {
	return h.err
}

// Healthy marks the healthcheck as healthy.
func (h *StandardHealthcheck) Healthy() {
	h.err = nil
}

// Unhealthy marks the healthcheck as unhealthy. The error is stored and
// may be retrieved by the Error method.
func (h *StandardHealthcheck) Unhealthy(err error) {
	h.err = err
}
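For context, a minimal sketch of driving the Healthcheck API removed above; the probe condition and error text are purely illustrative, not part of this diff:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	dbUp := false // illustrative stand-in for a real reachability probe
	hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
		if dbUp {
			h.Healthy()
		} else {
			h.Unhealthy(errors.New("database unreachable"))
		}
	})

	hc.Check()              // runs the probe function and stores the status
	fmt.Println(hc.Error()) // nil while healthy, the stored error otherwise
}
```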
202 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/histogram.go generated vendored
@@ -1,202 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
// Histograms calculate distribution statistics from a series of int64 values.
|
|
||||||
type Histogram interface {
|
|
||||||
Clear()
|
|
||||||
Count() int64
|
|
||||||
Max() int64
|
|
||||||
Mean() float64
|
|
||||||
Min() int64
|
|
||||||
Percentile(float64) float64
|
|
||||||
Percentiles([]float64) []float64
|
|
||||||
Sample() Sample
|
|
||||||
Snapshot() Histogram
|
|
||||||
StdDev() float64
|
|
||||||
Sum() int64
|
|
||||||
Update(int64)
|
|
||||||
Variance() float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetOrRegisterHistogram returns an existing Histogram or constructs and
|
|
||||||
// registers a new StandardHistogram.
|
|
||||||
func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHistogram constructs a new StandardHistogram from a Sample.
|
|
||||||
func NewHistogram(s Sample) Histogram {
|
|
||||||
if UseNilMetrics {
|
|
||||||
return NilHistogram{}
|
|
||||||
}
|
|
||||||
return &StandardHistogram{sample: s}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRegisteredHistogram constructs and registers a new StandardHistogram from
|
|
||||||
// a Sample.
|
|
||||||
func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
|
|
||||||
c := NewHistogram(s)
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
r.Register(name, c)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// HistogramSnapshot is a read-only copy of another Histogram.
|
|
||||||
type HistogramSnapshot struct {
|
|
||||||
sample *SampleSnapshot
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear panics.
|
|
||||||
func (*HistogramSnapshot) Clear() {
|
|
||||||
panic("Clear called on a HistogramSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the number of samples recorded at the time the snapshot was
|
|
||||||
// taken.
|
|
||||||
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
|
|
||||||
|
|
||||||
// Max returns the maximum value in the sample at the time the snapshot was
|
|
||||||
// taken.
|
|
||||||
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
|
|
||||||
|
|
||||||
// Mean returns the mean of the values in the sample at the time the snapshot
|
|
||||||
// was taken.
|
|
||||||
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
|
|
||||||
|
|
||||||
// Min returns the minimum value in the sample at the time the snapshot was
|
|
||||||
// taken.
|
|
||||||
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
|
|
||||||
|
|
||||||
// Percentile returns an arbitrary percentile of values in the sample at the
|
|
||||||
// time the snapshot was taken.
|
|
||||||
func (h *HistogramSnapshot) Percentile(p float64) float64 {
|
|
||||||
return h.sample.Percentile(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Percentiles returns a slice of arbitrary percentiles of values in the sample
|
|
||||||
// at the time the snapshot was taken.
|
|
||||||
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
|
|
||||||
return h.sample.Percentiles(ps)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sample returns the Sample underlying the histogram.
|
|
||||||
func (h *HistogramSnapshot) Sample() Sample { return h.sample }
|
|
||||||
|
|
||||||
// Snapshot returns the snapshot.
|
|
||||||
func (h *HistogramSnapshot) Snapshot() Histogram { return h }
|
|
||||||
|
|
||||||
// StdDev returns the standard deviation of the values in the sample at the
|
|
||||||
// time the snapshot was taken.
|
|
||||||
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
|
|
||||||
|
|
||||||
// Sum returns the sum in the sample at the time the snapshot was taken.
|
|
||||||
func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
|
|
||||||
|
|
||||||
// Update panics.
|
|
||||||
func (*HistogramSnapshot) Update(int64) {
|
|
||||||
panic("Update called on a HistogramSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Variance returns the variance of inputs at the time the snapshot was taken.
|
|
||||||
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
|
|
||||||
|
|
||||||
// NilHistogram is a no-op Histogram.
|
|
||||||
type NilHistogram struct{}
|
|
||||||
|
|
||||||
// Clear is a no-op.
|
|
||||||
func (NilHistogram) Clear() {}
|
|
||||||
|
|
||||||
// Count is a no-op.
|
|
||||||
func (NilHistogram) Count() int64 { return 0 }
|
|
||||||
|
|
||||||
// Max is a no-op.
|
|
||||||
func (NilHistogram) Max() int64 { return 0 }
|
|
||||||
|
|
||||||
// Mean is a no-op.
|
|
||||||
func (NilHistogram) Mean() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Min is a no-op.
|
|
||||||
func (NilHistogram) Min() int64 { return 0 }
|
|
||||||
|
|
||||||
// Percentile is a no-op.
|
|
||||||
func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Percentiles is a no-op.
|
|
||||||
func (NilHistogram) Percentiles(ps []float64) []float64 {
|
|
||||||
return make([]float64, len(ps))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sample is a no-op.
|
|
||||||
func (NilHistogram) Sample() Sample { return NilSample{} }
|
|
||||||
|
|
||||||
// Snapshot is a no-op.
|
|
||||||
func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
|
|
||||||
|
|
||||||
// StdDev is a no-op.
|
|
||||||
func (NilHistogram) StdDev() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Sum is a no-op.
|
|
||||||
func (NilHistogram) Sum() int64 { return 0 }
|
|
||||||
|
|
||||||
// Update is a no-op.
|
|
||||||
func (NilHistogram) Update(v int64) {}
|
|
||||||
|
|
||||||
// Variance is a no-op.
|
|
||||||
func (NilHistogram) Variance() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// StandardHistogram is the standard implementation of a Histogram and uses a
|
|
||||||
// Sample to bound its memory use.
|
|
||||||
type StandardHistogram struct {
|
|
||||||
sample Sample
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear clears the histogram and its sample.
|
|
||||||
func (h *StandardHistogram) Clear() { h.sample.Clear() }
|
|
||||||
|
|
||||||
// Count returns the number of samples recorded since the histogram was last
|
|
||||||
// cleared.
|
|
||||||
func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
|
|
||||||
|
|
||||||
// Max returns the maximum value in the sample.
|
|
||||||
func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
|
|
||||||
|
|
||||||
// Mean returns the mean of the values in the sample.
|
|
||||||
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
|
|
||||||
|
|
||||||
// Min returns the minimum value in the sample.
|
|
||||||
func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
|
|
||||||
|
|
||||||
// Percentile returns an arbitrary percentile of the values in the sample.
|
|
||||||
func (h *StandardHistogram) Percentile(p float64) float64 {
|
|
||||||
return h.sample.Percentile(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Percentiles returns a slice of arbitrary percentiles of the values in the
|
|
||||||
// sample.
|
|
||||||
func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
|
|
||||||
return h.sample.Percentiles(ps)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sample returns the Sample underlying the histogram.
|
|
||||||
func (h *StandardHistogram) Sample() Sample { return h.sample }
|
|
||||||
|
|
||||||
// Snapshot returns a read-only copy of the histogram.
|
|
||||||
func (h *StandardHistogram) Snapshot() Histogram {
|
|
||||||
return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StdDev returns the standard deviation of the values in the sample.
|
|
||||||
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
|
|
||||||
|
|
||||||
// Sum returns the sum in the sample.
|
|
||||||
func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
|
|
||||||
|
|
||||||
// Update samples a new value.
|
|
||||||
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
|
|
||||||
|
|
||||||
// Variance returns the variance of the values in the sample.
|
|
||||||
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
|
|
||||||
@@ -1,95 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
func BenchmarkHistogram(b *testing.B) {
|
|
||||||
h := NewHistogram(NewUniformSample(100))
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
h.Update(int64(i))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetOrRegisterHistogram(t *testing.T) {
|
|
||||||
r := NewRegistry()
|
|
||||||
s := NewUniformSample(100)
|
|
||||||
NewRegisteredHistogram("foo", r, s).Update(47)
|
|
||||||
if h := GetOrRegisterHistogram("foo", r, s); 1 != h.Count() {
|
|
||||||
t.Fatal(h)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHistogram10000(t *testing.T) {
|
|
||||||
h := NewHistogram(NewUniformSample(100000))
|
|
||||||
for i := 1; i <= 10000; i++ {
|
|
||||||
h.Update(int64(i))
|
|
||||||
}
|
|
||||||
testHistogram10000(t, h)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHistogramEmpty(t *testing.T) {
|
|
||||||
h := NewHistogram(NewUniformSample(100))
|
|
||||||
if count := h.Count(); 0 != count {
|
|
||||||
t.Errorf("h.Count(): 0 != %v\n", count)
|
|
||||||
}
|
|
||||||
if min := h.Min(); 0 != min {
|
|
||||||
t.Errorf("h.Min(): 0 != %v\n", min)
|
|
||||||
}
|
|
||||||
if max := h.Max(); 0 != max {
|
|
||||||
t.Errorf("h.Max(): 0 != %v\n", max)
|
|
||||||
}
|
|
||||||
if mean := h.Mean(); 0.0 != mean {
|
|
||||||
t.Errorf("h.Mean(): 0.0 != %v\n", mean)
|
|
||||||
}
|
|
||||||
if stdDev := h.StdDev(); 0.0 != stdDev {
|
|
||||||
t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
|
|
||||||
}
|
|
||||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
|
|
||||||
if 0.0 != ps[0] {
|
|
||||||
t.Errorf("median: 0.0 != %v\n", ps[0])
|
|
||||||
}
|
|
||||||
if 0.0 != ps[1] {
|
|
||||||
t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
|
|
||||||
}
|
|
||||||
if 0.0 != ps[2] {
|
|
||||||
t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHistogramSnapshot(t *testing.T) {
|
|
||||||
h := NewHistogram(NewUniformSample(100000))
|
|
||||||
for i := 1; i <= 10000; i++ {
|
|
||||||
h.Update(int64(i))
|
|
||||||
}
|
|
||||||
snapshot := h.Snapshot()
|
|
||||||
h.Update(0)
|
|
||||||
testHistogram10000(t, snapshot)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testHistogram10000(t *testing.T, h Histogram) {
|
|
||||||
if count := h.Count(); 10000 != count {
|
|
||||||
t.Errorf("h.Count(): 10000 != %v\n", count)
|
|
||||||
}
|
|
||||||
if min := h.Min(); 1 != min {
|
|
||||||
t.Errorf("h.Min(): 1 != %v\n", min)
|
|
||||||
}
|
|
||||||
if max := h.Max(); 10000 != max {
|
|
||||||
t.Errorf("h.Max(): 10000 != %v\n", max)
|
|
||||||
}
|
|
||||||
if mean := h.Mean(); 5000.5 != mean {
|
|
||||||
t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
|
|
||||||
}
|
|
||||||
if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
|
|
||||||
t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
|
|
||||||
}
|
|
||||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
|
|
||||||
if 5000.5 != ps[0] {
|
|
||||||
t.Errorf("median: 5000.5 != %v\n", ps[0])
|
|
||||||
}
|
|
||||||
if 7500.75 != ps[1] {
|
|
||||||
t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
|
|
||||||
}
|
|
||||||
if 9900.99 != ps[2] {
|
|
||||||
t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
83 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/json.go generated vendored
@@ -1,83 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"io"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MarshalJSON returns a byte slice containing a JSON representation of all
|
|
||||||
// the metrics in the Registry.
|
|
||||||
func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
|
|
||||||
data := make(map[string]map[string]interface{})
|
|
||||||
r.Each(func(name string, i interface{}) {
|
|
||||||
values := make(map[string]interface{})
|
|
||||||
switch metric := i.(type) {
|
|
||||||
case Counter:
|
|
||||||
values["count"] = metric.Count()
|
|
||||||
case Gauge:
|
|
||||||
values["value"] = metric.Value()
|
|
||||||
case GaugeFloat64:
|
|
||||||
values["value"] = metric.Value()
|
|
||||||
case Healthcheck:
|
|
||||||
values["error"] = nil
|
|
||||||
metric.Check()
|
|
||||||
if err := metric.Error(); nil != err {
|
|
||||||
values["error"] = metric.Error().Error()
|
|
||||||
}
|
|
||||||
case Histogram:
|
|
||||||
h := metric.Snapshot()
|
|
||||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
values["count"] = h.Count()
|
|
||||||
values["min"] = h.Min()
|
|
||||||
values["max"] = h.Max()
|
|
||||||
values["mean"] = h.Mean()
|
|
||||||
values["stddev"] = h.StdDev()
|
|
||||||
values["median"] = ps[0]
|
|
||||||
values["75%"] = ps[1]
|
|
||||||
values["95%"] = ps[2]
|
|
||||||
values["99%"] = ps[3]
|
|
||||||
values["99.9%"] = ps[4]
|
|
||||||
case Meter:
|
|
||||||
m := metric.Snapshot()
|
|
||||||
values["count"] = m.Count()
|
|
||||||
values["1m.rate"] = m.Rate1()
|
|
||||||
values["5m.rate"] = m.Rate5()
|
|
||||||
values["15m.rate"] = m.Rate15()
|
|
||||||
values["mean.rate"] = m.RateMean()
|
|
||||||
case Timer:
|
|
||||||
t := metric.Snapshot()
|
|
||||||
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
values["count"] = t.Count()
|
|
||||||
values["min"] = t.Min()
|
|
||||||
values["max"] = t.Max()
|
|
||||||
values["mean"] = t.Mean()
|
|
||||||
values["stddev"] = t.StdDev()
|
|
||||||
values["median"] = ps[0]
|
|
||||||
values["75%"] = ps[1]
|
|
||||||
values["95%"] = ps[2]
|
|
||||||
values["99%"] = ps[3]
|
|
||||||
values["99.9%"] = ps[4]
|
|
||||||
values["1m.rate"] = t.Rate1()
|
|
||||||
values["5m.rate"] = t.Rate5()
|
|
||||||
values["15m.rate"] = t.Rate15()
|
|
||||||
values["mean.rate"] = t.RateMean()
|
|
||||||
}
|
|
||||||
data[name] = values
|
|
||||||
})
|
|
||||||
return json.Marshal(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteJSON writes metrics from the given registry periodically to the
|
|
||||||
// specified io.Writer as JSON.
|
|
||||||
func WriteJSON(r Registry, d time.Duration, w io.Writer) {
|
|
||||||
for _ = range time.Tick(d) {
|
|
||||||
WriteJSONOnce(r, w)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteJSONOnce writes metrics from the given registry to the specified
|
|
||||||
// io.Writer as JSON.
|
|
||||||
func WriteJSONOnce(r Registry, w io.Writer) {
|
|
||||||
json.NewEncoder(w).Encode(r)
|
|
||||||
}
|
|
||||||
28 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/json_test.go generated vendored
@@ -1,28 +0,0 @@
package metrics

import (
	"bytes"
	"encoding/json"
	"testing"
)

func TestRegistryMarshallJSON(t *testing.T) {
	b := &bytes.Buffer{}
	enc := json.NewEncoder(b)
	r := NewRegistry()
	r.Register("counter", NewCounter())
	enc.Encode(r)
	if s := b.String(); "{\"counter\":{\"count\":0}}\n" != s {
		t.Fatalf(s)
	}
}

func TestRegistryWriteJSONOnce(t *testing.T) {
	r := NewRegistry()
	r.Register("counter", NewCounter())
	b := &bytes.Buffer{}
	WriteJSONOnce(r, b)
	if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
		t.Fail()
	}
}
102 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/librato/client.go generated vendored
@@ -1,102 +0,0 @@
|
|||||||
package librato
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
const Operations = "operations"
|
|
||||||
const OperationsShort = "ops"
|
|
||||||
|
|
||||||
type LibratoClient struct {
|
|
||||||
Email, Token string
|
|
||||||
}
|
|
||||||
|
|
||||||
// property strings
|
|
||||||
const (
|
|
||||||
// display attributes
|
|
||||||
Color = "color"
|
|
||||||
DisplayMax = "display_max"
|
|
||||||
DisplayMin = "display_min"
|
|
||||||
DisplayUnitsLong = "display_units_long"
|
|
||||||
DisplayUnitsShort = "display_units_short"
|
|
||||||
DisplayStacked = "display_stacked"
|
|
||||||
DisplayTransform = "display_transform"
|
|
||||||
// special gauge display attributes
|
|
||||||
SummarizeFunction = "summarize_function"
|
|
||||||
Aggregate = "aggregate"
|
|
||||||
|
|
||||||
// metric keys
|
|
||||||
Name = "name"
|
|
||||||
Period = "period"
|
|
||||||
Description = "description"
|
|
||||||
DisplayName = "display_name"
|
|
||||||
Attributes = "attributes"
|
|
||||||
|
|
||||||
// measurement keys
|
|
||||||
MeasureTime = "measure_time"
|
|
||||||
Source = "source"
|
|
||||||
Value = "value"
|
|
||||||
|
|
||||||
// special gauge keys
|
|
||||||
Count = "count"
|
|
||||||
Sum = "sum"
|
|
||||||
Max = "max"
|
|
||||||
Min = "min"
|
|
||||||
SumSquares = "sum_squares"
|
|
||||||
|
|
||||||
// batch keys
|
|
||||||
Counters = "counters"
|
|
||||||
Gauges = "gauges"
|
|
||||||
|
|
||||||
MetricsPostUrl = "https://metrics-api.librato.com/v1/metrics"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Measurement map[string]interface{}
|
|
||||||
type Metric map[string]interface{}
|
|
||||||
|
|
||||||
type Batch struct {
|
|
||||||
Gauges []Measurement `json:"gauges,omitempty"`
|
|
||||||
Counters []Measurement `json:"counters,omitempty"`
|
|
||||||
MeasureTime int64 `json:"measure_time"`
|
|
||||||
Source string `json:"source"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self *LibratoClient) PostMetrics(batch Batch) (err error) {
|
|
||||||
var (
|
|
||||||
js []byte
|
|
||||||
req *http.Request
|
|
||||||
resp *http.Response
|
|
||||||
)
|
|
||||||
|
|
||||||
if len(batch.Counters) == 0 && len(batch.Gauges) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if js, err = json.Marshal(batch); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
req.SetBasicAuth(self.Email, self.Token)
|
|
||||||
|
|
||||||
if resp, err = http.DefaultClient.Do(req); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
var body []byte
|
|
||||||
if body, err = ioutil.ReadAll(resp.Body); err != nil {
|
|
||||||
body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err))
|
|
||||||
}
|
|
||||||
err = fmt.Errorf("Unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
231 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/librato/librato.go generated vendored
@@ -1,231 +0,0 @@
|
|||||||
package librato
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"regexp"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/rcrowley/go-metrics"
|
|
||||||
)
|
|
||||||
|
|
||||||
// a regexp for extracting the unit from time.Duration.String
|
|
||||||
var unitRegexp = regexp.MustCompile("[^\\d]+$")
|
|
||||||
|
|
||||||
// a helper that turns a time.Duration into librato display attributes for timer metrics
|
|
||||||
func translateTimerAttributes(d time.Duration) (attrs map[string]interface{}) {
|
|
||||||
attrs = make(map[string]interface{})
|
|
||||||
attrs[DisplayTransform] = fmt.Sprintf("x/%d", int64(d))
|
|
||||||
attrs[DisplayUnitsShort] = string(unitRegexp.Find([]byte(d.String())))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
type Reporter struct {
|
|
||||||
Email, Token string
|
|
||||||
Source string
|
|
||||||
Interval time.Duration
|
|
||||||
Registry metrics.Registry
|
|
||||||
Percentiles []float64 // percentiles to report on histogram metrics
|
|
||||||
TimerAttributes map[string]interface{} // units in which timers will be displayed
|
|
||||||
intervalSec int64
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewReporter(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) *Reporter {
|
|
||||||
return &Reporter{e, t, s, d, r, p, translateTimerAttributes(u), int64(d / time.Second)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func Librato(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) {
|
|
||||||
NewReporter(r, d, e, t, s, p, u).Run()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self *Reporter) Run() {
|
|
||||||
log.Printf("WARNING: This client has been DEPRECATED! It has been moved to https://github.com/mihasya/go-metrics-librato and will be removed from rcrowley/go-metrics on August 5th 2015")
|
|
||||||
ticker := time.Tick(self.Interval)
|
|
||||||
metricsApi := &LibratoClient{self.Email, self.Token}
|
|
||||||
for now := range ticker {
|
|
||||||
var metrics Batch
|
|
||||||
var err error
|
|
||||||
if metrics, err = self.BuildRequest(now, self.Registry); err != nil {
|
|
||||||
log.Printf("ERROR constructing librato request body %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := metricsApi.PostMetrics(metrics); err != nil {
|
|
||||||
log.Printf("ERROR sending metrics to librato %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// calculate sum of squares from data provided by metrics.Histogram
|
|
||||||
// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
|
|
||||||
func sumSquares(s metrics.Sample) float64 {
|
|
||||||
count := float64(s.Count())
|
|
||||||
sumSquared := math.Pow(count*s.Mean(), 2)
|
|
||||||
sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
|
|
||||||
if math.IsNaN(sumSquares) {
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
return sumSquares
|
|
||||||
}
|
|
||||||
func sumSquaresTimer(t metrics.Timer) float64 {
|
|
||||||
count := float64(t.Count())
|
|
||||||
sumSquared := math.Pow(count*t.Mean(), 2)
|
|
||||||
sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
|
|
||||||
if math.IsNaN(sumSquares) {
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
return sumSquares
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) {
|
|
||||||
snapshot = Batch{
|
|
||||||
// coerce timestamps to a stepping fn so that they line up in Librato graphs
|
|
||||||
MeasureTime: (now.Unix() / self.intervalSec) * self.intervalSec,
|
|
||||||
Source: self.Source,
|
|
||||||
}
|
|
||||||
snapshot.Gauges = make([]Measurement, 0)
|
|
||||||
snapshot.Counters = make([]Measurement, 0)
|
|
||||||
histogramGaugeCount := 1 + len(self.Percentiles)
|
|
||||||
r.Each(func(name string, metric interface{}) {
|
|
||||||
measurement := Measurement{}
|
|
||||||
measurement[Period] = self.Interval.Seconds()
|
|
||||||
switch m := metric.(type) {
|
|
||||||
case metrics.Counter:
|
|
||||||
if m.Count() > 0 {
|
|
||||||
measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
|
|
||||||
measurement[Value] = float64(m.Count())
|
|
||||||
measurement[Attributes] = map[string]interface{}{
|
|
||||||
DisplayUnitsLong: Operations,
|
|
||||||
DisplayUnitsShort: OperationsShort,
|
|
||||||
DisplayMin: "0",
|
|
||||||
}
|
|
||||||
snapshot.Counters = append(snapshot.Counters, measurement)
|
|
||||||
}
|
|
||||||
case metrics.Gauge:
|
|
||||||
measurement[Name] = name
|
|
||||||
measurement[Value] = float64(m.Value())
|
|
||||||
snapshot.Gauges = append(snapshot.Gauges, measurement)
|
|
||||||
case metrics.GaugeFloat64:
|
|
||||||
measurement[Name] = name
|
|
||||||
measurement[Value] = float64(m.Value())
|
|
||||||
snapshot.Gauges = append(snapshot.Gauges, measurement)
|
|
||||||
case metrics.Histogram:
|
|
||||||
if m.Count() > 0 {
|
|
||||||
gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
|
|
||||||
s := m.Sample()
|
|
||||||
measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
|
|
||||||
measurement[Count] = uint64(s.Count())
|
|
||||||
measurement[Max] = float64(s.Max())
|
|
||||||
measurement[Min] = float64(s.Min())
|
|
||||||
measurement[Sum] = float64(s.Sum())
|
|
||||||
measurement[SumSquares] = sumSquares(s)
|
|
||||||
gauges[0] = measurement
|
|
||||||
for i, p := range self.Percentiles {
|
|
||||||
gauges[i+1] = Measurement{
|
|
||||||
Name: fmt.Sprintf("%s.%.2f", measurement[Name], p),
|
|
||||||
Value: s.Percentile(p),
|
|
||||||
Period: measurement[Period],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
snapshot.Gauges = append(snapshot.Gauges, gauges...)
|
|
||||||
}
|
|
||||||
case metrics.Meter:
|
|
||||||
measurement[Name] = name
|
|
||||||
measurement[Value] = float64(m.Count())
|
|
||||||
snapshot.Counters = append(snapshot.Counters, measurement)
|
|
||||||
snapshot.Gauges = append(snapshot.Gauges,
|
|
||||||
Measurement{
|
|
||||||
Name: fmt.Sprintf("%s.%s", name, "1min"),
|
|
||||||
Value: m.Rate1(),
|
|
||||||
Period: int64(self.Interval.Seconds()),
|
|
||||||
Attributes: map[string]interface{}{
|
|
||||||
DisplayUnitsLong: Operations,
|
|
||||||
DisplayUnitsShort: OperationsShort,
|
|
||||||
DisplayMin: "0",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Measurement{
|
|
||||||
Name: fmt.Sprintf("%s.%s", name, "5min"),
|
|
||||||
Value: m.Rate5(),
|
|
||||||
Period: int64(self.Interval.Seconds()),
|
|
||||||
Attributes: map[string]interface{}{
|
|
||||||
DisplayUnitsLong: Operations,
|
|
||||||
DisplayUnitsShort: OperationsShort,
|
|
||||||
DisplayMin: "0",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Measurement{
|
|
||||||
Name: fmt.Sprintf("%s.%s", name, "15min"),
|
|
||||||
Value: m.Rate15(),
|
|
||||||
Period: int64(self.Interval.Seconds()),
|
|
||||||
Attributes: map[string]interface{}{
|
|
||||||
DisplayUnitsLong: Operations,
|
|
||||||
DisplayUnitsShort: OperationsShort,
|
|
||||||
DisplayMin: "0",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
case metrics.Timer:
|
|
||||||
measurement[Name] = name
|
|
||||||
measurement[Value] = float64(m.Count())
|
|
||||||
snapshot.Counters = append(snapshot.Counters, measurement)
|
|
||||||
if m.Count() > 0 {
|
|
||||||
libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
|
|
||||||
gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
|
|
||||||
gauges[0] = Measurement{
|
|
||||||
Name: libratoName,
|
|
||||||
Count: uint64(m.Count()),
|
|
||||||
Sum: m.Mean() * float64(m.Count()),
|
|
||||||
Max: float64(m.Max()),
|
|
||||||
Min: float64(m.Min()),
|
|
||||||
SumSquares: sumSquaresTimer(m),
|
|
||||||
Period: int64(self.Interval.Seconds()),
|
|
||||||
Attributes: self.TimerAttributes,
|
|
||||||
}
|
|
||||||
for i, p := range self.Percentiles {
|
|
||||||
gauges[i+1] = Measurement{
|
|
||||||
Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100),
|
|
||||||
Value: m.Percentile(p),
|
|
||||||
Period: int64(self.Interval.Seconds()),
|
|
||||||
Attributes: self.TimerAttributes,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
snapshot.Gauges = append(snapshot.Gauges, gauges...)
|
|
||||||
snapshot.Gauges = append(snapshot.Gauges,
|
|
||||||
Measurement{
|
|
||||||
Name: fmt.Sprintf("%s.%s", name, "rate.1min"),
|
|
||||||
Value: m.Rate1(),
|
|
||||||
Period: int64(self.Interval.Seconds()),
|
|
||||||
Attributes: map[string]interface{}{
|
|
||||||
DisplayUnitsLong: Operations,
|
|
||||||
DisplayUnitsShort: OperationsShort,
|
|
||||||
DisplayMin: "0",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Measurement{
|
|
||||||
Name: fmt.Sprintf("%s.%s", name, "rate.5min"),
|
|
||||||
Value: m.Rate5(),
|
|
||||||
Period: int64(self.Interval.Seconds()),
|
|
||||||
Attributes: map[string]interface{}{
|
|
||||||
DisplayUnitsLong: Operations,
|
|
||||||
DisplayUnitsShort: OperationsShort,
|
|
||||||
DisplayMin: "0",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Measurement{
|
|
||||||
Name: fmt.Sprintf("%s.%s", name, "rate.15min"),
|
|
||||||
Value: m.Rate15(),
|
|
||||||
Period: int64(self.Interval.Seconds()),
|
|
||||||
Attributes: map[string]interface{}{
|
|
||||||
DisplayUnitsLong: Operations,
|
|
||||||
DisplayUnitsShort: OperationsShort,
|
|
||||||
DisplayMin: "0",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
70 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/log.go generated vendored
@@ -1,70 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Output each metric in the given registry periodically using the given
|
|
||||||
// logger.
|
|
||||||
func Log(r Registry, d time.Duration, l *log.Logger) {
|
|
||||||
for _ = range time.Tick(d) {
|
|
||||||
r.Each(func(name string, i interface{}) {
|
|
||||||
switch metric := i.(type) {
|
|
||||||
case Counter:
|
|
||||||
l.Printf("counter %s\n", name)
|
|
||||||
l.Printf(" count: %9d\n", metric.Count())
|
|
||||||
case Gauge:
|
|
||||||
l.Printf("gauge %s\n", name)
|
|
||||||
l.Printf(" value: %9d\n", metric.Value())
|
|
||||||
case GaugeFloat64:
|
|
||||||
l.Printf("gauge %s\n", name)
|
|
||||||
l.Printf(" value: %f\n", metric.Value())
|
|
||||||
case Healthcheck:
|
|
||||||
metric.Check()
|
|
||||||
l.Printf("healthcheck %s\n", name)
|
|
||||||
l.Printf(" error: %v\n", metric.Error())
|
|
||||||
case Histogram:
|
|
||||||
h := metric.Snapshot()
|
|
||||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
l.Printf("histogram %s\n", name)
|
|
||||||
l.Printf(" count: %9d\n", h.Count())
|
|
||||||
l.Printf(" min: %9d\n", h.Min())
|
|
||||||
l.Printf(" max: %9d\n", h.Max())
|
|
||||||
l.Printf(" mean: %12.2f\n", h.Mean())
|
|
||||||
l.Printf(" stddev: %12.2f\n", h.StdDev())
|
|
||||||
l.Printf(" median: %12.2f\n", ps[0])
|
|
||||||
l.Printf(" 75%%: %12.2f\n", ps[1])
|
|
||||||
l.Printf(" 95%%: %12.2f\n", ps[2])
|
|
||||||
l.Printf(" 99%%: %12.2f\n", ps[3])
|
|
||||||
l.Printf(" 99.9%%: %12.2f\n", ps[4])
|
|
||||||
case Meter:
|
|
||||||
m := metric.Snapshot()
|
|
||||||
l.Printf("meter %s\n", name)
|
|
||||||
l.Printf(" count: %9d\n", m.Count())
|
|
||||||
l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
|
|
||||||
l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
|
|
||||||
l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
|
|
||||||
l.Printf(" mean rate: %12.2f\n", m.RateMean())
|
|
||||||
case Timer:
|
|
||||||
t := metric.Snapshot()
|
|
||||||
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
l.Printf("timer %s\n", name)
|
|
||||||
l.Printf(" count: %9d\n", t.Count())
|
|
||||||
l.Printf(" min: %9d\n", t.Min())
|
|
||||||
l.Printf(" max: %9d\n", t.Max())
|
|
||||||
l.Printf(" mean: %12.2f\n", t.Mean())
|
|
||||||
l.Printf(" stddev: %12.2f\n", t.StdDev())
|
|
||||||
l.Printf(" median: %12.2f\n", ps[0])
|
|
||||||
l.Printf(" 75%%: %12.2f\n", ps[1])
|
|
||||||
l.Printf(" 95%%: %12.2f\n", ps[2])
|
|
||||||
l.Printf(" 99%%: %12.2f\n", ps[3])
|
|
||||||
l.Printf(" 99.9%%: %12.2f\n", ps[4])
|
|
||||||
l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
|
|
||||||
l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
|
|
||||||
l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
|
|
||||||
l.Printf(" mean rate: %12.2f\n", t.RateMean())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
285 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/memory.md generated vendored
@@ -1,285 +0,0 @@
|
|||||||
Memory usage
|
|
||||||
============
|
|
||||||
|
|
||||||
(Highly unscientific.)
|
|
||||||
|
|
||||||
Command used to gather static memory usage:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
|
|
||||||
```
|
|
||||||
|
|
||||||
Program used to gather baseline memory usage:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
time.Sleep(600e9)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Baseline
|
|
||||||
--------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 42604 kB
|
|
||||||
VmSize: 42604 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 1120 kB
|
|
||||||
VmRSS: 1120 kB
|
|
||||||
VmData: 35460 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1020 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 36 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
Program used to gather metric memory usage (with other metrics being similar):
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"metrics"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
fmt.Sprintf("foo")
|
|
||||||
metrics.NewRegistry()
|
|
||||||
time.Sleep(600e9)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
1000 counters registered
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 44016 kB
|
|
||||||
VmSize: 44016 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 1928 kB
|
|
||||||
VmRSS: 1928 kB
|
|
||||||
VmData: 36868 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1024 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 40 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**1.412 kB virtual, TODO 0.808 kB resident per counter.**
|
|
||||||
|
|
||||||
100000 counters registered
|
|
||||||
--------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 55024 kB
|
|
||||||
VmSize: 55024 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 12440 kB
|
|
||||||
VmRSS: 12440 kB
|
|
||||||
VmData: 47876 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1024 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 64 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**0.1242 kB virtual, 0.1132 kB resident per counter.**
|
|
||||||
|
|
||||||
1000 gauges registered
|
|
||||||
----------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 44012 kB
|
|
||||||
VmSize: 44012 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 1928 kB
|
|
||||||
VmRSS: 1928 kB
|
|
||||||
VmData: 36868 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1020 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 40 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**1.408 kB virtual, 0.808 kB resident per counter.**
|
|
||||||
|
|
||||||
100000 gauges registered
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 55020 kB
|
|
||||||
VmSize: 55020 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 12432 kB
|
|
||||||
VmRSS: 12432 kB
|
|
||||||
VmData: 47876 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1020 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 60 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**0.12416 kB virtual, 0.11312 resident per gauge.**
|
|
||||||
|
|
||||||
1000 histograms with a uniform sample size of 1028
|
|
||||||
--------------------------------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 72272 kB
|
|
||||||
VmSize: 72272 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 16204 kB
|
|
||||||
VmRSS: 16204 kB
|
|
||||||
VmData: 65100 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1048 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 80 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**29.668 kB virtual, TODO 15.084 resident per histogram.**
|
|
||||||
|
|
||||||
10000 histograms with a uniform sample size of 1028
|
|
||||||
---------------------------------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 256912 kB
|
|
||||||
VmSize: 256912 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 146204 kB
|
|
||||||
VmRSS: 146204 kB
|
|
||||||
VmData: 249740 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1048 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 448 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**21.4308 kB virtual, 14.5084 kB resident per histogram.**
|
|
||||||
|
|
||||||
50000 histograms with a uniform sample size of 1028
|
|
||||||
---------------------------------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 908112 kB
|
|
||||||
VmSize: 908112 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 645832 kB
|
|
||||||
VmRSS: 645588 kB
|
|
||||||
VmData: 900940 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1048 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 1716 kB
|
|
||||||
VmSwap: 1544 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**17.31016 kB virtual, 12.88936 kB resident per histogram.**
|
|
||||||
|
|
||||||
1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
|
|
||||||
-------------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 62480 kB
|
|
||||||
VmSize: 62480 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 11572 kB
|
|
||||||
VmRSS: 11572 kB
|
|
||||||
VmData: 55308 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1048 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 64 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**19.876 kB virtual, 10.452 kB resident per histogram.**
|
|
||||||
|
|
||||||
10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
|
|
||||||
--------------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 153296 kB
|
|
||||||
VmSize: 153296 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 101176 kB
|
|
||||||
VmRSS: 101176 kB
|
|
||||||
VmData: 146124 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1048 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 240 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**11.0692 kB virtual, 10.0056 kB resident per histogram.**
|
|
||||||
|
|
||||||
50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
|
|
||||||
--------------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 557264 kB
|
|
||||||
VmSize: 557264 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 501056 kB
|
|
||||||
VmRSS: 501056 kB
|
|
||||||
VmData: 550092 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1048 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 1032 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**10.2932 kB virtual, 9.99872 kB resident per histogram.**
|
|
||||||
|
|
||||||
1000 meters
|
|
||||||
-----------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 74504 kB
|
|
||||||
VmSize: 74504 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 24124 kB
|
|
||||||
VmRSS: 24124 kB
|
|
||||||
VmData: 67340 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1040 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 92 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**31.9 kB virtual, 23.004 kB resident per meter.**
|
|
||||||
|
|
||||||
10000 meters
|
|
||||||
------------
|
|
||||||
|
|
||||||
```
|
|
||||||
VmPeak: 278920 kB
|
|
||||||
VmSize: 278920 kB
|
|
||||||
VmLck: 0 kB
|
|
||||||
VmHWM: 227300 kB
|
|
||||||
VmRSS: 227300 kB
|
|
||||||
VmData: 271756 kB
|
|
||||||
VmStk: 136 kB
|
|
||||||
VmExe: 1040 kB
|
|
||||||
VmLib: 1848 kB
|
|
||||||
VmPTE: 488 kB
|
|
||||||
VmSwap: 0 kB
|
|
||||||
```
|
|
||||||
|
|
||||||
**23.6316 kB virtual, 22.618 kB resident per meter.**
|
|
||||||
233 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/meter.go generated vendored
@@ -1,233 +0,0 @@
|
|||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Meters count events to produce exponentially-weighted moving average rates
|
|
||||||
// at one-, five-, and fifteen-minutes and a mean rate.
|
|
||||||
type Meter interface {
|
|
||||||
Count() int64
|
|
||||||
Mark(int64)
|
|
||||||
Rate1() float64
|
|
||||||
Rate5() float64
|
|
||||||
Rate15() float64
|
|
||||||
RateMean() float64
|
|
||||||
Snapshot() Meter
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetOrRegisterMeter returns an existing Meter or constructs and registers a
|
|
||||||
// new StandardMeter.
|
|
||||||
func GetOrRegisterMeter(name string, r Registry) Meter {
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
return r.GetOrRegister(name, NewMeter).(Meter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMeter constructs a new StandardMeter and launches a goroutine.
|
|
||||||
func NewMeter() Meter {
|
|
||||||
if UseNilMetrics {
|
|
||||||
return NilMeter{}
|
|
||||||
}
|
|
||||||
m := newStandardMeter()
|
|
||||||
arbiter.Lock()
|
|
||||||
defer arbiter.Unlock()
|
|
||||||
arbiter.meters = append(arbiter.meters, m)
|
|
||||||
if !arbiter.started {
|
|
||||||
arbiter.started = true
|
|
||||||
go arbiter.tick()
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMeter constructs and registers a new StandardMeter and launches a
|
|
||||||
// goroutine.
|
|
||||||
func NewRegisteredMeter(name string, r Registry) Meter {
|
|
||||||
c := NewMeter()
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
r.Register(name, c)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// MeterSnapshot is a read-only copy of another Meter.
|
|
||||||
type MeterSnapshot struct {
|
|
||||||
count int64
|
|
||||||
rate1, rate5, rate15, rateMean float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the count of events at the time the snapshot was taken.
|
|
||||||
func (m *MeterSnapshot) Count() int64 { return m.count }
|
|
||||||
|
|
||||||
// Mark panics.
|
|
||||||
func (*MeterSnapshot) Mark(n int64) {
|
|
||||||
panic("Mark called on a MeterSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate1 returns the one-minute moving average rate of events per second at the
|
|
||||||
// time the snapshot was taken.
|
|
||||||
func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
|
|
||||||
|
|
||||||
// Rate5 returns the five-minute moving average rate of events per second at
|
|
||||||
// the time the snapshot was taken.
|
|
||||||
func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
|
|
||||||
|
|
||||||
// Rate15 returns the fifteen-minute moving average rate of events per second
|
|
||||||
// at the time the snapshot was taken.
|
|
||||||
func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
|
|
||||||
|
|
||||||
// RateMean returns the meter's mean rate of events per second at the time the
|
|
||||||
// snapshot was taken.
|
|
||||||
func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
|
|
||||||
|
|
||||||
// Snapshot returns the snapshot.
|
|
||||||
func (m *MeterSnapshot) Snapshot() Meter { return m }
|
|
||||||
|
|
||||||
// NilMeter is a no-op Meter.
|
|
||||||
type NilMeter struct{}
|
|
||||||
|
|
||||||
// Count is a no-op.
|
|
||||||
func (NilMeter) Count() int64 { return 0 }
|
|
||||||
|
|
||||||
// Mark is a no-op.
|
|
||||||
func (NilMeter) Mark(n int64) {}
|
|
||||||
|
|
||||||
// Rate1 is a no-op.
|
|
||||||
func (NilMeter) Rate1() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Rate5 is a no-op.
|
|
||||||
func (NilMeter) Rate5() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Rate15is a no-op.
|
|
||||||
func (NilMeter) Rate15() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// RateMean is a no-op.
|
|
||||||
func (NilMeter) RateMean() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Snapshot is a no-op.
|
|
||||||
func (NilMeter) Snapshot() Meter { return NilMeter{} }
|
|
||||||
|
|
||||||
// StandardMeter is the standard implementation of a Meter.
|
|
||||||
type StandardMeter struct {
|
|
||||||
lock sync.RWMutex
|
|
||||||
snapshot *MeterSnapshot
|
|
||||||
a1, a5, a15 EWMA
|
|
||||||
startTime time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStandardMeter() *StandardMeter {
|
|
||||||
return &StandardMeter{
|
|
||||||
snapshot: &MeterSnapshot{},
|
|
||||||
a1: NewEWMA1(),
|
|
||||||
a5: NewEWMA5(),
|
|
||||||
a15: NewEWMA15(),
|
|
||||||
startTime: time.Now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the number of events recorded.
|
|
||||||
func (m *StandardMeter) Count() int64 {
|
|
||||||
m.lock.RLock()
|
|
||||||
count := m.snapshot.count
|
|
||||||
m.lock.RUnlock()
|
|
||||||
return count
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mark records the occurance of n events.
|
|
||||||
func (m *StandardMeter) Mark(n int64) {
|
|
||||||
m.lock.Lock()
|
|
||||||
defer m.lock.Unlock()
|
|
||||||
m.snapshot.count += n
|
|
||||||
m.a1.Update(n)
|
|
||||||
m.a5.Update(n)
|
|
||||||
m.a15.Update(n)
|
|
||||||
m.updateSnapshot()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate1 returns the one-minute moving average rate of events per second.
|
|
||||||
func (m *StandardMeter) Rate1() float64 {
|
|
||||||
m.lock.RLock()
|
|
||||||
rate1 := m.snapshot.rate1
|
|
||||||
m.lock.RUnlock()
|
|
||||||
return rate1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate5 returns the five-minute moving average rate of events per second.
|
|
||||||
func (m *StandardMeter) Rate5() float64 {
|
|
||||||
m.lock.RLock()
|
|
||||||
rate5 := m.snapshot.rate5
|
|
||||||
m.lock.RUnlock()
|
|
||||||
return rate5
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate15 returns the fifteen-minute moving average rate of events per second.
|
|
||||||
func (m *StandardMeter) Rate15() float64 {
|
|
||||||
m.lock.RLock()
|
|
||||||
rate15 := m.snapshot.rate15
|
|
||||||
m.lock.RUnlock()
|
|
||||||
return rate15
|
|
||||||
}
|
|
||||||
|
|
||||||
// RateMean returns the meter's mean rate of events per second.
|
|
||||||
func (m *StandardMeter) RateMean() float64 {
|
|
||||||
m.lock.RLock()
|
|
||||||
rateMean := m.snapshot.rateMean
|
|
||||||
m.lock.RUnlock()
|
|
||||||
return rateMean
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshot returns a read-only copy of the meter.
|
|
||||||
func (m *StandardMeter) Snapshot() Meter {
|
|
||||||
m.lock.RLock()
|
|
||||||
snapshot := *m.snapshot
|
|
||||||
m.lock.RUnlock()
|
|
||||||
return &snapshot
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StandardMeter) updateSnapshot() {
|
|
||||||
// should run with write lock held on m.lock
|
|
||||||
snapshot := m.snapshot
|
|
||||||
snapshot.rate1 = m.a1.Rate()
|
|
||||||
snapshot.rate5 = m.a5.Rate()
|
|
||||||
snapshot.rate15 = m.a15.Rate()
|
|
||||||
snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StandardMeter) tick() {
|
|
||||||
m.lock.Lock()
|
|
||||||
defer m.lock.Unlock()
|
|
||||||
m.a1.Tick()
|
|
||||||
m.a5.Tick()
|
|
||||||
m.a15.Tick()
|
|
||||||
m.updateSnapshot()
|
|
||||||
}
|
|
||||||
|
|
||||||
type meterArbiter struct {
|
|
||||||
sync.RWMutex
|
|
||||||
started bool
|
|
||||||
meters []*StandardMeter
|
|
||||||
ticker *time.Ticker
|
|
||||||
}
|
|
||||||
|
|
||||||
var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
|
|
||||||
|
|
||||||
// Ticks meters on the scheduled interval
|
|
||||||
func (ma *meterArbiter) tick() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ma.ticker.C:
|
|
||||||
ma.tickMeters()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ma *meterArbiter) tickMeters() {
|
|
||||||
ma.RLock()
|
|
||||||
defer ma.RUnlock()
|
|
||||||
for _, meter := range ma.meters {
|
|
||||||
meter.tick()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
60 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/meter_test.go generated vendored
@@ -1,60 +0,0 @@
package metrics

import (
	"testing"
	"time"
)

func BenchmarkMeter(b *testing.B) {
	m := NewMeter()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Mark(1)
	}
}

func TestGetOrRegisterMeter(t *testing.T) {
	r := NewRegistry()
	NewRegisteredMeter("foo", r).Mark(47)
	if m := GetOrRegisterMeter("foo", r); 47 != m.Count() {
		t.Fatal(m)
	}
}

func TestMeterDecay(t *testing.T) {
	ma := meterArbiter{
		ticker: time.NewTicker(1),
	}
	m := newStandardMeter()
	ma.meters = append(ma.meters, m)
	go ma.tick()
	m.Mark(1)
	rateMean := m.RateMean()
	time.Sleep(1)
	if m.RateMean() >= rateMean {
		t.Error("m.RateMean() didn't decrease")
	}
}

func TestMeterNonzero(t *testing.T) {
	m := NewMeter()
	m.Mark(3)
	if count := m.Count(); 3 != count {
		t.Errorf("m.Count(): 3 != %v\n", count)
	}
}

func TestMeterSnapshot(t *testing.T) {
	m := NewMeter()
	m.Mark(1)
	if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
		t.Fatal(snapshot)
	}
}

func TestMeterZero(t *testing.T) {
	m := NewMeter()
	if count := m.Count(); 0 != count {
		t.Errorf("m.Count(): 0 != %v\n", count)
	}
}
13
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/metrics.go
generated
vendored
@@ -1,13 +0,0 @@
// Go port of Coda Hale's Metrics library
//
// <https://github.com/rcrowley/go-metrics>
//
// Coda Hale's original work: <https://github.com/codahale/metrics>
package metrics

// UseNilMetrics is checked by the constructor functions for all of the
// standard metrics. If it is true, the metric returned is a stub.
//
// This global kill-switch helps quantify the observer effect and makes
// for less cluttered pprof profiles.
var UseNilMetrics bool = false
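UseNilMetrics is consulted by the constructors elsewhere in this package (for example NewUniformSample and NewExpDecaySample later in this diff check it), which hand back no-op Nil* implementations when it is set. A small sketch of the effect, assuming the flag is flipped before any metric is constructed:

package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	metrics.UseNilMetrics = true       // must be set before constructing metrics
	s := metrics.NewUniformSample(100) // returns the no-op NilSample
	s.Update(42)                       // recorded nowhere
	fmt.Println(s.Count(), s.Values()) // 0 []
}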
107
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/metrics_test.go
generated
vendored
@@ -1,107 +0,0 @@
package metrics

import (
	"io/ioutil"
	"log"
	"sync"
	"testing"
)

const FANOUT = 128

// Stop the compiler from complaining during debugging.
var (
	_ = ioutil.Discard
	_ = log.LstdFlags
)

func BenchmarkMetrics(b *testing.B) {
	r := NewRegistry()
	c := NewRegisteredCounter("counter", r)
	g := NewRegisteredGauge("gauge", r)
	gf := NewRegisteredGaugeFloat64("gaugefloat64", r)
	h := NewRegisteredHistogram("histogram", r, NewUniformSample(100))
	m := NewRegisteredMeter("meter", r)
	t := NewRegisteredTimer("timer", r)
	RegisterDebugGCStats(r)
	RegisterRuntimeMemStats(r)
	b.ResetTimer()
	ch := make(chan bool)

	wgD := &sync.WaitGroup{}
	/*
		wgD.Add(1)
		go func() {
			defer wgD.Done()
			//log.Println("go CaptureDebugGCStats")
			for {
				select {
				case <-ch:
					//log.Println("done CaptureDebugGCStats")
					return
				default:
					CaptureDebugGCStatsOnce(r)
				}
			}
		}()
	//*/

	wgR := &sync.WaitGroup{}
	//*
	wgR.Add(1)
	go func() {
		defer wgR.Done()
		//log.Println("go CaptureRuntimeMemStats")
		for {
			select {
			case <-ch:
				//log.Println("done CaptureRuntimeMemStats")
				return
			default:
				CaptureRuntimeMemStatsOnce(r)
			}
		}
	}()
	//*/

	wgW := &sync.WaitGroup{}
	/*
		wgW.Add(1)
		go func() {
			defer wgW.Done()
			//log.Println("go Write")
			for {
				select {
				case <-ch:
					//log.Println("done Write")
					return
				default:
					WriteOnce(r, ioutil.Discard)
				}
			}
		}()
	//*/

	wg := &sync.WaitGroup{}
	wg.Add(FANOUT)
	for i := 0; i < FANOUT; i++ {
		go func(i int) {
			defer wg.Done()
			//log.Println("go", i)
			for i := 0; i < b.N; i++ {
				c.Inc(1)
				g.Update(int64(i))
				gf.Update(float64(i))
				h.Update(int64(i))
				m.Mark(1)
				t.Update(1)
			}
			//log.Println("done", i)
		}(i)
	}
	wg.Wait()
	close(ch)
	wgD.Wait()
	wgR.Wait()
	wgW.Wait()
}
119
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/opentsdb.go
generated
vendored
@@ -1,119 +0,0 @@
package metrics

import (
	"bufio"
	"fmt"
	"log"
	"net"
	"os"
	"strings"
	"time"
)

var shortHostName string = ""

// OpenTSDBConfig provides a container with configuration parameters for
// the OpenTSDB exporter
type OpenTSDBConfig struct {
	Addr          *net.TCPAddr  // Network address to connect to
	Registry      Registry      // Registry to be exported
	FlushInterval time.Duration // Flush interval
	DurationUnit  time.Duration // Time conversion unit for durations
	Prefix        string        // Prefix to be prepended to metric names
}

// OpenTSDB is a blocking exporter function which reports metrics in r
// to a TSDB server located at addr, flushing them every d duration
// and prepending metric names with prefix.
func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
	OpenTSDBWithConfig(OpenTSDBConfig{
		Addr:          addr,
		Registry:      r,
		FlushInterval: d,
		DurationUnit:  time.Nanosecond,
		Prefix:        prefix,
	})
}

// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
// but it takes a OpenTSDBConfig instead.
func OpenTSDBWithConfig(c OpenTSDBConfig) {
	for _ = range time.Tick(c.FlushInterval) {
		if err := openTSDB(&c); nil != err {
			log.Println(err)
		}
	}
}

func getShortHostname() string {
	if shortHostName == "" {
		host, _ := os.Hostname()
		if index := strings.Index(host, "."); index > 0 {
			shortHostName = host[:index]
		} else {
			shortHostName = host
		}
	}
	return shortHostName
}

func openTSDB(c *OpenTSDBConfig) error {
	shortHostname := getShortHostname()
	now := time.Now().Unix()
	du := float64(c.DurationUnit)
	conn, err := net.DialTCP("tcp", nil, c.Addr)
	if nil != err {
		return err
	}
	defer conn.Close()
	w := bufio.NewWriter(conn)
	c.Registry.Each(func(name string, i interface{}) {
		switch metric := i.(type) {
		case Counter:
			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
		case Gauge:
			fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
		case GaugeFloat64:
			fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
		case Histogram:
			h := metric.Snapshot()
			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
		case Meter:
			m := metric.Snapshot()
			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
		case Timer:
			t := metric.Snapshot()
			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
			fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
		}
		w.Flush()
	})
	return nil
}
21
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/opentsdb_test.go
generated
vendored
@@ -1,21 +0,0 @@
package metrics

import (
	"net"
	"time"
)

func ExampleOpenTSDB() {
	addr, _ := net.ResolveTCPAddr("net", ":2003")
	go OpenTSDB(DefaultRegistry, 1*time.Second, "some.prefix", addr)
}

func ExampleOpenTSDBWithConfig() {
	addr, _ := net.ResolveTCPAddr("net", ":2003")
	go OpenTSDBWithConfig(OpenTSDBConfig{
		Addr:          addr,
		Registry:      DefaultRegistry,
		FlushInterval: 1 * time.Second,
		DurationUnit:  time.Millisecond,
	})
}
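Both examples above resolve a TCP address and start the blocking exporter in a goroutine. Each flush then writes plain-text OpenTSDB put commands, one per exported value; a sketch of the line shape, with an invented prefix, metric name and hostname:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative only: the shape of one "put" line as written by openTSDB
	// above for a Counter named "relay.requests" with prefix "relaypool".
	fmt.Printf("put %s.%s.count %d %d host=%s\n",
		"relaypool", "relay.requests", time.Now().Unix(), int64(42), "relay01")
}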
240
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/registry.go
generated
vendored
@@ -1,240 +0,0 @@
package metrics

import (
	"fmt"
	"reflect"
	"sync"
)

// DuplicateMetric is the error returned by Registry.Register when a metric
// already exists. If you mean to Register that metric you must first
// Unregister the existing metric.
type DuplicateMetric string

func (err DuplicateMetric) Error() string {
	return fmt.Sprintf("duplicate metric: %s", string(err))
}

// A Registry holds references to a set of metrics by name and can iterate
// over them, calling callback functions provided by the user.
//
// This is an interface so as to encourage other structs to implement
// the Registry API as appropriate.
type Registry interface {

	// Call the given function for each registered metric.
	Each(func(string, interface{}))

	// Get the metric by the given name or nil if none is registered.
	Get(string) interface{}

	// Gets an existing metric or registers the given one.
	// The interface can be the metric to register if not found in registry,
	// or a function returning the metric for lazy instantiation.
	GetOrRegister(string, interface{}) interface{}

	// Register the given metric under the given name.
	Register(string, interface{}) error

	// Run all registered healthchecks.
	RunHealthchecks()

	// Unregister the metric with the given name.
	Unregister(string)

	// Unregister all metrics. (Mostly for testing.)
	UnregisterAll()
}

// The standard implementation of a Registry is a mutex-protected map
// of names to metrics.
type StandardRegistry struct {
	metrics map[string]interface{}
	mutex   sync.Mutex
}

// Create a new registry.
func NewRegistry() Registry {
	return &StandardRegistry{metrics: make(map[string]interface{})}
}

// Call the given function for each registered metric.
func (r *StandardRegistry) Each(f func(string, interface{})) {
	for name, i := range r.registered() {
		f(name, i)
	}
}

// Get the metric by the given name or nil if none is registered.
func (r *StandardRegistry) Get(name string) interface{} {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	return r.metrics[name]
}

// Gets an existing metric or creates and registers a new one. Threadsafe
// alternative to calling Get and Register on failure.
// The interface can be the metric to register if not found in registry,
// or a function returning the metric for lazy instantiation.
func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if metric, ok := r.metrics[name]; ok {
		return metric
	}
	if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
		i = v.Call(nil)[0].Interface()
	}
	r.register(name, i)
	return i
}

// Register the given metric under the given name. Returns a DuplicateMetric
// if a metric by the given name is already registered.
func (r *StandardRegistry) Register(name string, i interface{}) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	return r.register(name, i)
}

// Run all registered healthchecks.
func (r *StandardRegistry) RunHealthchecks() {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	for _, i := range r.metrics {
		if h, ok := i.(Healthcheck); ok {
			h.Check()
		}
	}
}

// Unregister the metric with the given name.
func (r *StandardRegistry) Unregister(name string) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	delete(r.metrics, name)
}

// Unregister all metrics. (Mostly for testing.)
func (r *StandardRegistry) UnregisterAll() {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	for name, _ := range r.metrics {
		delete(r.metrics, name)
	}
}

func (r *StandardRegistry) register(name string, i interface{}) error {
	if _, ok := r.metrics[name]; ok {
		return DuplicateMetric(name)
	}
	switch i.(type) {
	case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
		r.metrics[name] = i
	}
	return nil
}

func (r *StandardRegistry) registered() map[string]interface{} {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	metrics := make(map[string]interface{}, len(r.metrics))
	for name, i := range r.metrics {
		metrics[name] = i
	}
	return metrics
}

type PrefixedRegistry struct {
	underlying Registry
	prefix     string
}

func NewPrefixedRegistry(prefix string) Registry {
	return &PrefixedRegistry{
		underlying: NewRegistry(),
		prefix:     prefix,
	}
}

// Call the given function for each registered metric.
func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
	r.underlying.Each(fn)
}

// Get the metric by the given name or nil if none is registered.
func (r *PrefixedRegistry) Get(name string) interface{} {
	return r.underlying.Get(name)
}

// Gets an existing metric or registers the given one.
// The interface can be the metric to register if not found in registry,
// or a function returning the metric for lazy instantiation.
func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
	realName := r.prefix + name
	return r.underlying.GetOrRegister(realName, metric)
}

// Register the given metric under the given name. The name will be prefixed.
func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
	realName := r.prefix + name
	return r.underlying.Register(realName, metric)
}

// Run all registered healthchecks.
func (r *PrefixedRegistry) RunHealthchecks() {
	r.underlying.RunHealthchecks()
}

// Unregister the metric with the given name. The name will be prefixed.
func (r *PrefixedRegistry) Unregister(name string) {
	realName := r.prefix + name
	r.underlying.Unregister(realName)
}

// Unregister all metrics. (Mostly for testing.)
func (r *PrefixedRegistry) UnregisterAll() {
	r.underlying.UnregisterAll()
}

var DefaultRegistry Registry = NewRegistry()

// Call the given function for each registered metric.
func Each(f func(string, interface{})) {
	DefaultRegistry.Each(f)
}

// Get the metric by the given name or nil if none is registered.
func Get(name string) interface{} {
	return DefaultRegistry.Get(name)
}

// Gets an existing metric or creates and registers a new one. Threadsafe
// alternative to calling Get and Register on failure.
func GetOrRegister(name string, i interface{}) interface{} {
	return DefaultRegistry.GetOrRegister(name, i)
}

// Register the given metric under the given name. Returns a DuplicateMetric
// if a metric by the given name is already registered.
func Register(name string, i interface{}) error {
	return DefaultRegistry.Register(name, i)
}

// Register the given metric under the given name. Panics if a metric by the
// given name is already registered.
func MustRegister(name string, i interface{}) {
	if err := Register(name, i); err != nil {
		panic(err)
	}
}

// Run all registered healthchecks.
func RunHealthchecks() {
	DefaultRegistry.RunHealthchecks()
}

// Unregister the metric with the given name.
func Unregister(name string) {
	DefaultRegistry.Unregister(name)
}
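GetOrRegister accepts either a ready-made metric or a constructor; the reflect Func check in StandardRegistry calls the constructor only when the name is not yet registered, and the first registration wins. A short sketch, with an illustrative metric name:

package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	// Passing the constructor (not its result) defers allocation until needed.
	c := r.GetOrRegister("relay.sessions", metrics.NewCounter).(metrics.Counter)
	c.Inc(3)

	// A second caller with the same name gets the already-registered counter.
	again := r.GetOrRegister("relay.sessions", metrics.NewCounter).(metrics.Counter)
	fmt.Println(again.Count()) // 3
}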
165
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/registry_test.go
generated
vendored
@@ -1,165 +0,0 @@
package metrics

import "testing"

func BenchmarkRegistry(b *testing.B) {
	r := NewRegistry()
	r.Register("foo", NewCounter())
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		r.Each(func(string, interface{}) {})
	}
}

func TestRegistry(t *testing.T) {
	r := NewRegistry()
	r.Register("foo", NewCounter())
	i := 0
	r.Each(func(name string, iface interface{}) {
		i++
		if "foo" != name {
			t.Fatal(name)
		}
		if _, ok := iface.(Counter); !ok {
			t.Fatal(iface)
		}
	})
	if 1 != i {
		t.Fatal(i)
	}
	r.Unregister("foo")
	i = 0
	r.Each(func(string, interface{}) { i++ })
	if 0 != i {
		t.Fatal(i)
	}
}

func TestRegistryDuplicate(t *testing.T) {
	r := NewRegistry()
	if err := r.Register("foo", NewCounter()); nil != err {
		t.Fatal(err)
	}
	if err := r.Register("foo", NewGauge()); nil == err {
		t.Fatal(err)
	}
	i := 0
	r.Each(func(name string, iface interface{}) {
		i++
		if _, ok := iface.(Counter); !ok {
			t.Fatal(iface)
		}
	})
	if 1 != i {
		t.Fatal(i)
	}
}

func TestRegistryGet(t *testing.T) {
	r := NewRegistry()
	r.Register("foo", NewCounter())
	if count := r.Get("foo").(Counter).Count(); 0 != count {
		t.Fatal(count)
	}
	r.Get("foo").(Counter).Inc(1)
	if count := r.Get("foo").(Counter).Count(); 1 != count {
		t.Fatal(count)
	}
}

func TestRegistryGetOrRegister(t *testing.T) {
	r := NewRegistry()

	// First metric wins with GetOrRegister
	_ = r.GetOrRegister("foo", NewCounter())
	m := r.GetOrRegister("foo", NewGauge())
	if _, ok := m.(Counter); !ok {
		t.Fatal(m)
	}

	i := 0
	r.Each(func(name string, iface interface{}) {
		i++
		if name != "foo" {
			t.Fatal(name)
		}
		if _, ok := iface.(Counter); !ok {
			t.Fatal(iface)
		}
	})
	if i != 1 {
		t.Fatal(i)
	}
}

func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) {
	r := NewRegistry()

	// First metric wins with GetOrRegister
	_ = r.GetOrRegister("foo", NewCounter)
	m := r.GetOrRegister("foo", NewGauge)
	if _, ok := m.(Counter); !ok {
		t.Fatal(m)
	}

	i := 0
	r.Each(func(name string, iface interface{}) {
		i++
		if name != "foo" {
			t.Fatal(name)
		}
		if _, ok := iface.(Counter); !ok {
			t.Fatal(iface)
		}
	})
	if i != 1 {
		t.Fatal(i)
	}
}

func TestPrefixedRegistryGetOrRegister(t *testing.T) {
	r := NewPrefixedRegistry("prefix.")

	_ = r.GetOrRegister("foo", NewCounter)

	r.Each(func(name string, m interface{}) {
		if name != "prefix.foo" {
			t.Fatal(name)
		}
	})
}

func TestPrefixedRegistryRegister(t *testing.T) {
	r := NewPrefixedRegistry("prefix.")

	_ = r.Register("foo", NewCounter)

	r.Each(func(name string, m interface{}) {
		if name != "prefix.foo" {
			t.Fatal(name)
		}
	})
}

func TestPrefixedRegistryUnregister(t *testing.T) {
	r := NewPrefixedRegistry("prefix.")

	_ = r.Register("foo", NewCounter)

	r.Each(func(name string, m interface{}) {
		if name != "prefix.foo" {
			t.Fatal(name)
		}
	})

	r.Unregister("foo")

	i := 0
	r.Each(func(name string, m interface{}) {
		i++
	})

	if i != 0 {
		t.Fatal(i)
	}
}
200
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/runtime.go
generated
vendored
@@ -1,200 +0,0 @@
package metrics

import (
	"runtime"
	"time"
)

var (
	memStats       runtime.MemStats
	runtimeMetrics struct {
		MemStats struct {
			Alloc        Gauge
			BuckHashSys  Gauge
			DebugGC      Gauge
			EnableGC     Gauge
			Frees        Gauge
			HeapAlloc    Gauge
			HeapIdle     Gauge
			HeapInuse    Gauge
			HeapObjects  Gauge
			HeapReleased Gauge
			HeapSys      Gauge
			LastGC       Gauge
			Lookups      Gauge
			Mallocs      Gauge
			MCacheInuse  Gauge
			MCacheSys    Gauge
			MSpanInuse   Gauge
			MSpanSys     Gauge
			NextGC       Gauge
			NumGC        Gauge
			PauseNs      Histogram
			PauseTotalNs Gauge
			StackInuse   Gauge
			StackSys     Gauge
			Sys          Gauge
			TotalAlloc   Gauge
		}
		NumCgoCall   Gauge
		NumGoroutine Gauge
		ReadMemStats Timer
	}
	frees       uint64
	lookups     uint64
	mallocs     uint64
	numGC       uint32
	numCgoCalls int64
)

// Capture new values for the Go runtime statistics exported in
// runtime.MemStats. This is designed to be called as a goroutine.
func CaptureRuntimeMemStats(r Registry, d time.Duration) {
	for _ = range time.Tick(d) {
		CaptureRuntimeMemStatsOnce(r)
	}
}

// Capture new values for the Go runtime statistics exported in
// runtime.MemStats. This is designed to be called in a background
// goroutine. Giving a registry which has not been given to
// RegisterRuntimeMemStats will panic.
//
// Be very careful with this because runtime.ReadMemStats calls the C
// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
// and that last one does what it says on the tin.
func CaptureRuntimeMemStatsOnce(r Registry) {
	t := time.Now()
	runtime.ReadMemStats(&memStats) // This takes 50-200us.
	runtimeMetrics.ReadMemStats.UpdateSince(t)

	runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
	runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
	if memStats.DebugGC {
		runtimeMetrics.MemStats.DebugGC.Update(1)
	} else {
		runtimeMetrics.MemStats.DebugGC.Update(0)
	}
	if memStats.EnableGC {
		runtimeMetrics.MemStats.EnableGC.Update(1)
	} else {
		runtimeMetrics.MemStats.EnableGC.Update(0)
	}

	runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
	runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
	runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
	runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
	runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
	runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
	runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
	runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
	runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
	runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
	runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
	runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
	runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
	runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
	runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
	runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))

	// <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
	i := numGC % uint32(len(memStats.PauseNs))
	ii := memStats.NumGC % uint32(len(memStats.PauseNs))
	if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
		for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
		}
	} else {
		if i > ii {
			for ; i < uint32(len(memStats.PauseNs)); i++ {
				runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
			}
			i = 0
		}
		for ; i < ii; i++ {
			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
		}
	}
	frees = memStats.Frees
	lookups = memStats.Lookups
	mallocs = memStats.Mallocs
	numGC = memStats.NumGC

	runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
	runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
	runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
	runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
	runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))

	currentNumCgoCalls := numCgoCall()
	runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
	numCgoCalls = currentNumCgoCalls

	runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
}

// Register runtimeMetrics for the Go runtime statistics exported in runtime and
// specifically runtime.MemStats. The runtimeMetrics are named by their
// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
func RegisterRuntimeMemStats(r Registry) {
	runtimeMetrics.MemStats.Alloc = NewGauge()
	runtimeMetrics.MemStats.BuckHashSys = NewGauge()
	runtimeMetrics.MemStats.DebugGC = NewGauge()
	runtimeMetrics.MemStats.EnableGC = NewGauge()
	runtimeMetrics.MemStats.Frees = NewGauge()
	runtimeMetrics.MemStats.HeapAlloc = NewGauge()
	runtimeMetrics.MemStats.HeapIdle = NewGauge()
	runtimeMetrics.MemStats.HeapInuse = NewGauge()
	runtimeMetrics.MemStats.HeapObjects = NewGauge()
	runtimeMetrics.MemStats.HeapReleased = NewGauge()
	runtimeMetrics.MemStats.HeapSys = NewGauge()
	runtimeMetrics.MemStats.LastGC = NewGauge()
	runtimeMetrics.MemStats.Lookups = NewGauge()
	runtimeMetrics.MemStats.Mallocs = NewGauge()
	runtimeMetrics.MemStats.MCacheInuse = NewGauge()
	runtimeMetrics.MemStats.MCacheSys = NewGauge()
	runtimeMetrics.MemStats.MSpanInuse = NewGauge()
	runtimeMetrics.MemStats.MSpanSys = NewGauge()
	runtimeMetrics.MemStats.NextGC = NewGauge()
	runtimeMetrics.MemStats.NumGC = NewGauge()
	runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
	runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
	runtimeMetrics.MemStats.StackInuse = NewGauge()
	runtimeMetrics.MemStats.StackSys = NewGauge()
	runtimeMetrics.MemStats.Sys = NewGauge()
	runtimeMetrics.MemStats.TotalAlloc = NewGauge()
	runtimeMetrics.NumCgoCall = NewGauge()
	runtimeMetrics.NumGoroutine = NewGauge()
	runtimeMetrics.ReadMemStats = NewTimer()

	r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
	r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
	r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
	r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
	r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
	r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
	r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
	r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
	r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
	r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
	r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
	r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
	r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
	r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
	r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
	r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
	r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
	r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
	r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
	r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
	r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
	r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
	r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
	r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
	r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
	r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
	r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
	r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
	r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
}
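RegisterRuntimeMemStats allocates and registers the runtime.* gauges, and CaptureRuntimeMemStats then polls runtime.ReadMemStats on an interval; per the comment above, capturing against a registry that was never registered panics. A sketch, with an arbitrary 5-second interval:

package main

import (
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.RegisterRuntimeMemStats(r)                   // allocate and register the runtime.* metrics first
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)  // poll ReadMemStats in the background

	time.Sleep(6 * time.Second) // let at least one capture happen in this sketch
}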
10
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/runtime_cgo.go
generated
vendored
@@ -1,10 +0,0 @@
// +build cgo
// +build !appengine

package metrics

import "runtime"

func numCgoCall() int64 {
	return runtime.NumCgoCall()
}
@@ -1,7 +0,0 @@
// +build !cgo appengine

package metrics

func numCgoCall() int64 {
	return 0
}
78
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/runtime_test.go
generated
vendored
@@ -1,78 +0,0 @@
package metrics

import (
	"runtime"
	"testing"
	"time"
)

func BenchmarkRuntimeMemStats(b *testing.B) {
	r := NewRegistry()
	RegisterRuntimeMemStats(r)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		CaptureRuntimeMemStatsOnce(r)
	}
}

func TestRuntimeMemStats(t *testing.T) {
	r := NewRegistry()
	RegisterRuntimeMemStats(r)
	CaptureRuntimeMemStatsOnce(r)
	zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests.
	runtime.GC()
	CaptureRuntimeMemStatsOnce(r)
	if count := runtimeMetrics.MemStats.PauseNs.Count(); 1 != count-zero {
		t.Fatal(count - zero)
	}
	runtime.GC()
	runtime.GC()
	CaptureRuntimeMemStatsOnce(r)
	if count := runtimeMetrics.MemStats.PauseNs.Count(); 3 != count-zero {
		t.Fatal(count - zero)
	}
	for i := 0; i < 256; i++ {
		runtime.GC()
	}
	CaptureRuntimeMemStatsOnce(r)
	if count := runtimeMetrics.MemStats.PauseNs.Count(); 259 != count-zero {
		t.Fatal(count - zero)
	}
	for i := 0; i < 257; i++ {
		runtime.GC()
	}
	CaptureRuntimeMemStatsOnce(r)
	if count := runtimeMetrics.MemStats.PauseNs.Count(); 515 != count-zero { // We lost one because there were too many GCs between captures.
		t.Fatal(count - zero)
	}
}

func TestRuntimeMemStatsBlocking(t *testing.T) {
	if g := runtime.GOMAXPROCS(0); g < 2 {
		t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g)
	}
	ch := make(chan int)
	go testRuntimeMemStatsBlocking(ch)
	var memStats runtime.MemStats
	t0 := time.Now()
	runtime.ReadMemStats(&memStats)
	t1 := time.Now()
	t.Log("i++ during runtime.ReadMemStats:", <-ch)
	go testRuntimeMemStatsBlocking(ch)
	d := t1.Sub(t0)
	t.Log(d)
	time.Sleep(d)
	t.Log("i++ during time.Sleep:", <-ch)
}

func testRuntimeMemStatsBlocking(ch chan int) {
	i := 0
	for {
		select {
		case ch <- i:
			return
		default:
			i++
		}
	}
}
609
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/sample.go
generated
vendored
@@ -1,609 +0,0 @@
package metrics

import (
	"math"
	"math/rand"
	"sort"
	"sync"
	"time"
)

const rescaleThreshold = time.Hour

// Samples maintain a statistically-significant selection of values from
// a stream.
type Sample interface {
	Clear()
	Count() int64
	Max() int64
	Mean() float64
	Min() int64
	Percentile(float64) float64
	Percentiles([]float64) []float64
	Size() int
	Snapshot() Sample
	StdDev() float64
	Sum() int64
	Update(int64)
	Values() []int64
	Variance() float64
}

// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
// Decay Model for Streaming Systems".
//
// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
type ExpDecaySample struct {
	alpha         float64
	count         int64
	mutex         sync.Mutex
	reservoirSize int
	t0, t1        time.Time
	values        *expDecaySampleHeap
}

// NewExpDecaySample constructs a new exponentially-decaying sample with the
// given reservoir size and alpha.
func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
	if UseNilMetrics {
		return NilSample{}
	}
	s := &ExpDecaySample{
		alpha:         alpha,
		reservoirSize: reservoirSize,
		t0:            time.Now(),
		values:        newExpDecaySampleHeap(reservoirSize),
	}
	s.t1 = s.t0.Add(rescaleThreshold)
	return s
}

// Clear clears all samples.
func (s *ExpDecaySample) Clear() {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.count = 0
	s.t0 = time.Now()
	s.t1 = s.t0.Add(rescaleThreshold)
	s.values.Clear()
}

// Count returns the number of samples recorded, which may exceed the
// reservoir size.
func (s *ExpDecaySample) Count() int64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return s.count
}

// Max returns the maximum value in the sample, which may not be the maximum
// value ever to be part of the sample.
func (s *ExpDecaySample) Max() int64 {
	return SampleMax(s.Values())
}

// Mean returns the mean of the values in the sample.
func (s *ExpDecaySample) Mean() float64 {
	return SampleMean(s.Values())
}

// Min returns the minimum value in the sample, which may not be the minimum
// value ever to be part of the sample.
func (s *ExpDecaySample) Min() int64 {
	return SampleMin(s.Values())
}

// Percentile returns an arbitrary percentile of values in the sample.
func (s *ExpDecaySample) Percentile(p float64) float64 {
	return SamplePercentile(s.Values(), p)
}

// Percentiles returns a slice of arbitrary percentiles of values in the
// sample.
func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
	return SamplePercentiles(s.Values(), ps)
}

// Size returns the size of the sample, which is at most the reservoir size.
func (s *ExpDecaySample) Size() int {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return s.values.Size()
}

// Snapshot returns a read-only copy of the sample.
func (s *ExpDecaySample) Snapshot() Sample {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	vals := s.values.Values()
	values := make([]int64, len(vals))
	for i, v := range vals {
		values[i] = v.v
	}
	return &SampleSnapshot{
		count:  s.count,
		values: values,
	}
}

// StdDev returns the standard deviation of the values in the sample.
func (s *ExpDecaySample) StdDev() float64 {
	return SampleStdDev(s.Values())
}

// Sum returns the sum of the values in the sample.
func (s *ExpDecaySample) Sum() int64 {
	return SampleSum(s.Values())
}

// Update samples a new value.
func (s *ExpDecaySample) Update(v int64) {
	s.update(time.Now(), v)
}

// Values returns a copy of the values in the sample.
func (s *ExpDecaySample) Values() []int64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	vals := s.values.Values()
	values := make([]int64, len(vals))
	for i, v := range vals {
		values[i] = v.v
	}
	return values
}

// Variance returns the variance of the values in the sample.
func (s *ExpDecaySample) Variance() float64 {
	return SampleVariance(s.Values())
}

// update samples a new value at a particular timestamp. This is a method all
// its own to facilitate testing.
func (s *ExpDecaySample) update(t time.Time, v int64) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.count++
	if s.values.Size() == s.reservoirSize {
		s.values.Pop()
	}
	s.values.Push(expDecaySample{
		k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
		v: v,
	})
	if t.After(s.t1) {
		values := s.values.Values()
		t0 := s.t0
		s.values.Clear()
		s.t0 = t
		s.t1 = s.t0.Add(rescaleThreshold)
		for _, v := range values {
			v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
			s.values.Push(v)
		}
	}
}

// NilSample is a no-op Sample.
type NilSample struct{}

// Clear is a no-op.
func (NilSample) Clear() {}

// Count is a no-op.
func (NilSample) Count() int64 { return 0 }

// Max is a no-op.
func (NilSample) Max() int64 { return 0 }

// Mean is a no-op.
func (NilSample) Mean() float64 { return 0.0 }

// Min is a no-op.
func (NilSample) Min() int64 { return 0 }

// Percentile is a no-op.
func (NilSample) Percentile(p float64) float64 { return 0.0 }

// Percentiles is a no-op.
func (NilSample) Percentiles(ps []float64) []float64 {
	return make([]float64, len(ps))
}

// Size is a no-op.
func (NilSample) Size() int { return 0 }

// Sample is a no-op.
func (NilSample) Snapshot() Sample { return NilSample{} }

// StdDev is a no-op.
func (NilSample) StdDev() float64 { return 0.0 }

// Sum is a no-op.
func (NilSample) Sum() int64 { return 0 }

// Update is a no-op.
func (NilSample) Update(v int64) {}

// Values is a no-op.
func (NilSample) Values() []int64 { return []int64{} }

// Variance is a no-op.
func (NilSample) Variance() float64 { return 0.0 }

// SampleMax returns the maximum value of the slice of int64.
func SampleMax(values []int64) int64 {
	if 0 == len(values) {
		return 0
	}
	var max int64 = math.MinInt64
	for _, v := range values {
		if max < v {
			max = v
		}
	}
	return max
}

// SampleMean returns the mean value of the slice of int64.
func SampleMean(values []int64) float64 {
	if 0 == len(values) {
		return 0.0
	}
	return float64(SampleSum(values)) / float64(len(values))
}

// SampleMin returns the minimum value of the slice of int64.
func SampleMin(values []int64) int64 {
	if 0 == len(values) {
		return 0
	}
	var min int64 = math.MaxInt64
	for _, v := range values {
		if min > v {
			min = v
		}
	}
	return min
}

// SamplePercentiles returns an arbitrary percentile of the slice of int64.
func SamplePercentile(values int64Slice, p float64) float64 {
	return SamplePercentiles(values, []float64{p})[0]
}

// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
// int64.
func SamplePercentiles(values int64Slice, ps []float64) []float64 {
	scores := make([]float64, len(ps))
	size := len(values)
	if size > 0 {
		sort.Sort(values)
		for i, p := range ps {
			pos := p * float64(size+1)
			if pos < 1.0 {
				scores[i] = float64(values[0])
			} else if pos >= float64(size) {
				scores[i] = float64(values[size-1])
			} else {
				lower := float64(values[int(pos)-1])
				upper := float64(values[int(pos)])
				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
			}
		}
	}
	return scores
}
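SamplePercentiles sorts the values and linearly interpolates between neighbouring order statistics at position p*(n+1), clamping to the first and last elements. A worked sketch of the interpolation (the sample values are invented):

package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	s := metrics.NewUniformSample(10)
	for _, v := range []int64{10, 20, 30, 40} {
		s.Update(v)
	}
	// For n=4 and p=0.5 the position is 0.5*5 = 2.5, so the median is
	// interpolated halfway between the 2nd and 3rd sorted values: 25.
	fmt.Println(s.Percentile(0.5)) // 25
}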
|
|
||||||
// SampleSnapshot is a read-only copy of another Sample.
|
|
||||||
type SampleSnapshot struct {
|
|
||||||
count int64
|
|
||||||
values []int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear panics.
|
|
||||||
func (*SampleSnapshot) Clear() {
|
|
||||||
panic("Clear called on a SampleSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the count of inputs at the time the snapshot was taken.
|
|
||||||
func (s *SampleSnapshot) Count() int64 { return s.count }
|
|
||||||
|
|
||||||
// Max returns the maximal value at the time the snapshot was taken.
|
|
||||||
func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
|
|
||||||
|
|
||||||
// Mean returns the mean value at the time the snapshot was taken.
|
|
||||||
func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
|
|
||||||
|
|
||||||
// Min returns the minimal value at the time the snapshot was taken.
|
|
||||||
func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
|
|
||||||
|
|
||||||
// Percentile returns an arbitrary percentile of values at the time the
|
|
||||||
// snapshot was taken.
|
|
||||||
func (s *SampleSnapshot) Percentile(p float64) float64 {
|
|
||||||
return SamplePercentile(s.values, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Percentiles returns a slice of arbitrary percentiles of values at the time
|
|
||||||
// the snapshot was taken.
|
|
||||||
func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
|
|
||||||
return SamplePercentiles(s.values, ps)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the sample at the time the snapshot was taken.
|
|
||||||
func (s *SampleSnapshot) Size() int { return len(s.values) }
|
|
||||||
|
|
||||||
// Snapshot returns the snapshot.
|
|
||||||
func (s *SampleSnapshot) Snapshot() Sample { return s }
|
|
||||||
|
|
||||||
// StdDev returns the standard deviation of values at the time the snapshot was
|
|
||||||
// taken.
|
|
||||||
func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
|
|
||||||
|
|
||||||
// Sum returns the sum of values at the time the snapshot was taken.
|
|
||||||
func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
|
|
||||||
|
|
||||||
// Update panics.
|
|
||||||
func (*SampleSnapshot) Update(int64) {
|
|
||||||
panic("Update called on a SampleSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Values returns a copy of the values in the sample.
|
|
||||||
func (s *SampleSnapshot) Values() []int64 {
|
|
||||||
values := make([]int64, len(s.values))
|
|
||||||
copy(values, s.values)
|
|
||||||
return values
|
|
||||||
}
|
|
||||||
|
|
||||||
// Variance returns the variance of values at the time the snapshot was taken.
|
|
||||||
func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
|
|
||||||
|
|
||||||
// SampleStdDev returns the standard deviation of the slice of int64.
|
|
||||||
func SampleStdDev(values []int64) float64 {
|
|
||||||
return math.Sqrt(SampleVariance(values))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SampleSum returns the sum of the slice of int64.
|
|
||||||
func SampleSum(values []int64) int64 {
|
|
||||||
var sum int64
|
|
||||||
for _, v := range values {
|
|
||||||
sum += v
|
|
||||||
}
|
|
||||||
return sum
|
|
||||||
}
|
|
||||||
|
|
||||||
// SampleVariance returns the variance of the slice of int64.
|
|
||||||
func SampleVariance(values []int64) float64 {
|
|
||||||
if 0 == len(values) {
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
m := SampleMean(values)
|
|
||||||
var sum float64
|
|
||||||
for _, v := range values {
|
|
||||||
d := float64(v) - m
|
|
||||||
sum += d * d
|
|
||||||
}
|
|
||||||
return sum / float64(len(values))
|
|
||||||
}
|
|
||||||
|
|
||||||
// A uniform sample using Vitter's Algorithm R.
|
|
||||||
//
|
|
||||||
// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
|
|
||||||
type UniformSample struct {
|
|
||||||
count int64
|
|
||||||
mutex sync.Mutex
|
|
||||||
reservoirSize int
|
|
||||||
values []int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewUniformSample constructs a new uniform sample with the given reservoir
|
|
||||||
// size.
|
|
||||||
func NewUniformSample(reservoirSize int) Sample {
|
|
||||||
if UseNilMetrics {
|
|
||||||
return NilSample{}
|
|
||||||
}
|
|
||||||
return &UniformSample{
|
|
||||||
reservoirSize: reservoirSize,
|
|
||||||
values: make([]int64, 0, reservoirSize),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear clears all samples.
|
|
||||||
func (s *UniformSample) Clear() {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
s.count = 0
|
|
||||||
s.values = make([]int64, 0, s.reservoirSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the number of samples recorded, which may exceed the
|
|
||||||
// reservoir size.
|
|
||||||
func (s *UniformSample) Count() int64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return s.count
|
|
||||||
}
|
|
||||||
|
|
||||||
// Max returns the maximum value in the sample, which may not be the maximum
|
|
||||||
// value ever to be part of the sample.
|
|
||||||
func (s *UniformSample) Max() int64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return SampleMax(s.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mean returns the mean of the values in the sample.
|
|
||||||
func (s *UniformSample) Mean() float64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return SampleMean(s.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Min returns the minimum value in the sample, which may not be the minimum
|
|
||||||
// value ever to be part of the sample.
|
|
||||||
func (s *UniformSample) Min() int64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return SampleMin(s.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Percentile returns an arbitrary percentile of values in the sample.
|
|
||||||
func (s *UniformSample) Percentile(p float64) float64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return SamplePercentile(s.values, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Percentiles returns a slice of arbitrary percentiles of values in the
|
|
||||||
// sample.
|
|
||||||
func (s *UniformSample) Percentiles(ps []float64) []float64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return SamplePercentiles(s.values, ps)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the sample, which is at most the reservoir size.
|
|
||||||
func (s *UniformSample) Size() int {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return len(s.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshot returns a read-only copy of the sample.
|
|
||||||
func (s *UniformSample) Snapshot() Sample {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
values := make([]int64, len(s.values))
|
|
||||||
copy(values, s.values)
|
|
||||||
return &SampleSnapshot{
|
|
||||||
count: s.count,
|
|
||||||
values: values,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StdDev returns the standard deviation of the values in the sample.
|
|
||||||
func (s *UniformSample) StdDev() float64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return SampleStdDev(s.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum returns the sum of the values in the sample.
|
|
||||||
func (s *UniformSample) Sum() int64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return SampleSum(s.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update samples a new value.
|
|
||||||
func (s *UniformSample) Update(v int64) {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
s.count++
|
|
||||||
if len(s.values) < s.reservoirSize {
|
|
||||||
s.values = append(s.values, v)
|
|
||||||
} else {
|
|
||||||
r := rand.Int63n(s.count)
|
|
||||||
if r < int64(len(s.values)) {
|
|
||||||
s.values[int(r)] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Values returns a copy of the values in the sample.
|
|
||||||
func (s *UniformSample) Values() []int64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
values := make([]int64, len(s.values))
|
|
||||||
copy(values, s.values)
|
|
||||||
return values
|
|
||||||
}
|
|
||||||
|
|
||||||
// Variance returns the variance of the values in the sample.
|
|
||||||
func (s *UniformSample) Variance() float64 {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
return SampleVariance(s.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// expDecaySample represents an individual sample in a heap.
|
|
||||||
type expDecaySample struct {
|
|
||||||
k float64
|
|
||||||
v int64
|
|
||||||
}
|
|
||||||
|
|
||||||
func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
|
|
||||||
return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// expDecaySampleHeap is a min-heap of expDecaySamples.
|
|
||||||
// The internal implementation is copied from the standard library's container/heap
|
|
||||||
type expDecaySampleHeap struct {
|
|
||||||
s []expDecaySample
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *expDecaySampleHeap) Clear() {
|
|
||||||
h.s = h.s[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *expDecaySampleHeap) Push(s expDecaySample) {
|
|
||||||
n := len(h.s)
|
|
||||||
h.s = h.s[0 : n+1]
|
|
||||||
h.s[n] = s
|
|
||||||
h.up(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *expDecaySampleHeap) Pop() expDecaySample {
|
|
||||||
n := len(h.s) - 1
|
|
||||||
h.s[0], h.s[n] = h.s[n], h.s[0]
|
|
||||||
h.down(0, n)
|
|
||||||
|
|
||||||
n = len(h.s)
|
|
||||||
s := h.s[n-1]
|
|
||||||
h.s = h.s[0 : n-1]
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *expDecaySampleHeap) Size() int {
|
|
||||||
return len(h.s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *expDecaySampleHeap) Values() []expDecaySample {
|
|
||||||
return h.s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *expDecaySampleHeap) up(j int) {
|
|
||||||
for {
|
|
||||||
i := (j - 1) / 2 // parent
|
|
||||||
if i == j || !(h.s[j].k < h.s[i].k) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
h.s[i], h.s[j] = h.s[j], h.s[i]
|
|
||||||
j = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *expDecaySampleHeap) down(i, n int) {
|
|
||||||
for {
|
|
||||||
j1 := 2*i + 1
|
|
||||||
if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
|
|
||||||
break
|
|
||||||
}
|
|
||||||
j := j1 // left child
|
|
||||||
if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
|
|
||||||
j = j2 // = 2*i + 2 // right child
|
|
||||||
}
|
|
||||||
if !(h.s[j].k < h.s[i].k) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
h.s[i], h.s[j] = h.s[j], h.s[i]
|
|
||||||
i = j
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type int64Slice []int64
|
|
||||||
|
|
||||||
func (p int64Slice) Len() int { return len(p) }
|
|
||||||
func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
|
||||||
func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
|
||||||
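As a side note, the UniformSample removed above implements plain reservoir sampling (Vitter's Algorithm R). A minimal usage sketch, assuming the upstream github.com/rcrowley/go-metrics import path and using only the methods shown in sample.go above:

	package main

	import (
		"fmt"

		metrics "github.com/rcrowley/go-metrics"
	)

	func main() {
		// Keep a uniform random reservoir of at most 100 of the 10000 recorded values.
		s := metrics.NewUniformSample(100)
		for i := 0; i < 10000; i++ {
			s.Update(int64(i))
		}
		// Snapshot is a read-only copy; Count is the total recorded, Size is capped by the reservoir size.
		snap := s.Snapshot()
		fmt.Println(snap.Count(), snap.Size(), snap.Mean())
	}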
363
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/sample_test.go
generated
vendored
@@ -1,363 +0,0 @@
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/rand"
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
|
|
||||||
// expensive computations like Variance, the cost of copying the Sample, as
|
|
||||||
// approximated by a make and copy, is much greater than the cost of the
|
|
||||||
// computation for small samples and only slightly less for large samples.
|
|
||||||
func BenchmarkCompute1000(b *testing.B) {
|
|
||||||
s := make([]int64, 1000)
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
s[i] = int64(i)
|
|
||||||
}
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
SampleVariance(s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func BenchmarkCompute1000000(b *testing.B) {
|
|
||||||
s := make([]int64, 1000000)
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
s[i] = int64(i)
|
|
||||||
}
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
SampleVariance(s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func BenchmarkCopy1000(b *testing.B) {
|
|
||||||
s := make([]int64, 1000)
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
s[i] = int64(i)
|
|
||||||
}
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
sCopy := make([]int64, len(s))
|
|
||||||
copy(sCopy, s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func BenchmarkCopy1000000(b *testing.B) {
|
|
||||||
s := make([]int64, 1000000)
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
s[i] = int64(i)
|
|
||||||
}
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
sCopy := make([]int64, len(s))
|
|
||||||
copy(sCopy, s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkExpDecaySample257(b *testing.B) {
|
|
||||||
benchmarkSample(b, NewExpDecaySample(257, 0.015))
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkExpDecaySample514(b *testing.B) {
|
|
||||||
benchmarkSample(b, NewExpDecaySample(514, 0.015))
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkExpDecaySample1028(b *testing.B) {
|
|
||||||
benchmarkSample(b, NewExpDecaySample(1028, 0.015))
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkUniformSample257(b *testing.B) {
|
|
||||||
benchmarkSample(b, NewUniformSample(257))
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkUniformSample514(b *testing.B) {
|
|
||||||
benchmarkSample(b, NewUniformSample(514))
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkUniformSample1028(b *testing.B) {
|
|
||||||
benchmarkSample(b, NewUniformSample(1028))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExpDecaySample10(t *testing.T) {
|
|
||||||
rand.Seed(1)
|
|
||||||
s := NewExpDecaySample(100, 0.99)
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
s.Update(int64(i))
|
|
||||||
}
|
|
||||||
if size := s.Count(); 10 != size {
|
|
||||||
t.Errorf("s.Count(): 10 != %v\n", size)
|
|
||||||
}
|
|
||||||
if size := s.Size(); 10 != size {
|
|
||||||
t.Errorf("s.Size(): 10 != %v\n", size)
|
|
||||||
}
|
|
||||||
if l := len(s.Values()); 10 != l {
|
|
||||||
t.Errorf("len(s.Values()): 10 != %v\n", l)
|
|
||||||
}
|
|
||||||
for _, v := range s.Values() {
|
|
||||||
if v > 10 || v < 0 {
|
|
||||||
t.Errorf("out of range [0, 10): %v\n", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExpDecaySample100(t *testing.T) {
|
|
||||||
rand.Seed(1)
|
|
||||||
s := NewExpDecaySample(1000, 0.01)
|
|
||||||
for i := 0; i < 100; i++ {
|
|
||||||
s.Update(int64(i))
|
|
||||||
}
|
|
||||||
if size := s.Count(); 100 != size {
|
|
||||||
t.Errorf("s.Count(): 100 != %v\n", size)
|
|
||||||
}
|
|
||||||
if size := s.Size(); 100 != size {
|
|
||||||
t.Errorf("s.Size(): 100 != %v\n", size)
|
|
||||||
}
|
|
||||||
if l := len(s.Values()); 100 != l {
|
|
||||||
t.Errorf("len(s.Values()): 100 != %v\n", l)
|
|
||||||
}
|
|
||||||
for _, v := range s.Values() {
|
|
||||||
if v > 100 || v < 0 {
|
|
||||||
t.Errorf("out of range [0, 100): %v\n", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExpDecaySample1000(t *testing.T) {
|
|
||||||
rand.Seed(1)
|
|
||||||
s := NewExpDecaySample(100, 0.99)
|
|
||||||
for i := 0; i < 1000; i++ {
|
|
||||||
s.Update(int64(i))
|
|
||||||
}
|
|
||||||
if size := s.Count(); 1000 != size {
|
|
||||||
t.Errorf("s.Count(): 1000 != %v\n", size)
|
|
||||||
}
|
|
||||||
if size := s.Size(); 100 != size {
|
|
||||||
t.Errorf("s.Size(): 100 != %v\n", size)
|
|
||||||
}
|
|
||||||
if l := len(s.Values()); 100 != l {
|
|
||||||
t.Errorf("len(s.Values()): 100 != %v\n", l)
|
|
||||||
}
|
|
||||||
for _, v := range s.Values() {
|
|
||||||
if v > 1000 || v < 0 {
|
|
||||||
t.Errorf("out of range [0, 1000): %v\n", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This test makes sure that the sample's priority is not amplified by using
|
|
||||||
// nanosecond duration since start rather than second duration since start.
|
|
||||||
// The priority becomes +Inf quickly after starting if this is done,
|
|
||||||
// effectively freezing the set of samples until a rescale step happens.
|
|
||||||
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
|
|
||||||
rand.Seed(1)
|
|
||||||
s := NewExpDecaySample(100, 0.99)
|
|
||||||
for i := 0; i < 100; i++ {
|
|
||||||
s.Update(10)
|
|
||||||
}
|
|
||||||
time.Sleep(1 * time.Millisecond)
|
|
||||||
for i := 0; i < 100; i++ {
|
|
||||||
s.Update(20)
|
|
||||||
}
|
|
||||||
v := s.Values()
|
|
||||||
avg := float64(0)
|
|
||||||
for i := 0; i < len(v); i++ {
|
|
||||||
avg += float64(v[i])
|
|
||||||
}
|
|
||||||
avg /= float64(len(v))
|
|
||||||
if avg > 16 || avg < 14 {
|
|
||||||
t.Errorf("out of range [14, 16]: %v\n", avg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExpDecaySampleRescale(t *testing.T) {
|
|
||||||
s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
|
|
||||||
s.update(time.Now(), 1)
|
|
||||||
s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
|
|
||||||
for _, v := range s.values.Values() {
|
|
||||||
if v.k == 0.0 {
|
|
||||||
t.Fatal("v.k == 0.0")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExpDecaySampleSnapshot(t *testing.T) {
|
|
||||||
now := time.Now()
|
|
||||||
rand.Seed(1)
|
|
||||||
s := NewExpDecaySample(100, 0.99)
|
|
||||||
for i := 1; i <= 10000; i++ {
|
|
||||||
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
|
|
||||||
}
|
|
||||||
snapshot := s.Snapshot()
|
|
||||||
s.Update(1)
|
|
||||||
testExpDecaySampleStatistics(t, snapshot)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExpDecaySampleStatistics(t *testing.T) {
|
|
||||||
now := time.Now()
|
|
||||||
rand.Seed(1)
|
|
||||||
s := NewExpDecaySample(100, 0.99)
|
|
||||||
for i := 1; i <= 10000; i++ {
|
|
||||||
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
|
|
||||||
}
|
|
||||||
testExpDecaySampleStatistics(t, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUniformSample(t *testing.T) {
|
|
||||||
rand.Seed(1)
|
|
||||||
s := NewUniformSample(100)
|
|
||||||
for i := 0; i < 1000; i++ {
|
|
||||||
s.Update(int64(i))
|
|
||||||
}
|
|
||||||
if size := s.Count(); 1000 != size {
|
|
||||||
t.Errorf("s.Count(): 1000 != %v\n", size)
|
|
||||||
}
|
|
||||||
if size := s.Size(); 100 != size {
|
|
||||||
t.Errorf("s.Size(): 100 != %v\n", size)
|
|
||||||
}
|
|
||||||
if l := len(s.Values()); 100 != l {
|
|
||||||
t.Errorf("len(s.Values()): 100 != %v\n", l)
|
|
||||||
}
|
|
||||||
for _, v := range s.Values() {
|
|
||||||
if v > 1000 || v < 0 {
|
|
||||||
t.Errorf("out of range [0, 100): %v\n", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUniformSampleIncludesTail(t *testing.T) {
|
|
||||||
rand.Seed(1)
|
|
||||||
s := NewUniformSample(100)
|
|
||||||
max := 100
|
|
||||||
for i := 0; i < max; i++ {
|
|
||||||
s.Update(int64(i))
|
|
||||||
}
|
|
||||||
v := s.Values()
|
|
||||||
sum := 0
|
|
||||||
exp := (max - 1) * max / 2
|
|
||||||
for i := 0; i < len(v); i++ {
|
|
||||||
sum += int(v[i])
|
|
||||||
}
|
|
||||||
if exp != sum {
|
|
||||||
t.Errorf("sum: %v != %v\n", exp, sum)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUniformSampleSnapshot(t *testing.T) {
|
|
||||||
s := NewUniformSample(100)
|
|
||||||
for i := 1; i <= 10000; i++ {
|
|
||||||
s.Update(int64(i))
|
|
||||||
}
|
|
||||||
snapshot := s.Snapshot()
|
|
||||||
s.Update(1)
|
|
||||||
testUniformSampleStatistics(t, snapshot)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUniformSampleStatistics(t *testing.T) {
|
|
||||||
rand.Seed(1)
|
|
||||||
s := NewUniformSample(100)
|
|
||||||
for i := 1; i <= 10000; i++ {
|
|
||||||
s.Update(int64(i))
|
|
||||||
}
|
|
||||||
testUniformSampleStatistics(t, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchmarkSample(b *testing.B, s Sample) {
|
|
||||||
var memStats runtime.MemStats
|
|
||||||
runtime.ReadMemStats(&memStats)
|
|
||||||
pauseTotalNs := memStats.PauseTotalNs
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
s.Update(1)
|
|
||||||
}
|
|
||||||
b.StopTimer()
|
|
||||||
runtime.GC()
|
|
||||||
runtime.ReadMemStats(&memStats)
|
|
||||||
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testExpDecaySampleStatistics(t *testing.T, s Sample) {
|
|
||||||
if count := s.Count(); 10000 != count {
|
|
||||||
t.Errorf("s.Count(): 10000 != %v\n", count)
|
|
||||||
}
|
|
||||||
if min := s.Min(); 107 != min {
|
|
||||||
t.Errorf("s.Min(): 107 != %v\n", min)
|
|
||||||
}
|
|
||||||
if max := s.Max(); 10000 != max {
|
|
||||||
t.Errorf("s.Max(): 10000 != %v\n", max)
|
|
||||||
}
|
|
||||||
if mean := s.Mean(); 4965.98 != mean {
|
|
||||||
t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
|
|
||||||
}
|
|
||||||
if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
|
|
||||||
t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
|
|
||||||
}
|
|
||||||
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
|
|
||||||
if 4615 != ps[0] {
|
|
||||||
t.Errorf("median: 4615 != %v\n", ps[0])
|
|
||||||
}
|
|
||||||
if 7672 != ps[1] {
|
|
||||||
t.Errorf("75th percentile: 7672 != %v\n", ps[1])
|
|
||||||
}
|
|
||||||
if 9998.99 != ps[2] {
|
|
||||||
t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testUniformSampleStatistics(t *testing.T, s Sample) {
|
|
||||||
if count := s.Count(); 10000 != count {
|
|
||||||
t.Errorf("s.Count(): 10000 != %v\n", count)
|
|
||||||
}
|
|
||||||
if min := s.Min(); 37 != min {
|
|
||||||
t.Errorf("s.Min(): 37 != %v\n", min)
|
|
||||||
}
|
|
||||||
if max := s.Max(); 9989 != max {
|
|
||||||
t.Errorf("s.Max(): 9989 != %v\n", max)
|
|
||||||
}
|
|
||||||
if mean := s.Mean(); 4748.14 != mean {
|
|
||||||
t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
|
|
||||||
}
|
|
||||||
if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
|
|
||||||
t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
|
|
||||||
}
|
|
||||||
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
|
|
||||||
if 4599 != ps[0] {
|
|
||||||
t.Errorf("median: 4599 != %v\n", ps[0])
|
|
||||||
}
|
|
||||||
if 7380.5 != ps[1] {
|
|
||||||
t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
|
|
||||||
}
|
|
||||||
if 9986.429999999998 != ps[2] {
|
|
||||||
t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestUniformSampleConcurrentUpdateCount would expose data race problems with
|
|
||||||
// concurrent Update and Count calls on Sample when test is called with -race
|
|
||||||
// argument
|
|
||||||
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping in short mode")
|
|
||||||
}
|
|
||||||
s := NewUniformSample(100)
|
|
||||||
for i := 0; i < 100; i++ {
|
|
||||||
s.Update(int64(i))
|
|
||||||
}
|
|
||||||
quit := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
t := time.NewTicker(10 * time.Millisecond)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-t.C:
|
|
||||||
s.Update(rand.Int63())
|
|
||||||
case <-quit:
|
|
||||||
t.Stop()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
for i := 0; i < 1000; i++ {
|
|
||||||
s.Count()
|
|
||||||
time.Sleep(5 * time.Millisecond)
|
|
||||||
}
|
|
||||||
quit <- struct{}{}
|
|
||||||
}
|
|
||||||
@@ -1,69 +0,0 @@
|
|||||||
// Metrics output to StatHat.
|
|
||||||
package stathat
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/rcrowley/go-metrics"
|
|
||||||
"github.com/stathat/go"
|
|
||||||
"log"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Stathat(r metrics.Registry, d time.Duration, userkey string) {
|
|
||||||
for {
|
|
||||||
if err := sh(r, userkey); nil != err {
|
|
||||||
log.Println(err)
|
|
||||||
}
|
|
||||||
time.Sleep(d)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sh(r metrics.Registry, userkey string) error {
|
|
||||||
r.Each(func(name string, i interface{}) {
|
|
||||||
switch metric := i.(type) {
|
|
||||||
case metrics.Counter:
|
|
||||||
stathat.PostEZCount(name, userkey, int(metric.Count()))
|
|
||||||
case metrics.Gauge:
|
|
||||||
stathat.PostEZValue(name, userkey, float64(metric.Value()))
|
|
||||||
case metrics.GaugeFloat64:
|
|
||||||
stathat.PostEZValue(name, userkey, float64(metric.Value()))
|
|
||||||
case metrics.Histogram:
|
|
||||||
h := metric.Snapshot()
|
|
||||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
stathat.PostEZCount(name+".count", userkey, int(h.Count()))
|
|
||||||
stathat.PostEZValue(name+".min", userkey, float64(h.Min()))
|
|
||||||
stathat.PostEZValue(name+".max", userkey, float64(h.Max()))
|
|
||||||
stathat.PostEZValue(name+".mean", userkey, float64(h.Mean()))
|
|
||||||
stathat.PostEZValue(name+".std-dev", userkey, float64(h.StdDev()))
|
|
||||||
stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
|
|
||||||
stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
|
|
||||||
stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
|
|
||||||
stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
|
|
||||||
stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
|
|
||||||
case metrics.Meter:
|
|
||||||
m := metric.Snapshot()
|
|
||||||
stathat.PostEZCount(name+".count", userkey, int(m.Count()))
|
|
||||||
stathat.PostEZValue(name+".one-minute", userkey, float64(m.Rate1()))
|
|
||||||
stathat.PostEZValue(name+".five-minute", userkey, float64(m.Rate5()))
|
|
||||||
stathat.PostEZValue(name+".fifteen-minute", userkey, float64(m.Rate15()))
|
|
||||||
stathat.PostEZValue(name+".mean", userkey, float64(m.RateMean()))
|
|
||||||
case metrics.Timer:
|
|
||||||
t := metric.Snapshot()
|
|
||||||
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
stathat.PostEZCount(name+".count", userkey, int(t.Count()))
|
|
||||||
stathat.PostEZValue(name+".min", userkey, float64(t.Min()))
|
|
||||||
stathat.PostEZValue(name+".max", userkey, float64(t.Max()))
|
|
||||||
stathat.PostEZValue(name+".mean", userkey, float64(t.Mean()))
|
|
||||||
stathat.PostEZValue(name+".std-dev", userkey, float64(t.StdDev()))
|
|
||||||
stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
|
|
||||||
stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
|
|
||||||
stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
|
|
||||||
stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
|
|
||||||
stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
|
|
||||||
stathat.PostEZValue(name+".one-minute", userkey, float64(t.Rate1()))
|
|
||||||
stathat.PostEZValue(name+".five-minute", userkey, float64(t.Rate5()))
|
|
||||||
stathat.PostEZValue(name+".fifteen-minute", userkey, float64(t.Rate15()))
|
|
||||||
stathat.PostEZValue(name+".mean-rate", userkey, float64(t.RateMean()))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
78
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/syslog.go
generated
vendored
@@ -1,78 +0,0 @@
// +build !windows
|
|
||||||
|
|
||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log/syslog"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Output each metric in the given registry to syslog periodically using
|
|
||||||
// the given syslogger.
|
|
||||||
func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
|
|
||||||
for _ = range time.Tick(d) {
|
|
||||||
r.Each(func(name string, i interface{}) {
|
|
||||||
switch metric := i.(type) {
|
|
||||||
case Counter:
|
|
||||||
w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
|
|
||||||
case Gauge:
|
|
||||||
w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
|
|
||||||
case GaugeFloat64:
|
|
||||||
w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
|
|
||||||
case Healthcheck:
|
|
||||||
metric.Check()
|
|
||||||
w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
|
|
||||||
case Histogram:
|
|
||||||
h := metric.Snapshot()
|
|
||||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
w.Info(fmt.Sprintf(
|
|
||||||
"histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
|
|
||||||
name,
|
|
||||||
h.Count(),
|
|
||||||
h.Min(),
|
|
||||||
h.Max(),
|
|
||||||
h.Mean(),
|
|
||||||
h.StdDev(),
|
|
||||||
ps[0],
|
|
||||||
ps[1],
|
|
||||||
ps[2],
|
|
||||||
ps[3],
|
|
||||||
ps[4],
|
|
||||||
))
|
|
||||||
case Meter:
|
|
||||||
m := metric.Snapshot()
|
|
||||||
w.Info(fmt.Sprintf(
|
|
||||||
"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
|
|
||||||
name,
|
|
||||||
m.Count(),
|
|
||||||
m.Rate1(),
|
|
||||||
m.Rate5(),
|
|
||||||
m.Rate15(),
|
|
||||||
m.RateMean(),
|
|
||||||
))
|
|
||||||
case Timer:
|
|
||||||
t := metric.Snapshot()
|
|
||||||
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
w.Info(fmt.Sprintf(
|
|
||||||
"timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
|
|
||||||
name,
|
|
||||||
t.Count(),
|
|
||||||
t.Min(),
|
|
||||||
t.Max(),
|
|
||||||
t.Mean(),
|
|
||||||
t.StdDev(),
|
|
||||||
ps[0],
|
|
||||||
ps[1],
|
|
||||||
ps[2],
|
|
||||||
ps[3],
|
|
||||||
ps[4],
|
|
||||||
t.Rate1(),
|
|
||||||
t.Rate5(),
|
|
||||||
t.Rate15(),
|
|
||||||
t.RateMean(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
311
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/timer.go
generated
vendored
@@ -1,311 +0,0 @@
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Timers capture the duration and rate of events.
|
|
||||||
type Timer interface {
|
|
||||||
Count() int64
|
|
||||||
Max() int64
|
|
||||||
Mean() float64
|
|
||||||
Min() int64
|
|
||||||
Percentile(float64) float64
|
|
||||||
Percentiles([]float64) []float64
|
|
||||||
Rate1() float64
|
|
||||||
Rate5() float64
|
|
||||||
Rate15() float64
|
|
||||||
RateMean() float64
|
|
||||||
Snapshot() Timer
|
|
||||||
StdDev() float64
|
|
||||||
Sum() int64
|
|
||||||
Time(func())
|
|
||||||
Update(time.Duration)
|
|
||||||
UpdateSince(time.Time)
|
|
||||||
Variance() float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetOrRegisterTimer returns an existing Timer or constructs and registers a
|
|
||||||
// new StandardTimer.
|
|
||||||
func GetOrRegisterTimer(name string, r Registry) Timer {
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
return r.GetOrRegister(name, NewTimer).(Timer)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
|
|
||||||
func NewCustomTimer(h Histogram, m Meter) Timer {
|
|
||||||
if UseNilMetrics {
|
|
||||||
return NilTimer{}
|
|
||||||
}
|
|
||||||
return &StandardTimer{
|
|
||||||
histogram: h,
|
|
||||||
meter: m,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRegisteredTimer constructs and registers a new StandardTimer.
|
|
||||||
func NewRegisteredTimer(name string, r Registry) Timer {
|
|
||||||
c := NewTimer()
|
|
||||||
if nil == r {
|
|
||||||
r = DefaultRegistry
|
|
||||||
}
|
|
||||||
r.Register(name, c)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTimer constructs a new StandardTimer using an exponentially-decaying
|
|
||||||
// sample with the same reservoir size and alpha as UNIX load averages.
|
|
||||||
func NewTimer() Timer {
|
|
||||||
if UseNilMetrics {
|
|
||||||
return NilTimer{}
|
|
||||||
}
|
|
||||||
return &StandardTimer{
|
|
||||||
histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
|
|
||||||
meter: NewMeter(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NilTimer is a no-op Timer.
|
|
||||||
type NilTimer struct {
|
|
||||||
h Histogram
|
|
||||||
m Meter
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count is a no-op.
|
|
||||||
func (NilTimer) Count() int64 { return 0 }
|
|
||||||
|
|
||||||
// Max is a no-op.
|
|
||||||
func (NilTimer) Max() int64 { return 0 }
|
|
||||||
|
|
||||||
// Mean is a no-op.
|
|
||||||
func (NilTimer) Mean() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Min is a no-op.
|
|
||||||
func (NilTimer) Min() int64 { return 0 }
|
|
||||||
|
|
||||||
// Percentile is a no-op.
|
|
||||||
func (NilTimer) Percentile(p float64) float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Percentiles is a no-op.
|
|
||||||
func (NilTimer) Percentiles(ps []float64) []float64 {
|
|
||||||
return make([]float64, len(ps))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate1 is a no-op.
|
|
||||||
func (NilTimer) Rate1() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Rate5 is a no-op.
|
|
||||||
func (NilTimer) Rate5() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Rate15 is a no-op.
|
|
||||||
func (NilTimer) Rate15() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// RateMean is a no-op.
|
|
||||||
func (NilTimer) RateMean() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Snapshot is a no-op.
|
|
||||||
func (NilTimer) Snapshot() Timer { return NilTimer{} }
|
|
||||||
|
|
||||||
// StdDev is a no-op.
|
|
||||||
func (NilTimer) StdDev() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// Sum is a no-op.
|
|
||||||
func (NilTimer) Sum() int64 { return 0 }
|
|
||||||
|
|
||||||
// Time is a no-op.
|
|
||||||
func (NilTimer) Time(func()) {}
|
|
||||||
|
|
||||||
// Update is a no-op.
|
|
||||||
func (NilTimer) Update(time.Duration) {}
|
|
||||||
|
|
||||||
// UpdateSince is a no-op.
|
|
||||||
func (NilTimer) UpdateSince(time.Time) {}
|
|
||||||
|
|
||||||
// Variance is a no-op.
|
|
||||||
func (NilTimer) Variance() float64 { return 0.0 }
|
|
||||||
|
|
||||||
// StandardTimer is the standard implementation of a Timer and uses a Histogram
|
|
||||||
// and Meter.
|
|
||||||
type StandardTimer struct {
|
|
||||||
histogram Histogram
|
|
||||||
meter Meter
|
|
||||||
mutex sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the number of events recorded.
|
|
||||||
func (t *StandardTimer) Count() int64 {
|
|
||||||
return t.histogram.Count()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Max returns the maximum value in the sample.
|
|
||||||
func (t *StandardTimer) Max() int64 {
|
|
||||||
return t.histogram.Max()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mean returns the mean of the values in the sample.
|
|
||||||
func (t *StandardTimer) Mean() float64 {
|
|
||||||
return t.histogram.Mean()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Min returns the minimum value in the sample.
|
|
||||||
func (t *StandardTimer) Min() int64 {
|
|
||||||
return t.histogram.Min()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Percentile returns an arbitrary percentile of the values in the sample.
|
|
||||||
func (t *StandardTimer) Percentile(p float64) float64 {
|
|
||||||
return t.histogram.Percentile(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Percentiles returns a slice of arbitrary percentiles of the values in the
|
|
||||||
// sample.
|
|
||||||
func (t *StandardTimer) Percentiles(ps []float64) []float64 {
|
|
||||||
return t.histogram.Percentiles(ps)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate1 returns the one-minute moving average rate of events per second.
|
|
||||||
func (t *StandardTimer) Rate1() float64 {
|
|
||||||
return t.meter.Rate1()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate5 returns the five-minute moving average rate of events per second.
|
|
||||||
func (t *StandardTimer) Rate5() float64 {
|
|
||||||
return t.meter.Rate5()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate15 returns the fifteen-minute moving average rate of events per second.
|
|
||||||
func (t *StandardTimer) Rate15() float64 {
|
|
||||||
return t.meter.Rate15()
|
|
||||||
}
|
|
||||||
|
|
||||||
// RateMean returns the meter's mean rate of events per second.
|
|
||||||
func (t *StandardTimer) RateMean() float64 {
|
|
||||||
return t.meter.RateMean()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshot returns a read-only copy of the timer.
|
|
||||||
func (t *StandardTimer) Snapshot() Timer {
|
|
||||||
t.mutex.Lock()
|
|
||||||
defer t.mutex.Unlock()
|
|
||||||
return &TimerSnapshot{
|
|
||||||
histogram: t.histogram.Snapshot().(*HistogramSnapshot),
|
|
||||||
meter: t.meter.Snapshot().(*MeterSnapshot),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StdDev returns the standard deviation of the values in the sample.
|
|
||||||
func (t *StandardTimer) StdDev() float64 {
|
|
||||||
return t.histogram.StdDev()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum returns the sum in the sample.
|
|
||||||
func (t *StandardTimer) Sum() int64 {
|
|
||||||
return t.histogram.Sum()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record the duration of the execution of the given function.
|
|
||||||
func (t *StandardTimer) Time(f func()) {
|
|
||||||
ts := time.Now()
|
|
||||||
f()
|
|
||||||
t.Update(time.Since(ts))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record the duration of an event.
|
|
||||||
func (t *StandardTimer) Update(d time.Duration) {
|
|
||||||
t.mutex.Lock()
|
|
||||||
defer t.mutex.Unlock()
|
|
||||||
t.histogram.Update(int64(d))
|
|
||||||
t.meter.Mark(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record the duration of an event that started at a time and ends now.
|
|
||||||
func (t *StandardTimer) UpdateSince(ts time.Time) {
|
|
||||||
t.mutex.Lock()
|
|
||||||
defer t.mutex.Unlock()
|
|
||||||
t.histogram.Update(int64(time.Since(ts)))
|
|
||||||
t.meter.Mark(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Variance returns the variance of the values in the sample.
|
|
||||||
func (t *StandardTimer) Variance() float64 {
|
|
||||||
return t.histogram.Variance()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimerSnapshot is a read-only copy of another Timer.
|
|
||||||
type TimerSnapshot struct {
|
|
||||||
histogram *HistogramSnapshot
|
|
||||||
meter *MeterSnapshot
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the number of events recorded at the time the snapshot was
|
|
||||||
// taken.
|
|
||||||
func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
|
|
||||||
|
|
||||||
// Max returns the maximum value at the time the snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
|
|
||||||
|
|
||||||
// Mean returns the mean value at the time the snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
|
|
||||||
|
|
||||||
// Min returns the minimum value at the time the snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
|
|
||||||
|
|
||||||
// Percentile returns an arbitrary percentile of sampled values at the time the
|
|
||||||
// snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) Percentile(p float64) float64 {
|
|
||||||
return t.histogram.Percentile(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Percentiles returns a slice of arbitrary percentiles of sampled values at
|
|
||||||
// the time the snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
|
|
||||||
return t.histogram.Percentiles(ps)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate1 returns the one-minute moving average rate of events per second at the
|
|
||||||
// time the snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
|
|
||||||
|
|
||||||
// Rate5 returns the five-minute moving average rate of events per second at
|
|
||||||
// the time the snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
|
|
||||||
|
|
||||||
// Rate15 returns the fifteen-minute moving average rate of events per second
|
|
||||||
// at the time the snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
|
|
||||||
|
|
||||||
// RateMean returns the meter's mean rate of events per second at the time the
|
|
||||||
// snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
|
|
||||||
|
|
||||||
// Snapshot returns the snapshot.
|
|
||||||
func (t *TimerSnapshot) Snapshot() Timer { return t }
|
|
||||||
|
|
||||||
// StdDev returns the standard deviation of the values at the time the snapshot
|
|
||||||
// was taken.
|
|
||||||
func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
|
|
||||||
|
|
||||||
// Sum returns the sum at the time the snapshot was taken.
|
|
||||||
func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
|
|
||||||
|
|
||||||
// Time panics.
|
|
||||||
func (*TimerSnapshot) Time(func()) {
|
|
||||||
panic("Time called on a TimerSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update panics.
|
|
||||||
func (*TimerSnapshot) Update(time.Duration) {
|
|
||||||
panic("Update called on a TimerSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateSince panics.
|
|
||||||
func (*TimerSnapshot) UpdateSince(time.Time) {
|
|
||||||
panic("UpdateSince called on a TimerSnapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Variance returns the variance of the values at the time the snapshot was
|
|
||||||
// taken.
|
|
||||||
func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
|
|
||||||
81
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/timer_test.go
generated
vendored
@@ -1,81 +0,0 @@
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func BenchmarkTimer(b *testing.B) {
|
|
||||||
tm := NewTimer()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
tm.Update(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetOrRegisterTimer(t *testing.T) {
|
|
||||||
r := NewRegistry()
|
|
||||||
NewRegisteredTimer("foo", r).Update(47)
|
|
||||||
if tm := GetOrRegisterTimer("foo", r); 1 != tm.Count() {
|
|
||||||
t.Fatal(tm)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTimerExtremes(t *testing.T) {
|
|
||||||
tm := NewTimer()
|
|
||||||
tm.Update(math.MaxInt64)
|
|
||||||
tm.Update(0)
|
|
||||||
if stdDev := tm.StdDev(); 4.611686018427388e+18 != stdDev {
|
|
||||||
t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTimerFunc(t *testing.T) {
|
|
||||||
tm := NewTimer()
|
|
||||||
tm.Time(func() { time.Sleep(50e6) })
|
|
||||||
if max := tm.Max(); 45e6 > max || max > 55e6 {
|
|
||||||
t.Errorf("tm.Max(): 45e6 > %v || %v > 55e6\n", max, max)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTimerZero(t *testing.T) {
|
|
||||||
tm := NewTimer()
|
|
||||||
if count := tm.Count(); 0 != count {
|
|
||||||
t.Errorf("tm.Count(): 0 != %v\n", count)
|
|
||||||
}
|
|
||||||
if min := tm.Min(); 0 != min {
|
|
||||||
t.Errorf("tm.Min(): 0 != %v\n", min)
|
|
||||||
}
|
|
||||||
if max := tm.Max(); 0 != max {
|
|
||||||
t.Errorf("tm.Max(): 0 != %v\n", max)
|
|
||||||
}
|
|
||||||
if mean := tm.Mean(); 0.0 != mean {
|
|
||||||
t.Errorf("tm.Mean(): 0.0 != %v\n", mean)
|
|
||||||
}
|
|
||||||
if stdDev := tm.StdDev(); 0.0 != stdDev {
|
|
||||||
t.Errorf("tm.StdDev(): 0.0 != %v\n", stdDev)
|
|
||||||
}
|
|
||||||
ps := tm.Percentiles([]float64{0.5, 0.75, 0.99})
|
|
||||||
if 0.0 != ps[0] {
|
|
||||||
t.Errorf("median: 0.0 != %v\n", ps[0])
|
|
||||||
}
|
|
||||||
if 0.0 != ps[1] {
|
|
||||||
t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
|
|
||||||
}
|
|
||||||
if 0.0 != ps[2] {
|
|
||||||
t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
|
|
||||||
}
|
|
||||||
if rate1 := tm.Rate1(); 0.0 != rate1 {
|
|
||||||
t.Errorf("tm.Rate1(): 0.0 != %v\n", rate1)
|
|
||||||
}
|
|
||||||
if rate5 := tm.Rate5(); 0.0 != rate5 {
|
|
||||||
t.Errorf("tm.Rate5(): 0.0 != %v\n", rate5)
|
|
||||||
}
|
|
||||||
if rate15 := tm.Rate15(); 0.0 != rate15 {
|
|
||||||
t.Errorf("tm.Rate15(): 0.0 != %v\n", rate15)
|
|
||||||
}
|
|
||||||
if rateMean := tm.RateMean(); 0.0 != rateMean {
|
|
||||||
t.Errorf("tm.RateMean(): 0.0 != %v\n", rateMean)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
10
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/validate.sh
generated
vendored
@@ -1,10 +0,0 @@
#!/bin/bash

set -e

# check there are no formatting issues
GOFMT_LINES=`gofmt -l . | wc -l | xargs`
test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"

# run the tests for the root package
go test .
100
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/writer.go
generated
vendored
@@ -1,100 +0,0 @@
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"sort"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Write sorts writes each metric in the given registry periodically to the
|
|
||||||
// given io.Writer.
|
|
||||||
func Write(r Registry, d time.Duration, w io.Writer) {
|
|
||||||
for _ = range time.Tick(d) {
|
|
||||||
WriteOnce(r, w)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteOnce sorts and writes metrics in the given registry to the given
|
|
||||||
// io.Writer.
|
|
||||||
func WriteOnce(r Registry, w io.Writer) {
|
|
||||||
var namedMetrics namedMetricSlice
|
|
||||||
r.Each(func(name string, i interface{}) {
|
|
||||||
namedMetrics = append(namedMetrics, namedMetric{name, i})
|
|
||||||
})
|
|
||||||
|
|
||||||
sort.Sort(namedMetrics)
|
|
||||||
for _, namedMetric := range namedMetrics {
|
|
||||||
switch metric := namedMetric.m.(type) {
|
|
||||||
case Counter:
|
|
||||||
fmt.Fprintf(w, "counter %s\n", namedMetric.name)
|
|
||||||
fmt.Fprintf(w, " count: %9d\n", metric.Count())
|
|
||||||
case Gauge:
|
|
||||||
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
|
|
||||||
fmt.Fprintf(w, " value: %9d\n", metric.Value())
|
|
||||||
case GaugeFloat64:
|
|
||||||
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
|
|
||||||
fmt.Fprintf(w, " value: %f\n", metric.Value())
|
|
||||||
case Healthcheck:
|
|
||||||
metric.Check()
|
|
||||||
fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
|
|
||||||
fmt.Fprintf(w, " error: %v\n", metric.Error())
|
|
||||||
case Histogram:
|
|
||||||
h := metric.Snapshot()
|
|
||||||
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
|
|
||||||
fmt.Fprintf(w, " count: %9d\n", h.Count())
|
|
||||||
fmt.Fprintf(w, " min: %9d\n", h.Min())
|
|
||||||
fmt.Fprintf(w, " max: %9d\n", h.Max())
|
|
||||||
fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
|
|
||||||
fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
|
|
||||||
fmt.Fprintf(w, " median: %12.2f\n", ps[0])
|
|
||||||
fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
|
|
||||||
fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
|
|
||||||
fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
|
|
||||||
fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
|
|
||||||
case Meter:
|
|
||||||
m := metric.Snapshot()
|
|
||||||
fmt.Fprintf(w, "meter %s\n", namedMetric.name)
|
|
||||||
fmt.Fprintf(w, " count: %9d\n", m.Count())
|
|
||||||
fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
|
|
||||||
fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
|
|
||||||
fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
|
|
||||||
fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
|
|
||||||
case Timer:
|
|
||||||
t := metric.Snapshot()
|
|
||||||
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
|
||||||
fmt.Fprintf(w, "timer %s\n", namedMetric.name)
|
|
||||||
fmt.Fprintf(w, " count: %9d\n", t.Count())
|
|
||||||
fmt.Fprintf(w, " min: %9d\n", t.Min())
|
|
||||||
fmt.Fprintf(w, " max: %9d\n", t.Max())
|
|
||||||
fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
|
|
||||||
fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
|
|
||||||
fmt.Fprintf(w, " median: %12.2f\n", ps[0])
|
|
||||||
fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
|
|
||||||
fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
|
|
||||||
fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
|
|
||||||
fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
|
|
||||||
fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
|
|
||||||
fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
|
|
||||||
fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
|
|
||||||
fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type namedMetric struct {
|
|
||||||
name string
|
|
||||||
m interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
|
|
||||||
type namedMetricSlice []namedMetric
|
|
||||||
|
|
||||||
func (nms namedMetricSlice) Len() int { return len(nms) }
|
|
||||||
|
|
||||||
func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
|
|
||||||
|
|
||||||
func (nms namedMetricSlice) Less(i, j int) bool {
|
|
||||||
return nms[i].name < nms[j].name
|
|
||||||
}
|
|
||||||
22
cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/rcrowley/go-metrics/writer_test.go
generated
vendored
@@ -1,22 +0,0 @@
package metrics

import (
	"sort"
	"testing"
)

func TestMetricsSorting(t *testing.T) {
	var namedMetrics = namedMetricSlice{
		{name: "zzz"},
		{name: "bbb"},
		{name: "fff"},
		{name: "ggg"},
	}

	sort.Sort(namedMetrics)
	for i, name := range []string{"bbb", "fff", "ggg", "zzz"} {
		if namedMetrics[i].name != name {
			t.Fail()
		}
	}
}
@@ -1,140 +0,0 @@
|
|||||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
|
||||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package logger
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAPI(t *testing.T) {
|
|
||||||
l := New()
|
|
||||||
l.SetFlags(0)
|
|
||||||
l.SetPrefix("testing")
|
|
||||||
|
|
||||||
debug := 0
|
|
||||||
l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, &debug))
|
|
||||||
info := 0
|
|
||||||
l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, &info))
|
|
||||||
ok := 0
|
|
||||||
l.AddHandler(LevelOK, checkFunc(t, LevelOK, &ok))
|
|
||||||
warn := 0
|
|
||||||
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, &warn))
|
|
||||||
|
|
||||||
l.Debugf("test %d", 0)
|
|
||||||
l.Debugln("test", 0)
|
|
||||||
l.Infof("test %d", 1)
|
|
||||||
l.Infoln("test", 1)
|
|
||||||
l.Okf("test %d", 2)
|
|
||||||
l.Okln("test", 2)
|
|
||||||
l.Warnf("test %d", 3)
|
|
||||||
l.Warnln("test", 3)
|
|
||||||
|
|
||||||
if debug != 8 {
|
|
||||||
t.Errorf("Debug handler called %d != 8 times", debug)
|
|
||||||
}
|
|
||||||
if info != 6 {
|
|
||||||
t.Errorf("Info handler called %d != 6 times", info)
|
|
||||||
}
|
|
||||||
if ok != 4 {
|
|
||||||
t.Errorf("Ok handler called %d != 4 times", ok)
|
|
||||||
}
|
|
||||||
if warn != 2 {
|
|
||||||
t.Errorf("Warn handler called %d != 2 times", warn)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkFunc(t *testing.T, expectl LogLevel, counter *int) func(LogLevel, string) {
|
|
||||||
return func(l LogLevel, msg string) {
|
|
||||||
*counter++
|
|
||||||
if l < expectl {
|
|
||||||
t.Errorf("Incorrect message level %d < %d", l, expectl)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFacilityDebugging(t *testing.T) {
|
|
||||||
l := New()
|
|
||||||
l.SetFlags(0)
|
|
||||||
|
|
||||||
msgs := 0
|
|
||||||
l.AddHandler(LevelDebug, func(l LogLevel, msg string) {
|
|
||||||
msgs++
|
|
||||||
if strings.Contains(msg, "f1") {
|
|
||||||
t.Fatal("Should not get message for facility f1")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
f0 := l.NewFacility("f0", "foo#0")
|
|
||||||
f1 := l.NewFacility("f1", "foo#1")
|
|
||||||
|
|
||||||
l.SetDebug("f0", true)
|
|
||||||
l.SetDebug("f1", false)
|
|
||||||
|
|
||||||
f0.Debugln("Debug line from f0")
|
|
||||||
f1.Debugln("Debug line from f1")
|
|
||||||
|
|
||||||
if msgs != 1 {
|
|
||||||
t.Fatalf("Incorrect number of messages, %d != 1", msgs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRecorder(t *testing.T) {
|
|
||||||
l := New()
|
|
||||||
l.SetFlags(0)
|
|
||||||
|
|
||||||
// Keep the last five warnings or higher, no special initial handling.
|
|
||||||
r0 := NewRecorder(l, LevelWarn, 5, 0)
|
|
||||||
// Keep the last ten infos or higher, with the first three being permanent.
|
|
||||||
r1 := NewRecorder(l, LevelInfo, 10, 3)
|
|
||||||
|
|
||||||
// Log a bunch of messages.
|
|
||||||
for i := 0; i < 15; i++ {
|
|
||||||
l.Debugf("Debug#%d", i)
|
|
||||||
l.Infof("Info#%d", i)
|
|
||||||
l.Warnf("Warn#%d", i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// r0 should contain the last five warnings
|
|
||||||
lines := r0.Since(time.Time{})
|
|
||||||
if len(lines) != 5 {
|
|
||||||
t.Fatalf("Incorrect length %d != 5", len(lines))
|
|
||||||
}
|
|
||||||
for i := 0; i < 5; i++ {
|
|
||||||
expected := fmt.Sprintf("Warn#%d", i+10)
|
|
||||||
if lines[i].Message != expected {
|
|
||||||
t.Error("Incorrect warning in r0:", lines[i].Message, "!=", expected)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// r0 should contain:
|
|
||||||
// - The first three messages
|
|
||||||
// - A "..." marker
|
|
||||||
// - The last six messages
|
|
||||||
// (totalling ten)
|
|
||||||
lines = r1.Since(time.Time{})
|
|
||||||
if len(lines) != 10 {
|
|
||||||
t.Fatalf("Incorrect length %d != 10", len(lines))
|
|
||||||
}
|
|
||||||
expected := []string{
|
|
||||||
"Info#0",
|
|
||||||
"Warn#0",
|
|
||||||
"Info#1",
|
|
||||||
"...",
|
|
||||||
"Info#12",
|
|
||||||
"Warn#12",
|
|
||||||
"Info#13",
|
|
||||||
"Warn#13",
|
|
||||||
"Info#14",
|
|
||||||
"Warn#14",
|
|
||||||
}
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
if lines[i].Message != expected[i] {
|
|
||||||
t.Error("Incorrect warning in r0:", lines[i].Message, "!=", expected[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -37,11 +37,14 @@ func CreateAtomic(path string, mode os.FileMode) (*AtomicWriter, error) {
 		return nil, err
 	}
 
-	if err := os.Chmod(fd.Name(), mode); err != nil {
-		fd.Close()
-		os.Remove(fd.Name())
-		return nil, err
-	}
+	// chmod fails on Android so don't even try
+	if runtime.GOOS != "android" {
+		if err := os.Chmod(fd.Name(), mode); err != nil {
+			fd.Close()
+			os.Remove(fd.Name())
+			return nil, err
+		}
+	}
 
 	w := &AtomicWriter{
 		path: path,
@@ -1,85 +0,0 @@
|
|||||||
// Copyright (C) 2015 The Syncthing Authors.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package osutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCreateAtomicCreate(t *testing.T) {
|
|
||||||
os.RemoveAll("testdata")
|
|
||||||
defer os.RemoveAll("testdata")
|
|
||||||
|
|
||||||
if err := os.Mkdir("testdata", 0755); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w, err := CreateAtomic("testdata/file", 0644)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := w.Write([]byte("hello"))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if n != 5 {
|
|
||||||
t.Fatal("written bytes", n, "!= 5")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := ioutil.ReadFile("testdata/file"); err == nil {
|
|
||||||
t.Fatal("file should not exist")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.Close(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
bs, err := ioutil.ReadFile("testdata/file")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(bs, []byte("hello")) {
|
|
||||||
t.Error("incorrect data")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateAtomicReplace(t *testing.T) {
|
|
||||||
os.RemoveAll("testdata")
|
|
||||||
defer os.RemoveAll("testdata")
|
|
||||||
|
|
||||||
if err := os.Mkdir("testdata", 0755); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ioutil.WriteFile("testdata/file", []byte("some old data"), 0644); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w, err := CreateAtomic("testdata/file", 0644)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := w.Write([]byte("hello")); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := w.Close(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
bs, err := ioutil.ReadFile("testdata/file")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(bs, []byte("hello")) {
|
|
||||||
t.Error("incorrect data")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,181 +0,0 @@
|
|||||||
// Copyright (C) 2014 The Syncthing Authors.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package osutil_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/syncthing/syncthing/lib/osutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestInWriteableDir(t *testing.T) {
|
|
||||||
err := os.RemoveAll("testdata")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer os.RemoveAll("testdata")
|
|
||||||
|
|
||||||
os.Mkdir("testdata", 0700)
|
|
||||||
os.Mkdir("testdata/rw", 0700)
|
|
||||||
os.Mkdir("testdata/ro", 0500)
|
|
||||||
|
|
||||||
create := func(name string) error {
|
|
||||||
fd, err := os.Create(name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fd.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
	// These should succeed

	err = osutil.InWritableDir(create, "testdata/file")
	if err != nil {
		t.Error("testdata/file:", err)
	}
	err = osutil.InWritableDir(create, "testdata/rw/foo")
	if err != nil {
		t.Error("testdata/rw/foo:", err)
	}
	err = osutil.InWritableDir(os.Remove, "testdata/rw/foo")
	if err != nil {
		t.Error("testdata/rw/foo:", err)
	}

	err = osutil.InWritableDir(create, "testdata/ro/foo")
	if err != nil {
		t.Error("testdata/ro/foo:", err)
	}
	err = osutil.InWritableDir(os.Remove, "testdata/ro/foo")
	if err != nil {
		t.Error("testdata/ro/foo:", err)
	}

	// These should not

	err = osutil.InWritableDir(create, "testdata/nonexistent/foo")
	if err == nil {
		t.Error("testdata/nonexistent/foo returned nil error")
	}
	err = osutil.InWritableDir(create, "testdata/file/foo")
	if err == nil {
		t.Error("testdata/file/foo returned nil error")
	}
}

func TestInWritableDirWindowsRemove(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.Skipf("Tests not required")
		return
	}

	err := os.RemoveAll("testdata")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll("testdata")

	create := func(name string) error {
		fd, err := os.Create(name)
		if err != nil {
			return err
		}
		fd.Close()
		return nil
	}

	os.Mkdir("testdata", 0700)

	os.Mkdir("testdata/windows", 0500)
	os.Mkdir("testdata/windows/ro", 0500)
	create("testdata/windows/ro/readonly")
	os.Chmod("testdata/windows/ro/readonly", 0500)

	for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
		err := os.Remove(path)
		if err == nil {
			t.Errorf("Expected error %s", path)
		}
	}

	for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
		err := osutil.InWritableDir(osutil.Remove, path)
		if err != nil {
			t.Errorf("Unexpected error %s: %s", path, err)
		}
	}
}

func TestInWritableDirWindowsRename(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.Skipf("Tests not required")
		return
	}

	err := os.RemoveAll("testdata")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll("testdata")

	create := func(name string) error {
		fd, err := os.Create(name)
		if err != nil {
			return err
		}
		fd.Close()
		return nil
	}

	os.Mkdir("testdata", 0700)

	os.Mkdir("testdata/windows", 0500)
	os.Mkdir("testdata/windows/ro", 0500)
	create("testdata/windows/ro/readonly")
	os.Chmod("testdata/windows/ro/readonly", 0500)

	for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
		err := os.Rename(path, path+"new")
		if err == nil {
			t.Skipf("seem like this test doesn't work here")
			return
		}
	}

	rename := func(path string) error {
		return osutil.Rename(path, path+"new")
	}

	for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
		err := osutil.InWritableDir(rename, path)
		if err != nil {
			t.Errorf("Unexpected error %s: %s", path, err)
		}
		_, err = os.Stat(path + "new")
		if err != nil {
			t.Errorf("Unexpected error %s: %s", path, err)
		}
	}
}

func TestDiskUsage(t *testing.T) {
	free, err := osutil.DiskFreePercentage(".")
	if err != nil {
		if runtime.GOOS == "netbsd" ||
			runtime.GOOS == "openbsd" ||
			runtime.GOOS == "solaris" {
			t.Skip()
		}
		t.Errorf("Unexpected error: %s", err)
	}
	if free < 1 {
		t.Error("Disk is full?", free)
	}
}
@@ -1,44 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package osutil

import (
	"bytes"
	"fmt"
	"testing"
)

var testcases = []struct {
	from byte
	to   []byte
	a, b string
}{
	{'\n', []byte{'\r', '\n'}, "", ""},
	{'\n', []byte{'\r', '\n'}, "foo", "foo"},
	{'\n', []byte{'\r', '\n'}, "foo\n", "foo\r\n"},
	{'\n', []byte{'\r', '\n'}, "foo\nbar", "foo\r\nbar"},
	{'\n', []byte{'\r', '\n'}, "foo\nbar\nbaz", "foo\r\nbar\r\nbaz"},
	{'\n', []byte{'\r', '\n'}, "\nbar", "\r\nbar"},
	{'o', []byte{'x', 'l', 'r'}, "\nfoo", "\nfxlrxlr"},
	{'o', nil, "\nfoo", "\nf"},
	{'f', []byte{}, "\nfoo", "\noo"},
}

func TestReplacingWriter(t *testing.T) {
	for _, tc := range testcases {
		var buf bytes.Buffer
		w := ReplacingWriter{
			Writer: &buf,
			From:   tc.from,
			To:     tc.to,
		}
		fmt.Fprint(w, tc.a)
		if buf.String() != tc.b {
			t.Errorf("%q != %q", buf.String(), tc.b)
		}
	}
}
@@ -1,81 +0,0 @@
// Copyright (C) 2014 The Protocol Authors.

package protocol

import (
	"io"
	"time"
)

type TestModel struct {
	data     []byte
	folder   string
	name     string
	offset   int64
	size     int
	hash     []byte
	flags    uint32
	options  []Option
	closedCh chan bool
}

func newTestModel() *TestModel {
	return &TestModel{
		closedCh: make(chan bool),
	}
}

func (t *TestModel) Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
}

func (t *TestModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
}

func (t *TestModel) Request(deviceID DeviceID, folder, name string, offset int64, hash []byte, flags uint32, options []Option, buf []byte) error {
	t.folder = folder
	t.name = name
	t.offset = offset
	t.size = len(buf)
	t.hash = hash
	t.flags = flags
	t.options = options
	copy(buf, t.data)
	return nil
}

func (t *TestModel) Close(deviceID DeviceID, err error) {
	close(t.closedCh)
}

func (t *TestModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
}

func (t *TestModel) isClosed() bool {
	select {
	case <-t.closedCh:
		return true
	case <-time.After(1 * time.Second):
		return false // Timeout
	}
}

type ErrPipe struct {
	io.PipeWriter
	written int
	max     int
	err     error
	closed  bool
}

func (e *ErrPipe) Write(data []byte) (int, error) {
	if e.closed {
		return 0, e.err
	}
	if e.written+len(data) > e.max {
		n, _ := e.PipeWriter.Write(data[:e.max-e.written])
		e.PipeWriter.CloseWithError(e.err)
		e.closed = true
		return n, e.err
	}
	return e.PipeWriter.Write(data)
}
@@ -1,49 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.

package protocol

import "testing"

func TestCompressionMarshal(t *testing.T) {
	uTestcases := []struct {
		s string
		c Compression
	}{
		{"true", CompressMetadata},
		{"false", CompressNever},
		{"never", CompressNever},
		{"metadata", CompressMetadata},
		{"always", CompressAlways},
		{"whatever", CompressMetadata},
	}

	mTestcases := []struct {
		s string
		c Compression
	}{
		{"never", CompressNever},
		{"metadata", CompressMetadata},
		{"always", CompressAlways},
	}

	var c Compression
	for _, tc := range uTestcases {
		err := c.UnmarshalText([]byte(tc.s))
		if err != nil {
			t.Error(err)
		}
		if c != tc.c {
			t.Errorf("%s unmarshalled to %d, not %d", tc.s, c, tc.c)
		}
	}

	for _, tc := range mTestcases {
		bs, err := tc.c.MarshalText()
		if err != nil {
			t.Error(err)
		}
		if s := string(bs); s != tc.s {
			t.Errorf("%d marshalled to %q, not %q", tc.c, s, tc.s)
		}
	}
}
@@ -1,23 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.

package protocol

import "testing"

func TestWinsConflict(t *testing.T) {
	testcases := [][2]FileInfo{
		// The first should always win over the second
		{{Modified: 42}, {Modified: 41}},
		{{Modified: 41}, {Modified: 42, Flags: FlagDeleted}},
		{{Modified: 41, Version: Vector{{42, 2}, {43, 1}}}, {Modified: 41, Version: Vector{{42, 1}, {43, 2}}}},
	}

	for _, tc := range testcases {
		if !tc[0].WinsConflict(tc[1]) {
			t.Errorf("%v should win over %v", tc[0], tc[1])
		}
		if tc[1].WinsConflict(tc[0]) {
			t.Errorf("%v should not win over %v", tc[1], tc[0])
		}
	}
}
@@ -1,76 +0,0 @@
// Copyright (C) 2014 The Protocol Authors.

package protocol

import "testing"

var formatted = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2"
var formatCases = []string{
	"P56IOI-7MZJNU-2IQGDR-EYDM2M-GTMGL3-BXNPQ6-W5BTBB-Z4TJXZ-WICQ",
	"P56IOI-7MZJNU2Y-IQGDR-EYDM2M-GTI-MGL3-BXNPQ6-W5BM-TBB-Z4TJXZ-WICQ2",
	"P56IOI7 MZJNU2I QGDREYD M2MGTMGL 3BXNPQ6W 5BTB BZ4T JXZWICQ",
	"P56IOI7 MZJNU2Y IQGDREY DM2MGTI MGL3BXN PQ6W5BM TBBZ4TJ XZWICQ2",
	"P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ",
	"p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicq",
	"P56IOI7MZJNU2YIQGDREYDM2MGTIMGL3BXNPQ6W5BMTBBZ4TJXZWICQ2",
	"P561017MZJNU2YIQGDREYDM2MGTIMGL3BXNPQ6W5BMT88Z4TJXZWICQ2",
	"p56ioi7mzjnu2yiqgdreydm2mgtimgl3bxnpq6w5bmtbbz4tjxzwicq2",
	"p561017mzjnu2yiqgdreydm2mgtimgl3bxnpq6w5bmt88z4tjxzwicq2",
}

func TestFormatDeviceID(t *testing.T) {
	for i, tc := range formatCases {
		var id DeviceID
		err := id.UnmarshalText([]byte(tc))
		if err != nil {
			t.Errorf("#%d UnmarshalText(%q); %v", i, tc, err)
		} else if f := id.String(); f != formatted {
			t.Errorf("#%d FormatDeviceID(%q)\n\t%q !=\n\t%q", i, tc, f, formatted)
		}
	}
}

var validateCases = []struct {
	s  string
	ok bool
}{
	{"", false},
	{"P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2", true},
	{"P56IOI7-MZJNU2-IQGDREY-DM2MGT-MGL3BXN-PQ6W5B-TBBZ4TJ-XZWICQ", true},
	{"P56IOI7 MZJNU2I QGDREYD M2MGTMGL 3BXNPQ6W 5BTB BZ4T JXZWICQ", true},
	{"P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ", true},
	{"P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQCCCC", false},
	{"p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicq", true},
	{"p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicqCCCC", false},
}

func TestValidateDeviceID(t *testing.T) {
	for _, tc := range validateCases {
		var id DeviceID
		err := id.UnmarshalText([]byte(tc.s))
		if (err == nil && !tc.ok) || (err != nil && tc.ok) {
			t.Errorf("ValidateDeviceID(%q); %v != %v", tc.s, err, tc.ok)
		}
	}
}

func TestMarshallingDeviceID(t *testing.T) {
	n0 := DeviceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
	n1 := DeviceID{}
	n2 := DeviceID{}

	bs, _ := n0.MarshalText()
	n1.UnmarshalText(bs)
	bs, _ = n1.MarshalText()
	n2.UnmarshalText(bs)

	if n2.String() != n0.String() {
		t.Errorf("String marshalling error; %q != %q", n2.String(), n0.String())
	}
	if !n2.Equals(n0) {
		t.Error("Equals error")
	}
	if n2.Compare(n0) != 0 {
		t.Error("Compare error")
	}
}
@@ -1,89 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.

// +build gofuzz

package protocol

import (
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"testing"
	"testing/quick"
)

// This can be used to generate a corpus of valid messages as a starting point
// for the fuzzer.
func TestGenerateCorpus(t *testing.T) {
	t.Skip("Use to generate initial corpus only")

	n := 0
	check := func(idx IndexMessage) bool {
		for i := range idx.Options {
			if len(idx.Options[i].Key) > 64 {
				idx.Options[i].Key = idx.Options[i].Key[:64]
			}
		}
		hdr := header{
			version:     0,
			msgID:       42,
			msgType:     messageTypeIndex,
			compression: false,
		}

		msgBs := idx.MustMarshalXDR()

		buf := make([]byte, 8)
		binary.BigEndian.PutUint32(buf, encodeHeader(hdr))
		binary.BigEndian.PutUint32(buf[4:], uint32(len(msgBs)))
		buf = append(buf, msgBs...)

		ioutil.WriteFile(fmt.Sprintf("testdata/corpus/test-%03d.xdr", n), buf, 0644)
		n++
		return true
	}

	if err := quick.Check(check, &quick.Config{MaxCount: 1000}); err != nil {
		t.Fatal(err)
	}
}

// Tests any crashers found by the fuzzer, for closer investigation.
func TestCrashers(t *testing.T) {
	testFiles(t, "testdata/crashers")
}

// Tests the entire corpus, which should PASS before the fuzzer starts
// fuzzing.
func TestCorpus(t *testing.T) {
	testFiles(t, "testdata/corpus")
}

func testFiles(t *testing.T, dir string) {
	fd, err := os.Open(dir)
	if err != nil {
		t.Fatal(err)
	}
	crashers, err := fd.Readdirnames(-1)
	if err != nil {
		t.Fatal(err)
	}
	for _, name := range crashers {
		if strings.HasSuffix(name, ".output") {
			continue
		}
		if strings.HasSuffix(name, ".quoted") {
			continue
		}

		t.Log(name)
		crasher, err := ioutil.ReadFile(dir + "/" + name)
		if err != nil {
			t.Fatal(err)
		}

		Fuzz(crasher)
	}
}
@@ -1,311 +0,0 @@
|
|||||||
// Copyright (C) 2014 The Protocol Authors.
|
|
||||||
|
|
||||||
package protocol
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"testing/quick"
|
|
||||||
|
|
||||||
"github.com/calmh/xdr"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
c0ID = NewDeviceID([]byte{1})
|
|
||||||
c1ID = NewDeviceID([]byte{2})
|
|
||||||
quickCfg = &quick.Config{}
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
|
||||||
flag.Parse()
|
|
||||||
if flag.Lookup("test.short").Value.String() != "false" {
|
|
||||||
quickCfg.MaxCount = 10
|
|
||||||
}
|
|
||||||
os.Exit(m.Run())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHeaderFunctions(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
f := func(ver, id, typ int) bool {
|
|
||||||
ver = int(uint(ver) % 16)
|
|
||||||
id = int(uint(id) % 4096)
|
|
||||||
typ = int(uint(typ) % 256)
|
|
||||||
h0 := header{version: ver, msgID: id, msgType: typ}
|
|
||||||
h1 := decodeHeader(encodeHeader(h0))
|
|
||||||
return h0 == h1
|
|
||||||
}
|
|
||||||
if err := quick.Check(f, nil); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHeaderLayout(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
var e, a uint32
|
|
||||||
|
|
||||||
// Version are the first four bits
|
|
||||||
e = 0xf0000000
|
|
||||||
a = encodeHeader(header{version: 0xf})
|
|
||||||
if a != e {
|
|
||||||
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message ID are the following 12 bits
|
|
||||||
e = 0x0fff0000
|
|
||||||
a = encodeHeader(header{msgID: 0xfff})
|
|
||||||
if a != e {
|
|
||||||
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type are the last 8 bits before reserved
|
|
||||||
e = 0x0000ff00
|
|
||||||
a = encodeHeader(header{msgType: 0xff})
|
|
||||||
if a != e {
|
|
||||||
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPing(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ar, aw := io.Pipe()
|
|
||||||
br, bw := io.Pipe()
|
|
||||||
|
|
||||||
c0 := NewConnection(c0ID, ar, bw, newTestModel(), "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
|
||||||
c0.Start()
|
|
||||||
c1 := NewConnection(c1ID, br, aw, newTestModel(), "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
|
||||||
c1.Start()
|
|
||||||
c0.ClusterConfig(ClusterConfigMessage{})
|
|
||||||
c1.ClusterConfig(ClusterConfigMessage{})
|
|
||||||
|
|
||||||
if ok := c0.ping(); !ok {
|
|
||||||
t.Error("c0 ping failed")
|
|
||||||
}
|
|
||||||
if ok := c1.ping(); !ok {
|
|
||||||
t.Error("c1 ping failed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVersionErr(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
m0 := newTestModel()
|
|
||||||
m1 := newTestModel()
|
|
||||||
|
|
||||||
ar, aw := io.Pipe()
|
|
||||||
br, bw := io.Pipe()
|
|
||||||
|
|
||||||
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
|
||||||
c0.Start()
|
|
||||||
c1 := NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
|
|
||||||
c1.Start()
|
|
||||||
c0.ClusterConfig(ClusterConfigMessage{})
|
|
||||||
c1.ClusterConfig(ClusterConfigMessage{})
|
|
||||||
|
|
||||||
w := xdr.NewWriter(c0.cw)
|
|
||||||
w.WriteUint32(encodeHeader(header{
|
|
||||||
version: 2,
|
|
||||||
msgID: 0,
|
|
||||||
msgType: 0,
|
|
||||||
}))
|
|
||||||
w.WriteUint32(0) // Avoids reader closing due to EOF
|
|
||||||
|
|
||||||
if !m1.isClosed() {
|
|
||||||
t.Error("Connection should close due to unknown version")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTypeErr(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
m0 := newTestModel()
|
|
||||||
m1 := newTestModel()
|
|
||||||
|
|
||||||
ar, aw := io.Pipe()
|
|
||||||
br, bw := io.Pipe()
|
|
||||||
|
|
||||||
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
|
||||||
c0.Start()
|
|
||||||
c1 := NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
|
|
||||||
c1.Start()
|
|
||||||
c0.ClusterConfig(ClusterConfigMessage{})
|
|
||||||
c1.ClusterConfig(ClusterConfigMessage{})
|
|
||||||
|
|
||||||
w := xdr.NewWriter(c0.cw)
|
|
||||||
w.WriteUint32(encodeHeader(header{
|
|
||||||
version: 0,
|
|
||||||
msgID: 0,
|
|
||||||
msgType: 42,
|
|
||||||
}))
|
|
||||||
w.WriteUint32(0) // Avoids reader closing due to EOF
|
|
||||||
|
|
||||||
if !m1.isClosed() {
|
|
||||||
t.Error("Connection should close due to unknown message type")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClose(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
m0 := newTestModel()
|
|
||||||
m1 := newTestModel()
|
|
||||||
|
|
||||||
ar, aw := io.Pipe()
|
|
||||||
br, bw := io.Pipe()
|
|
||||||
|
|
||||||
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
|
||||||
c0.Start()
|
|
||||||
c1 := NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
|
|
||||||
c1.Start()
|
|
||||||
c0.ClusterConfig(ClusterConfigMessage{})
|
|
||||||
c1.ClusterConfig(ClusterConfigMessage{})
|
|
||||||
|
|
||||||
c0.close(nil)
|
|
||||||
|
|
||||||
<-c0.closed
|
|
||||||
if !m0.isClosed() {
|
|
||||||
t.Fatal("Connection should be closed")
|
|
||||||
}
|
|
||||||
|
|
||||||
// None of these should panic, some should return an error
|
|
||||||
|
|
||||||
if c0.ping() {
|
|
||||||
t.Error("Ping should not return true")
|
|
||||||
}
|
|
||||||
|
|
||||||
c0.Index("default", nil, 0, nil)
|
|
||||||
c0.Index("default", nil, 0, nil)
|
|
||||||
|
|
||||||
if _, err := c0.Request("default", "foo", 0, 0, nil, 0, nil); err == nil {
|
|
||||||
t.Error("Request should return an error")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestElementSizeExceededNested(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
m := ClusterConfigMessage{
|
|
||||||
ClientName: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon",
|
|
||||||
}
|
|
||||||
_, err := m.EncodeXDR(ioutil.Discard)
|
|
||||||
if err == nil {
|
|
||||||
t.Errorf("ID length %d > max 64, but no error", len(m.Folders[0].ID))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarshalIndexMessage(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
f := func(m1 IndexMessage) bool {
|
|
||||||
for i, f := range m1.Files {
|
|
||||||
m1.Files[i].CachedSize = 0
|
|
||||||
for j := range f.Blocks {
|
|
||||||
f.Blocks[j].Offset = 0
|
|
||||||
if len(f.Blocks[j].Hash) == 0 {
|
|
||||||
f.Blocks[j].Hash = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return testMarshal(t, "index", &m1, &IndexMessage{})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := quick.Check(f, quickCfg); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarshalRequestMessage(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
f := func(m1 RequestMessage) bool {
|
|
||||||
return testMarshal(t, "request", &m1, &RequestMessage{})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := quick.Check(f, quickCfg); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarshalResponseMessage(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
f := func(m1 ResponseMessage) bool {
|
|
||||||
if len(m1.Data) == 0 {
|
|
||||||
m1.Data = nil
|
|
||||||
}
|
|
||||||
return testMarshal(t, "response", &m1, &ResponseMessage{})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := quick.Check(f, quickCfg); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarshalClusterConfigMessage(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
f := func(m1 ClusterConfigMessage) bool {
|
|
||||||
return testMarshal(t, "clusterconfig", &m1, &ClusterConfigMessage{})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := quick.Check(f, quickCfg); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarshalCloseMessage(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
f := func(m1 CloseMessage) bool {
|
|
||||||
return testMarshal(t, "close", &m1, &CloseMessage{})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := quick.Check(f, quickCfg); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type message interface {
|
|
||||||
EncodeXDR(io.Writer) (int, error)
|
|
||||||
DecodeXDR(io.Reader) error
|
|
||||||
}
|
|
||||||
|
|
||||||
func testMarshal(t *testing.T, prefix string, m1, m2 message) bool {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
|
|
||||||
failed := func(bc []byte) {
|
|
||||||
bs, _ := json.MarshalIndent(m1, "", " ")
|
|
||||||
ioutil.WriteFile(prefix+"-1.txt", bs, 0644)
|
|
||||||
bs, _ = json.MarshalIndent(m2, "", " ")
|
|
||||||
ioutil.WriteFile(prefix+"-2.txt", bs, 0644)
|
|
||||||
if len(bc) > 0 {
|
|
||||||
f, _ := os.Create(prefix + "-data.txt")
|
|
||||||
fmt.Fprint(f, hex.Dump(bc))
|
|
||||||
f.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := m1.EncodeXDR(&buf)
|
|
||||||
if err != nil && strings.Contains(err.Error(), "exceeds size") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
failed(nil)
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
bc := make([]byte, len(buf.Bytes()))
|
|
||||||
copy(bc, buf.Bytes())
|
|
||||||
|
|
||||||
err = m2.DecodeXDR(&buf)
|
|
||||||
if err != nil {
|
|
||||||
failed(bc)
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ok := reflect.DeepEqual(m1, m2)
|
|
||||||
if !ok {
|
|
||||||
failed(bc)
|
|
||||||
}
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
@@ -1,249 +0,0 @@
|
|||||||
// Copyright (C) 2015 The Protocol Authors.
|
|
||||||
|
|
||||||
package protocol
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCompare(t *testing.T) {
|
|
||||||
testcases := []struct {
|
|
||||||
a, b Vector
|
|
||||||
r Ordering
|
|
||||||
}{
|
|
||||||
// Empty vectors are identical
|
|
||||||
{Vector{}, Vector{}, Equal},
|
|
||||||
{Vector{}, nil, Equal},
|
|
||||||
{nil, Vector{}, Equal},
|
|
||||||
{nil, Vector{Counter{42, 0}}, Equal},
|
|
||||||
{Vector{}, Vector{Counter{42, 0}}, Equal},
|
|
||||||
{Vector{Counter{42, 0}}, nil, Equal},
|
|
||||||
{Vector{Counter{42, 0}}, Vector{}, Equal},
|
|
||||||
|
|
||||||
// Zero is the implied value for a missing Counter
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 0}},
|
|
||||||
Vector{Counter{77, 0}},
|
|
||||||
Equal,
|
|
||||||
},
|
|
||||||
|
|
||||||
// Equal vectors are equal
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 33}},
|
|
||||||
Vector{Counter{42, 33}},
|
|
||||||
Equal,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 33}, Counter{77, 24}},
|
|
||||||
Vector{Counter{42, 33}, Counter{77, 24}},
|
|
||||||
Equal,
|
|
||||||
},
|
|
||||||
|
|
||||||
// These a-vectors are all greater than the b-vector
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 1}},
|
|
||||||
nil,
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 1}},
|
|
||||||
Vector{},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{0, 1}},
|
|
||||||
Vector{Counter{0, 0}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 1}},
|
|
||||||
Vector{Counter{42, 0}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{math.MaxUint64, 1}},
|
|
||||||
Vector{Counter{math.MaxUint64, 0}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{0, math.MaxUint64}},
|
|
||||||
Vector{Counter{0, 0}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, math.MaxUint64}},
|
|
||||||
Vector{Counter{42, 0}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{math.MaxUint64, math.MaxUint64}},
|
|
||||||
Vector{Counter{math.MaxUint64, 0}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{0, math.MaxUint64}},
|
|
||||||
Vector{Counter{0, math.MaxUint64 - 1}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, math.MaxUint64}},
|
|
||||||
Vector{Counter{42, math.MaxUint64 - 1}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{math.MaxUint64, math.MaxUint64}},
|
|
||||||
Vector{Counter{math.MaxUint64, math.MaxUint64 - 1}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 2}},
|
|
||||||
Vector{Counter{42, 1}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 2}},
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 1}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 2}, Counter{77, 3}},
|
|
||||||
Vector{Counter{42, 1}, Counter{77, 3}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 2}, Counter{77, 3}},
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{22, 23}, Counter{42, 2}, Counter{77, 4}},
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
|
|
||||||
Greater,
|
|
||||||
},
|
|
||||||
|
|
||||||
// These a-vectors are all lesser than the b-vector
|
|
||||||
{nil, Vector{Counter{42, 1}}, Lesser},
|
|
||||||
{Vector{}, Vector{Counter{42, 1}}, Lesser},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 0}},
|
|
||||||
Vector{Counter{42, 1}},
|
|
||||||
Lesser,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 1}},
|
|
||||||
Vector{Counter{42, 2}},
|
|
||||||
Lesser,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 1}},
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 2}},
|
|
||||||
Lesser,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 1}, Counter{77, 3}},
|
|
||||||
Vector{Counter{42, 2}, Counter{77, 3}},
|
|
||||||
Lesser,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 2}, Counter{77, 3}},
|
|
||||||
Lesser,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
|
|
||||||
Vector{Counter{22, 23}, Counter{42, 2}, Counter{77, 4}},
|
|
||||||
Lesser,
|
|
||||||
},
|
|
||||||
|
|
||||||
// These are all in conflict
|
|
||||||
{
|
|
||||||
Vector{Counter{42, 2}},
|
|
||||||
Vector{Counter{43, 1}},
|
|
||||||
ConcurrentGreater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{43, 1}},
|
|
||||||
Vector{Counter{42, 2}},
|
|
||||||
ConcurrentLesser,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{22, 23}, Counter{42, 1}},
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 2}},
|
|
||||||
ConcurrentGreater,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{22, 21}, Counter{42, 2}},
|
|
||||||
Vector{Counter{22, 22}, Counter{42, 1}},
|
|
||||||
ConcurrentLesser,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Vector{Counter{22, 21}, Counter{42, 2}, Counter{43, 1}},
|
|
||||||
Vector{Counter{20, 1}, Counter{22, 22}, Counter{42, 1}},
|
|
||||||
ConcurrentLesser,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, tc := range testcases {
|
|
||||||
// Test real Compare
|
|
||||||
if r := tc.a.Compare(tc.b); r != tc.r {
|
|
||||||
t.Errorf("%d: %+v.Compare(%+v) == %v (expected %v)", i, tc.a, tc.b, r, tc.r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test convenience functions
|
|
||||||
switch tc.r {
|
|
||||||
case Greater:
|
|
||||||
if tc.a.Equal(tc.b) {
|
|
||||||
t.Errorf("%+v == %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if tc.a.Concurrent(tc.b) {
|
|
||||||
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if !tc.a.GreaterEqual(tc.b) {
|
|
||||||
t.Errorf("%+v not >= %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if tc.a.LesserEqual(tc.b) {
|
|
||||||
t.Errorf("%+v <= %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
case Lesser:
|
|
||||||
if tc.a.Concurrent(tc.b) {
|
|
||||||
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if tc.a.Equal(tc.b) {
|
|
||||||
t.Errorf("%+v == %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if tc.a.GreaterEqual(tc.b) {
|
|
||||||
t.Errorf("%+v >= %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if !tc.a.LesserEqual(tc.b) {
|
|
||||||
t.Errorf("%+v not <= %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
case Equal:
|
|
||||||
if tc.a.Concurrent(tc.b) {
|
|
||||||
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if !tc.a.Equal(tc.b) {
|
|
||||||
t.Errorf("%+v not == %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if !tc.a.GreaterEqual(tc.b) {
|
|
||||||
t.Errorf("%+v not <= %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if !tc.a.LesserEqual(tc.b) {
|
|
||||||
t.Errorf("%+v not <= %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
case ConcurrentLesser, ConcurrentGreater:
|
|
||||||
if !tc.a.Concurrent(tc.b) {
|
|
||||||
t.Errorf("%+v not concurrent %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if tc.a.Equal(tc.b) {
|
|
||||||
t.Errorf("%+v == %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if tc.a.GreaterEqual(tc.b) {
|
|
||||||
t.Errorf("%+v >= %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
if tc.a.LesserEqual(tc.b) {
|
|
||||||
t.Errorf("%+v <= %+v", tc.a, tc.b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,134 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.

package protocol

import "testing"

func TestUpdate(t *testing.T) {
	var v Vector

	// Append

	v = v.Update(42)
	expected := Vector{Counter{42, 1}}

	if v.Compare(expected) != Equal {
		t.Errorf("Update error, %+v != %+v", v, expected)
	}

	// Insert at front

	v = v.Update(36)
	expected = Vector{Counter{36, 1}, Counter{42, 1}}

	if v.Compare(expected) != Equal {
		t.Errorf("Update error, %+v != %+v", v, expected)
	}

	// Insert in moddle

	v = v.Update(37)
	expected = Vector{Counter{36, 1}, Counter{37, 1}, Counter{42, 1}}

	if v.Compare(expected) != Equal {
		t.Errorf("Update error, %+v != %+v", v, expected)
	}

	// Update existing

	v = v.Update(37)
	expected = Vector{Counter{36, 1}, Counter{37, 2}, Counter{42, 1}}

	if v.Compare(expected) != Equal {
		t.Errorf("Update error, %+v != %+v", v, expected)
	}
}

func TestCopy(t *testing.T) {
	v0 := Vector{Counter{42, 1}}
	v1 := v0.Copy()
	v1.Update(42)
	if v0.Compare(v1) != Lesser {
		t.Errorf("Copy error, %+v should be ancestor of %+v", v0, v1)
	}
}

func TestMerge(t *testing.T) {
	testcases := []struct {
		a, b, m Vector
	}{
		// No-ops
		{
			Vector{},
			Vector{},
			Vector{},
		},
		{
			Vector{Counter{22, 1}, Counter{42, 1}},
			Vector{Counter{22, 1}, Counter{42, 1}},
			Vector{Counter{22, 1}, Counter{42, 1}},
		},

		// Appends
		{
			Vector{},
			Vector{Counter{22, 1}, Counter{42, 1}},
			Vector{Counter{22, 1}, Counter{42, 1}},
		},
		{
			Vector{Counter{22, 1}},
			Vector{Counter{42, 1}},
			Vector{Counter{22, 1}, Counter{42, 1}},
		},
		{
			Vector{Counter{22, 1}},
			Vector{Counter{22, 1}, Counter{42, 1}},
			Vector{Counter{22, 1}, Counter{42, 1}},
		},

		// Insert
		{
			Vector{Counter{22, 1}, Counter{42, 1}},
			Vector{Counter{22, 1}, Counter{23, 2}, Counter{42, 1}},
			Vector{Counter{22, 1}, Counter{23, 2}, Counter{42, 1}},
		},
		{
			Vector{Counter{42, 1}},
			Vector{Counter{22, 1}},
			Vector{Counter{22, 1}, Counter{42, 1}},
		},

		// Update
		{
			Vector{Counter{22, 1}, Counter{42, 2}},
			Vector{Counter{22, 2}, Counter{42, 1}},
			Vector{Counter{22, 2}, Counter{42, 2}},
		},

		// All of the above
		{
			Vector{Counter{10, 1}, Counter{20, 2}, Counter{30, 1}},
			Vector{Counter{5, 1}, Counter{10, 2}, Counter{15, 1}, Counter{20, 1}, Counter{25, 1}, Counter{35, 1}},
			Vector{Counter{5, 1}, Counter{10, 2}, Counter{15, 1}, Counter{20, 2}, Counter{25, 1}, Counter{30, 1}, Counter{35, 1}},
		},
	}

	for i, tc := range testcases {
		if m := tc.a.Merge(tc.b); m.Compare(tc.m) != Equal {
			t.Errorf("%d: %+v.Merge(%+v) == %+v (expected %+v)", i, tc.a, tc.b, m, tc.m)
		}
	}
}

func TestCounterValue(t *testing.T) {
	v0 := Vector{Counter{42, 1}, Counter{64, 5}}
	if v0.Counter(42) != 1 {
		t.Errorf("Counter error, %d != %d", v0.Counter(42), 1)
	}
	if v0.Counter(64) != 5 {
		t.Errorf("Counter error, %d != %d", v0.Counter(64), 5)
	}
	if v0.Counter(72) != 0 {
		t.Errorf("Counter error, %d != %d", v0.Counter(72), 0)
	}
}
@@ -200,9 +200,6 @@ func (c *staticClient) connect() error {
 	c.mut.Unlock()
 
 	conn := tls.Client(tcpConn, c.config)
-	if err = conn.Handshake(); err != nil {
-		return err
-	}
 
 	if err := conn.SetDeadline(time.Now().Add(c.connectTimeout)); err != nil {
 		conn.Close()
@@ -1,194 +0,0 @@
|
|||||||
// Copyright (C) 2015 The Syncthing Authors.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package sync
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/syncthing/syncthing/lib/logger"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
logThreshold = 100 * time.Millisecond
|
|
||||||
shortWait = 5 * time.Millisecond
|
|
||||||
longWait = 125 * time.Millisecond
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestTypes(t *testing.T) {
|
|
||||||
debug = false
|
|
||||||
l.SetDebug("sync", false)
|
|
||||||
|
|
||||||
if _, ok := NewMutex().(*sync.Mutex); !ok {
|
|
||||||
t.Error("Wrong type")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := NewRWMutex().(*sync.RWMutex); !ok {
|
|
||||||
t.Error("Wrong type")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := NewWaitGroup().(*sync.WaitGroup); !ok {
|
|
||||||
t.Error("Wrong type")
|
|
||||||
}
|
|
||||||
|
|
||||||
debug = true
|
|
||||||
l.SetDebug("sync", true)
|
|
||||||
|
|
||||||
if _, ok := NewMutex().(*loggedMutex); !ok {
|
|
||||||
t.Error("Wrong type")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := NewRWMutex().(*loggedRWMutex); !ok {
|
|
||||||
t.Error("Wrong type")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := NewWaitGroup().(*loggedWaitGroup); !ok {
|
|
||||||
t.Error("Wrong type")
|
|
||||||
}
|
|
||||||
|
|
||||||
debug = false
|
|
||||||
l.SetDebug("sync", false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMutex(t *testing.T) {
|
|
||||||
debug = true
|
|
||||||
l.SetDebug("sync", true)
|
|
||||||
threshold = logThreshold
|
|
||||||
|
|
||||||
msgmut := sync.Mutex{}
|
|
||||||
var messages []string
|
|
||||||
|
|
||||||
l.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {
|
|
||||||
msgmut.Lock()
|
|
||||||
messages = append(messages, message)
|
|
||||||
msgmut.Unlock()
|
|
||||||
})
|
|
||||||
|
|
||||||
mut := NewMutex()
|
|
||||||
mut.Lock()
|
|
||||||
time.Sleep(shortWait)
|
|
||||||
mut.Unlock()
|
|
||||||
|
|
||||||
if len(messages) > 0 {
|
|
||||||
t.Errorf("Unexpected message count")
|
|
||||||
}
|
|
||||||
|
|
||||||
mut.Lock()
|
|
||||||
time.Sleep(longWait)
|
|
||||||
mut.Unlock()
|
|
||||||
|
|
||||||
if len(messages) != 1 {
|
|
||||||
t.Errorf("Unexpected message count")
|
|
||||||
}
|
|
||||||
|
|
||||||
debug = false
|
|
||||||
l.SetDebug("sync", false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRWMutex(t *testing.T) {
|
|
||||||
debug = true
|
|
||||||
l.SetDebug("sync", true)
|
|
||||||
threshold = logThreshold
|
|
||||||
|
|
||||||
msgmut := sync.Mutex{}
|
|
||||||
var messages []string
|
|
||||||
|
|
||||||
l.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {
|
|
||||||
msgmut.Lock()
|
|
||||||
messages = append(messages, message)
|
|
||||||
msgmut.Unlock()
|
|
||||||
})
|
|
||||||
|
|
||||||
mut := NewRWMutex()
|
|
||||||
mut.Lock()
|
|
||||||
time.Sleep(shortWait)
|
|
||||||
mut.Unlock()
|
|
||||||
|
|
||||||
if len(messages) > 0 {
|
|
||||||
t.Errorf("Unexpected message count")
|
|
||||||
}
|
|
||||||
|
|
||||||
mut.Lock()
|
|
||||||
time.Sleep(longWait)
|
|
||||||
mut.Unlock()
|
|
||||||
|
|
||||||
if len(messages) != 1 {
|
|
||||||
t.Errorf("Unexpected message count")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Testing rlocker logging
|
|
||||||
mut.RLock()
|
|
||||||
go func() {
|
|
||||||
time.Sleep(longWait)
|
|
||||||
mut.RUnlock()
|
|
||||||
}()
|
|
||||||
|
|
||||||
mut.Lock()
|
|
||||||
mut.Unlock()
|
|
||||||
|
|
||||||
if len(messages) != 2 {
|
|
||||||
t.Errorf("Unexpected message count")
|
|
||||||
}
|
|
||||||
if !strings.Contains(messages[1], "RUnlockers while locking: sync") || !strings.Contains(messages[1], "sync_test.go:") {
|
|
||||||
t.Error("Unexpected message")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Testing multiple rlockers
|
|
||||||
mut.RLock()
|
|
||||||
mut.RLock()
|
|
||||||
mut.RLock()
|
|
||||||
mut.RUnlock()
|
|
||||||
mut.RUnlock()
|
|
||||||
mut.RUnlock()
|
|
||||||
|
|
||||||
debug = false
|
|
||||||
l.SetDebug("sync", false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWaitGroup(t *testing.T) {
|
|
||||||
debug = true
|
|
||||||
l.SetDebug("sync", true)
|
|
||||||
threshold = logThreshold
|
|
||||||
|
|
||||||
msgmut := sync.Mutex{}
|
|
||||||
var messages []string
|
|
||||||
|
|
||||||
l.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {
|
|
||||||
msgmut.Lock()
|
|
||||||
messages = append(messages, message)
|
|
||||||
msgmut.Unlock()
|
|
||||||
})
|
|
||||||
|
|
||||||
wg := NewWaitGroup()
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
time.Sleep(shortWait)
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
if len(messages) > 0 {
|
|
||||||
t.Errorf("Unexpected message count")
|
|
||||||
}
|
|
||||||
|
|
||||||
wg = NewWaitGroup()
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
time.Sleep(longWait)
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
if len(messages) != 1 {
|
|
||||||
t.Errorf("Unexpected message count")
|
|
||||||
}
|
|
||||||
|
|
||||||
debug = false
|
|
||||||
l.SetDebug("sync", false)
|
|
||||||
}
|
|
||||||
252 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go generated vendored
@@ -1,252 +0,0 @@
|
|||||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package leveldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ErrBatchCorrupted struct {
|
|
||||||
Reason string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *ErrBatchCorrupted) Error() string {
|
|
||||||
return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newErrBatchCorrupted(reason string) error {
|
|
||||||
return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
batchHdrLen = 8 + 4
|
|
||||||
batchGrowRec = 3000
|
|
||||||
)
|
|
||||||
|
|
||||||
type BatchReplay interface {
|
|
||||||
Put(key, value []byte)
|
|
||||||
Delete(key []byte)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Batch is a write batch.
|
|
||||||
type Batch struct {
|
|
||||||
data []byte
|
|
||||||
rLen, bLen int
|
|
||||||
seq uint64
|
|
||||||
sync bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) grow(n int) {
|
|
||||||
off := len(b.data)
|
|
||||||
if off == 0 {
|
|
||||||
off = batchHdrLen
|
|
||||||
if b.data != nil {
|
|
||||||
b.data = b.data[:off]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if cap(b.data)-off < n {
|
|
||||||
if b.data == nil {
|
|
||||||
b.data = make([]byte, off, off+n)
|
|
||||||
} else {
|
|
||||||
odata := b.data
|
|
||||||
div := 1
|
|
||||||
if b.rLen > batchGrowRec {
|
|
||||||
div = b.rLen / batchGrowRec
|
|
||||||
}
|
|
||||||
b.data = make([]byte, off, off+n+(off-batchHdrLen)/div)
|
|
||||||
copy(b.data, odata)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) appendRec(kt kType, key, value []byte) {
|
|
||||||
n := 1 + binary.MaxVarintLen32 + len(key)
|
|
||||||
if kt == ktVal {
|
|
||||||
n += binary.MaxVarintLen32 + len(value)
|
|
||||||
}
|
|
||||||
b.grow(n)
|
|
||||||
off := len(b.data)
|
|
||||||
data := b.data[:off+n]
|
|
||||||
data[off] = byte(kt)
|
|
||||||
off += 1
|
|
||||||
off += binary.PutUvarint(data[off:], uint64(len(key)))
|
|
||||||
copy(data[off:], key)
|
|
||||||
off += len(key)
|
|
||||||
if kt == ktVal {
|
|
||||||
off += binary.PutUvarint(data[off:], uint64(len(value)))
|
|
||||||
copy(data[off:], value)
|
|
||||||
off += len(value)
|
|
||||||
}
|
|
||||||
b.data = data[:off]
|
|
||||||
b.rLen++
|
|
||||||
// Include 8-byte ikey header
|
|
||||||
b.bLen += len(key) + len(value) + 8
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put appends 'put operation' of the given key/value pair to the batch.
|
|
||||||
// It is safe to modify the contents of the argument after Put returns.
|
|
||||||
func (b *Batch) Put(key, value []byte) {
|
|
||||||
b.appendRec(ktVal, key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete appends 'delete operation' of the given key to the batch.
|
|
||||||
// It is safe to modify the contents of the argument after Delete returns.
|
|
||||||
func (b *Batch) Delete(key []byte) {
|
|
||||||
b.appendRec(ktDel, key, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dump dumps batch contents. The returned slice can be loaded into the
|
|
||||||
// batch using Load method.
|
|
||||||
// The returned slice is not its own copy, so the contents should not be
|
|
||||||
// modified.
|
|
||||||
func (b *Batch) Dump() []byte {
|
|
||||||
return b.encode()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load loads given slice into the batch. Previous contents of the batch
|
|
||||||
// will be discarded.
|
|
||||||
// The given slice will not be copied and will be used as batch buffer, so
|
|
||||||
// it is not safe to modify the contents of the slice.
|
|
||||||
func (b *Batch) Load(data []byte) error {
|
|
||||||
return b.decode(0, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Replay replays batch contents.
|
|
||||||
func (b *Batch) Replay(r BatchReplay) error {
|
|
||||||
return b.decodeRec(func(i int, kt kType, key, value []byte) {
|
|
||||||
switch kt {
|
|
||||||
case ktVal:
|
|
||||||
r.Put(key, value)
|
|
||||||
case ktDel:
|
|
||||||
r.Delete(key)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns number of records in the batch.
|
|
||||||
func (b *Batch) Len() int {
|
|
||||||
return b.rLen
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset resets the batch.
|
|
||||||
func (b *Batch) Reset() {
|
|
||||||
b.data = b.data[:0]
|
|
||||||
b.seq = 0
|
|
||||||
b.rLen = 0
|
|
||||||
b.bLen = 0
|
|
||||||
b.sync = false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) init(sync bool) {
|
|
||||||
b.sync = sync
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) append(p *Batch) {
|
|
||||||
if p.rLen > 0 {
|
|
||||||
b.grow(len(p.data) - batchHdrLen)
|
|
||||||
b.data = append(b.data, p.data[batchHdrLen:]...)
|
|
||||||
b.rLen += p.rLen
|
|
||||||
}
|
|
||||||
if p.sync {
|
|
||||||
b.sync = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// size returns sums of key/value pair length plus 8-bytes ikey.
|
|
||||||
func (b *Batch) size() int {
|
|
||||||
return b.bLen
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) encode() []byte {
|
|
||||||
b.grow(0)
|
|
||||||
binary.LittleEndian.PutUint64(b.data, b.seq)
|
|
||||||
binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))
|
|
||||||
|
|
||||||
return b.data
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) decode(prevSeq uint64, data []byte) error {
|
|
||||||
if len(data) < batchHdrLen {
|
|
||||||
return newErrBatchCorrupted("too short")
|
|
||||||
}
|
|
||||||
|
|
||||||
b.seq = binary.LittleEndian.Uint64(data)
|
|
||||||
if b.seq < prevSeq {
|
|
||||||
return newErrBatchCorrupted("invalid sequence number")
|
|
||||||
}
|
|
||||||
b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
|
|
||||||
if b.rLen < 0 {
|
|
||||||
return newErrBatchCorrupted("invalid records length")
|
|
||||||
}
|
|
||||||
// No need to be precise at this point, it won't be used anyway
|
|
||||||
b.bLen = len(data) - batchHdrLen
|
|
||||||
b.data = data
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
|
|
||||||
off := batchHdrLen
|
|
||||||
for i := 0; i < b.rLen; i++ {
|
|
||||||
if off >= len(b.data) {
|
|
||||||
return newErrBatchCorrupted("invalid records length")
|
|
||||||
}
|
|
||||||
|
|
||||||
kt := kType(b.data[off])
|
|
||||||
if kt > ktVal {
|
|
||||||
return newErrBatchCorrupted("bad record: invalid type")
|
|
||||||
}
|
|
||||||
off += 1
|
|
||||||
|
|
||||||
x, n := binary.Uvarint(b.data[off:])
|
|
||||||
off += n
|
|
||||||
if n <= 0 || off+int(x) > len(b.data) {
|
|
||||||
return newErrBatchCorrupted("bad record: invalid key length")
|
|
||||||
}
|
|
||||||
key := b.data[off : off+int(x)]
|
|
||||||
off += int(x)
|
|
||||||
var value []byte
|
|
||||||
if kt == ktVal {
|
|
||||||
x, n := binary.Uvarint(b.data[off:])
|
|
||||||
off += n
|
|
||||||
if n <= 0 || off+int(x) > len(b.data) {
|
|
||||||
return newErrBatchCorrupted("bad record: invalid value length")
|
|
||||||
}
|
|
||||||
value = b.data[off : off+int(x)]
|
|
||||||
off += int(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
f(i, kt, key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) memReplay(to *memdb.DB) error {
|
|
||||||
return b.decodeRec(func(i int, kt kType, key, value []byte) {
|
|
||||||
ikey := newIkey(key, b.seq+uint64(i), kt)
|
|
||||||
to.Put(ikey, value)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
|
|
||||||
if err := b.decode(prevSeq, data); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return b.memReplay(to)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) revertMemReplay(to *memdb.DB) error {
|
|
||||||
return b.decodeRec(func(i int, kt kType, key, value []byte) {
|
|
||||||
ikey := newIkey(key, b.seq+uint64(i), kt)
|
|
||||||
to.Delete(ikey)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
120 cmd/strelaypoolsrv/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go generated vendored
@@ -1,120 +0,0 @@
|
|||||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be
|
|
||||||
// found in the LICENSE file.
|
|
||||||
|
|
||||||
package leveldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
type tbRec struct {
|
|
||||||
kt kType
|
|
||||||
key, value []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
type testBatch struct {
|
|
||||||
rec []*tbRec
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *testBatch) Put(key, value []byte) {
|
|
||||||
p.rec = append(p.rec, &tbRec{ktVal, key, value})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *testBatch) Delete(key []byte) {
|
|
||||||
p.rec = append(p.rec, &tbRec{ktDel, key, nil})
|
|
||||||
}
|
|
||||||
|
|
||||||
func compareBatch(t *testing.T, b1, b2 *Batch) {
|
|
||||||
if b1.seq != b2.seq {
|
|
||||||
t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
|
|
||||||
}
|
|
||||||
if b1.Len() != b2.Len() {
|
|
||||||
t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len())
|
|
||||||
}
|
|
||||||
p1, p2 := new(testBatch), new(testBatch)
|
|
||||||
err := b1.Replay(p1)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("error when replaying batch 1: ", err)
|
|
||||||
}
|
|
||||||
err = b2.Replay(p2)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("error when replaying batch 2: ", err)
|
|
||||||
}
|
|
||||||
for i := range p1.rec {
|
|
||||||
r1, r2 := p1.rec[i], p2.rec[i]
|
|
||||||
if r1.kt != r2.kt {
|
|
||||||
t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(r1.key, r2.key) {
|
|
||||||
t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
|
|
||||||
}
|
|
||||||
if r1.kt == ktVal {
|
|
||||||
if !bytes.Equal(r1.value, r2.value) {
|
|
||||||
t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBatch_EncodeDecode(t *testing.T) {
|
|
||||||
b1 := new(Batch)
|
|
||||||
b1.seq = 10009
|
|
||||||
b1.Put([]byte("key1"), []byte("value1"))
|
|
||||||
b1.Put([]byte("key2"), []byte("value2"))
|
|
||||||
b1.Delete([]byte("key1"))
|
|
||||||
b1.Put([]byte("k"), []byte(""))
|
|
||||||
b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz"))
|
|
||||||
b1.Delete([]byte("key10000"))
|
|
||||||
b1.Delete([]byte("k"))
|
|
||||||
buf := b1.encode()
|
|
||||||
b2 := new(Batch)
|
|
||||||
err := b2.decode(0, buf)
|
|
||||||
if err != nil {
|
|
||||||
t.Error("error when decoding batch: ", err)
|
|
||||||
}
|
|
||||||
compareBatch(t, b1, b2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBatch_Append(t *testing.T) {
|
|
||||||
b1 := new(Batch)
|
|
||||||
b1.seq = 10009
|
|
||||||
b1.Put([]byte("key1"), []byte("value1"))
|
|
||||||
b1.Put([]byte("key2"), []byte("value2"))
|
|
||||||
b1.Delete([]byte("key1"))
|
|
||||||
b1.Put([]byte("foo"), []byte("foovalue"))
|
|
||||||
b1.Put([]byte("bar"), []byte("barvalue"))
|
|
||||||
b2a := new(Batch)
|
|
||||||
b2a.seq = 10009
|
|
||||||
b2a.Put([]byte("key1"), []byte("value1"))
|
|
||||||
b2a.Put([]byte("key2"), []byte("value2"))
|
|
||||||
b2a.Delete([]byte("key1"))
|
|
||||||
b2b := new(Batch)
|
|
||||||
b2b.Put([]byte("foo"), []byte("foovalue"))
|
|
||||||
b2b.Put([]byte("bar"), []byte("barvalue"))
|
|
||||||
b2a.append(b2b)
|
|
||||||
compareBatch(t, b1, b2a)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBatch_Size(t *testing.T) {
|
|
||||||
b := new(Batch)
|
|
||||||
for i := 0; i < 2; i++ {
|
|
||||||
b.Put([]byte("key1"), []byte("value1"))
|
|
||||||
b.Put([]byte("key2"), []byte("value2"))
|
|
||||||
b.Delete([]byte("key1"))
|
|
||||||
b.Put([]byte("foo"), []byte("foovalue"))
|
|
||||||
b.Put([]byte("bar"), []byte("barvalue"))
|
|
||||||
mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0)
|
|
||||||
b.memReplay(mem)
|
|
||||||
if b.size() != mem.Size() {
|
|
||||||
t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size())
|
|
||||||
}
|
|
||||||
b.Reset()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,58 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// +build !go1.2

package leveldb

import (
	"sync/atomic"
	"testing"
)

func BenchmarkDBReadConcurrent(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		for pb.Next() && iter.Next() {
		}
	})
}

func BenchmarkDBReadConcurrent2(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	var dir uint32
	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		if atomic.AddUint32(&dir, 1)%2 == 0 {
			for pb.Next() && iter.Next() {
			}
		} else {
			if pb.Next() && iter.Last() {
				for pb.Next() && iter.Prev() {
				}
			}
		}
	})
}
Some files were not shown because too many files have changed in this diff