lib/connections: Add KCP support (fixes #804)

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3489
Audrius Butkevicius
2017-03-07 12:44:16 +00:00
committed by Jakob Borg
parent 151004d645
commit 0da0774ce4
181 changed files with 30946 additions and 106 deletions

21
vendor/github.com/AudriusButkevicius/pfilter/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2016 Audrius Butkevicius
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

95
vendor/github.com/AudriusButkevicius/pfilter/conn.go generated vendored Normal file

@@ -0,0 +1,95 @@
package pfilter
import (
"net"
"sync/atomic"
"time"
)
type FilteredConn struct {
source *PacketFilter
priority int
recvBuffer chan packet
filter Filter
deadline atomic.Value
closed chan struct{}
}
// LocalAddr returns the local address
func (r *FilteredConn) LocalAddr() net.Addr {
return r.source.LocalAddr()
}
// SetReadDeadline sets a read deadline
func (r *FilteredConn) SetReadDeadline(t time.Time) error {
r.deadline.Store(t)
return nil
}
// SetWriteDeadline sets a write deadline
func (r *FilteredConn) SetWriteDeadline(t time.Time) error {
return r.source.SetWriteDeadline(t)
}
// SetDeadline sets a read and a write deadline
func (r *FilteredConn) SetDeadline(t time.Time) error {
r.SetReadDeadline(t)
return r.SetWriteDeadline(t)
}
// WriteTo writes bytes to the given address
func (r *FilteredConn) WriteTo(b []byte, addr net.Addr) (n int, err error) {
select {
case <-r.closed:
return 0, errClosed
default:
}
if r.filter != nil {
r.filter.Outgoing(b, addr)
}
return r.source.WriteTo(b, addr)
}
// ReadFrom reads from the filtered connection
func (r *FilteredConn) ReadFrom(b []byte) (n int, addr net.Addr, err error) {
select {
case <-r.closed:
return 0, nil, errClosed
default:
}
var timeout <-chan time.Time
if deadline, ok := r.deadline.Load().(time.Time); ok && !deadline.IsZero() {
timer := time.NewTimer(deadline.Sub(time.Now()))
timeout = timer.C
defer timer.Stop()
}
select {
case <-timeout:
return 0, nil, &timeoutError{}
case pkt := <-r.recvBuffer:
copy(b[:pkt.n], pkt.buf)
bufPool.Put(pkt.buf[:maxPacketSize])
return pkt.n, pkt.addr, pkt.err
case <-r.closed:
return 0, nil, errClosed
}
}
// Close closes the filtered connection, removing it from its parent packet filter
func (r *FilteredConn) Close() error {
select {
case <-r.closed:
return errClosed
default:
}
close(r.closed)
r.source.removeConn(r)
return nil
}

134
vendor/github.com/AudriusButkevicius/pfilter/filter.go generated vendored Normal file

@@ -0,0 +1,134 @@
package pfilter
import (
"net"
"sort"
"sync"
"sync/atomic"
)
// A Filter receives all data sent out via the Outgoing callback, and is
// expected to decide whether it wants to claim an incoming packet or not via
// the ClaimIncoming callback.
type Filter interface {
Outgoing([]byte, net.Addr)
ClaimIncoming([]byte, net.Addr) bool
}
// NewPacketFilter creates a packet filter object wrapping the given packet
// connection.
func NewPacketFilter(conn net.PacketConn) *PacketFilter {
d := &PacketFilter{
PacketConn: conn,
}
return d
}
// PacketFilter embeds a net.PacketConn to perform the filtering.
type PacketFilter struct {
net.PacketConn
conns []*FilteredConn
mut sync.Mutex
dropped uint64
overflow uint64
}
// NewConn returns a new net.PacketConn object which filters packets based
// on the provided filter. If filter is nil, the connection receives all packets
// not claimed by a connection checked before it. Priority decides the order in
// which connections get to claim a packet; lower values are checked first.
func (d *PacketFilter) NewConn(priority int, filter Filter) net.PacketConn {
conn := &FilteredConn{
priority: priority,
source: d,
recvBuffer: make(chan packet, 256),
filter: filter,
closed: make(chan struct{}),
}
d.mut.Lock()
d.conns = append(d.conns, conn)
sort.Sort(filteredConnList(d.conns))
d.mut.Unlock()
return conn
}
func (d *PacketFilter) removeConn(r *FilteredConn) {
d.mut.Lock()
for i, conn := range d.conns {
if conn == r {
copy(d.conns[i:], d.conns[i+1:])
d.conns[len(d.conns)-1] = nil
d.conns = d.conns[:len(d.conns)-1]
break
}
}
d.mut.Unlock()
}
// NumberOfConns returns the number of currently active virtual connections
func (d *PacketFilter) NumberOfConns() int {
d.mut.Lock()
n := len(d.conns)
d.mut.Unlock()
return n
}
// Dropped returns the number of packets dropped because no connection claimed them.
func (d *PacketFilter) Dropped() uint64 {
return atomic.LoadUint64(&d.dropped)
}
// Overflow returns the number of packets dropped due to receive buffers being
// full.
func (d *PacketFilter) Overflow() uint64 {
return atomic.LoadUint64(&d.overflow)
}
// Start starts the packet filter.
func (d *PacketFilter) Start() {
go d.loop()
}
func (d *PacketFilter) loop() {
var buf []byte
next:
for {
buf = bufPool.Get().([]byte)
n, addr, err := d.ReadFrom(buf)
pkt := packet{
n: n,
addr: addr,
err: err,
buf: buf[:n],
}
d.mut.Lock()
conns := d.conns
d.mut.Unlock()
if err != nil {
for _, conn := range conns {
select {
case conn.recvBuffer <- pkt:
default:
atomic.AddUint64(&d.overflow, 1)
}
}
return
}
for _, conn := range conns {
if conn.filter == nil || conn.filter.ClaimIncoming(pkt.buf, pkt.addr) {
select {
case conn.recvBuffer <- pkt:
default:
atomic.AddUint64(&d.overflow, 1)
}
goto next
}
}
atomic.AddUint64(&d.dropped, 1)
}
}
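
For orientation (not part of the vendored file): a minimal sketch of how this pfilter API is meant to be used — one UDP socket wrapped in a PacketFilter and split into prioritised virtual connections. The `magicFilter` type and its magic byte are hypothetical illustrations only; presumably the real filters added by this commit live in lib/connections.

```go
package main

import (
	"fmt"
	"net"

	"github.com/AudriusButkevicius/pfilter"
)

// magicFilter is a hypothetical Filter that claims only packets whose first
// byte matches a magic value; everything else falls through to connections
// with a higher priority number.
type magicFilter struct{ magic byte }

func (f *magicFilter) Outgoing(b []byte, addr net.Addr) {}
func (f *magicFilter) ClaimIncoming(b []byte, addr net.Addr) bool {
	return len(b) > 0 && b[0] == f.magic
}

func main() {
	base, err := net.ListenPacket("udp", ":0")
	if err != nil {
		panic(err)
	}

	filter := pfilter.NewPacketFilter(base)
	// Priority 0 is sorted first, so it gets the first chance to claim packets.
	magicConn := filter.NewConn(0, &magicFilter{magic: 0xAB})
	// A nil filter claims everything the earlier connections did not take.
	restConn := filter.NewConn(10, nil)
	filter.Start()

	fmt.Println(magicConn.LocalAddr(), restConn.LocalAddr())
}
```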

36
vendor/github.com/AudriusButkevicius/pfilter/misc.go generated vendored Normal file

@@ -0,0 +1,36 @@
package pfilter
import (
"fmt"
"net"
"sync"
)
var (
maxPacketSize = 1500
bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, maxPacketSize)
},
}
errClosed = fmt.Errorf("use of closed network connection")
)
type timeoutError struct{}
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
type filteredConnList []*FilteredConn
func (r filteredConnList) Len() int { return len(r) }
func (r filteredConnList) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r filteredConnList) Less(i, j int) bool { return r[i].priority < r[j].priority }
type packet struct {
n int
addr net.Addr
err error
buf []byte
}

191
vendor/github.com/ccding/go-stun/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

66
vendor/github.com/ccding/go-stun/README.md generated vendored Normal file

@@ -0,0 +1,66 @@
go-stun
=======
[![Build Status](https://travis-ci.org/ccding/go-stun.svg?branch=master)](https://travis-ci.org/ccding/go-stun)
[![License](https://img.shields.io/badge/License-Apache%202.0-red.svg)](https://opensource.org/licenses/Apache-2.0)
[![GoDoc](https://godoc.org/github.com/ccding/go-stun?status.svg)](http://godoc.org/github.com/ccding/go-stun/stun)
[![Go Report Card](https://goreportcard.com/badge/github.com/ccding/go-stun)](https://goreportcard.com/report/github.com/ccding/go-stun)
go-stun is a STUN (RFC 3489, 5389) client implementation in golang
(a.k.a. UDP hole punching).
[RFC 3489](https://tools.ietf.org/html/rfc3489):
STUN - Simple Traversal of User Datagram Protocol (UDP)
Through Network Address Translators (NATs)
[RFC 5389](https://tools.ietf.org/html/rfc5389):
Session Traversal Utilities for NAT (STUN)
### Use the Command Line Tool
Simply run these commands (if you have installed golang and set `$GOPATH`)
```
go get github.com/ccding/go-stun
go-stun
```
or clone this repo and run these commands
```
go build
./go-stun
```
You will get output like
```
NAT Type: Full cone NAT
External IP Family: 1
External IP: 166.111.4.100
External Port: 23009
```
You can use the `-s` flag to use another STUN server, and `-v` to run in
verbose mode.
```bash
> ./go-stun --help
Usage of ./go-stun:
-s string
server address (default "stun1.l.google.com:19302")
-v verbose mode
```
### Use the Library
The library `github.com/ccding/go-stun/stun` is extremely easy to use -- just
one line of code.
```go
import "github.com/ccding/go-stun/stun"
func main() {
nat, host, err := stun.NewClient().Discover()
}
```
For more details, please see `main.go` and [GoDoc](http://godoc.org/github.com/ccding/go-stun/stun).

56
vendor/github.com/ccding/go-stun/main.go generated vendored Normal file

@@ -0,0 +1,56 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// author: Cong Ding <dinggnu@gmail.com>
package main
import (
"flag"
"fmt"
"github.com/ccding/go-stun/stun"
)
func main() {
var serverAddr = flag.String("s", stun.DefaultServerAddr, "STUN server address")
var v = flag.Bool("v", false, "verbose mode")
var vv = flag.Bool("vv", false, "double verbose mode (includes -v)")
var vvv = flag.Bool("vvv", false, "triple verbose mode (includes -v and -vv)")
flag.Parse()
// Creates a STUN client. NewClientWithConnection can also be used if
// you want to handle the UDP listener by yourself.
client := stun.NewClient()
// The default addr (stun.DefaultServerAddr) will be used unless we
// call SetServerAddr.
client.SetServerAddr(*serverAddr)
// Non verbose mode will be used by default unless we call
// SetVerbose(true) or SetVVerbose(true).
client.SetVerbose(*v || *vv || *vvv)
client.SetVVerbose(*vv || *vvv)
// Discover the NAT and return the result.
nat, host, err := client.Discover()
if err != nil {
fmt.Println(err)
return
}
fmt.Println("NAT Type:", nat)
if host != nil {
fmt.Println("External IP Family:", host.Family())
fmt.Println("External IP:", host.IP())
fmt.Println("External Port:", host.Port())
}
}

106
vendor/github.com/ccding/go-stun/stun/attribute.go generated vendored Normal file

@@ -0,0 +1,106 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"encoding/binary"
"hash/crc32"
"net"
)
type attribute struct {
types uint16
length uint16
value []byte
}
func newAttribute(types uint16, value []byte) *attribute {
att := new(attribute)
att.types = types
att.value = padding(value)
att.length = uint16(len(att.value))
return att
}
func newFingerprintAttribute(packet *packet) *attribute {
crc := crc32.ChecksumIEEE(packet.bytes()) ^ fingerprint
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, crc)
return newAttribute(attributeFingerprint, buf)
}
func newSoftwareAttribute(name string) *attribute {
return newAttribute(attributeSoftware, []byte(name))
}
func newChangeReqAttribute(changeIP bool, changePort bool) *attribute {
value := make([]byte, 4)
if changeIP {
value[3] |= 0x04
}
if changePort {
value[3] |= 0x02
}
return newAttribute(attributeChangeRequest, value)
}
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |x x x x x x x x| Family | X-Port |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | X-Address (Variable)
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
// Figure 6: Format of XOR-MAPPED-ADDRESS Attribute
func (v *attribute) xorAddr(transID []byte) *Host {
xorIP := make([]byte, 16)
for i := 0; i < len(v.value)-4; i++ {
xorIP[i] = v.value[i+4] ^ transID[i]
}
family := uint16(v.value[1])
port := binary.BigEndian.Uint16(v.value[2:4])
// Truncate if IPv4, otherwise net.IP sometimes renders it as an IPv6 address.
if family == attributeFamilyIPv4 {
xorIP = xorIP[:4]
}
x := binary.BigEndian.Uint16(transID[:2])
return &Host{family, net.IP(xorIP).String(), port ^ x}
}
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |0 0 0 0 0 0 0 0| Family | Port |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | |
// | Address (32 bits or 128 bits) |
// | |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
// Figure 5: Format of MAPPED-ADDRESS Attribute
func (v *attribute) rawAddr() *Host {
host := new(Host)
host.family = uint16(v.value[1])
host.port = binary.BigEndian.Uint16(v.value[2:4])
// Truncate if IPv4, otherwise net.IP sometimes renders it as an IPv6 address.
if host.family == attributeFamilyIPv4 {
v.value = v.value[:8]
}
host.ip = net.IP(v.value[4:]).String()
return host
}

126
vendor/github.com/ccding/go-stun/stun/client.go generated vendored Normal file

@@ -0,0 +1,126 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"errors"
"net"
"strconv"
)
// Client is a STUN client, which can be assigned a STUN server address and is
// used to discover the NAT type.
type Client struct {
serverAddr string
softwareName string
conn net.PacketConn
logger *Logger
}
// NewClient returns a client without a network connection. The network
// connection will be built when calling the Discover function.
func NewClient() *Client {
c := new(Client)
c.SetSoftwareName(DefaultSoftwareName)
c.logger = NewLogger()
return c
}
// NewClientWithConnection returns a client which uses the given connection.
// Please note the connection should be acquired via net.Listen* method.
func NewClientWithConnection(conn net.PacketConn) *Client {
c := new(Client)
c.conn = conn
c.SetSoftwareName(DefaultSoftwareName)
c.logger = NewLogger()
return c
}
// SetVerbose sets the client to be in the verbose mode, which prints
// information in the discover process.
func (c *Client) SetVerbose(v bool) {
c.logger.SetDebug(v)
}
// SetVVerbose sets the client to be in the double verbose mode, which prints
// information and packet in the discover process.
func (c *Client) SetVVerbose(v bool) {
c.logger.SetInfo(v)
}
// SetServerHost allows the user to set the STUN server hostname and port.
func (c *Client) SetServerHost(host string, port int) {
c.serverAddr = net.JoinHostPort(host, strconv.Itoa(port))
}
// SetServerAddr allows the user to set the transport layer STUN server address.
func (c *Client) SetServerAddr(address string) {
c.serverAddr = address
}
// SetSoftwareName allows the user to set the name of the software, which is
// used for logging purposes (NOT used in the current implementation).
func (c *Client) SetSoftwareName(name string) {
c.softwareName = name
}
// Discover contacts the STUN server and returns the discovered NAT type and
// the external host, for UDP hole punching.
func (c *Client) Discover() (NATType, *Host, error) {
if c.serverAddr == "" {
c.SetServerAddr(DefaultServerAddr)
}
serverUDPAddr, err := net.ResolveUDPAddr("udp", c.serverAddr)
if err != nil {
return NATError, nil, err
}
// Use the connection passed to the client if it is not nil, otherwise
// create a connection and close it at the end.
conn := c.conn
if conn == nil {
conn, err = net.ListenUDP("udp", nil)
if err != nil {
return NATError, nil, err
}
defer conn.Close()
}
return c.discover(conn, serverUDPAddr)
}
// Keepalive sends and receives a binding request, which ensures the mapping stays open.
// Only applicable when the client was created with a connection.
func (c *Client) Keepalive() (*Host, error) {
if c.conn == nil {
return nil, errors.New("no connection available")
}
if c.serverAddr == "" {
c.SetServerAddr(DefaultServerAddr)
}
serverUDPAddr, err := net.ResolveUDPAddr("udp", c.serverAddr)
if err != nil {
return nil, err
}
resp, err := c.test1(c.conn, serverUDPAddr)
if err != nil {
return nil, err
}
if resp == nil || resp.packet == nil {
return nil, errors.New("failed to contact")
}
return resp.mappedAddr, nil
}
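
For orientation (not part of the vendored file): a hedged sketch of the client API described by the comments above — supplying our own UDP socket via NewClientWithConnection so that Keepalive can later refresh the same mapping. The choice of server address and the 30-second keepalive interval are illustrative assumptions only.

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/ccding/go-stun/stun"
)

func main() {
	// Own the UDP socket ourselves; Keepalive only works when the client
	// was created with a connection.
	conn, err := net.ListenUDP("udp", nil)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := stun.NewClientWithConnection(conn)
	client.SetServerAddr(stun.DefaultServerAddr) // or any other STUN server

	nat, host, err := client.Discover()
	if err != nil {
		panic(err)
	}
	fmt.Println("NAT type:", nat, "external address:", host)

	// Periodically refresh the NAT mapping on the same socket.
	for i := 0; i < 3; i++ {
		time.Sleep(30 * time.Second)
		if cur, err := client.Keepalive(); err == nil {
			fmt.Println("mapping still:", cur)
		}
	}
}
```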

174
vendor/github.com/ccding/go-stun/stun/const.go generated vendored Normal file

@@ -0,0 +1,174 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
// Default server address and client name.
const (
DefaultServerAddr = "stun.ekiga.net:3478"
DefaultSoftwareName = "StunClient"
)
const (
magicCookie = 0x2112A442
fingerprint = 0x5354554e
)
// NATType is the type of NAT, represented as an int.
type NATType int
// NAT types.
const (
NATError NATType = iota
NATUnknown
NATNone
NATBlocked
NATFull
NATSymetric
NATRestricted
NATPortRestricted
NATSymetricUDPFirewall
)
var natStr map[NATType]string
func init() {
natStr = map[NATType]string{
NATError: "Test failed",
NATUnknown: "Unexpected response from the STUN server",
NATBlocked: "UDP is blocked",
NATFull: "Full cone NAT",
NATSymetric: "Symetric NAT",
NATRestricted: "Restricted NAT",
NATPortRestricted: "Port restricted NAT",
NATNone: "Not behind a NAT",
NATSymetricUDPFirewall: "Symetric UDP firewall",
}
}
func (nat NATType) String() string {
if s, ok := natStr[nat]; ok {
return s
}
return "Unknown"
}
const (
errorTryAlternate = 300
errorBadRequest = 400
errorUnauthorized = 401
errorUnassigned402 = 402
errorForbidden = 403
errorUnknownAttribute = 420
errorAllocationMismatch = 437
errorStaleNonce = 438
errorUnassigned439 = 439
errorAddressFamilyNotSupported = 440
errorWrongCredentials = 441
errorUnsupportedTransportProtocol = 442
errorPeerAddressFamilyMismatch = 443
errorConnectionAlreadyExists = 446
errorConnectionTimeoutOrFailure = 447
errorAllocationQuotaReached = 486
errorRoleConflict = 487
errorServerError = 500
errorInsufficientCapacity = 508
)
const (
attributeFamilyIPv4 = 0x01
attributeFamilyIPV6 = 0x02
)
const (
attributeMappedAddress = 0x0001
attributeResponseAddress = 0x0002
attributeChangeRequest = 0x0003
attributeSourceAddress = 0x0004
attributeChangedAddress = 0x0005
attributeUsername = 0x0006
attributePassword = 0x0007
attributeMessageIntegrity = 0x0008
attributeErrorCode = 0x0009
attributeUnknownAttributes = 0x000a
attributeReflectedFrom = 0x000b
attributeChannelNumber = 0x000c
attributeLifetime = 0x000d
attributeBandwidth = 0x0010
attributeXorPeerAddress = 0x0012
attributeData = 0x0013
attributeRealm = 0x0014
attributeNonce = 0x0015
attributeXorRelayedAddress = 0x0016
attributeRequestedAddressFamily = 0x0017
attributeEvenPort = 0x0018
attributeRequestedTransport = 0x0019
attributeDontFragment = 0x001a
attributeXorMappedAddress = 0x0020
attributeTimerVal = 0x0021
attributeReservationToken = 0x0022
attributePriority = 0x0024
attributeUseCandidate = 0x0025
attributePadding = 0x0026
attributeResponsePort = 0x0027
attributeConnectionID = 0x002a
attributeXorMappedAddressExp = 0x8020
attributeSoftware = 0x8022
attributeAlternateServer = 0x8023
attributeCacheTimeout = 0x8027
attributeFingerprint = 0x8028
attributeIceControlled = 0x8029
attributeIceControlling = 0x802a
attributeResponseOrigin = 0x802b
attributeOtherAddress = 0x802c
attributeEcnCheckStun = 0x802d
attributeCiscoFlowdata = 0xc000
)
const (
typeBindingRequest = 0x0001
typeBindingResponse = 0x0101
typeBindingErrorResponse = 0x0111
typeSharedSecretRequest = 0x0002
typeSharedSecretResponse = 0x0102
typeSharedErrorResponse = 0x0112
typeAllocate = 0x0003
typeAllocateResponse = 0x0103
typeAllocateErrorResponse = 0x0113
typeRefresh = 0x0004
typeRefreshResponse = 0x0104
typeRefreshErrorResponse = 0x0114
typeSend = 0x0006
typeSendResponse = 0x0106
typeSendErrorResponse = 0x0116
typeData = 0x0007
typeDataResponse = 0x0107
typeDataErrorResponse = 0x0117
typeCreatePermisiion = 0x0008
typeCreatePermisiionResponse = 0x0108
typeCreatePermisiionErrorResponse = 0x0118
typeChannelBinding = 0x0009
typeChannelBindingResponse = 0x0109
typeChannelBindingErrorResponse = 0x0119
typeConnect = 0x000a
typeConnectResponse = 0x010a
typeConnectErrorResponse = 0x011a
typeConnectionBind = 0x000b
typeConnectionBindResponse = 0x010b
typeConnectionBindErrorResponse = 0x011b
typeConnectionAttempt = 0x000c
typeConnectionAttemptResponse = 0x010c
typeConnectionAttemptErrorResponse = 0x011c
)

160
vendor/github.com/ccding/go-stun/stun/discover.go generated vendored Normal file

@@ -0,0 +1,160 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"errors"
"net"
)
// Follow RFC 3489 and RFC 5389.
// Figure 2: Flow for type discovery process (from RFC 3489).
// +--------+
// | Test |
// | I |
// +--------+
// |
// |
// V
// /\ /\
// N / \ Y / \ Y +--------+
// UDP <-------/Resp\--------->/ IP \------------->| Test |
// Blocked \ ? / \Same/ | II |
// \ / \? / +--------+
// \/ \/ |
// | N |
// | V
// V /\
// +--------+ Sym. N / \
// | Test | UDP <---/Resp\
// | II | Firewall \ ? /
// +--------+ \ /
// | \/
// V |Y
// /\ /\ |
// Symmetric N / \ +--------+ N / \ V
// NAT <--- / IP \<-----| Test |<--- /Resp\ Open
// \Same/ | I | \ ? / Internet
// \? / +--------+ \ /
// \/ \/
// |Y |Y
// | |
// | V
// | Full
// | Cone
// V /\
// +--------+ / \ Y
// | Test |------>/Resp\---->Restricted
// | III | \ ? /
// +--------+ \ /
// \/
// |N
// | Port
// +------>Restricted
func (c *Client) discover(conn net.PacketConn, addr *net.UDPAddr) (NATType, *Host, error) {
// Perform test1 to check if it is under NAT.
c.logger.Debugln("Do Test1")
c.logger.Debugln("Send To:", addr)
resp, err := c.test1(conn, addr)
if err != nil {
return NATError, nil, err
}
c.logger.Debugln("Received:", resp)
if resp == nil {
return NATBlocked, nil, nil
}
// identical is used to check whether the client is on the open Internet or not.
identical := resp.identical
// changedAddr is used to perform test1 a second time, and test3.
changedAddr := resp.changedAddr
// mappedAddr is used as the return value; its IP is used for the following tests.
mappedAddr := resp.mappedAddr
// Make sure IP and port are not changed.
if resp.serverAddr.IP() != addr.IP.String() ||
resp.serverAddr.Port() != uint16(addr.Port) {
return NATError, mappedAddr, errors.New("Server error: response IP/port")
}
// changedAddr shall not be nil
if changedAddr == nil {
return NATError, mappedAddr, errors.New("Server error: no changed address.")
}
// Perform test2 to see if the client can receive packet sent from
// another IP and port.
c.logger.Debugln("Do Test2")
c.logger.Debugln("Send To:", addr)
resp, err = c.test2(conn, addr)
if err != nil {
return NATError, mappedAddr, err
}
c.logger.Debugln("Received:", resp)
// Make sure IP and port are changed.
if resp != nil &&
(resp.serverAddr.IP() == addr.IP.String() ||
resp.serverAddr.Port() == uint16(addr.Port)) {
return NATError, mappedAddr, errors.New("Server error: response IP/port")
}
if identical {
if resp == nil {
return NATSymetricUDPFirewall, mappedAddr, nil
}
return NATNone, mappedAddr, nil
}
if resp != nil {
return NATFull, mappedAddr, nil
}
// Perform test1 against another IP and port to see if the NAT uses the same
// external IP.
c.logger.Debugln("Do Test1")
c.logger.Debugln("Send To:", changedAddr)
caddr, err := net.ResolveUDPAddr("udp", changedAddr.String())
resp, err = c.test1(conn, caddr)
if err != nil {
return NATError, mappedAddr, err
}
c.logger.Debugln("Received:", resp)
if resp == nil {
// It should be NAT_BLOCKED, but will be detected in the first
// step. So this will never happen.
return NATUnknown, mappedAddr, nil
}
// Make sure IP/port is not changed.
if resp.serverAddr.IP() != caddr.IP.String() ||
resp.serverAddr.Port() != uint16(caddr.Port) {
return NATError, mappedAddr, errors.New("Server error: response IP/port")
}
if mappedAddr.IP() == resp.mappedAddr.IP() && mappedAddr.Port() == resp.mappedAddr.Port() {
// Perform test3 to see if the client can receive packet sent
// from another port.
c.logger.Debugln("Do Test3")
c.logger.Debugln("Send To:", caddr)
resp, err = c.test3(conn, caddr)
if err != nil {
return NATError, mappedAddr, err
}
c.logger.Debugln("Received:", resp)
if resp == nil {
return NATPortRestricted, mappedAddr, nil
}
// Make sure IP is not changed, and port is changed.
if resp.serverAddr.IP() != caddr.IP.String() ||
resp.serverAddr.Port() == uint16(caddr.Port) {
return NATError, mappedAddr, errors.New("Server error: response IP/port")
}
return NATRestricted, mappedAddr, nil
}
return NATSymetric, mappedAddr, nil
}

25
vendor/github.com/ccding/go-stun/stun/doc.go generated vendored Normal file

@@ -0,0 +1,25 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
// Package stun is a STUN (RFC 3489 and RFC 5389) client implementation in
// golang.
//
// It is extremely easy to use -- just one line of code.
//
// nat, host, err := stun.NewClient().Discover()
//
// For more details, please see `main.go`.
package stun

70
vendor/github.com/ccding/go-stun/stun/host.go generated vendored Normal file

@@ -0,0 +1,70 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"net"
"strconv"
)
// Host defines the network address including address family, IP address and port.
type Host struct {
family uint16
ip string
port uint16
}
func newHostFromStr(s string) *Host {
udpAddr, err := net.ResolveUDPAddr("udp", s)
if err != nil {
return nil
}
host := new(Host)
if udpAddr.IP.To4() != nil {
host.family = attributeFamilyIPv4
} else {
host.family = attributeFamilyIPV6
}
host.ip = udpAddr.IP.String()
host.port = uint16(udpAddr.Port)
return host
}
// Family returns the family type of a host (IPv4 or IPv6).
func (h *Host) Family() uint16 {
return h.family
}
// IP returns the internet protocol address of the host.
func (h *Host) IP() string {
return h.ip
}
// Port returns the port number of the host.
func (h *Host) Port() uint16 {
return h.port
}
// TransportAddr returns the transport layer address of the host.
func (h *Host) TransportAddr() string {
return net.JoinHostPort(h.ip, strconv.Itoa(int(h.port)))
}
// String returns the string representation of the host address.
func (h *Host) String() string {
return h.TransportAddr()
}

87
vendor/github.com/ccding/go-stun/stun/log.go generated vendored Normal file

@@ -0,0 +1,87 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"log"
"os"
)
// Logger is a simple logger specified for this STUN client.
type Logger struct {
log.Logger
debug bool
info bool
}
// NewLogger creates a default logger.
func NewLogger() *Logger {
logger := &Logger{*log.New(os.Stdout, "", log.LstdFlags), false, false}
return logger
}
// SetDebug sets the logger running in debug mode or not.
func (l *Logger) SetDebug(v bool) {
l.debug = v
}
// SetInfo sets the logger running in info mode or not.
func (l *Logger) SetInfo(v bool) {
l.info = v
}
// Debug outputs the log in the format of log.Print.
func (l *Logger) Debug(v ...interface{}) {
if l.debug {
l.Print(v...)
}
}
// Debugf outputs the log in the format of log.Printf.
func (l *Logger) Debugf(format string, v ...interface{}) {
if l.debug {
l.Printf(format, v...)
}
}
// Debugln outputs the log in the format of log.Println.
func (l *Logger) Debugln(v ...interface{}) {
if l.debug {
l.Println(v...)
}
}
// Info outputs the log in the format of log.Print.
func (l *Logger) Info(v ...interface{}) {
if l.info {
l.Print(v...)
}
}
// Infof outputs the log in the format of log.Printf.
func (l *Logger) Infof(format string, v ...interface{}) {
if l.info {
l.Printf(format, v...)
}
}
// Infoln outputs the log in the format of log.Println.
func (l *Logger) Infoln(v ...interface{}) {
if l.info {
l.Println(v...)
}
}

102
vendor/github.com/ccding/go-stun/stun/net.go generated vendored Normal file

@@ -0,0 +1,102 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"bytes"
"encoding/hex"
"errors"
"net"
"time"
)
const (
numRetransmit = 9
defaultTimeout = 100
maxTimeout = 1600
maxPacketSize = 1024
)
func (c *Client) sendBindingReq(conn net.PacketConn, addr net.Addr, changeIP bool, changePort bool) (*response, error) {
// Construct packet.
pkt, err := newPacket()
if err != nil {
return nil, err
}
pkt.types = typeBindingRequest
attribute := newSoftwareAttribute(c.softwareName)
pkt.addAttribute(*attribute)
if changeIP || changePort {
attribute = newChangeReqAttribute(changeIP, changePort)
pkt.addAttribute(*attribute)
}
attribute = newFingerprintAttribute(pkt)
pkt.addAttribute(*attribute)
// Send packet.
return c.send(pkt, conn, addr)
}
// RFC 3489: Clients SHOULD retransmit the request starting with an interval
// of 100ms, doubling every retransmit until the interval reaches 1.6s.
// Retransmissions continue with intervals of 1.6s until a response is
// received, or a total of 9 requests have been sent.
func (c *Client) send(pkt *packet, conn net.PacketConn, addr net.Addr) (*response, error) {
c.logger.Info("\n" + hex.Dump(pkt.bytes()))
timeout := defaultTimeout
packetBytes := make([]byte, maxPacketSize)
for i := 0; i < numRetransmit; i++ {
// Send packet to the server.
length, err := conn.WriteTo(pkt.bytes(), addr)
if err != nil {
return nil, err
}
if length != len(pkt.bytes()) {
return nil, errors.New("Error in sending data.")
}
err = conn.SetReadDeadline(time.Now().Add(time.Duration(timeout) * time.Millisecond))
if err != nil {
return nil, err
}
if timeout < maxTimeout {
timeout *= 2
}
for {
// Read from the port.
length, raddr, err := conn.ReadFrom(packetBytes)
if err != nil {
if err.(net.Error).Timeout() {
break
}
return nil, err
}
p, err := newPacketFromBytes(packetBytes[0:length])
if err != nil {
return nil, err
}
// If the transaction ID does not match, keep reading until we get a
// matching packet or time out.
if !bytes.Equal(pkt.transID, p.transID) {
continue
}
c.logger.Info("\n" + hex.Dump(packetBytes[0:length]))
resp := newResponse(p, conn)
resp.serverAddr = newHostFromStr(raddr.String())
return resp, err
}
}
return nil, nil
}
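
For orientation (not part of the vendored file): given the constants above (defaultTimeout = 100 ms, maxTimeout = 1600 ms, numRetransmit = 9), the loop in send() waits 100, 200, 400 and 800 ms and then 1.6 s per attempt, roughly 9.5 s worst case before giving up. A tiny stand-alone sketch reproducing that schedule:

```go
package main

import "fmt"

func main() {
	// Mirror the retransmission schedule of send(): start at 100 ms,
	// double after each attempt, cap at 1600 ms, at most 9 requests.
	timeout, total := 100, 0
	for i := 0; i < 9; i++ {
		fmt.Printf("request %d: wait up to %d ms\n", i+1, timeout)
		total += timeout
		if timeout < 1600 {
			timeout *= 2
		}
	}
	fmt.Println("worst case total:", total, "ms") // prints 9500 ms
}
```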

125
vendor/github.com/ccding/go-stun/stun/packet.go generated vendored Normal file

@@ -0,0 +1,125 @@
// Copyright 2013, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"crypto/rand"
"encoding/binary"
"errors"
)
type packet struct {
types uint16
length uint16
transID []byte // 4 bytes magic cookie + 12 bytes transaction id
attributes []attribute
}
func newPacket() (*packet, error) {
v := new(packet)
v.transID = make([]byte, 16)
binary.BigEndian.PutUint32(v.transID[:4], magicCookie)
_, err := rand.Read(v.transID[4:])
if err != nil {
return nil, err
}
v.attributes = make([]attribute, 0, 10)
v.length = 0
return v, nil
}
func newPacketFromBytes(packetBytes []byte) (*packet, error) {
if len(packetBytes) < 24 {
return nil, errors.New("Received data length too short.")
}
pkt := new(packet)
pkt.types = binary.BigEndian.Uint16(packetBytes[0:2])
pkt.length = binary.BigEndian.Uint16(packetBytes[2:4])
pkt.transID = packetBytes[4:20]
pkt.attributes = make([]attribute, 0, 10)
for pos := uint16(20); pos < uint16(len(packetBytes)); {
types := binary.BigEndian.Uint16(packetBytes[pos : pos+2])
length := binary.BigEndian.Uint16(packetBytes[pos+2 : pos+4])
if pos+4+length > uint16(len(packetBytes)) {
return nil, errors.New("Received data format mismatch.")
}
value := packetBytes[pos+4 : pos+4+length]
attribute := newAttribute(types, value)
pkt.addAttribute(*attribute)
pos += align(length) + 4
}
return pkt, nil
}
func (v *packet) addAttribute(a attribute) {
v.attributes = append(v.attributes, a)
v.length += align(a.length) + 4
}
func (v *packet) bytes() []byte {
packetBytes := make([]byte, 4)
binary.BigEndian.PutUint16(packetBytes[0:2], v.types)
binary.BigEndian.PutUint16(packetBytes[2:4], v.length)
packetBytes = append(packetBytes, v.transID...)
for _, a := range v.attributes {
buf := make([]byte, 2)
binary.BigEndian.PutUint16(buf, a.types)
packetBytes = append(packetBytes, buf...)
binary.BigEndian.PutUint16(buf, a.length)
packetBytes = append(packetBytes, buf...)
packetBytes = append(packetBytes, a.value...)
}
return packetBytes
}
func (v *packet) getSourceAddr() *Host {
return v.getRawAddr(attributeSourceAddress)
}
func (v *packet) getMappedAddr() *Host {
return v.getRawAddr(attributeMappedAddress)
}
func (v *packet) getChangedAddr() *Host {
return v.getRawAddr(attributeChangedAddress)
}
func (v *packet) getRawAddr(attribute uint16) *Host {
for _, a := range v.attributes {
if a.types == attribute {
return a.rawAddr()
}
}
return nil
}
func (v *packet) getXorMappedAddr() *Host {
addr := v.getXorAddr(attributeXorMappedAddress)
if addr == nil {
addr = v.getXorAddr(attributeXorMappedAddressExp)
}
return addr
}
func (v *packet) getXorAddr(attribute uint16) *Host {
for _, a := range v.attributes {
if a.types == attribute {
return a.xorAddr(v.transID)
}
}
return nil
}

61
vendor/github.com/ccding/go-stun/stun/packet_test.go generated vendored Normal file

@@ -0,0 +1,61 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"testing"
)
func TestNewPacketFromBytes(t *testing.T) {
b := make([]byte, 23)
_, err := newPacketFromBytes(b)
if err == nil {
t.Errorf("newPacketFromBytes error")
}
b = make([]byte, 24)
_, err = newPacketFromBytes(b)
if err != nil {
t.Errorf("newPacketFromBytes error")
}
}
func TestNewPacket(t *testing.T) {
_, err := newPacket()
if err != nil {
t.Errorf("newPacket error")
}
}
func TestPacketAll(t *testing.T) {
p, err := newPacket()
if err != nil {
t.Errorf("newPacket error")
}
p.addAttribute(*newChangeReqAttribute(true, true))
p.addAttribute(*newSoftwareAttribute("aaa"))
p.addAttribute(*newFingerprintAttribute(p))
pkt, err := newPacketFromBytes(p.bytes())
if err != nil {
t.Errorf("newPacketFromBytes error")
}
if pkt.types != 0 {
t.Errorf("newPacketFromBytes error")
}
if pkt.length < 24 {
t.Errorf("newPacketFromBytes error")
}
}

70
vendor/github.com/ccding/go-stun/stun/response.go generated vendored Normal file

@@ -0,0 +1,70 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"fmt"
"net"
)
type response struct {
packet *packet // the original packet from the server
serverAddr *Host // the address the packet was received from
changedAddr *Host // parsed from packet
mappedAddr *Host // parsed from packet, external addr of client NAT
identical bool // if mappedAddr is in local addr list
}
func newResponse(pkt *packet, conn net.PacketConn) *response {
resp := &response{pkt, nil, nil, nil, false}
if pkt == nil {
return resp
}
// RFC 3489 doesn't require the server to return the XOR mapped address.
mappedAddr := pkt.getXorMappedAddr()
if mappedAddr == nil {
mappedAddr = pkt.getMappedAddr()
}
resp.mappedAddr = mappedAddr
// compute identical
localAddrStr := conn.LocalAddr().String()
if mappedAddr != nil {
mappedAddrStr := mappedAddr.String()
resp.identical = isLocalAddress(localAddrStr, mappedAddrStr)
}
// compute changedAddr
changedAddr := pkt.getChangedAddr()
if changedAddr != nil {
changedAddrHost := newHostFromStr(changedAddr.String())
resp.changedAddr = changedAddrHost
}
return resp
}
// String is only used for verbose mode output.
func (r *response) String() string {
if r == nil {
return "Nil"
}
return fmt.Sprintf("{packet nil: %v, local: %v, remote: %v, changed: %v, identical: %v}",
r.packet == nil,
r.mappedAddr,
r.serverAddr,
r.changedAddr,
r.identical)
}

33
vendor/github.com/ccding/go-stun/stun/tests.go generated vendored Normal file

@@ -0,0 +1,33 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"net"
)
func (c *Client) test1(conn net.PacketConn, addr net.Addr) (*response, error) {
return c.sendBindingReq(conn, addr, false, false)
}
func (c *Client) test2(conn net.PacketConn, addr net.Addr) (*response, error) {
return c.sendBindingReq(conn, addr, true, true)
}
func (c *Client) test3(conn net.PacketConn, addr net.Addr) (*response, error) {
return c.sendBindingReq(conn, addr, false, true)
}

63
vendor/github.com/ccding/go-stun/stun/utils.go generated vendored Normal file

@@ -0,0 +1,63 @@
// Copyright 2016, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"net"
)
// padding pads the byte slice so that its length is a multiple of 4.
func padding(bytes []byte) []byte {
length := uint16(len(bytes))
return append(bytes, make([]byte, align(length)-length)...)
}
// align rounds the uint16 number up to the smallest multiple of 4 that is
// greater than or equal to it.
func align(n uint16) uint16 {
return (n + 3) & 0xfffc
}
// isLocalAddress checks if localRemote is a local address.
func isLocalAddress(local, localRemote string) bool {
// Resolve the IP returned by the STUN server first.
localRemoteAddr, err := net.ResolveUDPAddr("udp", localRemote)
if err != nil {
return false
}
// Try comparing with the local address on the socket first, but only if
// it's actually specified.
addr, err := net.ResolveUDPAddr("udp", local)
if err == nil && addr.IP != nil && !addr.IP.IsUnspecified() {
return addr.IP.Equal(localRemoteAddr.IP)
}
// Fallback to checking IPs of all interfaces
addrs, err := net.InterfaceAddrs()
if err != nil {
return false
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if err != nil {
continue
}
if ip.Equal(localRemoteAddr.IP) {
return true
}
}
return false
}

69
vendor/github.com/ccding/go-stun/stun/utils_test.go generated vendored Normal file

@@ -0,0 +1,69 @@
// Copyright 2015, Cong Ding. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Cong Ding <dinggnu@gmail.com>
package stun
import (
"testing"
)
func TestPadding(t *testing.T) {
b := []byte{1, 2}
expected := []byte{1, 2, 0, 0}
result := padding(b)
if len(result) != len(expected) {
t.Errorf("Padding error: result size wrong.\n")
}
for i := range expected {
if expected[i] != result[i] {
t.Errorf("Padding error: data wrong in bit %d.\n", i)
}
}
}
func TestAlign(t *testing.T) {
d := make(map[uint16]uint16)
d[1] = 4
d[4] = 4
d[5] = 8
d[6] = 8
d[7] = 8
d[8] = 8
d[65528] = 65528
d[65529] = 65532
d[65531] = 65532
d[65532] = 65532
for k, v := range d {
if align(k) != v {
t.Errorf("Align error: expected %d, got %d", v, align(k))
}
}
}
func TestIsLocalAddress(t *testing.T) {
if !isLocalAddress(":1234", "127.0.0.1:8888") {
t.Errorf("isLocal error")
}
if !isLocalAddress("192.168.0.1:1234", "192.168.0.1:8888") {
t.Errorf("isLocal error")
}
if !isLocalAddress("8.8.8.8:1234", "8.8.8.8:8888") {
t.Errorf("isLocal error")
}
if isLocalAddress(":1234", "8.8.8.8:8888") {
t.Errorf("isLocal error")
}
}

362
vendor/github.com/hashicorp/yamux/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

60
vendor/github.com/hashicorp/yamux/addr.go generated vendored Normal file
View File

@@ -0,0 +1,60 @@
package yamux
import (
"fmt"
"net"
)
// hasAddr is used to get the address from the underlying connection
type hasAddr interface {
LocalAddr() net.Addr
RemoteAddr() net.Addr
}
// yamuxAddr is used when we cannot get the underlying address
type yamuxAddr struct {
Addr string
}
func (*yamuxAddr) Network() string {
return "yamux"
}
func (y *yamuxAddr) String() string {
return fmt.Sprintf("yamux:%s", y.Addr)
}
// Addr is used to get the address of the listener.
func (s *Session) Addr() net.Addr {
return s.LocalAddr()
}
// LocalAddr is used to get the local address of the
// underlying connection.
func (s *Session) LocalAddr() net.Addr {
addr, ok := s.conn.(hasAddr)
if !ok {
return &yamuxAddr{"local"}
}
return addr.LocalAddr()
}
// RemoteAddr is used to get the address of the remote end
// of the underlying connection
func (s *Session) RemoteAddr() net.Addr {
addr, ok := s.conn.(hasAddr)
if !ok {
return &yamuxAddr{"remote"}
}
return addr.RemoteAddr()
}
// LocalAddr returns the local address
func (s *Stream) LocalAddr() net.Addr {
return s.session.LocalAddr()
}
// RemoteAddr returns the remote address
func (s *Stream) RemoteAddr() net.Addr {
return s.session.RemoteAddr()
}
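Because a yamux Session only requires an io.ReadWriteCloser, the underlying connection may carry no addresses at all; the yamuxAddr fallback above is what the address methods then report. A minimal illustrative sketch (not part of the vendored file; rwc is a hypothetical address-less io.ReadWriteCloser, and Client is defined in mux.go later in this commit):

```Go
session, _ := yamux.Client(rwc, nil) // rwc: any io.ReadWriteCloser with no addresses
fmt.Println(session.LocalAddr())     // prints "yamux:local"
fmt.Println(session.RemoteAddr())    // prints "yamux:remote"
```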

157
vendor/github.com/hashicorp/yamux/const.go generated vendored Normal file
View File

@@ -0,0 +1,157 @@
package yamux
import (
"encoding/binary"
"fmt"
)
var (
// ErrInvalidVersion means we received a frame with an
// invalid version
ErrInvalidVersion = fmt.Errorf("invalid protocol version")
// ErrInvalidMsgType means we received a frame with an
// invalid message type
ErrInvalidMsgType = fmt.Errorf("invalid msg type")
// ErrSessionShutdown is used if there is a shutdown during
// an operation
ErrSessionShutdown = fmt.Errorf("session shutdown")
// ErrStreamsExhausted is returned if we have no more
// stream ids to issue
ErrStreamsExhausted = fmt.Errorf("streams exhausted")
// ErrDuplicateStream is used if a duplicate stream is
// opened inbound
ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
// ErrRecvWindowExceeded indicates the receive window was exceeded
ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
// ErrTimeout is used when we reach an IO deadline
ErrTimeout = fmt.Errorf("i/o deadline reached")
// ErrStreamClosed is returned when using a closed stream
ErrStreamClosed = fmt.Errorf("stream closed")
// ErrUnexpectedFlag is set when we get an unexpected flag
ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
// ErrRemoteGoAway is used when we get a go away from the other side
ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
// ErrConnectionReset is sent if a stream is reset. This can happen
// if the backlog is exceeded, or if there was a remote GoAway.
ErrConnectionReset = fmt.Errorf("connection reset")
// ErrConnectionWriteTimeout indicates that we hit the "safety valve"
// timeout writing to the underlying stream connection.
ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
// ErrKeepAliveTimeout is sent if a missed keepalive caused the session to close
ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
)
const (
// protoVersion is the only version we support
protoVersion uint8 = 0
)
const (
// Data is used for data frames. They are followed
// by length bytes worth of payload.
typeData uint8 = iota
// WindowUpdate is used to change the window of
// a given stream. The length indicates the delta
// update to the window.
typeWindowUpdate
// Ping is sent as a keep-alive or to measure
// the RTT. The StreamID and Length value are echoed
// back in the response.
typePing
// GoAway is sent to terminate a session. The StreamID
// should be 0 and the length is an error code.
typeGoAway
)
const (
// SYN is sent to signal a new stream. May
// be sent with a data payload
flagSYN uint16 = 1 << iota
// ACK is sent to acknowledge a new stream. May
// be sent with a data payload
flagACK
// FIN is sent to half-close the given stream.
// May be sent with a data payload.
flagFIN
// RST is used to hard close a given stream.
flagRST
)
const (
// initialStreamWindow is the initial stream window size
initialStreamWindow uint32 = 256 * 1024
)
const (
// goAwayNormal is sent on a normal termination
goAwayNormal uint32 = iota
// goAwayProtoErr sent on a protocol error
goAwayProtoErr
// goAwayInternalErr sent on an internal error
goAwayInternalErr
)
const (
sizeOfVersion = 1
sizeOfType = 1
sizeOfFlags = 2
sizeOfStreamID = 4
sizeOfLength = 4
headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
sizeOfStreamID + sizeOfLength
)
type header []byte
func (h header) Version() uint8 {
return h[0]
}
func (h header) MsgType() uint8 {
return h[1]
}
func (h header) Flags() uint16 {
return binary.BigEndian.Uint16(h[2:4])
}
func (h header) StreamID() uint32 {
return binary.BigEndian.Uint32(h[4:8])
}
func (h header) Length() uint32 {
return binary.BigEndian.Uint32(h[8:12])
}
func (h header) String() string {
return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
}
func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
h[0] = protoVersion
h[1] = msgType
binary.BigEndian.PutUint16(h[2:4], flags)
binary.BigEndian.PutUint32(h[4:8], streamID)
binary.BigEndian.PutUint32(h[8:12], length)
}
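For reference, a standalone sketch (not part of the vendored file) of the 12-byte header layout defined above — version(1), type(1), flags(2), stream ID(4), length(4), all big-endian — encoding and decoding a ping frame by hand:

```Go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Mirror of the yamux header: version | type | flags | streamID | length.
	hdr := make([]byte, 12)
	hdr[0] = 0                                   // protoVersion
	hdr[1] = 2                                   // typePing
	binary.BigEndian.PutUint16(hdr[2:4], 0x0001) // flagSYN
	binary.BigEndian.PutUint32(hdr[4:8], 0)      // pings use stream ID 0
	binary.BigEndian.PutUint32(hdr[8:12], 42)    // the ping ID travels in Length

	fmt.Printf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d\n",
		hdr[0], hdr[1],
		binary.BigEndian.Uint16(hdr[2:4]),
		binary.BigEndian.Uint32(hdr[4:8]),
		binary.BigEndian.Uint32(hdr[8:12]))
	// Output: Vsn:0 Type:2 Flags:1 StreamID:0 Length:42
}
```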

87
vendor/github.com/hashicorp/yamux/mux.go generated vendored Normal file
View File

@@ -0,0 +1,87 @@
package yamux
import (
"fmt"
"io"
"os"
"time"
)
// Config is used to tune the Yamux session
type Config struct {
// AcceptBacklog is used to limit how many streams may be
// waiting for an accept.
AcceptBacklog int
// EnableKeepAlive is used to send periodic keep-alive
// messages using a ping.
EnableKeepAlive bool
// KeepAliveInterval is how often to perform the keep alive
KeepAliveInterval time.Duration
// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
// which we will suspect a problem with the underlying connection and
// close it. This is only applied to writes, where there's generally
// an expectation that things will move along quickly.
ConnectionWriteTimeout time.Duration
// MaxStreamWindowSize is used to control the maximum
// window size that we allow for a stream.
MaxStreamWindowSize uint32
// LogOutput is used to control the log destination
LogOutput io.Writer
}
// DefaultConfig is used to return a default configuration
func DefaultConfig() *Config {
return &Config{
AcceptBacklog: 256,
EnableKeepAlive: true,
KeepAliveInterval: 30 * time.Second,
ConnectionWriteTimeout: 10 * time.Second,
MaxStreamWindowSize: initialStreamWindow,
LogOutput: os.Stderr,
}
}
// VerifyConfig is used to verify the sanity of configuration
func VerifyConfig(config *Config) error {
if config.AcceptBacklog <= 0 {
return fmt.Errorf("backlog must be positive")
}
if config.KeepAliveInterval == 0 {
return fmt.Errorf("keep-alive interval must be positive")
}
if config.MaxStreamWindowSize < initialStreamWindow {
return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
}
return nil
}
// Server is used to initialize a new server-side connection.
// There must be at most one server-side connection. If a nil config is
// provided, DefaultConfig will be used.
func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
if config == nil {
config = DefaultConfig()
}
if err := VerifyConfig(config); err != nil {
return nil, err
}
return newSession(config, conn, false), nil
}
// Client is used to initialize a new client-side connection.
// There must be at most one client-side connection.
func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
if config == nil {
config = DefaultConfig()
}
if err := VerifyConfig(config); err != nil {
return nil, err
}
return newSession(config, conn, true), nil
}
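A minimal usage sketch, illustrative rather than part of the vendored code: one side wraps its end of a connection with Server, the other with Client, and streams are then multiplexed over that single connection. Here net.Pipe stands in for a real transport such as a TCP or KCP connection:

```Go
package main

import (
	"fmt"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	// net.Pipe stands in for any io.ReadWriteCloser, e.g. a TCP or KCP conn.
	clientConn, serverConn := net.Pipe()

	done := make(chan struct{})
	go func() {
		defer close(done)
		// A nil config means DefaultConfig() is applied.
		session, err := yamux.Server(serverConn, nil)
		if err != nil {
			panic(err)
		}
		stream, err := session.Accept() // blocks until the client opens a stream
		if err != nil {
			panic(err)
		}
		buf := make([]byte, 4)
		if _, err := stream.Read(buf); err != nil {
			panic(err)
		}
		fmt.Printf("server received %q\n", buf)
	}()

	session, err := yamux.Client(clientConn, nil)
	if err != nil {
		panic(err)
	}
	stream, err := session.Open() // a net.Conn multiplexed over clientConn
	if err != nil {
		panic(err)
	}
	stream.Write([]byte("ping"))
	<-done
	stream.Close()
	session.Close()
}
```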

623
vendor/github.com/hashicorp/yamux/session.go generated vendored Normal file
View File

@@ -0,0 +1,623 @@
package yamux
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"net"
"strings"
"sync"
"sync/atomic"
"time"
)
// Session is used to wrap a reliable ordered connection and to
// multiplex it into multiple streams.
type Session struct {
// remoteGoAway indicates the remote side does
// not want further connections. Must be first for alignment.
remoteGoAway int32
// localGoAway indicates that we should stop
// accepting further connections. Must be first for alignment.
localGoAway int32
// nextStreamID is the next stream ID we should
// issue. This depends on whether we are a client or a server.
nextStreamID uint32
// config holds our configuration
config *Config
// logger is used for our logs
logger *log.Logger
// conn is the underlying connection
conn io.ReadWriteCloser
// bufRead is a buffered reader
bufRead *bufio.Reader
// pings is used to track inflight pings
pings map[uint32]chan struct{}
pingID uint32
pingLock sync.Mutex
// streams maps a stream id to a stream, and inflight has an entry
// for any outgoing stream that has not yet been established. Both are
// protected by streamLock.
streams map[uint32]*Stream
inflight map[uint32]struct{}
streamLock sync.Mutex
// synCh acts like a semaphore. It is sized to the AcceptBacklog which
// is assumed to be symmetric between the client and server. This allows
// the client to avoid exceeding the backlog and instead blocks the open.
synCh chan struct{}
// acceptCh is used to pass ready streams to the client
acceptCh chan *Stream
// sendCh is used to mark a stream as ready to send,
// or to send a header out directly.
sendCh chan sendReady
// recvDoneCh is closed when recv() exits to avoid a race
// between stream registration and stream shutdown
recvDoneCh chan struct{}
// shutdown is used to safely close a session
shutdown bool
shutdownErr error
shutdownCh chan struct{}
shutdownLock sync.Mutex
}
// sendReady is used to either mark a stream as ready
// or to directly send a header
type sendReady struct {
Hdr []byte
Body io.Reader
Err chan error
}
// newSession is used to construct a new session
func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
s := &Session{
config: config,
logger: log.New(config.LogOutput, "", log.LstdFlags),
conn: conn,
bufRead: bufio.NewReader(conn),
pings: make(map[uint32]chan struct{}),
streams: make(map[uint32]*Stream),
inflight: make(map[uint32]struct{}),
synCh: make(chan struct{}, config.AcceptBacklog),
acceptCh: make(chan *Stream, config.AcceptBacklog),
sendCh: make(chan sendReady, 64),
recvDoneCh: make(chan struct{}),
shutdownCh: make(chan struct{}),
}
if client {
s.nextStreamID = 1
} else {
s.nextStreamID = 2
}
go s.recv()
go s.send()
if config.EnableKeepAlive {
go s.keepalive()
}
return s
}
// IsClosed does a safe check to see if we have shutdown
func (s *Session) IsClosed() bool {
select {
case <-s.shutdownCh:
return true
default:
return false
}
}
// NumStreams returns the number of currently open streams
func (s *Session) NumStreams() int {
s.streamLock.Lock()
num := len(s.streams)
s.streamLock.Unlock()
return num
}
// Open is used to create a new stream as a net.Conn
func (s *Session) Open() (net.Conn, error) {
conn, err := s.OpenStream()
if err != nil {
return nil, err
}
return conn, nil
}
// OpenStream is used to create a new stream
func (s *Session) OpenStream() (*Stream, error) {
if s.IsClosed() {
return nil, ErrSessionShutdown
}
if atomic.LoadInt32(&s.remoteGoAway) == 1 {
return nil, ErrRemoteGoAway
}
// Block if we have too many inflight SYNs
select {
case s.synCh <- struct{}{}:
case <-s.shutdownCh:
return nil, ErrSessionShutdown
}
GET_ID:
// Get an ID, and check for stream exhaustion
id := atomic.LoadUint32(&s.nextStreamID)
if id >= math.MaxUint32-1 {
return nil, ErrStreamsExhausted
}
if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
goto GET_ID
}
// Register the stream
stream := newStream(s, id, streamInit)
s.streamLock.Lock()
s.streams[id] = stream
s.inflight[id] = struct{}{}
s.streamLock.Unlock()
// Send the window update to create
if err := stream.sendWindowUpdate(); err != nil {
select {
case <-s.synCh:
default:
s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
}
return nil, err
}
return stream, nil
}
// Accept is used to block until the next available stream
// is ready to be accepted.
func (s *Session) Accept() (net.Conn, error) {
conn, err := s.AcceptStream()
if err != nil {
return nil, err
}
return conn, err
}
// AcceptStream is used to block until the next available stream
// is ready to be accepted.
func (s *Session) AcceptStream() (*Stream, error) {
select {
case stream := <-s.acceptCh:
if err := stream.sendWindowUpdate(); err != nil {
return nil, err
}
return stream, nil
case <-s.shutdownCh:
return nil, s.shutdownErr
}
}
// Close is used to close the session and all streams.
// Attempts to send a GoAway before closing the connection.
func (s *Session) Close() error {
s.shutdownLock.Lock()
defer s.shutdownLock.Unlock()
if s.shutdown {
return nil
}
s.shutdown = true
if s.shutdownErr == nil {
s.shutdownErr = ErrSessionShutdown
}
close(s.shutdownCh)
s.conn.Close()
<-s.recvDoneCh
s.streamLock.Lock()
defer s.streamLock.Unlock()
for _, stream := range s.streams {
stream.forceClose()
}
return nil
}
// exitErr is used to handle an error that is causing the
// session to terminate.
func (s *Session) exitErr(err error) {
s.shutdownLock.Lock()
if s.shutdownErr == nil {
s.shutdownErr = err
}
s.shutdownLock.Unlock()
s.Close()
}
// GoAway can be used to prevent accepting further
// connections. It does not close the underlying conn.
func (s *Session) GoAway() error {
return s.waitForSend(s.goAway(goAwayNormal), nil)
}
// goAway is used to send a goAway message
func (s *Session) goAway(reason uint32) header {
atomic.SwapInt32(&s.localGoAway, 1)
hdr := header(make([]byte, headerSize))
hdr.encode(typeGoAway, 0, 0, reason)
return hdr
}
// Ping is used to measure the RTT response time
func (s *Session) Ping() (time.Duration, error) {
// Get a channel for the ping
ch := make(chan struct{})
// Get a new ping id, mark as pending
s.pingLock.Lock()
id := s.pingID
s.pingID++
s.pings[id] = ch
s.pingLock.Unlock()
// Send the ping request
hdr := header(make([]byte, headerSize))
hdr.encode(typePing, flagSYN, 0, id)
if err := s.waitForSend(hdr, nil); err != nil {
return 0, err
}
// Wait for a response
start := time.Now()
select {
case <-ch:
case <-time.After(s.config.ConnectionWriteTimeout):
s.pingLock.Lock()
delete(s.pings, id) // Ignore it if a response comes later.
s.pingLock.Unlock()
return 0, ErrTimeout
case <-s.shutdownCh:
return 0, ErrSessionShutdown
}
// Compute the RTT
return time.Now().Sub(start), nil
}
// keepalive is a long running goroutine that periodically does
// a ping to keep the connection alive.
func (s *Session) keepalive() {
for {
select {
case <-time.After(s.config.KeepAliveInterval):
_, err := s.Ping()
if err != nil {
s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
s.exitErr(ErrKeepAliveTimeout)
return
}
case <-s.shutdownCh:
return
}
}
}
// waitForSend waits to send a header, checking for a potential shutdown
func (s *Session) waitForSend(hdr header, body io.Reader) error {
errCh := make(chan error, 1)
return s.waitForSendErr(hdr, body, errCh)
}
// waitForSendErr waits to send a header with optional data, checking for a
// potential shutdown. Since there's the expectation that sends can happen
// in a timely manner, we enforce the connection write timeout here.
func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
timer := time.NewTimer(s.config.ConnectionWriteTimeout)
defer timer.Stop()
ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
select {
case s.sendCh <- ready:
case <-s.shutdownCh:
return ErrSessionShutdown
case <-timer.C:
return ErrConnectionWriteTimeout
}
select {
case err := <-errCh:
return err
case <-s.shutdownCh:
return ErrSessionShutdown
case <-timer.C:
return ErrConnectionWriteTimeout
}
}
// sendNoWait does a send without waiting. Since there's the expectation that
// the send happens right here, we enforce the connection write timeout if we
// can't queue the header to be sent.
func (s *Session) sendNoWait(hdr header) error {
timer := time.NewTimer(s.config.ConnectionWriteTimeout)
defer timer.Stop()
select {
case s.sendCh <- sendReady{Hdr: hdr}:
return nil
case <-s.shutdownCh:
return ErrSessionShutdown
case <-timer.C:
return ErrConnectionWriteTimeout
}
}
// send is a long running goroutine that sends data
func (s *Session) send() {
for {
select {
case ready := <-s.sendCh:
// Send a header if ready
if ready.Hdr != nil {
sent := 0
for sent < len(ready.Hdr) {
n, err := s.conn.Write(ready.Hdr[sent:])
if err != nil {
s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
asyncSendErr(ready.Err, err)
s.exitErr(err)
return
}
sent += n
}
}
// Send data from a body if given
if ready.Body != nil {
_, err := io.Copy(s.conn, ready.Body)
if err != nil {
s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
asyncSendErr(ready.Err, err)
s.exitErr(err)
return
}
}
// No error, successful send
asyncSendErr(ready.Err, nil)
case <-s.shutdownCh:
return
}
}
}
// recv is a long running goroutine that accepts new data
func (s *Session) recv() {
if err := s.recvLoop(); err != nil {
s.exitErr(err)
}
}
// recvLoop continues to receive data until a fatal error is encountered
func (s *Session) recvLoop() error {
defer close(s.recvDoneCh)
hdr := header(make([]byte, headerSize))
var handler func(header) error
for {
// Read the header
if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
}
return err
}
// Verify the version
if hdr.Version() != protoVersion {
s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
return ErrInvalidVersion
}
// Switch on the type
switch hdr.MsgType() {
case typeData:
handler = s.handleStreamMessage
case typeWindowUpdate:
handler = s.handleStreamMessage
case typeGoAway:
handler = s.handleGoAway
case typePing:
handler = s.handlePing
default:
return ErrInvalidMsgType
}
// Invoke the handler
if err := handler(hdr); err != nil {
return err
}
}
}
// handleStreamMessage handles either a data or window update frame
func (s *Session) handleStreamMessage(hdr header) error {
// Check for a new stream creation
id := hdr.StreamID()
flags := hdr.Flags()
if flags&flagSYN == flagSYN {
if err := s.incomingStream(id); err != nil {
return err
}
}
// Get the stream
s.streamLock.Lock()
stream := s.streams[id]
s.streamLock.Unlock()
// If we do not have a stream, likely we sent a RST
if stream == nil {
// Drain any data on the wire
if hdr.MsgType() == typeData && hdr.Length() > 0 {
s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
return nil
}
} else {
s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
}
return nil
}
// Check if this is a window update
if hdr.MsgType() == typeWindowUpdate {
if err := stream.incrSendWindow(hdr, flags); err != nil {
if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
}
return err
}
return nil
}
// Read the new data
if err := stream.readData(hdr, flags, s.bufRead); err != nil {
if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
}
return err
}
return nil
}
// handlePing is invoked for a typePing frame
func (s *Session) handlePing(hdr header) error {
flags := hdr.Flags()
pingID := hdr.Length()
// Check if this is a query, respond back in a separate context so we
// don't interfere with the receiving thread blocking for the write.
if flags&flagSYN == flagSYN {
go func() {
hdr := header(make([]byte, headerSize))
hdr.encode(typePing, flagACK, 0, pingID)
if err := s.sendNoWait(hdr); err != nil {
s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
}
}()
return nil
}
// Handle a response
s.pingLock.Lock()
ch := s.pings[pingID]
if ch != nil {
delete(s.pings, pingID)
close(ch)
}
s.pingLock.Unlock()
return nil
}
// handleGoAway is invoked for a typeGoAway frame
func (s *Session) handleGoAway(hdr header) error {
code := hdr.Length()
switch code {
case goAwayNormal:
atomic.SwapInt32(&s.remoteGoAway, 1)
case goAwayProtoErr:
s.logger.Printf("[ERR] yamux: received protocol error go away")
return fmt.Errorf("yamux protocol error")
case goAwayInternalErr:
s.logger.Printf("[ERR] yamux: received internal error go away")
return fmt.Errorf("remote yamux internal error")
default:
s.logger.Printf("[ERR] yamux: received unexpected go away")
return fmt.Errorf("unexpected go away received")
}
return nil
}
// incomingStream is used to create a new incoming stream
func (s *Session) incomingStream(id uint32) error {
// Reject immediately if we are doing a go away
if atomic.LoadInt32(&s.localGoAway) == 1 {
hdr := header(make([]byte, headerSize))
hdr.encode(typeWindowUpdate, flagRST, id, 0)
return s.sendNoWait(hdr)
}
// Allocate a new stream
stream := newStream(s, id, streamSYNReceived)
s.streamLock.Lock()
defer s.streamLock.Unlock()
// Check if stream already exists
if _, ok := s.streams[id]; ok {
s.logger.Printf("[ERR] yamux: duplicate stream declared")
if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
}
return ErrDuplicateStream
}
// Register the stream
s.streams[id] = stream
// Check if we've exceeded the backlog
select {
case s.acceptCh <- stream:
return nil
default:
// Backlog exceeded! RST the stream
s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
delete(s.streams, id)
stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
return s.sendNoWait(stream.sendHdr)
}
}
// closeStream is used to close a stream once both sides have
// issued a close. If there was an in-flight SYN and the stream
// was not yet established, then this will give the credit back.
func (s *Session) closeStream(id uint32) {
s.streamLock.Lock()
if _, ok := s.inflight[id]; ok {
select {
case <-s.synCh:
default:
s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
}
}
delete(s.streams, id)
s.streamLock.Unlock()
}
// establishStream is used to mark a stream that was in the
// SYN Sent state as established.
func (s *Session) establishStream(id uint32) {
s.streamLock.Lock()
if _, ok := s.inflight[id]; ok {
delete(s.inflight, id)
} else {
s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
}
select {
case <-s.synCh:
default:
s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
}
s.streamLock.Unlock()
}
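A short sketch of the keepalive/RTT path implemented above: Ping sends a typePing frame with flagSYN and blocks until the matching ACK arrives (or ConnectionWriteTimeout fires), so it can be called on any open session. Assuming a session variable like the one in the sketch after mux.go, and the standard log package:

```Go
rtt, err := session.Ping() // typePing/flagSYN out, waits for the echoed ACK
if err != nil {
	log.Printf("yamux ping failed: %v", err)
} else {
	log.Printf("yamux round-trip time: %v", rtt)
}
```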

457
vendor/github.com/hashicorp/yamux/stream.go generated vendored Normal file
View File

@@ -0,0 +1,457 @@
package yamux
import (
"bytes"
"io"
"sync"
"sync/atomic"
"time"
)
type streamState int
const (
streamInit streamState = iota
streamSYNSent
streamSYNReceived
streamEstablished
streamLocalClose
streamRemoteClose
streamClosed
streamReset
)
// Stream is used to represent a logical stream
// within a session.
type Stream struct {
recvWindow uint32
sendWindow uint32
id uint32
session *Session
state streamState
stateLock sync.Mutex
recvBuf *bytes.Buffer
recvLock sync.Mutex
controlHdr header
controlErr chan error
controlHdrLock sync.Mutex
sendHdr header
sendErr chan error
sendLock sync.Mutex
recvNotifyCh chan struct{}
sendNotifyCh chan struct{}
readDeadline time.Time
writeDeadline time.Time
}
// newStream is used to construct a new stream within
// a given session for an ID
func newStream(session *Session, id uint32, state streamState) *Stream {
s := &Stream{
id: id,
session: session,
state: state,
controlHdr: header(make([]byte, headerSize)),
controlErr: make(chan error, 1),
sendHdr: header(make([]byte, headerSize)),
sendErr: make(chan error, 1),
recvWindow: initialStreamWindow,
sendWindow: initialStreamWindow,
recvNotifyCh: make(chan struct{}, 1),
sendNotifyCh: make(chan struct{}, 1),
}
return s
}
// Session returns the associated stream session
func (s *Stream) Session() *Session {
return s.session
}
// StreamID returns the ID of this stream
func (s *Stream) StreamID() uint32 {
return s.id
}
// Read is used to read from the stream
func (s *Stream) Read(b []byte) (n int, err error) {
defer asyncNotify(s.recvNotifyCh)
START:
s.stateLock.Lock()
switch s.state {
case streamLocalClose:
fallthrough
case streamRemoteClose:
fallthrough
case streamClosed:
s.recvLock.Lock()
if s.recvBuf == nil || s.recvBuf.Len() == 0 {
s.recvLock.Unlock()
s.stateLock.Unlock()
return 0, io.EOF
}
s.recvLock.Unlock()
case streamReset:
s.stateLock.Unlock()
return 0, ErrConnectionReset
}
s.stateLock.Unlock()
// If there is no data available, block
s.recvLock.Lock()
if s.recvBuf == nil || s.recvBuf.Len() == 0 {
s.recvLock.Unlock()
goto WAIT
}
// Read any bytes
n, _ = s.recvBuf.Read(b)
s.recvLock.Unlock()
// Send a window update potentially
err = s.sendWindowUpdate()
return n, err
WAIT:
var timeout <-chan time.Time
var timer *time.Timer
if !s.readDeadline.IsZero() {
delay := s.readDeadline.Sub(time.Now())
timer = time.NewTimer(delay)
timeout = timer.C
}
select {
case <-s.recvNotifyCh:
if timer != nil {
timer.Stop()
}
goto START
case <-timeout:
return 0, ErrTimeout
}
}
// Write is used to write to the stream
func (s *Stream) Write(b []byte) (n int, err error) {
s.sendLock.Lock()
defer s.sendLock.Unlock()
total := 0
for total < len(b) {
n, err := s.write(b[total:])
total += n
if err != nil {
return total, err
}
}
return total, nil
}
// write is used to write to the stream, may return on
// a short write.
func (s *Stream) write(b []byte) (n int, err error) {
var flags uint16
var max uint32
var body io.Reader
START:
s.stateLock.Lock()
switch s.state {
case streamLocalClose:
fallthrough
case streamClosed:
s.stateLock.Unlock()
return 0, ErrStreamClosed
case streamReset:
s.stateLock.Unlock()
return 0, ErrConnectionReset
}
s.stateLock.Unlock()
// If there is no data available, block
window := atomic.LoadUint32(&s.sendWindow)
if window == 0 {
goto WAIT
}
// Determine the flags if any
flags = s.sendFlags()
// Send up to our send window
max = min(window, uint32(len(b)))
body = bytes.NewReader(b[:max])
// Send the header
s.sendHdr.encode(typeData, flags, s.id, max)
if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
return 0, err
}
// Reduce our send window
atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
// Unlock
return int(max), err
WAIT:
var timeout <-chan time.Time
if !s.writeDeadline.IsZero() {
delay := s.writeDeadline.Sub(time.Now())
timeout = time.After(delay)
}
select {
case <-s.sendNotifyCh:
goto START
case <-timeout:
return 0, ErrTimeout
}
return 0, nil
}
// sendFlags determines any flags that are appropriate
// based on the current stream state
func (s *Stream) sendFlags() uint16 {
s.stateLock.Lock()
defer s.stateLock.Unlock()
var flags uint16
switch s.state {
case streamInit:
flags |= flagSYN
s.state = streamSYNSent
case streamSYNReceived:
flags |= flagACK
s.state = streamEstablished
}
return flags
}
// sendWindowUpdate potentially sends a window update enabling
// further writes to take place. Must be invoked with the lock.
func (s *Stream) sendWindowUpdate() error {
s.controlHdrLock.Lock()
defer s.controlHdrLock.Unlock()
// Determine the delta update
max := s.session.config.MaxStreamWindowSize
delta := max - atomic.LoadUint32(&s.recvWindow)
// Determine the flags if any
flags := s.sendFlags()
// Check if we can omit the update
if delta < (max/2) && flags == 0 {
return nil
}
// Update our window
atomic.AddUint32(&s.recvWindow, delta)
// Send the header
s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
return err
}
return nil
}
// sendClose is used to send a FIN
func (s *Stream) sendClose() error {
s.controlHdrLock.Lock()
defer s.controlHdrLock.Unlock()
flags := s.sendFlags()
flags |= flagFIN
s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
return err
}
return nil
}
// Close is used to close the stream
func (s *Stream) Close() error {
closeStream := false
s.stateLock.Lock()
switch s.state {
// Opened means we need to signal a close
case streamSYNSent:
fallthrough
case streamSYNReceived:
fallthrough
case streamEstablished:
s.state = streamLocalClose
goto SEND_CLOSE
case streamLocalClose:
case streamRemoteClose:
s.state = streamClosed
closeStream = true
goto SEND_CLOSE
case streamClosed:
case streamReset:
default:
panic("unhandled state")
}
s.stateLock.Unlock()
return nil
SEND_CLOSE:
s.stateLock.Unlock()
s.sendClose()
s.notifyWaiting()
if closeStream {
s.session.closeStream(s.id)
}
return nil
}
// forceClose is used for when the session is exiting
func (s *Stream) forceClose() {
s.stateLock.Lock()
s.state = streamClosed
s.stateLock.Unlock()
s.notifyWaiting()
}
// processFlags is used to update the state of the stream
// based on set flags, if any. Lock must be held
func (s *Stream) processFlags(flags uint16) error {
// Close the stream without holding the state lock
closeStream := false
defer func() {
if closeStream {
s.session.closeStream(s.id)
}
}()
s.stateLock.Lock()
defer s.stateLock.Unlock()
if flags&flagACK == flagACK {
if s.state == streamSYNSent {
s.state = streamEstablished
}
s.session.establishStream(s.id)
}
if flags&flagFIN == flagFIN {
switch s.state {
case streamSYNSent:
fallthrough
case streamSYNReceived:
fallthrough
case streamEstablished:
s.state = streamRemoteClose
s.notifyWaiting()
case streamLocalClose:
s.state = streamClosed
closeStream = true
s.notifyWaiting()
default:
s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
return ErrUnexpectedFlag
}
}
if flags&flagRST == flagRST {
s.state = streamReset
closeStream = true
s.notifyWaiting()
}
return nil
}
// notifyWaiting notifies all the waiting channels
func (s *Stream) notifyWaiting() {
asyncNotify(s.recvNotifyCh)
asyncNotify(s.sendNotifyCh)
}
// incrSendWindow updates the size of our send window
func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
if err := s.processFlags(flags); err != nil {
return err
}
// Increase window, unblock a sender
atomic.AddUint32(&s.sendWindow, hdr.Length())
asyncNotify(s.sendNotifyCh)
return nil
}
// readData is used to handle a data frame
func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
if err := s.processFlags(flags); err != nil {
return err
}
// Check that our recv window is not exceeded
length := hdr.Length()
if length == 0 {
return nil
}
if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
return ErrRecvWindowExceeded
}
// Wrap in a limited reader
conn = &io.LimitedReader{R: conn, N: int64(length)}
// Copy into buffer
s.recvLock.Lock()
if s.recvBuf == nil {
// Allocate the receive buffer just-in-time to fit the full data frame.
// This way we can read in the whole packet without further allocations.
s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
}
if _, err := io.Copy(s.recvBuf, conn); err != nil {
s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
s.recvLock.Unlock()
return err
}
// Decrement the receive window
atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
s.recvLock.Unlock()
// Unblock any readers
asyncNotify(s.recvNotifyCh)
return nil
}
// SetDeadline sets the read and write deadlines
func (s *Stream) SetDeadline(t time.Time) error {
if err := s.SetReadDeadline(t); err != nil {
return err
}
if err := s.SetWriteDeadline(t); err != nil {
return err
}
return nil
}
// SetReadDeadline sets the deadline for future Read calls.
func (s *Stream) SetReadDeadline(t time.Time) error {
s.readDeadline = t
return nil
}
// SetWriteDeadline sets the deadline for future Write calls
func (s *Stream) SetWriteDeadline(t time.Time) error {
s.writeDeadline = t
return nil
}
// Shrink is used to compact the amount of buffers utilized
// This is useful when using Yamux in a connection pool to reduce
// the idle memory utilization.
func (s *Stream) Shrink() {
s.recvLock.Lock()
if s.recvBuf != nil && s.recvBuf.Len() == 0 {
s.recvBuf = nil
}
s.recvLock.Unlock()
}
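One detail worth calling out in the window accounting above: both write and readData shrink an unsigned window with atomic.AddUint32(&w, ^uint32(n-1)). Since ^uint32(n-1) equals -n modulo 2^32, the addition subtracts n from the window. A standalone sketch of that identity (illustrative, not part of the vendored file):

```Go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	window := uint32(256 * 1024) // initialStreamWindow

	// Subtract n from the window: adding ^uint32(n-1) is adding the
	// two's-complement encoding of -n, i.e. subtracting n.
	n := uint32(4096)
	atomic.AddUint32(&window, ^uint32(n-1))

	fmt.Println(window == 256*1024-4096) // true
}
```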

28
vendor/github.com/hashicorp/yamux/util.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
package yamux
// asyncSendErr is used to try an async send of an error
func asyncSendErr(ch chan error, err error) {
if ch == nil {
return
}
select {
case ch <- err:
default:
}
}
// asyncNotify is used to signal a waiting goroutine
func asyncNotify(ch chan struct{}) {
select {
case ch <- struct{}{}:
default:
}
}
// min computes the minimum of two values
func min(a, b uint32) uint32 {
if a < b {
return a
}
return b
}

22
vendor/github.com/klauspost/cpuid/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Klaus Post
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

145
vendor/github.com/klauspost/cpuid/README.md generated vendored Normal file
View File

@@ -0,0 +1,145 @@
# cpuid
Package cpuid provides information about the CPU running the current program.
CPU features are detected on startup, and kept for fast access through the life of the application.
Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.
You can access the CPU information through the shared CPU variable of the cpuid library.
Package home: https://github.com/klauspost/cpuid
[![GoDoc][1]][2] [![Build Status][3]][4]
[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
[2]: https://godoc.org/github.com/klauspost/cpuid
[3]: https://travis-ci.org/klauspost/cpuid.svg
[4]: https://travis-ci.org/klauspost/cpuid
# features
## CPU Instructions
* **CMOV** (i686 CMOV)
* **NX** (NX (No-Execute) bit)
* **AMD3DNOW** (AMD 3DNOW)
* **AMD3DNOWEXT** (AMD 3DNowExt)
* **MMX** (standard MMX)
* **MMXEXT** (SSE integer functions or AMD MMX ext)
* **SSE** (SSE functions)
* **SSE2** (P4 SSE functions)
* **SSE3** (Prescott SSE3 functions)
* **SSSE3** (Conroe SSSE3 functions)
* **SSE4** (Penryn SSE4.1 functions)
* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
* **SSE42** (Nehalem SSE4.2 functions)
* **AVX** (AVX functions)
* **AVX2** (AVX2 functions)
* **FMA3** (Intel FMA 3)
* **FMA4** (Bulldozer FMA4 functions)
* **XOP** (Bulldozer XOP functions)
* **F16C** (Half-precision floating-point conversion)
* **BMI1** (Bit Manipulation Instruction Set 1)
* **BMI2** (Bit Manipulation Instruction Set 2)
* **TBM** (AMD Trailing Bit Manipulation)
* **LZCNT** (LZCNT instruction)
* **POPCNT** (POPCNT instruction)
* **AESNI** (Advanced Encryption Standard New Instructions)
* **CLMUL** (Carry-less Multiplication)
* **HTT** (Hyperthreading (enabled))
* **HLE** (Hardware Lock Elision)
* **RTM** (Restricted Transactional Memory)
* **RDRAND** (RDRAND instruction is available)
* **RDSEED** (RDSEED instruction is available)
* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
* **SHA** (Intel SHA Extensions)
* **AVX512F** (AVX-512 Foundation)
* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
* **AVX512PF** (AVX-512 Prefetch Instructions)
* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
* **AVX512CD** (AVX-512 Conflict Detection Instructions)
* **AVX512BW** (AVX-512 Byte and Word Instructions)
* **AVX512VL** (AVX-512 Vector Length Extensions)
* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
* **MPX** (Intel MPX (Memory Protection Extensions))
* **ERMS** (Enhanced REP MOVSB/STOSB)
* **RDTSCP** (RDTSCP Instruction)
* **CX16** (CMPXCHG16B Instruction)
* **SGX** (Software Guard Extensions, with activation details)
## Performance
* **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
* **SSE2SLOW** (SSE2 is supported, but usually not faster)
* **SSE3SLOW** (SSE3 is supported, but usually not faster)
* **ATOM** (Atom processor, some SSSE3 instructions are slower)
* **Cache line** (Probable size of a cache line).
* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
## CPU Vendor/VM
* **Intel**
* **AMD**
* **VIA**
* **Transmeta**
* **NSC**
* **KVM** (Kernel-based Virtual Machine)
* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
* **VMware**
* **XenHVM**
# installing
```go get github.com/klauspost/cpuid```
# example
```Go
package main
import (
"fmt"
"github.com/klauspost/cpuid"
)
func main() {
// Print basic CPU information:
fmt.Println("Name:", cpuid.CPU.BrandName)
fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
fmt.Println("Features:", cpuid.CPU.Features)
fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes")
fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
// Test if we have a specific feature:
if cpuid.CPU.SSE() {
fmt.Println("We have Streaming SIMD Extensions")
}
}
```
Sample output:
```
>go run main.go
Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
PhysicalCores: 2
ThreadsPerCore: 2
LogicalCores: 4
Family 6 Model: 42
Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
Cacheline bytes: 64
We have Streaming SIMD Extensions
```
# private package
In the "private" folder you can find an autogenerated version of the library you can include in your own packages.
For this purpose all exports are removed, and functions and constants are lowercased.
This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages.
# license
This code is published under an MIT license. See LICENSE file for more information.

1022
vendor/github.com/klauspost/cpuid/cpuid.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

42
vendor/github.com/klauspost/cpuid/cpuid_386.s generated vendored Normal file
View File

@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORL CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+4(FP)
MOVL BX, ebx+8(FP)
MOVL CX, ecx+12(FP)
MOVL DX, edx+16(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+4(FP)
MOVL DX, edx+8(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET

42
vendor/github.com/klauspost/cpuid/cpuid_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build amd64,!gccgo
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORQ CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+8(FP)
MOVL DX, edx+12(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET
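On the Go side these routines are declared without bodies, so the assembly above supplies the implementation; the real declarations live in cpuid.go, whose diff is suppressed above. A sketch of that pairing, with signatures copied from the comments in the .s files (it compiles only together with the assembly and the matching build tags):

```Go
package cpuid

// Body-less declarations: the linker resolves them to the TEXT symbols
// defined in cpuid_386.s / cpuid_amd64.s.
func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
```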

727
vendor/github.com/klauspost/cpuid/cpuid_test.go generated vendored Normal file
View File

@@ -0,0 +1,727 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
package cpuid
import (
"fmt"
"testing"
)
// There is no real way to test a CPU identifier, since results will
// obviously differ on each machine.
func TestCPUID(t *testing.T) {
n := maxFunctionID()
t.Logf("Max Function:0x%x\n", n)
n = maxExtendedFunction()
t.Logf("Max Extended Function:0x%x\n", n)
t.Log("Name:", CPU.BrandName)
t.Log("PhysicalCores:", CPU.PhysicalCores)
t.Log("ThreadsPerCore:", CPU.ThreadsPerCore)
t.Log("LogicalCores:", CPU.LogicalCores)
t.Log("Family", CPU.Family, "Model:", CPU.Model)
t.Log("Features:", CPU.Features)
t.Log("Cacheline bytes:", CPU.CacheLine)
t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes")
t.Log("L2 Cache:", CPU.Cache.L2, "bytes")
t.Log("L3 Cache:", CPU.Cache.L3, "bytes")
if CPU.SSE2() {
t.Log("We have SSE2")
}
}
func TestDumpCPUID(t *testing.T) {
n := int(maxFunctionID())
for i := 0; i <= n; i++ {
a, b, c, d := cpuidex(uint32(i), 0)
t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d)
ex := uint32(1)
for {
a2, b2, c2, d2 := cpuidex(uint32(i), ex)
if a2 == a && b2 == b && d2 == d || ex > 50 || a2 == 0 {
break
}
t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a2, b2, c2, d2)
a, b, c, d = a2, b2, c2, d2
ex++
}
}
n2 := maxExtendedFunction()
for i := uint32(0x80000000); i <= n2; i++ {
a, b, c, d := cpuid(i)
t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d)
}
}
func Example() {
// Print basic CPU information:
fmt.Println("Name:", CPU.BrandName)
fmt.Println("PhysicalCores:", CPU.PhysicalCores)
fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore)
fmt.Println("LogicalCores:", CPU.LogicalCores)
fmt.Println("Family", CPU.Family, "Model:", CPU.Model)
fmt.Println("Features:", CPU.Features)
fmt.Println("Cacheline bytes:", CPU.CacheLine)
// Test if we have a specific feature:
if CPU.SSE() {
fmt.Println("We have Streaming SIMD Extensions")
}
}
func TestBrandNameZero(t *testing.T) {
if len(CPU.BrandName) > 0 {
// Cut out last byte
last := []byte(CPU.BrandName[len(CPU.BrandName)-1:])
if last[0] == 0 {
t.Fatal("last byte was zero")
} else if last[0] == 32 {
t.Fatal("whitespace wasn't trimmed")
}
}
}
// Generated here: http://play.golang.org/p/mko-0tFt0Q
// TestCmov tests Cmov() function
func TestCmov(t *testing.T) {
got := CPU.Cmov()
expected := CPU.Features&CMOV == CMOV
if got != expected {
t.Fatalf("Cmov: expected %v, got %v", expected, got)
}
t.Log("CMOV Support:", got)
}
// TestAmd3dnow tests Amd3dnow() function
func TestAmd3dnow(t *testing.T) {
got := CPU.Amd3dnow()
expected := CPU.Features&AMD3DNOW == AMD3DNOW
if got != expected {
t.Fatalf("Amd3dnow: expected %v, got %v", expected, got)
}
t.Log("AMD3DNOW Support:", got)
}
// TestAmd3dnowExt tests Amd3dnowExt() function
func TestAmd3dnowExt(t *testing.T) {
got := CPU.Amd3dnowExt()
expected := CPU.Features&AMD3DNOWEXT == AMD3DNOWEXT
if got != expected {
t.Fatalf("Amd3dnowExt: expected %v, got %v", expected, got)
}
t.Log("AMD3DNOWEXT Support:", got)
}
// TestMMX tests MMX() function
func TestMMX(t *testing.T) {
got := CPU.MMX()
expected := CPU.Features&MMX == MMX
if got != expected {
t.Fatalf("MMX: expected %v, got %v", expected, got)
}
t.Log("MMX Support:", got)
}
// TestMMXext tests MMXext() function
func TestMMXext(t *testing.T) {
got := CPU.MMXExt()
expected := CPU.Features&MMXEXT == MMXEXT
if got != expected {
t.Fatalf("MMXExt: expected %v, got %v", expected, got)
}
t.Log("MMXEXT Support:", got)
}
// TestSSE tests SSE() function
func TestSSE(t *testing.T) {
got := CPU.SSE()
expected := CPU.Features&SSE == SSE
if got != expected {
t.Fatalf("SSE: expected %v, got %v", expected, got)
}
t.Log("SSE Support:", got)
}
// TestSSE2 tests SSE2() function
func TestSSE2(t *testing.T) {
got := CPU.SSE2()
expected := CPU.Features&SSE2 == SSE2
if got != expected {
t.Fatalf("SSE2: expected %v, got %v", expected, got)
}
t.Log("SSE2 Support:", got)
}
// TestSSE3 tests SSE3() function
func TestSSE3(t *testing.T) {
got := CPU.SSE3()
expected := CPU.Features&SSE3 == SSE3
if got != expected {
t.Fatalf("SSE3: expected %v, got %v", expected, got)
}
t.Log("SSE3 Support:", got)
}
// TestSSSE3 tests SSSE3() function
func TestSSSE3(t *testing.T) {
got := CPU.SSSE3()
expected := CPU.Features&SSSE3 == SSSE3
if got != expected {
t.Fatalf("SSSE3: expected %v, got %v", expected, got)
}
t.Log("SSSE3 Support:", got)
}
// TestSSE4 tests SSE4() function
func TestSSE4(t *testing.T) {
got := CPU.SSE4()
expected := CPU.Features&SSE4 == SSE4
if got != expected {
t.Fatalf("SSE4: expected %v, got %v", expected, got)
}
t.Log("SSE4 Support:", got)
}
// TestSSE42 tests SSE42() function
func TestSSE42(t *testing.T) {
got := CPU.SSE42()
expected := CPU.Features&SSE42 == SSE42
if got != expected {
t.Fatalf("SSE42: expected %v, got %v", expected, got)
}
t.Log("SSE42 Support:", got)
}
// TestAVX tests AVX() function
func TestAVX(t *testing.T) {
got := CPU.AVX()
expected := CPU.Features&AVX == AVX
if got != expected {
t.Fatalf("AVX: expected %v, got %v", expected, got)
}
t.Log("AVX Support:", got)
}
// TestAVX2 tests AVX2() function
func TestAVX2(t *testing.T) {
got := CPU.AVX2()
expected := CPU.Features&AVX2 == AVX2
if got != expected {
t.Fatalf("AVX2: expected %v, got %v", expected, got)
}
t.Log("AVX2 Support:", got)
}
// TestFMA3 tests FMA3() function
func TestFMA3(t *testing.T) {
got := CPU.FMA3()
expected := CPU.Features&FMA3 == FMA3
if got != expected {
t.Fatalf("FMA3: expected %v, got %v", expected, got)
}
t.Log("FMA3 Support:", got)
}
// TestFMA4 tests FMA4() function
func TestFMA4(t *testing.T) {
got := CPU.FMA4()
expected := CPU.Features&FMA4 == FMA4
if got != expected {
t.Fatalf("FMA4: expected %v, got %v", expected, got)
}
t.Log("FMA4 Support:", got)
}
// TestXOP tests XOP() function
func TestXOP(t *testing.T) {
got := CPU.XOP()
expected := CPU.Features&XOP == XOP
if got != expected {
t.Fatalf("XOP: expected %v, got %v", expected, got)
}
t.Log("XOP Support:", got)
}
// TestF16C tests F16C() function
func TestF16C(t *testing.T) {
got := CPU.F16C()
expected := CPU.Features&F16C == F16C
if got != expected {
t.Fatalf("F16C: expected %v, got %v", expected, got)
}
t.Log("F16C Support:", got)
}
// TestCX16 tests CX16() function
func TestCX16(t *testing.T) {
got := CPU.CX16()
expected := CPU.Features&CX16 == CX16
if got != expected {
t.Fatalf("CX16: expected %v, got %v", expected, got)
}
t.Log("CX16 Support:", got)
}
// TestSGX tests SGX() function
func TestSGX(t *testing.T) {
got := CPU.SGX.Available
expected := CPU.Features&SGX == SGX
if got != expected {
t.Fatalf("SGX: expected %v, got %v", expected, got)
}
t.Log("SGX Support:", got)
}
// TestBMI1 tests BMI1() function
func TestBMI1(t *testing.T) {
got := CPU.BMI1()
expected := CPU.Features&BMI1 == BMI1
if got != expected {
t.Fatalf("BMI1: expected %v, got %v", expected, got)
}
t.Log("BMI1 Support:", got)
}
// TestBMI2 tests BMI2() function
func TestBMI2(t *testing.T) {
got := CPU.BMI2()
expected := CPU.Features&BMI2 == BMI2
if got != expected {
t.Fatalf("BMI2: expected %v, got %v", expected, got)
}
t.Log("BMI2 Support:", got)
}
// TestTBM tests TBM() function
func TestTBM(t *testing.T) {
got := CPU.TBM()
expected := CPU.Features&TBM == TBM
if got != expected {
t.Fatalf("TBM: expected %v, got %v", expected, got)
}
t.Log("TBM Support:", got)
}
// TestLzcnt tests Lzcnt() function
func TestLzcnt(t *testing.T) {
got := CPU.Lzcnt()
expected := CPU.Features&LZCNT == LZCNT
if got != expected {
t.Fatalf("Lzcnt: expected %v, got %v", expected, got)
}
t.Log("LZCNT Support:", got)
}
// TestPopcnt tests Popcnt() function
func TestPopcnt(t *testing.T) {
got := CPU.Popcnt()
expected := CPU.Features&POPCNT == POPCNT
if got != expected {
t.Fatalf("Popcnt: expected %v, got %v", expected, got)
}
t.Log("POPCNT Support:", got)
}
// TestAesNi tests AesNi() function
func TestAesNi(t *testing.T) {
got := CPU.AesNi()
expected := CPU.Features&AESNI == AESNI
if got != expected {
t.Fatalf("AesNi: expected %v, got %v", expected, got)
}
t.Log("AESNI Support:", got)
}
// TestHTT tests HTT() function
func TestHTT(t *testing.T) {
got := CPU.HTT()
expected := CPU.Features&HTT == HTT
if got != expected {
t.Fatalf("HTT: expected %v, got %v", expected, got)
}
t.Log("HTT Support:", got)
}
// TestClmul tests Clmul() function
func TestClmul(t *testing.T) {
got := CPU.Clmul()
expected := CPU.Features&CLMUL == CLMUL
if got != expected {
t.Fatalf("Clmul: expected %v, got %v", expected, got)
}
t.Log("CLMUL Support:", got)
}
// TestSSE2Slow tests SSE2Slow() function
func TestSSE2Slow(t *testing.T) {
got := CPU.SSE2Slow()
expected := CPU.Features&SSE2SLOW == SSE2SLOW
if got != expected {
t.Fatalf("SSE2Slow: expected %v, got %v", expected, got)
}
t.Log("SSE2SLOW Support:", got)
}
// TestSSE3Slow tests SSE3slow() function
func TestSSE3Slow(t *testing.T) {
got := CPU.SSE3Slow()
expected := CPU.Features&SSE3SLOW == SSE3SLOW
if got != expected {
t.Fatalf("SSE3slow: expected %v, got %v", expected, got)
}
t.Log("SSE3SLOW Support:", got)
}
// TestAtom tests Atom() function
func TestAtom(t *testing.T) {
got := CPU.Atom()
expected := CPU.Features&ATOM == ATOM
if got != expected {
t.Fatalf("Atom: expected %v, got %v", expected, got)
}
t.Log("ATOM Support:", got)
}
// TestNX tests NX() function (NX (No-Execute) bit)
func TestNX(t *testing.T) {
got := CPU.NX()
expected := CPU.Features&NX == NX
if got != expected {
t.Fatalf("NX: expected %v, got %v", expected, got)
}
t.Log("NX Support:", got)
}
// TestSSE4A tests SSE4A() function (AMD Barcelona microarchitecture SSE4a instructions)
func TestSSE4A(t *testing.T) {
got := CPU.SSE4A()
expected := CPU.Features&SSE4A == SSE4A
if got != expected {
t.Fatalf("SSE4A: expected %v, got %v", expected, got)
}
t.Log("SSE4A Support:", got)
}
// TestHLE tests HLE() function (Hardware Lock Elision)
func TestHLE(t *testing.T) {
got := CPU.HLE()
expected := CPU.Features&HLE == HLE
if got != expected {
t.Fatalf("HLE: expected %v, got %v", expected, got)
}
t.Log("HLE Support:", got)
}
// TestRTM tests RTM() function (Restricted Transactional Memory)
func TestRTM(t *testing.T) {
got := CPU.RTM()
expected := CPU.Features&RTM == RTM
if got != expected {
t.Fatalf("RTM: expected %v, got %v", expected, got)
}
t.Log("RTM Support:", got)
}
// TestRdrand tests RDRAND() function (RDRAND instruction is available)
func TestRdrand(t *testing.T) {
got := CPU.Rdrand()
expected := CPU.Features&RDRAND == RDRAND
if got != expected {
t.Fatalf("Rdrand: expected %v, got %v", expected, got)
}
t.Log("Rdrand Support:", got)
}
// TestRdseed tests RDSEED() function (RDSEED instruction is available)
func TestRdseed(t *testing.T) {
got := CPU.Rdseed()
expected := CPU.Features&RDSEED == RDSEED
if got != expected {
t.Fatalf("Rdseed: expected %v, got %v", expected, got)
}
t.Log("Rdseed Support:", got)
}
// TestADX tests ADX() function (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
func TestADX(t *testing.T) {
got := CPU.ADX()
expected := CPU.Features&ADX == ADX
if got != expected {
t.Fatalf("ADX: expected %v, got %v", expected, got)
}
t.Log("ADX Support:", got)
}
// TestSHA tests SHA() function (Intel SHA Extensions)
func TestSHA(t *testing.T) {
got := CPU.SHA()
expected := CPU.Features&SHA == SHA
if got != expected {
t.Fatalf("SHA: expected %v, got %v", expected, got)
}
t.Log("SHA Support:", got)
}
// TestAVX512F tests AVX512F() function (AVX-512 Foundation)
func TestAVX512F(t *testing.T) {
got := CPU.AVX512F()
expected := CPU.Features&AVX512F == AVX512F
if got != expected {
t.Fatalf("AVX512F: expected %v, got %v", expected, got)
}
t.Log("AVX512F Support:", got)
}
// TestAVX512DQ tests AVX512DQ() function (AVX-512 Doubleword and Quadword Instructions)
func TestAVX512DQ(t *testing.T) {
got := CPU.AVX512DQ()
expected := CPU.Features&AVX512DQ == AVX512DQ
if got != expected {
t.Fatalf("AVX512DQ: expected %v, got %v", expected, got)
}
t.Log("AVX512DQ Support:", got)
}
// TestAVX512IFMA tests AVX512IFMA() function (AVX-512 Integer Fused Multiply-Add Instructions)
func TestAVX512IFMA(t *testing.T) {
got := CPU.AVX512IFMA()
expected := CPU.Features&AVX512IFMA == AVX512IFMA
if got != expected {
t.Fatalf("AVX512IFMA: expected %v, got %v", expected, got)
}
t.Log("AVX512IFMA Support:", got)
}
// TestAVX512PF tests AVX512PF() function (AVX-512 Prefetch Instructions)
func TestAVX512PF(t *testing.T) {
got := CPU.AVX512PF()
expected := CPU.Features&AVX512PF == AVX512PF
if got != expected {
t.Fatalf("AVX512PF: expected %v, got %v", expected, got)
}
t.Log("AVX512PF Support:", got)
}
// TestAVX512ER tests AVX512ER() function (AVX-512 Exponential and Reciprocal Instructions)
func TestAVX512ER(t *testing.T) {
got := CPU.AVX512ER()
expected := CPU.Features&AVX512ER == AVX512ER
if got != expected {
t.Fatalf("AVX512ER: expected %v, got %v", expected, got)
}
t.Log("AVX512ER Support:", got)
}
// TestAVX512CD tests AVX512CD() function (AVX-512 Conflict Detection Instructions)
func TestAVX512CD(t *testing.T) {
got := CPU.AVX512CD()
expected := CPU.Features&AVX512CD == AVX512CD
if got != expected {
t.Fatalf("AVX512CD: expected %v, got %v", expected, got)
}
t.Log("AVX512CD Support:", got)
}
// TestAVX512BW tests AVX512BW() function (AVX-512 Byte and Word Instructions)
func TestAVX512BW(t *testing.T) {
got := CPU.AVX512BW()
expected := CPU.Features&AVX512BW == AVX512BW
if got != expected {
t.Fatalf("AVX512BW: expected %v, got %v", expected, got)
}
t.Log("AVX512BW Support:", got)
}
// TestAVX512VL tests AVX512VL() function (AVX-512 Vector Length Extensions)
func TestAVX512VL(t *testing.T) {
got := CPU.AVX512VL()
expected := CPU.Features&AVX512VL == AVX512VL
if got != expected {
t.Fatalf("AVX512VL: expected %v, got %v", expected, got)
}
t.Log("AVX512VL Support:", got)
}
// TestAVX512VBMI tests AVX512VBMI() function (AVX-512 Vector Bit Manipulation Instructions)
func TestAVX512VBMI(t *testing.T) {
got := CPU.AVX512VBMI()
expected := CPU.Features&AVX512VBMI == AVX512VBMI
if got != expected {
t.Fatalf("AVX512VBMI: expected %v, got %v", expected, got)
}
t.Log("AVX512VBMI Support:", got)
}
// TestMPX tests MPX() function (Intel MPX (Memory Protection Extensions))
func TestMPX(t *testing.T) {
got := CPU.MPX()
expected := CPU.Features&MPX == MPX
if got != expected {
t.Fatalf("MPX: expected %v, got %v", expected, got)
}
t.Log("MPX Support:", got)
}
// TestERMS tests ERMS() function (Enhanced REP MOVSB/STOSB)
func TestERMS(t *testing.T) {
got := CPU.ERMS()
expected := CPU.Features&ERMS == ERMS
if got != expected {
t.Fatalf("ERMS: expected %v, got %v", expected, got)
}
t.Log("ERMS Support:", got)
}
// TestVendor writes the detected vendor. Will be 0 if unknown
func TestVendor(t *testing.T) {
t.Log("Vendor ID:", CPU.VendorID)
}
// Intel returns true if vendor is recognized as Intel
func TestIntel(t *testing.T) {
got := CPU.Intel()
expected := CPU.VendorID == Intel
if got != expected {
t.Fatalf("TestIntel: expected %v, got %v", expected, got)
}
t.Log("TestIntel:", got)
}
// AMD returns true if vendor is recognized as AMD
func TestAMD(t *testing.T) {
got := CPU.AMD()
expected := CPU.VendorID == AMD
if got != expected {
t.Fatalf("TestAMD: expected %v, got %v", expected, got)
}
t.Log("TestAMD:", got)
}
// Transmeta returns true if vendor is recognized as Transmeta
func TestTransmeta(t *testing.T) {
got := CPU.Transmeta()
expected := CPU.VendorID == Transmeta
if got != expected {
t.Fatalf("TestTransmeta: expected %v, got %v", expected, got)
}
t.Log("TestTransmeta:", got)
}
// NSC returns true if vendor is recognized as National Semiconductor
func TestNSC(t *testing.T) {
got := CPU.NSC()
expected := CPU.VendorID == NSC
if got != expected {
t.Fatalf("TestNSC: expected %v, got %v", expected, got)
}
t.Log("TestNSC:", got)
}
// VIA returns true if vendor is recognized as VIA
func TestVIA(t *testing.T) {
got := CPU.VIA()
expected := CPU.VendorID == VIA
if got != expected {
t.Fatalf("TestVIA: expected %v, got %v", expected, got)
}
t.Log("TestVIA:", got)
}
// Test VM function
func TestVM(t *testing.T) {
t.Log("Vendor ID:", CPU.VM())
}
// Test RTCounter function
func TestRtCounter(t *testing.T) {
a := CPU.RTCounter()
b := CPU.RTCounter()
t.Log("CPU Counter:", a, b, b-a)
}
// Prints the value of Ia32TscAux()
func TestIa32TscAux(t *testing.T) {
ecx := CPU.Ia32TscAux()
t.Logf("Ia32TscAux:0x%x\n", ecx)
if ecx != 0 {
chip := (ecx & 0xFFF000) >> 12
core := ecx & 0xFFF
t.Log("Likely chip, core:", chip, core)
}
}
func TestThreadsPerCoreNZ(t *testing.T) {
if CPU.ThreadsPerCore == 0 {
t.Fatal("threads per core is zero")
}
}
// Prints the value of LogicalCPU()
func TestLogicalCPU(t *testing.T) {
t.Log("Currently executing on cpu:", CPU.LogicalCPU())
}
func TestMaxFunction(t *testing.T) {
expect := maxFunctionID()
if CPU.maxFunc != expect {
t.Fatal("Max function does not match, expected", expect, "but got", CPU.maxFunc)
}
expect = maxExtendedFunction()
if CPU.maxExFunc != expect {
t.Fatal("Max Extended function does not match, expected", expect, "but got", CPU.maxFunc)
}
}
// This example will calculate the chip/core number on Linux
// Linux encodes the NUMA node id (shifted left 12 bits) and the core id (lower 12 bits) into TSC_AUX.
func ExampleCPUInfo_Ia32TscAux(t *testing.T) {
ecx := CPU.Ia32TscAux()
if ecx == 0 {
fmt.Println("Unknown CPU ID")
return
}
chip := (ecx & 0xFFF000) >> 12
core := ecx & 0xFFF
fmt.Println("Chip, Core:", chip, core)
}
/*
func TestPhysical(t *testing.T) {
var test16 = "CPUID 00000000: 0000000d-756e6547-6c65746e-49656e69 \nCPUID 00000001: 000206d7-03200800-1fbee3ff-bfebfbff \nCPUID 00000002: 76035a01-00f0b2ff-00000000-00ca0000 \nCPUID 00000003: 00000000-00000000-00000000-00000000 \nCPUID 00000004: 3c004121-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004122-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004143-01c0003f-000001ff-00000000 \nCPUID 00000004: 3c07c163-04c0003f-00003fff-00000006 \nCPUID 00000005: 00000040-00000040-00000003-00021120 \nCPUID 00000006: 00000075-00000002-00000009-00000000 \nCPUID 00000007: 00000000-00000000-00000000-00000000 \nCPUID 00000008: 00000000-00000000-00000000-00000000 \nCPUID 00000009: 00000001-00000000-00000000-00000000 \nCPUID 0000000a: 07300403-00000000-00000000-00000603 \nCPUID 0000000b: 00000000-00000000-00000003-00000003 \nCPUID 0000000b: 00000005-00000010-00000201-00000003 \nCPUID 0000000c: 00000000-00000000-00000000-00000000 \nCPUID 0000000d: 00000007-00000340-00000340-00000000 \nCPUID 0000000d: 00000001-00000000-00000000-00000000 \nCPUID 0000000d: 00000100-00000240-00000000-00000000 \nCPUID 80000000: 80000008-00000000-00000000-00000000 \nCPUID 80000001: 00000000-00000000-00000001-2c100800 \nCPUID 80000002: 20202020-49202020-6c65746e-20295228 \nCPUID 80000003: 6e6f6558-20295228-20555043-322d3545 \nCPUID 80000004: 20303636-20402030-30322e32-007a4847 \nCPUID 80000005: 00000000-00000000-00000000-00000000 \nCPUID 80000006: 00000000-00000000-01006040-00000000 \nCPUID 80000007: 00000000-00000000-00000000-00000100 \nCPUID 80000008: 0000302e-00000000-00000000-00000000"
restore := mockCPU([]byte(test16))
Detect()
t.Log("Name:", CPU.BrandName)
n := maxFunctionID()
t.Logf("Max Function:0x%x\n", n)
n = maxExtendedFunction()
t.Logf("Max Extended Function:0x%x\n", n)
t.Log("PhysicalCores:", CPU.PhysicalCores)
t.Log("ThreadsPerCore:", CPU.ThreadsPerCore)
t.Log("LogicalCores:", CPU.LogicalCores)
t.Log("Family", CPU.Family, "Model:", CPU.Model)
t.Log("Features:", CPU.Features)
t.Log("Cacheline bytes:", CPU.CacheLine)
t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes")
t.Log("L2 Cache:", CPU.Cache.L2, "bytes")
t.Log("L3 Cache:", CPU.Cache.L3, "bytes")
if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 {
if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore {
t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)",
CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore)
}
}
if CPU.ThreadsPerCore > 1 && !CPU.HTT() {
t.Fatalf("Hyperthreading not detected")
}
if CPU.ThreadsPerCore == 1 && CPU.HTT() {
t.Fatalf("Hyperthreading detected, but only 1 Thread per core")
}
restore()
Detect()
TestCPUID(t)
}
*/

17
vendor/github.com/klauspost/cpuid/detect_intel.go generated vendored Normal file
View File

@@ -0,0 +1,17 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo amd64,!gccgo
package cpuid
func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
func initCPU() {
cpuid = asmCpuid
cpuidex = asmCpuidex
xgetbv = asmXgetbv
rdtscpAsm = asmRdtscpAsm
}

23
vendor/github.com/klauspost/cpuid/detect_ref.go generated vendored Normal file
View File

@@ -0,0 +1,23 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build !amd64,!386 gccgo
package cpuid
func initCPU() {
cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
xgetbv = func(index uint32) (eax, edx uint32) {
return 0, 0
}
rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
}

3
vendor/github.com/klauspost/cpuid/generate.go generated vendored Normal file
View File

@@ -0,0 +1,3 @@
package cpuid
//go:generate go run private-gen.go

209
vendor/github.com/klauspost/cpuid/mockcpu_test.go generated vendored Normal file
View File

@@ -0,0 +1,209 @@
package cpuid
import (
"archive/zip"
"fmt"
"io/ioutil"
"sort"
"strings"
"testing"
)
type fakecpuid map[uint32][][]uint32
type idfuncs struct {
cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
xgetbv func(index uint32) (eax, edx uint32)
}
func (f fakecpuid) String() string {
var out = make([]string, 0, len(f))
for key, val := range f {
for _, v := range val {
out = append(out, fmt.Sprintf("CPUID %08x: [%08x, %08x, %08x, %08x]", key, v[0], v[1], v[2], v[3]))
}
}
sorter := sort.StringSlice(out)
sort.Sort(&sorter)
return strings.Join(sorter, "\n")
}
func mockCPU(def []byte) func() {
lines := strings.Split(string(def), "\n")
anyfound := false
fakeID := make(fakecpuid)
for _, line := range lines {
line = strings.Trim(line, "\r\t ")
if !strings.HasPrefix(line, "CPUID") {
continue
}
// Only collect for first cpu
if strings.HasPrefix(line, "CPUID 00000000") {
if anyfound {
break
}
}
if !strings.Contains(line, "-") {
//continue
}
items := strings.Split(line, ":")
if len(items) < 2 {
if len(line) == 51 || len(line) == 50 {
items = []string{line[0:14], line[15:]}
} else {
items = strings.Split(line, "\t")
if len(items) != 2 {
//fmt.Println("not found:", line, "len:", len(line))
continue
}
}
}
items = items[0:2]
vals := strings.Trim(items[1], "\r\n ")
var idV uint32
n, err := fmt.Sscanf(items[0], "CPUID %x", &idV)
if err != nil || n != 1 {
continue
}
existing, ok := fakeID[idV]
if !ok {
existing = make([][]uint32, 0)
}
values := make([]uint32, 4)
n, err = fmt.Sscanf(vals, "%x-%x-%x-%x", &values[0], &values[1], &values[2], &values[3])
if n != 4 || err != nil {
n, err = fmt.Sscanf(vals, "%x %x %x %x", &values[0], &values[1], &values[2], &values[3])
if n != 4 || err != nil {
//fmt.Println("scanned", vals, "got", n, "Err:", err)
continue
}
}
existing = append(existing, values)
fakeID[idV] = existing
anyfound = true
}
restorer := func(f idfuncs) func() {
return func() {
cpuid = f.cpuid
cpuidex = f.cpuidex
xgetbv = f.xgetbv
}
}(idfuncs{cpuid: cpuid, cpuidex: cpuidex, xgetbv: xgetbv})
cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
if op == 0x80000000 || op == 0 {
var ok bool
_, ok = fakeID[op]
if !ok {
return 0, 0, 0, 0
}
}
first, ok := fakeID[op]
if !ok {
if op > maxFunctionID() {
panic(fmt.Sprintf("Base not found: %v, request:%#v\n", fakeID, op))
} else {
// we have some entries missing
return 0, 0, 0, 0
}
}
theid := first[0]
return theid[0], theid[1], theid[2], theid[3]
}
cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
if op == 0x80000000 {
var ok bool
_, ok = fakeID[op]
if !ok {
return 0, 0, 0, 0
}
}
first, ok := fakeID[op]
if !ok {
if op > maxExtendedFunction() {
panic(fmt.Sprintf("Extended not found Info: %v, request:%#v, %#v\n", fakeID, op, op2))
} else {
// we have some entries missing
return 0, 0, 0, 0
}
}
if int(op2) >= len(first) {
//fmt.Printf("Extended not found Info: %v, request:%#v, %#v\n", fakeID, op, op2)
return 0, 0, 0, 0
}
theid := first[op2]
return theid[0], theid[1], theid[2], theid[3]
}
xgetbv = func(index uint32) (eax, edx uint32) {
first, ok := fakeID[1]
if !ok {
panic(fmt.Sprintf("XGETBV not supported %v", fakeID))
}
second := first[0]
// ECX bit 26 must be set
if (second[2] & 1 << 26) == 0 {
panic(fmt.Sprintf("XGETBV not supported %v", fakeID))
}
// We don't have any data to return, unfortunately
return 0, 0
}
return restorer
}
func TestMocks(t *testing.T) {
zr, err := zip.OpenReader("testdata/cpuid_data.zip")
if err != nil {
t.Skip("No testdata:", err)
}
defer zr.Close()
for _, f := range zr.File {
rc, err := f.Open()
if err != nil {
t.Fatal(err)
}
content, err := ioutil.ReadAll(rc)
if err != nil {
t.Fatal(err)
}
rc.Close()
t.Log("Opening", f.FileInfo().Name())
restore := mockCPU(content)
Detect()
t.Log("Name:", CPU.BrandName)
n := maxFunctionID()
t.Logf("Max Function:0x%x\n", n)
n = maxExtendedFunction()
t.Logf("Max Extended Function:0x%x\n", n)
t.Log("PhysicalCores:", CPU.PhysicalCores)
t.Log("ThreadsPerCore:", CPU.ThreadsPerCore)
t.Log("LogicalCores:", CPU.LogicalCores)
t.Log("Family", CPU.Family, "Model:", CPU.Model)
t.Log("Features:", CPU.Features)
t.Log("Cacheline bytes:", CPU.CacheLine)
t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes")
t.Log("L2 Cache:", CPU.Cache.L2, "bytes")
t.Log("L3 Cache:", CPU.Cache.L3, "bytes")
if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 {
if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore {
t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)",
CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore)
}
}
if CPU.ThreadsPerCore > 1 && !CPU.HTT() {
t.Fatalf("Hyperthreading not detected")
}
if CPU.ThreadsPerCore == 1 && CPU.HTT() {
t.Fatalf("Hyperthreading detected, but only 1 Thread per core")
}
restore()
}
Detect()
}

476
vendor/github.com/klauspost/cpuid/private-gen.go generated vendored Normal file
View File

@@ -0,0 +1,476 @@
// +build ignore
package main
import (
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
var inFiles = []string{"cpuid.go", "cpuid_test.go"}
var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
var fileSet = token.NewFileSet()
var reWrites = []rewrite{
initRewrite("CPUInfo -> cpuInfo"),
initRewrite("Vendor -> vendor"),
initRewrite("Flags -> flags"),
initRewrite("Detect -> detect"),
initRewrite("CPU -> cpu"),
}
var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
// cpuid_test.go
"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
}
var excludePrefixes = []string{"test", "benchmark"}
func main() {
Package := "private"
parserMode := parser.ParseComments
exported := make(map[string]rewrite)
for _, file := range inFiles {
in, err := os.Open(file)
if err != nil {
log.Fatalf("opening input", err)
}
src, err := ioutil.ReadAll(in)
if err != nil {
log.Fatalf("reading input", err)
}
astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
if err != nil {
log.Fatalf("parsing input", err)
}
for _, rw := range reWrites {
astfile = rw(astfile)
}
// Inspect the AST and print all identifiers and literals.
var startDecl token.Pos
var endDecl token.Pos
ast.Inspect(astfile, func(n ast.Node) bool {
var s string
switch x := n.(type) {
case *ast.Ident:
if x.IsExported() {
t := strings.ToLower(x.Name)
for _, pre := range excludePrefixes {
if strings.HasPrefix(t, pre) {
return true
}
}
if excludeNames[t] != true {
//if x.Pos() > startDecl && x.Pos() < endDecl {
exported[x.Name] = initRewrite(x.Name + " -> " + t)
}
}
case *ast.GenDecl:
if x.Tok == token.CONST && x.Lparen > 0 {
startDecl = x.Lparen
endDecl = x.Rparen
// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
}
}
if s != "" {
fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
}
return true
})
for _, rw := range exported {
astfile = rw(astfile)
}
var buf bytes.Buffer
printer.Fprint(&buf, fileSet, astfile)
// Remove package documentation and insert information
s := buf.String()
ind := strings.Index(buf.String(), "\npackage cpuid")
s = s[ind:]
s = "// Generated, DO NOT EDIT,\n" +
"// but copy it to your own project and rename the package.\n" +
"// See more at http://github.com/klauspost/cpuid\n" +
s
outputName := Package + string(os.PathSeparator) + file
err = ioutil.WriteFile(outputName, []byte(s), 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
log.Println("Generated", outputName)
}
for _, file := range copyFiles {
dst := ""
if strings.HasPrefix(file, "cpuid") {
dst = Package + string(os.PathSeparator) + file
} else {
dst = Package + string(os.PathSeparator) + "cpuid_" + file
}
err := copyFile(file, dst)
if err != nil {
log.Fatalf("copying file: %s", err)
}
log.Println("Copied", dst)
}
}
// CopyFile copies a file from src to dst. If src and dst files exist, and are
// the same, then return success. Copy the file contents from src to dst.
func copyFile(src, dst string) (err error) {
sfi, err := os.Stat(src)
if err != nil {
return
}
if !sfi.Mode().IsRegular() {
// cannot copy non-regular files (e.g., directories,
// symlinks, devices, etc.)
return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
}
dfi, err := os.Stat(dst)
if err != nil {
if !os.IsNotExist(err) {
return
}
} else {
if !(dfi.Mode().IsRegular()) {
return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
}
if os.SameFile(sfi, dfi) {
return
}
}
err = copyFileContents(src, dst)
return
}
// copyFileContents copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all its contents will be replaced by the contents
// of the source file.
func copyFileContents(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return
}
defer func() {
cerr := out.Close()
if err == nil {
err = cerr
}
}()
if _, err = io.Copy(out, in); err != nil {
return
}
err = out.Sync()
return
}
type rewrite func(*ast.File) *ast.File
// Mostly copied from gofmt
func initRewrite(rewriteRule string) rewrite {
f := strings.Split(rewriteRule, "->")
if len(f) != 2 {
fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
os.Exit(2)
}
pattern := parseExpr(f[0], "pattern")
replace := parseExpr(f[1], "replacement")
return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
}
// parseExpr parses s as an expression.
// It might make sense to expand this to allow statement patterns,
// but there are problems with preserving formatting and also
// with what a wildcard for a statement looks like.
func parseExpr(s, what string) ast.Expr {
x, err := parser.ParseExpr(s)
if err != nil {
fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
os.Exit(2)
}
return x
}
// Keep this function for debugging.
/*
func dump(msg string, val reflect.Value) {
fmt.Printf("%s:\n", msg)
ast.Print(fileSet, val.Interface())
fmt.Println()
}
*/
// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
cmap := ast.NewCommentMap(fileSet, p, p.Comments)
m := make(map[string]reflect.Value)
pat := reflect.ValueOf(pattern)
repl := reflect.ValueOf(replace)
var rewriteVal func(val reflect.Value) reflect.Value
rewriteVal = func(val reflect.Value) reflect.Value {
// don't bother if val is invalid to start with
if !val.IsValid() {
return reflect.Value{}
}
for k := range m {
delete(m, k)
}
val = apply(rewriteVal, val)
if match(m, pat, val) {
val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
}
return val
}
r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
r.Comments = cmap.Filter(r).Comments() // recreate comments list
return r
}
// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
func set(x, y reflect.Value) {
// don't bother if x cannot be set or y is invalid
if !x.CanSet() || !y.IsValid() {
return
}
defer func() {
if x := recover(); x != nil {
if s, ok := x.(string); ok &&
(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
// x cannot be set to y - ignore this rewrite
return
}
panic(x)
}
}()
x.Set(y)
}
// Values/types for special cases.
var (
objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
scopePtrNil = reflect.ValueOf((*ast.Scope)(nil))
identType = reflect.TypeOf((*ast.Ident)(nil))
objectPtrType = reflect.TypeOf((*ast.Object)(nil))
positionType = reflect.TypeOf(token.NoPos)
callExprType = reflect.TypeOf((*ast.CallExpr)(nil))
scopePtrType = reflect.TypeOf((*ast.Scope)(nil))
)
// apply replaces each AST field x in val with f(x), returning val.
// To avoid extra conversions, f operates on the reflect.Value form.
func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
if !val.IsValid() {
return reflect.Value{}
}
// *ast.Objects introduce cycles and are likely incorrect after
// rewrite; don't follow them but replace with nil instead
if val.Type() == objectPtrType {
return objectPtrNil
}
// similarly for scopes: they are likely incorrect after a rewrite;
// replace them with nil
if val.Type() == scopePtrType {
return scopePtrNil
}
switch v := reflect.Indirect(val); v.Kind() {
case reflect.Slice:
for i := 0; i < v.Len(); i++ {
e := v.Index(i)
set(e, f(e))
}
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
e := v.Field(i)
set(e, f(e))
}
case reflect.Interface:
e := v.Elem()
set(v, f(e))
}
return val
}
func isWildcard(s string) bool {
rune, size := utf8.DecodeRuneInString(s)
return size == len(s) && unicode.IsLower(rune)
}
// match returns true if pattern matches val,
// recording wildcard submatches in m.
// If m == nil, match checks whether pattern == val.
func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
// Wildcard matches any expression. If it appears multiple
// times in the pattern, it must match the same expression
// each time.
if m != nil && pattern.IsValid() && pattern.Type() == identType {
name := pattern.Interface().(*ast.Ident).Name
if isWildcard(name) && val.IsValid() {
// wildcards only match valid (non-nil) expressions.
if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
if old, ok := m[name]; ok {
return match(nil, old, val)
}
m[name] = val
return true
}
}
}
// Otherwise, pattern and val must match recursively.
if !pattern.IsValid() || !val.IsValid() {
return !pattern.IsValid() && !val.IsValid()
}
if pattern.Type() != val.Type() {
return false
}
// Special cases.
switch pattern.Type() {
case identType:
// For identifiers, only the names need to match
// (and none of the other *ast.Object information).
// This is a common case, handle it all here instead
// of recursing down any further via reflection.
p := pattern.Interface().(*ast.Ident)
v := val.Interface().(*ast.Ident)
return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
case objectPtrType, positionType:
// object pointers and token positions always match
return true
case callExprType:
// For calls, the Ellipsis fields (token.Position) must
// match since that is how f(x) and f(x...) are different.
// Check them here but fall through for the remaining fields.
p := pattern.Interface().(*ast.CallExpr)
v := val.Interface().(*ast.CallExpr)
if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
return false
}
}
p := reflect.Indirect(pattern)
v := reflect.Indirect(val)
if !p.IsValid() || !v.IsValid() {
return !p.IsValid() && !v.IsValid()
}
switch p.Kind() {
case reflect.Slice:
if p.Len() != v.Len() {
return false
}
for i := 0; i < p.Len(); i++ {
if !match(m, p.Index(i), v.Index(i)) {
return false
}
}
return true
case reflect.Struct:
for i := 0; i < p.NumField(); i++ {
if !match(m, p.Field(i), v.Field(i)) {
return false
}
}
return true
case reflect.Interface:
return match(m, p.Elem(), v.Elem())
}
// Handle token integers, etc.
return p.Interface() == v.Interface()
}
// subst returns a copy of pattern with values from m substituted in place
// of wildcards and pos used as the position of tokens from the pattern.
// if m == nil, subst returns a copy of pattern and doesn't change the line
// number information.
func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
if !pattern.IsValid() {
return reflect.Value{}
}
// Wildcard gets replaced with map value.
if m != nil && pattern.Type() == identType {
name := pattern.Interface().(*ast.Ident).Name
if isWildcard(name) {
if old, ok := m[name]; ok {
return subst(nil, old, reflect.Value{})
}
}
}
if pos.IsValid() && pattern.Type() == positionType {
// use new position only if old position was valid in the first place
if old := pattern.Interface().(token.Pos); !old.IsValid() {
return pattern
}
return pos
}
// Otherwise copy.
switch p := pattern; p.Kind() {
case reflect.Slice:
v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
for i := 0; i < p.Len(); i++ {
v.Index(i).Set(subst(m, p.Index(i), pos))
}
return v
case reflect.Struct:
v := reflect.New(p.Type()).Elem()
for i := 0; i < p.NumField(); i++ {
v.Field(i).Set(subst(m, p.Field(i), pos))
}
return v
case reflect.Ptr:
v := reflect.New(p.Type()).Elem()
if elem := p.Elem(); elem.IsValid() {
v.Set(subst(m, elem, pos).Addr())
}
return v
case reflect.Interface:
v := reflect.New(p.Type()).Elem()
if elem := p.Elem(); elem.IsValid() {
v.Set(subst(m, elem, pos))
}
return v
}
return pattern
}

6
vendor/github.com/klauspost/cpuid/private/README.md generated vendored Normal file
View File

@@ -0,0 +1,6 @@
# cpuid private
This is a specially converted version of the cpuid package, so it can be included in
a package without exporting anything.
Package home: https://github.com/klauspost/cpuid

987
vendor/github.com/klauspost/cpuid/private/cpuid.go generated vendored Normal file
View File

@@ -0,0 +1,987 @@
// Generated, DO NOT EDIT,
// but copy it to your own project and rename the package.
// See more at http://github.com/klauspost/cpuid
package cpuid
import (
"strings"
)
// Vendor is a representation of a CPU vendor.
type vendor int
const (
other vendor = iota
intel
amd
via
transmeta
nsc
kvm // Kernel-based Virtual Machine
msvm // Microsoft Hyper-V or Windows Virtual PC
vmware
xenhvm
)
const (
cmov = 1 << iota // i686 CMOV
nx // NX (No-Execute) bit
amd3dnow // AMD 3DNOW
amd3dnowext // AMD 3DNowExt
mmx // standard MMX
mmxext // SSE integer functions or AMD MMX ext
sse // SSE functions
sse2 // P4 SSE2 functions
sse3 // Prescott SSE3 functions
ssse3 // Conroe SSSE3 functions
sse4 // Penryn SSE4.1 functions
sse4a // AMD Barcelona microarchitecture SSE4a instructions
sse42 // Nehalem SSE4.2 functions
avx // AVX functions
avx2 // AVX2 functions
fma3 // Intel FMA 3
fma4 // Bulldozer FMA4 functions
xop // Bulldozer XOP functions
f16c // Half-precision floating-point conversion
bmi1 // Bit Manipulation Instruction Set 1
bmi2 // Bit Manipulation Instruction Set 2
tbm // AMD Trailing Bit Manipulation
lzcnt // LZCNT instruction
popcnt // POPCNT instruction
aesni // Advanced Encryption Standard New Instructions
clmul // Carry-less Multiplication
htt // Hyperthreading (enabled)
hle // Hardware Lock Elision
rtm // Restricted Transactional Memory
rdrand // RDRAND instruction is available
rdseed // RDSEED instruction is available
adx // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
sha // Intel SHA Extensions
avx512f // AVX-512 Foundation
avx512dq // AVX-512 Doubleword and Quadword Instructions
avx512ifma // AVX-512 Integer Fused Multiply-Add Instructions
avx512pf // AVX-512 Prefetch Instructions
avx512er // AVX-512 Exponential and Reciprocal Instructions
avx512cd // AVX-512 Conflict Detection Instructions
avx512bw // AVX-512 Byte and Word Instructions
avx512vl // AVX-512 Vector Length Extensions
avx512vbmi // AVX-512 Vector Bit Manipulation Instructions
mpx // Intel MPX (Memory Protection Extensions)
erms // Enhanced REP MOVSB/STOSB
rdtscp // RDTSCP Instruction
cx16 // CMPXCHG16B Instruction
// Performance indicators
sse2slow // SSE2 is supported, but usually not faster
sse3slow // SSE3 is supported, but usually not faster
atom // Atom processor, some SSSE3 instructions are slower
)
var flagNames = map[flags]string{
cmov: "CMOV", // i686 CMOV
nx: "NX", // NX (No-Execute) bit
amd3dnow: "AMD3DNOW", // AMD 3DNOW
amd3dnowext: "AMD3DNOWEXT", // AMD 3DNowExt
mmx: "MMX", // Standard MMX
mmxext: "MMXEXT", // SSE integer functions or AMD MMX ext
sse: "SSE", // SSE functions
sse2: "SSE2", // P4 SSE2 functions
sse3: "SSE3", // Prescott SSE3 functions
ssse3: "SSSE3", // Conroe SSSE3 functions
sse4: "SSE4.1", // Penryn SSE4.1 functions
sse4a: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions
sse42: "SSE4.2", // Nehalem SSE4.2 functions
avx: "AVX", // AVX functions
avx2: "AVX2", // AVX functions
fma3: "FMA3", // Intel FMA 3
fma4: "FMA4", // Bulldozer FMA4 functions
xop: "XOP", // Bulldozer XOP functions
f16c: "F16C", // Half-precision floating-point conversion
bmi1: "BMI1", // Bit Manipulation Instruction Set 1
bmi2: "BMI2", // Bit Manipulation Instruction Set 2
tbm: "TBM", // AMD Trailing Bit Manipulation
lzcnt: "LZCNT", // LZCNT instruction
popcnt: "POPCNT", // POPCNT instruction
aesni: "AESNI", // Advanced Encryption Standard New Instructions
clmul: "CLMUL", // Carry-less Multiplication
htt: "HTT", // Hyperthreading (enabled)
hle: "HLE", // Hardware Lock Elision
rtm: "RTM", // Restricted Transactional Memory
rdrand: "RDRAND", // RDRAND instruction is available
rdseed: "RDSEED", // RDSEED instruction is available
adx: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
sha: "SHA", // Intel SHA Extensions
avx512f: "AVX512F", // AVX-512 Foundation
avx512dq: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions
avx512ifma: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions
avx512pf: "AVX512PF", // AVX-512 Prefetch Instructions
avx512er: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions
avx512cd: "AVX512CD", // AVX-512 Conflict Detection Instructions
avx512bw: "AVX512BW", // AVX-512 Byte and Word Instructions
avx512vl: "AVX512VL", // AVX-512 Vector Length Extensions
avx512vbmi: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions
mpx: "MPX", // Intel MPX (Memory Protection Extensions)
erms: "ERMS", // Enhanced REP MOVSB/STOSB
rdtscp: "RDTSCP", // RDTSCP Instruction
cx16: "CX16", // CMPXCHG16B Instruction
// Performance indicators
sse2slow: "SSE2SLOW", // SSE2 supported, but usually not faster
sse3slow: "SSE3SLOW", // SSE3 supported, but usually not faster
atom: "ATOM", // Atom processor, some SSSE3 instructions are slower
}
// CPUInfo contains information about the detected system CPU.
type cpuInfo struct {
brandname string // Brand name reported by the CPU
vendorid vendor // Comparable CPU vendor ID
features flags // Features of the CPU
physicalcores int // Number of physical processor cores in your CPU. Will be 0 if undetectable.
threadspercore int // Number of threads per physical core. Will be 1 if undetectable.
logicalcores int // Number of logical cores: physical cores times the threads that can run on each core through hyperthreading. Will be 0 if undetectable.
family int // CPU family number
model int // CPU model number
cacheline int // Cache line size in bytes. Will be 0 if undetectable.
cache struct {
l1i int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
l1d int // L1 Data Cache (per core or shared). Will be -1 if undetected
l2 int // L2 Cache (per core or shared). Will be -1 if undetected
l3 int // L3 Cache (per core or shared). Will be -1 if undetected
}
maxFunc uint32
maxExFunc uint32
}
var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
var xgetbv func(index uint32) (eax, edx uint32)
var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
// CPU contains information about the CPU as detected on startup,
// or when Detect last was called.
//
// Use this as the primary entry point to your data;
// this way queries are made against the values detected once at startup.
var cpu cpuInfo
func init() {
initCPU()
detect()
}
// Detect will re-detect current CPU info.
// This will replace the content of the exported CPU variable.
//
// Unless you expect the CPU to change while you are running your program
// you should not need to call this function.
// If you call this, you must ensure that no other goroutine is accessing the
// exported CPU variable.
func detect() {
cpu.maxFunc = maxFunctionID()
cpu.maxExFunc = maxExtendedFunction()
cpu.brandname = brandName()
cpu.cacheline = cacheLine()
cpu.family, cpu.model = familyModel()
cpu.features = support()
cpu.threadspercore = threadsPerCore()
cpu.logicalcores = logicalCores()
cpu.physicalcores = physicalCores()
cpu.vendorid = vendorID()
cpu.cacheSize()
}
// Generated here: http://play.golang.org/p/BxFH2Gdc0G
// Cmov indicates support of CMOV instructions
func (c cpuInfo) cmov() bool {
return c.features&cmov != 0
}
// Amd3dnow indicates support of AMD 3DNOW! instructions
func (c cpuInfo) amd3dnow() bool {
return c.features&amd3dnow != 0
}
// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions
func (c cpuInfo) amd3dnowext() bool {
return c.features&amd3dnowext != 0
}
// MMX indicates support of MMX instructions
func (c cpuInfo) mmx() bool {
return c.features&mmx != 0
}
// MMXExt indicates support of MMXEXT instructions
// (SSE integer functions or AMD MMX ext)
func (c cpuInfo) mmxext() bool {
return c.features&mmxext != 0
}
// SSE indicates support of SSE instructions
func (c cpuInfo) sse() bool {
return c.features&sse != 0
}
// SSE2 indicates support of SSE 2 instructions
func (c cpuInfo) sse2() bool {
return c.features&sse2 != 0
}
// SSE3 indicates support of SSE 3 instructions
func (c cpuInfo) sse3() bool {
return c.features&sse3 != 0
}
// SSSE3 indicates support of SSSE 3 instructions
func (c cpuInfo) ssse3() bool {
return c.features&ssse3 != 0
}
// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions
func (c cpuInfo) sse4() bool {
return c.features&sse4 != 0
}
// SSE42 indicates support of SSE4.2 instructions
func (c cpuInfo) sse42() bool {
return c.features&sse42 != 0
}
// AVX indicates support of AVX instructions
// and operating system support of AVX instructions
func (c cpuInfo) avx() bool {
return c.features&avx != 0
}
// AVX2 indicates support of AVX2 instructions
func (c cpuInfo) avx2() bool {
return c.features&avx2 != 0
}
// FMA3 indicates support of FMA3 instructions
func (c cpuInfo) fma3() bool {
return c.features&fma3 != 0
}
// FMA4 indicates support of FMA4 instructions
func (c cpuInfo) fma4() bool {
return c.features&fma4 != 0
}
// XOP indicates support of XOP instructions
func (c cpuInfo) xop() bool {
return c.features&xop != 0
}
// F16C indicates support of F16C instructions
func (c cpuInfo) f16c() bool {
return c.features&f16c != 0
}
// BMI1 indicates support of BMI1 instructions
func (c cpuInfo) bmi1() bool {
return c.features&bmi1 != 0
}
// BMI2 indicates support of BMI2 instructions
func (c cpuInfo) bmi2() bool {
return c.features&bmi2 != 0
}
// TBM indicates support of TBM instructions
// (AMD Trailing Bit Manipulation)
func (c cpuInfo) tbm() bool {
return c.features&tbm != 0
}
// Lzcnt indicates support of LZCNT instruction
func (c cpuInfo) lzcnt() bool {
return c.features&lzcnt != 0
}
// Popcnt indicates support of POPCNT instruction
func (c cpuInfo) popcnt() bool {
return c.features&popcnt != 0
}
// HTT indicates the processor has Hyperthreading enabled
func (c cpuInfo) htt() bool {
return c.features&htt != 0
}
// SSE2Slow indicates that SSE2 may be slow on this processor
func (c cpuInfo) sse2slow() bool {
return c.features&sse2slow != 0
}
// SSE3Slow indicates that SSE3 may be slow on this processor
func (c cpuInfo) sse3slow() bool {
return c.features&sse3slow != 0
}
// AesNi indicates support of AES-NI instructions
// (Advanced Encryption Standard New Instructions)
func (c cpuInfo) aesni() bool {
return c.features&aesni != 0
}
// Clmul indicates support of CLMUL instructions
// (Carry-less Multiplication)
func (c cpuInfo) clmul() bool {
return c.features&clmul != 0
}
// NX indicates support of NX (No-Execute) bit
func (c cpuInfo) nx() bool {
return c.features&nx != 0
}
// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions
func (c cpuInfo) sse4a() bool {
return c.features&sse4a != 0
}
// HLE indicates support of Hardware Lock Elision
func (c cpuInfo) hle() bool {
return c.features&hle != 0
}
// RTM indicates support of Restricted Transactional Memory
func (c cpuInfo) rtm() bool {
return c.features&rtm != 0
}
// Rdrand indicates support of RDRAND instruction is available
func (c cpuInfo) rdrand() bool {
return c.features&rdrand != 0
}
// Rdseed indicates support of RDSEED instruction is available
func (c cpuInfo) rdseed() bool {
return c.features&rdseed != 0
}
// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
func (c cpuInfo) adx() bool {
return c.features&adx != 0
}
// SHA indicates support of Intel SHA Extensions
func (c cpuInfo) sha() bool {
return c.features&sha != 0
}
// AVX512F indicates support of AVX-512 Foundation
func (c cpuInfo) avx512f() bool {
return c.features&avx512f != 0
}
// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions
func (c cpuInfo) avx512dq() bool {
return c.features&avx512dq != 0
}
// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions
func (c cpuInfo) avx512ifma() bool {
return c.features&avx512ifma != 0
}
// AVX512PF indicates support of AVX-512 Prefetch Instructions
func (c cpuInfo) avx512pf() bool {
return c.features&avx512pf != 0
}
// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions
func (c cpuInfo) avx512er() bool {
return c.features&avx512er != 0
}
// AVX512CD indicates support of AVX-512 Conflict Detection Instructions
func (c cpuInfo) avx512cd() bool {
return c.features&avx512cd != 0
}
// AVX512BW indicates support of AVX-512 Byte and Word Instructions
func (c cpuInfo) avx512bw() bool {
return c.features&avx512bw != 0
}
// AVX512VL indicates support of AVX-512 Vector Length Extensions
func (c cpuInfo) avx512vl() bool {
return c.features&avx512vl != 0
}
// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions
func (c cpuInfo) avx512vbmi() bool {
return c.features&avx512vbmi != 0
}
// MPX indicates support of Intel MPX (Memory Protection Extensions)
func (c cpuInfo) mpx() bool {
return c.features&mpx != 0
}
// ERMS indicates support of Enhanced REP MOVSB/STOSB
func (c cpuInfo) erms() bool {
return c.features&erms != 0
}
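// RDTSCP indicates support of the RDTSCP instruction.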
func (c cpuInfo) rdtscp() bool {
return c.features&rdtscp != 0
}
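// CX16 indicates support of the CMPXCHG16B instruction.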
func (c cpuInfo) cx16() bool {
return c.features&cx16 != 0
}
// Atom indicates an Atom processor
func (c cpuInfo) atom() bool {
return c.features&atom != 0
}
// Intel returns true if vendor is recognized as Intel
func (c cpuInfo) intel() bool {
return c.vendorid == intel
}
// AMD returns true if vendor is recognized as AMD
func (c cpuInfo) amd() bool {
return c.vendorid == amd
}
// Transmeta returns true if vendor is recognized as Transmeta
func (c cpuInfo) transmeta() bool {
return c.vendorid == transmeta
}
// NSC returns true if vendor is recognized as National Semiconductor
func (c cpuInfo) nsc() bool {
return c.vendorid == nsc
}
// VIA returns true if vendor is recognized as VIA
func (c cpuInfo) via() bool {
return c.vendorid == via
}
// RTCounter returns the 64-bit time-stamp counter
// Uses the RDTSCP instruction. The value 0 is returned
// if the CPU does not support the instruction.
func (c cpuInfo) rtcounter() uint64 {
if !c.rdtscp() {
return 0
}
a, _, _, d := rdtscpAsm()
return uint64(a) | (uint64(d) << 32)
}
// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
// This variable is OS dependent, but on Linux contains information
// about the current cpu/core the code is running on.
// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
func (c cpuInfo) ia32tscaux() uint32 {
if !c.rdtscp() {
return 0
}
_, _, ecx, _ := rdtscpAsm()
return ecx
}
// LogicalCPU will return the Logical CPU the code is currently executing on.
// This is likely to change when the OS re-schedules the running thread
// to another CPU.
// If the current core cannot be detected, -1 will be returned.
func (c cpuInfo) logicalcpu() int {
if c.maxFunc < 1 {
return -1
}
_, ebx, _, _ := cpuid(1)
return int(ebx >> 24)
}
// VM Will return true if the cpu id indicates we are in
// a virtual machine. This is only a hint, and will very likely
// have many false negatives.
func (c cpuInfo) vm() bool {
switch c.vendorid {
case msvm, kvm, vmware, xenhvm:
return true
}
return false
}
// Flags contains detected cpu features and characteristics
type flags uint64
// String returns a string representation of the detected
// CPU features.
func (f flags) String() string {
return strings.Join(f.strings(), ",")
}
// Strings returns an array of the detected features.
func (f flags) strings() []string {
s := support()
r := make([]string, 0, 20)
for i := uint(0); i < 64; i++ {
key := flags(1 << i)
val := flagNames[key]
if s&key != 0 {
r = append(r, val)
}
}
return r
}
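// maxExtendedFunction returns the highest supported extended CPUID function number (EAX of leaf 0x80000000).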
func maxExtendedFunction() uint32 {
eax, _, _, _ := cpuid(0x80000000)
return eax
}
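// maxFunctionID returns the highest supported standard CPUID function number (EAX of leaf 0).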
func maxFunctionID() uint32 {
a, _, _, _ := cpuid(0)
return a
}
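// brandName returns the processor brand string from extended leaves 0x80000002 through 0x80000004, or "unknown" if those leaves are unsupported.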
func brandName() string {
if maxExtendedFunction() >= 0x80000004 {
v := make([]uint32, 0, 48)
for i := uint32(0); i < 3; i++ {
a, b, c, d := cpuid(0x80000002 + i)
v = append(v, a, b, c, d)
}
return strings.Trim(string(valAsString(v...)), " ")
}
return "unknown"
}
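// threadsPerCore returns the number of hardware threads per physical core. It returns 1 for non-Intel CPUs or when the value cannot be determined.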
func threadsPerCore() int {
mfi := maxFunctionID()
if mfi < 0x4 || vendorID() != intel {
return 1
}
if mfi < 0xb {
_, b, _, d := cpuid(1)
if (d & (1 << 28)) != 0 {
// v will contain logical core count
v := (b >> 16) & 255
if v > 1 {
a4, _, _, _ := cpuid(4)
// physical cores
v2 := (a4 >> 26) + 1
if v2 > 0 {
return int(v) / int(v2)
}
}
}
return 1
}
_, b, _, _ := cpuidex(0xb, 0)
if b&0xffff == 0 {
return 1
}
return int(b & 0xffff)
}
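// logicalCores returns the number of logical cores reported by CPUID, or 0 if it cannot be determined.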
func logicalCores() int {
mfi := maxFunctionID()
switch vendorID() {
case intel:
// Use this on old Intel processors
if mfi < 0xb {
if mfi < 1 {
return 0
}
// CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
// that can be assigned to logical processors in a physical package.
// The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
_, ebx, _, _ := cpuid(1)
logical := (ebx >> 16) & 0xff
return int(logical)
}
_, b, _, _ := cpuidex(0xb, 1)
return int(b & 0xffff)
case amd:
_, b, _, _ := cpuid(1)
return int((b >> 16) & 0xff)
default:
return 0
}
}
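// familyModel returns the CPU family and model numbers from leaf 1, combining the base and extended fields.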
func familyModel() (int, int) {
if maxFunctionID() < 0x1 {
return 0, 0
}
eax, _, _, _ := cpuid(1)
family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
return int(family), int(model)
}
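// physicalCores returns the number of physical cores: logical cores divided by threads per core on Intel, the core count from leaf 0x80000008 on AMD, or 0 if unknown.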
func physicalCores() int {
switch vendorID() {
case intel:
return logicalCores() / threadsPerCore()
case amd:
if maxExtendedFunction() >= 0x80000008 {
_, _, c, _ := cpuid(0x80000008)
return int(c&0xff) + 1
}
}
return 0
}
// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
var vendorMapping = map[string]vendor{
"AMDisbetter!": amd,
"AuthenticAMD": amd,
"CentaurHauls": via,
"GenuineIntel": intel,
"TransmetaCPU": transmeta,
"GenuineTMx86": transmeta,
"Geode by NSC": nsc,
"VIA VIA VIA ": via,
"KVMKVMKVMKVM": kvm,
"Microsoft Hv": msvm,
"VMwareVMware": vmware,
"XenVMMXenVMM": xenhvm,
}
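// vendorID maps the vendor string from leaf 0 to a vendor constant, returning other for unrecognized strings.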
func vendorID() vendor {
_, b, c, d := cpuid(0)
v := valAsString(b, d, c)
vend, ok := vendorMapping[string(v)]
if !ok {
return other
}
return vend
}
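// cacheLine returns the cache line size in bytes, taken from the CLFLUSH line size in leaf 1 or, failing that, from extended leaf 0x80000006. Returns 0 if undetectable.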
func cacheLine() int {
if maxFunctionID() < 0x1 {
return 0
}
_, ebx, _, _ := cpuid(1)
cache := (ebx & 0xff00) >> 5 // cflush size
if cache == 0 && maxExtendedFunction() >= 0x80000006 {
_, _, ecx, _ := cpuid(0x80000006)
cache = ecx & 0xff // cacheline size
}
// TODO: Read from Cache and TLB Information
return int(cache)
}
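// cacheSize fills in the L1, L2 and L3 cache sizes, using deterministic cache parameters (leaf 4) on Intel and extended leaves 0x80000005/0x80000006 on AMD.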
func (c *cpuInfo) cacheSize() {
c.cache.l1d = -1
c.cache.l1i = -1
c.cache.l2 = -1
c.cache.l3 = -1
vendor := vendorID()
switch vendor {
case intel:
if maxFunctionID() < 4 {
return
}
for i := uint32(0); ; i++ {
eax, ebx, ecx, _ := cpuidex(4, i)
cacheType := eax & 15
if cacheType == 0 {
break
}
cacheLevel := (eax >> 5) & 7
coherency := int(ebx&0xfff) + 1
partitions := int((ebx>>12)&0x3ff) + 1
associativity := int((ebx>>22)&0x3ff) + 1
sets := int(ecx) + 1
size := associativity * partitions * coherency * sets
switch cacheLevel {
case 1:
if cacheType == 1 {
// 1 = Data Cache
c.cache.l1d = size
} else if cacheType == 2 {
// 2 = Instruction Cache
c.cache.l1i = size
} else {
if c.cache.l1d < 0 {
c.cache.l1d = size
}
if c.cache.l1i < 0 {
c.cache.l1i = size
}
}
case 2:
c.cache.l2 = size
case 3:
c.cache.l3 = size
}
}
case amd:
// Untested.
if maxExtendedFunction() < 0x80000005 {
return
}
_, _, ecx, edx := cpuid(0x80000005)
c.cache.l1d = int(((ecx >> 24) & 0xFF) * 1024)
c.cache.l1i = int(((edx >> 24) & 0xFF) * 1024)
if maxExtendedFunction() < 0x80000006 {
return
}
_, _, ecx, _ = cpuid(0x80000006)
c.cache.l2 = int(((ecx >> 16) & 0xFFFF) * 1024)
}
return
}
func support() flags {
mfi := maxFunctionID()
vend := vendorID()
if mfi < 0x1 {
return 0
}
rval := uint64(0)
_, _, c, d := cpuid(1)
if (d & (1 << 15)) != 0 {
rval |= cmov
}
if (d & (1 << 23)) != 0 {
rval |= mmx
}
if (d & (1 << 25)) != 0 {
rval |= mmxext
}
if (d & (1 << 25)) != 0 {
rval |= sse
}
if (d & (1 << 26)) != 0 {
rval |= sse2
}
if (c & 1) != 0 {
rval |= sse3
}
if (c & 0x00000200) != 0 {
rval |= ssse3
}
if (c & 0x00080000) != 0 {
rval |= sse4
}
if (c & 0x00100000) != 0 {
rval |= sse42
}
if (c & (1 << 25)) != 0 {
rval |= aesni
}
if (c & (1 << 1)) != 0 {
rval |= clmul
}
if c&(1<<23) != 0 {
rval |= popcnt
}
if c&(1<<30) != 0 {
rval |= rdrand
}
if c&(1<<29) != 0 {
rval |= f16c
}
if c&(1<<13) != 0 {
rval |= cx16
}
if vend == intel && (d&(1<<28)) != 0 && mfi >= 4 {
if threadsPerCore() > 1 {
rval |= htt
}
}
// Check XGETBV, OSXSAVE and AVX bits
if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 {
// Check for OS support
eax, _ := xgetbv(0)
if (eax & 0x6) == 0x6 {
rval |= avx
if (c & 0x00001000) != 0 {
rval |= fma3
}
}
}
// Check AVX2. AVX2 requires OS support, but BMI1/2 don't.
if mfi >= 7 {
_, ebx, ecx, _ := cpuidex(7, 0)
if (rval&avx) != 0 && (ebx&0x00000020) != 0 {
rval |= avx2
}
if (ebx & 0x00000008) != 0 {
rval |= bmi1
if (ebx & 0x00000100) != 0 {
rval |= bmi2
}
}
if ebx&(1<<4) != 0 {
rval |= hle
}
if ebx&(1<<9) != 0 {
rval |= erms
}
if ebx&(1<<11) != 0 {
rval |= rtm
}
if ebx&(1<<14) != 0 {
rval |= mpx
}
if ebx&(1<<18) != 0 {
rval |= rdseed
}
if ebx&(1<<19) != 0 {
rval |= adx
}
if ebx&(1<<29) != 0 {
rval |= sha
}
// Only detect AVX-512 features if XGETBV is supported
if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
// Check for OS support
eax, _ := xgetbv(0)
// Verify that XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
// ZMM16-ZMM31 state are enabled by OS)
// and that XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS).
if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 {
if ebx&(1<<16) != 0 {
rval |= avx512f
}
if ebx&(1<<17) != 0 {
rval |= avx512dq
}
if ebx&(1<<21) != 0 {
rval |= avx512ifma
}
if ebx&(1<<26) != 0 {
rval |= avx512pf
}
if ebx&(1<<27) != 0 {
rval |= avx512er
}
if ebx&(1<<28) != 0 {
rval |= avx512cd
}
if ebx&(1<<30) != 0 {
rval |= avx512bw
}
if ebx&(1<<31) != 0 {
rval |= avx512vl
}
// ecx
if ecx&(1<<1) != 0 {
rval |= avx512vbmi
}
}
}
}
if maxExtendedFunction() >= 0x80000001 {
_, _, c, d := cpuid(0x80000001)
if (c & (1 << 5)) != 0 {
rval |= lzcnt
rval |= popcnt
}
if (d & (1 << 31)) != 0 {
rval |= amd3dnow
}
if (d & (1 << 30)) != 0 {
rval |= amd3dnowext
}
if (d & (1 << 23)) != 0 {
rval |= mmx
}
if (d & (1 << 22)) != 0 {
rval |= mmxext
}
if (c & (1 << 6)) != 0 {
rval |= sse4a
}
if d&(1<<20) != 0 {
rval |= nx
}
if d&(1<<27) != 0 {
rval |= rdtscp
}
/* Allow for selectively disabling SSE2 functions on AMD processors
with SSE2 support but not SSE4a. This includes Athlon64, some
Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
than SSE2 often enough to utilize this special-case flag.
AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
so that SSE2 is used unless explicitly disabled by checking
AV_CPU_FLAG_SSE2SLOW. */
if vendorID() != intel &&
rval&sse2 != 0 && (c&0x00000040) == 0 {
rval |= sse2slow
}
/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
* used unless the OS has AVX support. */
if (rval & avx) != 0 {
if (c & 0x00000800) != 0 {
rval |= xop
}
if (c & 0x00010000) != 0 {
rval |= fma4
}
}
if vendorID() == intel {
family, model := familyModel()
if family == 6 && (model == 9 || model == 13 || model == 14) {
/* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
* 6/14 (core1 "yonah") theoretically support sse2, but it's
* usually slower than mmx. */
if (rval & sse2) != 0 {
rval |= sse2slow
}
if (rval & sse3) != 0 {
rval |= sse3slow
}
}
/* The Atom processor has SSSE3 support, which is useful in many cases,
* but sometimes the SSSE3 version is slower than the SSE2 equivalent
* on the Atom, but is generally faster on other processors supporting
* SSSE3. This flag allows for selectively disabling certain SSSE3
* functions on the Atom. */
if family == 6 && model == 28 {
rval |= atom
}
}
}
return flags(rval)
}
func valAsString(values ...uint32) []byte {
r := make([]byte, 4*len(values))
for i, v := range values {
dst := r[i*4:]
dst[0] = byte(v & 0xff)
dst[1] = byte((v >> 8) & 0xff)
dst[2] = byte((v >> 16) & 0xff)
dst[3] = byte((v >> 24) & 0xff)
switch {
case dst[0] == 0:
return r[:i*4]
case dst[1] == 0:
return r[:i*4+1]
case dst[2] == 0:
return r[:i*4+2]
case dst[3] == 0:
return r[:i*4+3]
}
}
return r
}

42
vendor/github.com/klauspost/cpuid/private/cpuid_386.s generated vendored Normal file
View File

@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORL CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+4(FP)
MOVL BX, ebx+8(FP)
MOVL CX, ecx+12(FP)
MOVL DX, edx+16(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+4(FP)
MOVL DX, edx+8(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET

View File

@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build amd64,!gccgo
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORQ CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+8(FP)
MOVL DX, edx+12(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET

View File

@@ -0,0 +1,17 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo amd64,!gccgo
package cpuid
func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
func initCPU() {
cpuid = asmCpuid
cpuidex = asmCpuidex
xgetbv = asmXgetbv
rdtscpAsm = asmRdtscpAsm
}

View File

@@ -0,0 +1,23 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build !amd64,!386 gccgo
package cpuid
func initCPU() {
cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
xgetbv = func(index uint32) (eax, edx uint32) {
return 0, 0
}
rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
}

719
vendor/github.com/klauspost/cpuid/private/cpuid_test.go generated vendored Normal file
View File

@@ -0,0 +1,719 @@
// Generated, DO NOT EDIT,
// but copy it to your own project and rename the package.
// See more at http://github.com/klauspost/cpuid
package cpuid
import (
"fmt"
"testing"
)
// There is no real way to test a CPU identifier, since results will
// obviously differ on each machine.
func TestCPUID(t *testing.T) {
n := maxFunctionID()
t.Logf("Max Function:0x%x\n", n)
n = maxExtendedFunction()
t.Logf("Max Extended Function:0x%x\n", n)
t.Log("Name:", cpu.brandname)
t.Log("PhysicalCores:", cpu.physicalcores)
t.Log("ThreadsPerCore:", cpu.threadspercore)
t.Log("LogicalCores:", cpu.logicalcores)
t.Log("Family", cpu.family, "Model:", cpu.model)
t.Log("Features:", cpu.features)
t.Log("Cacheline bytes:", cpu.cacheline)
t.Log("L1 Instruction Cache:", cpu.cache.l1i, "bytes")
t.Log("L1 Data Cache:", cpu.cache.l1d, "bytes")
t.Log("L2 Cache:", cpu.cache.l2, "bytes")
t.Log("L3 Cache:", cpu.cache.l3, "bytes")
if cpu.sse2() {
t.Log("We have SSE2")
}
}
func TestDumpCPUID(t *testing.T) {
n := int(maxFunctionID())
for i := 0; i <= n; i++ {
a, b, c, d := cpuidex(uint32(i), 0)
t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d)
ex := uint32(1)
for {
a2, b2, c2, d2 := cpuidex(uint32(i), ex)
if a2 == a && b2 == b && d2 == d || ex > 50 || a2 == 0 {
break
}
t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a2, b2, c2, d2)
a, b, c, d = a2, b2, c2, d2
ex++
}
}
n2 := maxExtendedFunction()
for i := uint32(0x80000000); i <= n2; i++ {
a, b, c, d := cpuid(i)
t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d)
}
}
func example() {
// Print basic CPU information:
fmt.Println("Name:", cpu.brandname)
fmt.Println("PhysicalCores:", cpu.physicalcores)
fmt.Println("ThreadsPerCore:", cpu.threadspercore)
fmt.Println("LogicalCores:", cpu.logicalcores)
fmt.Println("Family", cpu.family, "Model:", cpu.model)
fmt.Println("Features:", cpu.features)
fmt.Println("Cacheline bytes:", cpu.cacheline)
// Test if we have a specific feature:
if cpu.sse() {
fmt.Println("We have Streaming SIMD Extensions")
}
}
func TestBrandNameZero(t *testing.T) {
if len(cpu.brandname) > 0 {
// Cut out last byte
last := []byte(cpu.brandname[len(cpu.brandname)-1:])
if last[0] == 0 {
t.Fatal("last byte was zero")
} else if last[0] == 32 {
t.Fatal("whitespace wasn't trimmed")
}
}
}
// Generated here: http://play.golang.org/p/mko-0tFt0Q
// TestCmov tests Cmov() function
func TestCmov(t *testing.T) {
got := cpu.cmov()
expected := cpu.features&cmov == cmov
if got != expected {
t.Fatalf("Cmov: expected %v, got %v", expected, got)
}
t.Log("CMOV Support:", got)
}
// TestAmd3dnow tests Amd3dnow() function
func TestAmd3dnow(t *testing.T) {
got := cpu.amd3dnow()
expected := cpu.features&amd3dnow == amd3dnow
if got != expected {
t.Fatalf("Amd3dnow: expected %v, got %v", expected, got)
}
t.Log("AMD3DNOW Support:", got)
}
// TestAmd3dnowExt tests Amd3dnowExt() function
func TestAmd3dnowExt(t *testing.T) {
got := cpu.amd3dnowext()
expected := cpu.features&amd3dnowext == amd3dnowext
if got != expected {
t.Fatalf("Amd3dnowExt: expected %v, got %v", expected, got)
}
t.Log("AMD3DNOWEXT Support:", got)
}
// TestMMX tests MMX() function
func TestMMX(t *testing.T) {
got := cpu.mmx()
expected := cpu.features&mmx == mmx
if got != expected {
t.Fatalf("MMX: expected %v, got %v", expected, got)
}
t.Log("MMX Support:", got)
}
// TestMMXext tests MMXext() function
func TestMMXext(t *testing.T) {
got := cpu.mmxext()
expected := cpu.features&mmxext == mmxext
if got != expected {
t.Fatalf("MMXExt: expected %v, got %v", expected, got)
}
t.Log("MMXEXT Support:", got)
}
// TestSSE tests SSE() function
func TestSSE(t *testing.T) {
got := cpu.sse()
expected := cpu.features&sse == sse
if got != expected {
t.Fatalf("SSE: expected %v, got %v", expected, got)
}
t.Log("SSE Support:", got)
}
// TestSSE2 tests SSE2() function
func TestSSE2(t *testing.T) {
got := cpu.sse2()
expected := cpu.features&sse2 == sse2
if got != expected {
t.Fatalf("SSE2: expected %v, got %v", expected, got)
}
t.Log("SSE2 Support:", got)
}
// TestSSE3 tests SSE3() function
func TestSSE3(t *testing.T) {
got := cpu.sse3()
expected := cpu.features&sse3 == sse3
if got != expected {
t.Fatalf("SSE3: expected %v, got %v", expected, got)
}
t.Log("SSE3 Support:", got)
}
// TestSSSE3 tests SSSE3() function
func TestSSSE3(t *testing.T) {
got := cpu.ssse3()
expected := cpu.features&ssse3 == ssse3
if got != expected {
t.Fatalf("SSSE3: expected %v, got %v", expected, got)
}
t.Log("SSSE3 Support:", got)
}
// TestSSE4 tests SSE4() function
func TestSSE4(t *testing.T) {
got := cpu.sse4()
expected := cpu.features&sse4 == sse4
if got != expected {
t.Fatalf("SSE4: expected %v, got %v", expected, got)
}
t.Log("SSE4 Support:", got)
}
// TestSSE42 tests SSE42() function
func TestSSE42(t *testing.T) {
got := cpu.sse42()
expected := cpu.features&sse42 == sse42
if got != expected {
t.Fatalf("SSE42: expected %v, got %v", expected, got)
}
t.Log("SSE42 Support:", got)
}
// TestAVX tests AVX() function
func TestAVX(t *testing.T) {
got := cpu.avx()
expected := cpu.features&avx == avx
if got != expected {
t.Fatalf("AVX: expected %v, got %v", expected, got)
}
t.Log("AVX Support:", got)
}
// TestAVX2 tests AVX2() function
func TestAVX2(t *testing.T) {
got := cpu.avx2()
expected := cpu.features&avx2 == avx2
if got != expected {
t.Fatalf("AVX2: expected %v, got %v", expected, got)
}
t.Log("AVX2 Support:", got)
}
// TestFMA3 tests FMA3() function
func TestFMA3(t *testing.T) {
got := cpu.fma3()
expected := cpu.features&fma3 == fma3
if got != expected {
t.Fatalf("FMA3: expected %v, got %v", expected, got)
}
t.Log("FMA3 Support:", got)
}
// TestFMA4 tests FMA4() function
func TestFMA4(t *testing.T) {
got := cpu.fma4()
expected := cpu.features&fma4 == fma4
if got != expected {
t.Fatalf("FMA4: expected %v, got %v", expected, got)
}
t.Log("FMA4 Support:", got)
}
// TestXOP tests XOP() function
func TestXOP(t *testing.T) {
got := cpu.xop()
expected := cpu.features&xop == xop
if got != expected {
t.Fatalf("XOP: expected %v, got %v", expected, got)
}
t.Log("XOP Support:", got)
}
// TestF16C tests F16C() function
func TestF16C(t *testing.T) {
got := cpu.f16c()
expected := cpu.features&f16c == f16c
if got != expected {
t.Fatalf("F16C: expected %v, got %v", expected, got)
}
t.Log("F16C Support:", got)
}
// TestCX16 tests CX16() function
func TestCX16(t *testing.T) {
got := cpu.cx16()
expected := cpu.features&cx16 == cx16
if got != expected {
t.Fatalf("CX16: expected %v, got %v", expected, got)
}
t.Log("CX16 Support:", got)
}
// TestBMI1 tests BMI1() function
func TestBMI1(t *testing.T) {
got := cpu.bmi1()
expected := cpu.features&bmi1 == bmi1
if got != expected {
t.Fatalf("BMI1: expected %v, got %v", expected, got)
}
t.Log("BMI1 Support:", got)
}
// TestBMI2 tests BMI2() function
func TestBMI2(t *testing.T) {
got := cpu.bmi2()
expected := cpu.features&bmi2 == bmi2
if got != expected {
t.Fatalf("BMI2: expected %v, got %v", expected, got)
}
t.Log("BMI2 Support:", got)
}
// TestTBM tests TBM() function
func TestTBM(t *testing.T) {
got := cpu.tbm()
expected := cpu.features&tbm == tbm
if got != expected {
t.Fatalf("TBM: expected %v, got %v", expected, got)
}
t.Log("TBM Support:", got)
}
// TestLzcnt tests Lzcnt() function
func TestLzcnt(t *testing.T) {
got := cpu.lzcnt()
expected := cpu.features&lzcnt == lzcnt
if got != expected {
t.Fatalf("Lzcnt: expected %v, got %v", expected, got)
}
t.Log("LZCNT Support:", got)
}
// TestPopcnt tests Popcnt() function
func TestPopcnt(t *testing.T) {
got := cpu.popcnt()
expected := cpu.features&popcnt == popcnt
if got != expected {
t.Fatalf("Popcnt: expected %v, got %v", expected, got)
}
t.Log("POPCNT Support:", got)
}
// TestAesNi tests AesNi() function
func TestAesNi(t *testing.T) {
got := cpu.aesni()
expected := cpu.features&aesni == aesni
if got != expected {
t.Fatalf("AesNi: expected %v, got %v", expected, got)
}
t.Log("AESNI Support:", got)
}
// TestHTT tests HTT() function
func TestHTT(t *testing.T) {
got := cpu.htt()
expected := cpu.features&htt == htt
if got != expected {
t.Fatalf("HTT: expected %v, got %v", expected, got)
}
t.Log("HTT Support:", got)
}
// TestClmul tests Clmul() function
func TestClmul(t *testing.T) {
got := cpu.clmul()
expected := cpu.features&clmul == clmul
if got != expected {
t.Fatalf("Clmul: expected %v, got %v", expected, got)
}
t.Log("CLMUL Support:", got)
}
// TestSSE2Slow tests SSE2Slow() function
func TestSSE2Slow(t *testing.T) {
got := cpu.sse2slow()
expected := cpu.features&sse2slow == sse2slow
if got != expected {
t.Fatalf("SSE2Slow: expected %v, got %v", expected, got)
}
t.Log("SSE2SLOW Support:", got)
}
// TestSSE3Slow tests SSE3slow() function
func TestSSE3Slow(t *testing.T) {
got := cpu.sse3slow()
expected := cpu.features&sse3slow == sse3slow
if got != expected {
t.Fatalf("SSE3slow: expected %v, got %v", expected, got)
}
t.Log("SSE3SLOW Support:", got)
}
// TestAtom tests Atom() function
func TestAtom(t *testing.T) {
got := cpu.atom()
expected := cpu.features&atom == atom
if got != expected {
t.Fatalf("Atom: expected %v, got %v", expected, got)
}
t.Log("ATOM Support:", got)
}
// TestNX tests NX() function (NX (No-Execute) bit)
func TestNX(t *testing.T) {
got := cpu.nx()
expected := cpu.features&nx == nx
if got != expected {
t.Fatalf("NX: expected %v, got %v", expected, got)
}
t.Log("NX Support:", got)
}
// TestSSE4A tests SSE4A() function (AMD Barcelona microarchitecture SSE4a instructions)
func TestSSE4A(t *testing.T) {
got := cpu.sse4a()
expected := cpu.features&sse4a == sse4a
if got != expected {
t.Fatalf("SSE4A: expected %v, got %v", expected, got)
}
t.Log("SSE4A Support:", got)
}
// TestHLE tests HLE() function (Hardware Lock Elision)
func TestHLE(t *testing.T) {
got := cpu.hle()
expected := cpu.features&hle == hle
if got != expected {
t.Fatalf("HLE: expected %v, got %v", expected, got)
}
t.Log("HLE Support:", got)
}
// TestRTM tests RTM() function (Restricted Transactional Memory)
func TestRTM(t *testing.T) {
got := cpu.rtm()
expected := cpu.features&rtm == rtm
if got != expected {
t.Fatalf("RTM: expected %v, got %v", expected, got)
}
t.Log("RTM Support:", got)
}
// TestRdrand tests RDRAND() function (RDRAND instruction is available)
func TestRdrand(t *testing.T) {
got := cpu.rdrand()
expected := cpu.features&rdrand == rdrand
if got != expected {
t.Fatalf("Rdrand: expected %v, got %v", expected, got)
}
t.Log("Rdrand Support:", got)
}
// TestRdseed tests RDSEED() function (RDSEED instruction is available)
func TestRdseed(t *testing.T) {
got := cpu.rdseed()
expected := cpu.features&rdseed == rdseed
if got != expected {
t.Fatalf("Rdseed: expected %v, got %v", expected, got)
}
t.Log("Rdseed Support:", got)
}
// TestADX tests ADX() function (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
func TestADX(t *testing.T) {
got := cpu.adx()
expected := cpu.features&adx == adx
if got != expected {
t.Fatalf("ADX: expected %v, got %v", expected, got)
}
t.Log("ADX Support:", got)
}
// TestSHA tests SHA() function (Intel SHA Extensions)
func TestSHA(t *testing.T) {
got := cpu.sha()
expected := cpu.features&sha == sha
if got != expected {
t.Fatalf("SHA: expected %v, got %v", expected, got)
}
t.Log("SHA Support:", got)
}
// TestAVX512F tests AVX512F() function (AVX-512 Foundation)
func TestAVX512F(t *testing.T) {
got := cpu.avx512f()
expected := cpu.features&avx512f == avx512f
if got != expected {
t.Fatalf("AVX512F: expected %v, got %v", expected, got)
}
t.Log("AVX512F Support:", got)
}
// TestAVX512DQ tests AVX512DQ() function (AVX-512 Doubleword and Quadword Instructions)
func TestAVX512DQ(t *testing.T) {
got := cpu.avx512dq()
expected := cpu.features&avx512dq == avx512dq
if got != expected {
t.Fatalf("AVX512DQ: expected %v, got %v", expected, got)
}
t.Log("AVX512DQ Support:", got)
}
// TestAVX512IFMA tests AVX512IFMA() function (AVX-512 Integer Fused Multiply-Add Instructions)
func TestAVX512IFMA(t *testing.T) {
got := cpu.avx512ifma()
expected := cpu.features&avx512ifma == avx512ifma
if got != expected {
t.Fatalf("AVX512IFMA: expected %v, got %v", expected, got)
}
t.Log("AVX512IFMA Support:", got)
}
// TestAVX512PF tests AVX512PF() function (AVX-512 Prefetch Instructions)
func TestAVX512PF(t *testing.T) {
got := cpu.avx512pf()
expected := cpu.features&avx512pf == avx512pf
if got != expected {
t.Fatalf("AVX512PF: expected %v, got %v", expected, got)
}
t.Log("AVX512PF Support:", got)
}
// TestAVX512ER tests AVX512ER() function (AVX-512 Exponential and Reciprocal Instructions)
func TestAVX512ER(t *testing.T) {
got := cpu.avx512er()
expected := cpu.features&avx512er == avx512er
if got != expected {
t.Fatalf("AVX512ER: expected %v, got %v", expected, got)
}
t.Log("AVX512ER Support:", got)
}
// TestAVX512CD tests AVX512CD() function (AVX-512 Conflict Detection Instructions)
func TestAVX512CD(t *testing.T) {
got := cpu.avx512cd()
expected := cpu.features&avx512cd == avx512cd
if got != expected {
t.Fatalf("AVX512CD: expected %v, got %v", expected, got)
}
t.Log("AVX512CD Support:", got)
}
// TestAVX512BW tests AVX512BW() function (AVX-512 Byte and Word Instructions)
func TestAVX512BW(t *testing.T) {
got := cpu.avx512bw()
expected := cpu.features&avx512bw == avx512bw
if got != expected {
t.Fatalf("AVX512BW: expected %v, got %v", expected, got)
}
t.Log("AVX512BW Support:", got)
}
// TestAVX512VL tests AVX512VL() function (AVX-512 Vector Length Extensions)
func TestAVX512VL(t *testing.T) {
got := cpu.avx512vl()
expected := cpu.features&avx512vl == avx512vl
if got != expected {
t.Fatalf("AVX512VL: expected %v, got %v", expected, got)
}
t.Log("AVX512VL Support:", got)
}
// TestAVX512VBMI tests AVX512VBMI() function (AVX-512 Vector Bit Manipulation Instructions)
func TestAVX512VBMI(t *testing.T) {
got := cpu.avx512vbmi()
expected := cpu.features&avx512vbmi == avx512vbmi
if got != expected {
t.Fatalf("AVX512VBMI: expected %v, got %v", expected, got)
}
t.Log("AVX512VBMI Support:", got)
}
// TestMPX tests MPX() function (Intel MPX (Memory Protection Extensions))
func TestMPX(t *testing.T) {
got := cpu.mpx()
expected := cpu.features&mpx == mpx
if got != expected {
t.Fatalf("MPX: expected %v, got %v", expected, got)
}
t.Log("MPX Support:", got)
}
// TestERMS tests ERMS() function (Enhanced REP MOVSB/STOSB)
func TestERMS(t *testing.T) {
got := cpu.erms()
expected := cpu.features&erms == erms
if got != expected {
t.Fatalf("ERMS: expected %v, got %v", expected, got)
}
t.Log("ERMS Support:", got)
}
// TestVendor writes the detected vendor. Will be 0 if unknown
func TestVendor(t *testing.T) {
t.Log("Vendor ID:", cpu.vendorid)
}
// Intel returns true if vendor is recognized as Intel
func TestIntel(t *testing.T) {
got := cpu.intel()
expected := cpu.vendorid == intel
if got != expected {
t.Fatalf("TestIntel: expected %v, got %v", expected, got)
}
t.Log("TestIntel:", got)
}
// AMD returns true if vendor is recognized as AMD
func TestAMD(t *testing.T) {
got := cpu.amd()
expected := cpu.vendorid == amd
if got != expected {
t.Fatalf("TestAMD: expected %v, got %v", expected, got)
}
t.Log("TestAMD:", got)
}
// Transmeta returns true if vendor is recognized as Transmeta
func TestTransmeta(t *testing.T) {
got := cpu.transmeta()
expected := cpu.vendorid == transmeta
if got != expected {
t.Fatalf("TestTransmeta: expected %v, got %v", expected, got)
}
t.Log("TestTransmeta:", got)
}
// NSC returns true if vendor is recognized as National Semiconductor
func TestNSC(t *testing.T) {
got := cpu.nsc()
expected := cpu.vendorid == nsc
if got != expected {
t.Fatalf("TestNSC: expected %v, got %v", expected, got)
}
t.Log("TestNSC:", got)
}
// VIA returns true if vendor is recognized as VIA
func TestVIA(t *testing.T) {
got := cpu.via()
expected := cpu.vendorid == via
if got != expected {
t.Fatalf("TestVIA: expected %v, got %v", expected, got)
}
t.Log("TestVIA:", got)
}
// Test VM function
func TestVM(t *testing.T) {
t.Log("Vendor ID:", cpu.vm())
}
// Test RTCounter function
func TestRtCounter(t *testing.T) {
a := cpu.rtcounter()
b := cpu.rtcounter()
t.Log("CPU Counter:", a, b, b-a)
}
// Prints the value of Ia32TscAux()
func TestIa32TscAux(t *testing.T) {
ecx := cpu.ia32tscaux()
t.Logf("Ia32TscAux:0x%x\n", ecx)
if ecx != 0 {
chip := (ecx & 0xFFF000) >> 12
core := ecx & 0xFFF
t.Log("Likely chip, core:", chip, core)
}
}
func TestThreadsPerCoreNZ(t *testing.T) {
if cpu.threadspercore == 0 {
t.Fatal("threads per core is zero")
}
}
// Prints the value of LogicalCPU()
func TestLogicalCPU(t *testing.T) {
t.Log("Currently executing on cpu:", cpu.logicalcpu())
}
func TestMaxFunction(t *testing.T) {
expect := maxFunctionID()
if cpu.maxFunc != expect {
t.Fatal("Max function does not match, expected", expect, "but got", cpu.maxFunc)
}
expect = maxExtendedFunction()
if cpu.maxExFunc != expect {
t.Fatal("Max Extended function does not match, expected", expect, "but got", cpu.maxFunc)
}
}
// This example will calculate the chip/core number on Linux
// Linux encodes numa id (<<12) and core id (8bit) into TSC_AUX.
func examplecpuinfo_ia32tscaux(t *testing.T) {
ecx := cpu.ia32tscaux()
if ecx == 0 {
fmt.Println("Unknown CPU ID")
return
}
chip := (ecx & 0xFFF000) >> 12
core := ecx & 0xFFF
fmt.Println("Chip, Core:", chip, core)
}
/*
func TestPhysical(t *testing.T) {
var test16 = "CPUID 00000000: 0000000d-756e6547-6c65746e-49656e69 \nCPUID 00000001: 000206d7-03200800-1fbee3ff-bfebfbff \nCPUID 00000002: 76035a01-00f0b2ff-00000000-00ca0000 \nCPUID 00000003: 00000000-00000000-00000000-00000000 \nCPUID 00000004: 3c004121-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004122-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004143-01c0003f-000001ff-00000000 \nCPUID 00000004: 3c07c163-04c0003f-00003fff-00000006 \nCPUID 00000005: 00000040-00000040-00000003-00021120 \nCPUID 00000006: 00000075-00000002-00000009-00000000 \nCPUID 00000007: 00000000-00000000-00000000-00000000 \nCPUID 00000008: 00000000-00000000-00000000-00000000 \nCPUID 00000009: 00000001-00000000-00000000-00000000 \nCPUID 0000000a: 07300403-00000000-00000000-00000603 \nCPUID 0000000b: 00000000-00000000-00000003-00000003 \nCPUID 0000000b: 00000005-00000010-00000201-00000003 \nCPUID 0000000c: 00000000-00000000-00000000-00000000 \nCPUID 0000000d: 00000007-00000340-00000340-00000000 \nCPUID 0000000d: 00000001-00000000-00000000-00000000 \nCPUID 0000000d: 00000100-00000240-00000000-00000000 \nCPUID 80000000: 80000008-00000000-00000000-00000000 \nCPUID 80000001: 00000000-00000000-00000001-2c100800 \nCPUID 80000002: 20202020-49202020-6c65746e-20295228 \nCPUID 80000003: 6e6f6558-20295228-20555043-322d3545 \nCPUID 80000004: 20303636-20402030-30322e32-007a4847 \nCPUID 80000005: 00000000-00000000-00000000-00000000 \nCPUID 80000006: 00000000-00000000-01006040-00000000 \nCPUID 80000007: 00000000-00000000-00000000-00000100 \nCPUID 80000008: 0000302e-00000000-00000000-00000000"
restore := mockCPU([]byte(test16))
Detect()
t.Log("Name:", CPU.BrandName)
n := maxFunctionID()
t.Logf("Max Function:0x%x\n", n)
n = maxExtendedFunction()
t.Logf("Max Extended Function:0x%x\n", n)
t.Log("PhysicalCores:", CPU.PhysicalCores)
t.Log("ThreadsPerCore:", CPU.ThreadsPerCore)
t.Log("LogicalCores:", CPU.LogicalCores)
t.Log("Family", CPU.Family, "Model:", CPU.Model)
t.Log("Features:", CPU.Features)
t.Log("Cacheline bytes:", CPU.CacheLine)
t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes")
t.Log("L2 Cache:", CPU.Cache.L2, "bytes")
t.Log("L3 Cache:", CPU.Cache.L3, "bytes")
if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 {
if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore {
t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)",
CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore)
}
}
if CPU.ThreadsPerCore > 1 && !CPU.HTT() {
t.Fatalf("Hyperthreading not detected")
}
if CPU.ThreadsPerCore == 1 && CPU.HTT() {
t.Fatalf("Hyperthreading detected, but only 1 Thread per core")
}
restore()
Detect()
TestCPUID(t)
}
*/

77
vendor/github.com/klauspost/cpuid/testdata/getall.go generated vendored Normal file
View File

@@ -0,0 +1,77 @@
package main
import (
"archive/zip"
_ "bytes"
"fmt"
"golang.org/x/net/html"
"io"
"net/http"
"os"
"strings"
)
// Download all CPUID dumps from http://users.atw.hu/instlatx64/
func main() {
resp, err := http.Get("http://users.atw.hu/instlatx64/?")
if err != nil {
panic(err)
}
node, err := html.Parse(resp.Body)
if err != nil {
panic(err)
}
file, err := os.Create("cpuid_data.zip")
if err != nil {
panic(err)
}
defer file.Close()
gw := zip.NewWriter(file)
var f func(*html.Node)
f = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
err := ParseURL(a.Val, gw)
if err != nil {
panic(err)
}
break
}
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
f(c)
}
}
f(node)
err = gw.Close()
if err != nil {
panic(err)
}
}
func ParseURL(s string, gw *zip.Writer) error {
if strings.Contains(s, "CPUID.txt") {
fmt.Println("Adding", "http://users.atw.hu/instlatx64/"+s)
resp, err := http.Get("http://users.atw.hu/instlatx64/" + s)
if err != nil {
fmt.Println("Error getting", s, ":", err)
// Return early so we don't dereference a nil response below.
return err
}
defer resp.Body.Close()
w, err := gw.Create(s)
if err != nil {
return err
}
_, err = io.Copy(w, resp.Body)
if err != nil {
return err
}
}
return nil
}

28
vendor/github.com/klauspost/crc32/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,28 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2015 Klaus Post
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

87
vendor/github.com/klauspost/crc32/README.md generated vendored Normal file
View File

@@ -0,0 +1,87 @@
# crc32
CRC32 hash with x64 optimizations
This package is a drop-in replacement for the standard library `hash/crc32` package that features SSE 4.2 optimizations on x64 platforms, for a roughly 10x speedup.
[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32)
# usage
Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer.
Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go.
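A minimal sketch of the drop-in usage, relying on the `ChecksumIEEE` and `NewIEEE` entry points included later in this commit (the input data is illustrative):
```
package main

import (
	"fmt"

	crc32 "github.com/klauspost/crc32" // drop-in for "hash/crc32"
)

func main() {
	data := []byte("hello, world")

	// One-shot checksum with the IEEE polynomial.
	fmt.Printf("IEEE: %08x\n", crc32.ChecksumIEEE(data))

	// Streaming use via hash.Hash32, same API as the standard library.
	h := crc32.NewIEEE()
	h.Write(data)
	fmt.Printf("IEEE (streaming): %08x\n", h.Sum32())
}
```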
# changes
* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match.
* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable.
# performance
For *Go 1.7*, performance is equivalent to the standard library, so if you use this package with Go 1.7 you can switch back.
For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction:
```
benchmark old ns/op new ns/op delta
BenchmarkCrc32KB 99955 10258 -89.74%
benchmark old MB/s new MB/s speedup
BenchmarkCrc32KB 327.83 3194.20 9.74x
```
For other tables and "CLMUL" capable machines the performance is the same as the standard library.
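As a sketch, other polynomials such as Castagnoli are used through `MakeTable`, exactly as with the standard library, and transparently take the SSE 4.2 path when the CPU supports it (the input data is illustrative):
```
package main

import (
	"fmt"

	crc32 "github.com/klauspost/crc32"
)

func main() {
	// Build (or fetch the cached) table for the Castagnoli polynomial.
	tab := crc32.MakeTable(crc32.Castagnoli)

	// Compute a CRC-32C checksum over a byte slice.
	sum := crc32.Checksum([]byte("hello, world"), tab)
	fmt.Printf("CRC-32C: %08x\n", sum)
}
```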
Here are some detailed benchmarks, comparing to go 1.5 standard library with and without assembler enabled.
```
Std: Standard Go 1.5 library
Crc: Indicates IEEE type CRC.
40B: Size of each slice encoded.
NoAsm: Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine).
Castagnoli: Castagnoli CRC type.
BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s
BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8)
BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8)
BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s
BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8)
BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm)
BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8)
BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8)
BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm)
BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8)
BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8)
BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm)
BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s
BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm)
BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8)
BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm)
BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s
BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm)
BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8)
BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm)
BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s
BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm)
BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8)
BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm)
BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s
BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm)
BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8)
BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm)
```
The IEEE assembler optimizations have been submitted and will be part of the Go 1.6 standard library.
However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7.
# license
Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions.

207
vendor/github.com/klauspost/crc32/crc32.go generated vendored Normal file
View File

@@ -0,0 +1,207 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for
// information.
//
// Polynomials are represented in LSB-first form also known as reversed representation.
//
// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
// for information.
package crc32
import (
"hash"
"sync"
)
// The size of a CRC-32 checksum in bytes.
const Size = 4
// Predefined polynomials.
const (
// IEEE is by far and away the most common CRC-32 polynomial.
// Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
IEEE = 0xedb88320
// Castagnoli's polynomial, used in iSCSI.
// Has better error detection characteristics than IEEE.
// http://dx.doi.org/10.1109/26.231911
Castagnoli = 0x82f63b78
// Koopman's polynomial.
// Also has better error detection characteristics than IEEE.
// http://dx.doi.org/10.1109/DSN.2002.1028931
Koopman = 0xeb31d82e
)
// Table is a 256-word table representing the polynomial for efficient processing.
type Table [256]uint32
// This file makes use of functions implemented in architecture-specific files.
// The interface that they implement is as follows:
//
// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE
// // algorithm is available.
// archAvailableIEEE() bool
//
// // archInitIEEE initializes the architecture-specific CRC32-IEEE algorithm.
// // It can only be called if archAvailableIEEE() returns true.
// archInitIEEE()
//
// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if
// // archInitIEEE() was previously called.
// archUpdateIEEE(crc uint32, p []byte) uint32
//
// // archAvailableCastagnoli reports whether an architecture-specific
// // CRC32-C algorithm is available.
// archAvailableCastagnoli() bool
//
// // archInitCastagnoli initializes the architecture-specific CRC32-C
// // algorithm. It can only be called if archAvailableCastagnoli() returns
// // true.
// archInitCastagnoli()
//
// // archUpdateCastagnoli updates the given CRC32-C. It can only be called
// // if archInitCastagnoli() was previously called.
// archUpdateCastagnoli(crc uint32, p []byte) uint32
// castagnoliTable points to a lazily initialized Table for the Castagnoli
// polynomial. MakeTable will always return this value when asked to make a
// Castagnoli table so we can compare against it to find when the caller is
// using this polynomial.
var castagnoliTable *Table
var castagnoliTable8 *slicing8Table
var castagnoliArchImpl bool
var updateCastagnoli func(crc uint32, p []byte) uint32
var castagnoliOnce sync.Once
func castagnoliInit() {
castagnoliTable = simpleMakeTable(Castagnoli)
castagnoliArchImpl = archAvailableCastagnoli()
if castagnoliArchImpl {
archInitCastagnoli()
updateCastagnoli = archUpdateCastagnoli
} else {
// Initialize the slicing-by-8 table.
castagnoliTable8 = slicingMakeTable(Castagnoli)
updateCastagnoli = func(crc uint32, p []byte) uint32 {
return slicingUpdate(crc, castagnoliTable8, p)
}
}
}
// IEEETable is the table for the IEEE polynomial.
var IEEETable = simpleMakeTable(IEEE)
// ieeeTable8 is the slicing8Table for IEEE
var ieeeTable8 *slicing8Table
var ieeeArchImpl bool
var updateIEEE func(crc uint32, p []byte) uint32
var ieeeOnce sync.Once
func ieeeInit() {
ieeeArchImpl = archAvailableIEEE()
if ieeeArchImpl {
archInitIEEE()
updateIEEE = archUpdateIEEE
} else {
// Initialize the slicing-by-8 table.
ieeeTable8 = slicingMakeTable(IEEE)
updateIEEE = func(crc uint32, p []byte) uint32 {
return slicingUpdate(crc, ieeeTable8, p)
}
}
}
// MakeTable returns a Table constructed from the specified polynomial.
// The contents of this Table must not be modified.
func MakeTable(poly uint32) *Table {
switch poly {
case IEEE:
ieeeOnce.Do(ieeeInit)
return IEEETable
case Castagnoli:
castagnoliOnce.Do(castagnoliInit)
return castagnoliTable
}
return simpleMakeTable(poly)
}
// digest represents the partial evaluation of a checksum.
type digest struct {
crc uint32
tab *Table
}
// New creates a new hash.Hash32 computing the CRC-32 checksum
// using the polynomial represented by the Table.
// Its Sum method will lay the value out in big-endian byte order.
func New(tab *Table) hash.Hash32 {
if tab == IEEETable {
ieeeOnce.Do(ieeeInit)
}
return &digest{0, tab}
}
// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum
// using the IEEE polynomial.
// Its Sum method will lay the value out in big-endian byte order.
func NewIEEE() hash.Hash32 { return New(IEEETable) }
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return 1 }
func (d *digest) Reset() { d.crc = 0 }
// Update returns the result of adding the bytes in p to the crc.
func Update(crc uint32, tab *Table, p []byte) uint32 {
switch tab {
case castagnoliTable:
return updateCastagnoli(crc, p)
case IEEETable:
// Unfortunately, because IEEETable is exported, IEEE may be used without a
// call to MakeTable. We have to make sure it gets initialized in that case.
ieeeOnce.Do(ieeeInit)
return updateIEEE(crc, p)
default:
return simpleUpdate(crc, tab, p)
}
}
func (d *digest) Write(p []byte) (n int, err error) {
switch d.tab {
case castagnoliTable:
d.crc = updateCastagnoli(d.crc, p)
case IEEETable:
// We only create digest objects through New() which takes care of
// initialization in this case.
d.crc = updateIEEE(d.crc, p)
default:
d.crc = simpleUpdate(d.crc, d.tab, p)
}
return len(p), nil
}
func (d *digest) Sum32() uint32 { return d.crc }
func (d *digest) Sum(in []byte) []byte {
s := d.Sum32()
return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// Checksum returns the CRC-32 checksum of data
// using the polynomial represented by the Table.
func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }
// ChecksumIEEE returns the CRC-32 checksum of data
// using the IEEE polynomial.
func ChecksumIEEE(data []byte) uint32 {
ieeeOnce.Do(ieeeInit)
return updateIEEE(0, data)
}

230
vendor/github.com/klauspost/crc32/crc32_amd64.go generated vendored Normal file
View File

@@ -0,0 +1,230 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine,!gccgo
// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a
// description of the interface that each architecture-specific file
// implements.
package crc32
import "unsafe"
// This file contains the code to call the SSE 4.2 version of the Castagnoli
// and IEEE CRC.
// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use
// CPUID to test for SSE 4.1, 4.2 and CLMUL support.
func haveSSE41() bool
func haveSSE42() bool
func haveCLMUL() bool
// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32
// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42Triple(
crcA, crcB, crcC uint32,
a, b, c []byte,
rounds uint32,
) (retA uint32, retB uint32, retC uint32)
// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ
// instruction as well as SSE 4.1.
//go:noescape
func ieeeCLMUL(crc uint32, p []byte) uint32
var sse42 = haveSSE42()
var useFastIEEE = haveCLMUL() && haveSSE41()
const castagnoliK1 = 168
const castagnoliK2 = 1344
type sse42Table [4]Table
var castagnoliSSE42TableK1 *sse42Table
var castagnoliSSE42TableK2 *sse42Table
func archAvailableCastagnoli() bool {
return sse42
}
func archInitCastagnoli() {
if !sse42 {
panic("arch-specific Castagnoli not available")
}
castagnoliSSE42TableK1 = new(sse42Table)
castagnoliSSE42TableK2 = new(sse42Table)
// See description in updateCastagnoli.
// t[0][i] = CRC(i000, O)
// t[1][i] = CRC(0i00, O)
// t[2][i] = CRC(00i0, O)
// t[3][i] = CRC(000i, O)
// where O is a sequence of K zeros.
var tmp [castagnoliK2]byte
for b := 0; b < 4; b++ {
for i := 0; i < 256; i++ {
val := uint32(i) << uint32(b*8)
castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1])
castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:])
}
}
}
// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the
// table given) with the given initial crc value. This corresponds to
// CRC(crc, O) in the description in updateCastagnoli.
func castagnoliShift(table *sse42Table, crc uint32) uint32 {
return table[3][crc>>24] ^
table[2][(crc>>16)&0xFF] ^
table[1][(crc>>8)&0xFF] ^
table[0][crc&0xFF]
}
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
if !sse42 {
panic("not available")
}
// This method is inspired from the algorithm in Intel's white paper:
// "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction"
// The same strategy of splitting the buffer in three is used but the
// combining calculation is different; the complete derivation is explained
// below.
//
// -- The basic idea --
//
// The CRC32 instruction (available in SSE4.2) can process 8 bytes at a
// time. In recent Intel architectures the instruction takes 3 cycles;
// however the processor can pipeline up to three instructions if they
// don't depend on each other.
//
// Roughly this means that we can process three buffers in about the same
// time we can process one buffer.
//
// The idea is then to split the buffer in three, CRC the three pieces
// separately and then combine the results.
//
// Combining the results requires precomputed tables, so we must choose a
// fixed buffer length to optimize. The longer the length, the faster; but
// only buffers longer than this length will use the optimization. We choose
// two cutoffs and compute tables for both:
// - one around 512: 168*3=504
// - one around 4KB: 1344*3=4032
//
// -- The nitty gritty --
//
// Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with
// initial non-inverted CRC I). This function has the following properties:
// (a) CRC(I, AB) = CRC(CRC(I, A), B)
// (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B)
//
// Say we want to compute CRC(I, ABC) where A, B, C are three sequences of
// K bytes each, where K is a fixed constant. Let O be the sequence of K zero
// bytes.
//
// CRC(I, ABC) = CRC(I, ABO xor C)
// = CRC(I, ABO) xor CRC(0, C)
// = CRC(CRC(I, AB), O) xor CRC(0, C)
// = CRC(CRC(I, AO xor B), O) xor CRC(0, C)
// = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C)
// = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C)
//
// The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B),
// and CRC(0, C) efficiently. We just need to find a way to quickly compute
// CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these
// values; since we can't have a 32-bit table, we break it up into four
// 8-bit tables:
//
// CRC(uvwx, O) = CRC(u000, O) xor
// CRC(0v00, O) xor
// CRC(00w0, O) xor
// CRC(000x, O)
//
// We can compute tables corresponding to the four terms for all 8-bit
// values.
crc = ^crc
// If a buffer is long enough to use the optimization, process the first few
// bytes to align the buffer to an 8 byte boundary (if necessary).
if len(p) >= castagnoliK1*3 {
delta := int(uintptr(unsafe.Pointer(&p[0])) & 7)
if delta != 0 {
delta = 8 - delta
crc = castagnoliSSE42(crc, p[:delta])
p = p[delta:]
}
}
// Process 3*K2 at a time.
for len(p) >= castagnoliK2*3 {
// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
crcA, crcB, crcC := castagnoliSSE42Triple(
crc, 0, 0,
p, p[castagnoliK2:], p[castagnoliK2*2:],
castagnoliK2/24)
// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB
// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC
p = p[castagnoliK2*3:]
}
// Process 3*K1 at a time.
for len(p) >= castagnoliK1*3 {
// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
crcA, crcB, crcC := castagnoliSSE42Triple(
crc, 0, 0,
p, p[castagnoliK1:], p[castagnoliK1*2:],
castagnoliK1/24)
// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB
// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC
p = p[castagnoliK1*3:]
}
// Use the simple implementation for what's left.
crc = castagnoliSSE42(crc, p)
return ^crc
}
func archAvailableIEEE() bool {
return useFastIEEE
}
var archIeeeTable8 *slicing8Table
func archInitIEEE() {
if !useFastIEEE {
panic("not available")
}
// We still use slicing-by-8 for small buffers.
archIeeeTable8 = slicingMakeTable(IEEE)
}
func archUpdateIEEE(crc uint32, p []byte) uint32 {
if !useFastIEEE {
panic("not available")
}
if len(p) >= 64 {
left := len(p) & 15
do := len(p) - left
crc = ^ieeeCLMUL(^crc, p[:do])
p = p[do:]
}
if len(p) == 0 {
return crc
}
return slicingUpdate(crc, archIeeeTable8, p)
}

319
vendor/github.com/klauspost/crc32/crc32_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,319 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
#define NOSPLIT 4
#define RODATA 8
// castagnoliSSE42 updates the (non-inverted) crc with the given buffer.
//
// func castagnoliSSE42(crc uint32, p []byte) uint32
TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
MOVL crc+0(FP), AX // CRC value
MOVQ p+8(FP), SI // data pointer
MOVQ p_len+16(FP), CX // len(p)
// If there are fewer than 8 bytes to process, skip alignment.
CMPQ CX, $8
JL less_than_8
MOVQ SI, BX
ANDQ $7, BX
JZ aligned
// Process the first few bytes to 8-byte align the input.
// BX = 8 - BX. We need to process this many bytes to align.
SUBQ $1, BX
XORQ $7, BX
BTQ $0, BX
JNC align_2
CRC32B (SI), AX
DECQ CX
INCQ SI
align_2:
BTQ $1, BX
JNC align_4
// CRC32W (SI), AX
BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
SUBQ $2, CX
ADDQ $2, SI
align_4:
BTQ $2, BX
JNC aligned
// CRC32L (SI), AX
BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
SUBQ $4, CX
ADDQ $4, SI
aligned:
// The input is now 8-byte aligned and we can process 8-byte chunks.
CMPQ CX, $8
JL less_than_8
CRC32Q (SI), AX
ADDQ $8, SI
SUBQ $8, CX
JMP aligned
less_than_8:
// We may have some bytes left over; process 4 bytes, then 2, then 1.
BTQ $2, CX
JNC less_than_4
// CRC32L (SI), AX
BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
ADDQ $4, SI
less_than_4:
BTQ $1, CX
JNC less_than_2
// CRC32W (SI), AX
BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
ADDQ $2, SI
less_than_2:
BTQ $0, CX
JNC done
CRC32B (SI), AX
done:
MOVL AX, ret+32(FP)
RET
// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds)
// bytes from each buffer.
//
// func castagnoliSSE42Triple(
// crc1, crc2, crc3 uint32,
// a, b, c []byte,
// rounds uint32,
// ) (retA uint32, retB uint32, retC uint32)
TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0
MOVL crcA+0(FP), AX
MOVL crcB+4(FP), CX
MOVL crcC+8(FP), DX
MOVQ a+16(FP), R8 // data pointer
MOVQ b+40(FP), R9 // data pointer
MOVQ c+64(FP), R10 // data pointer
MOVL rounds+88(FP), R11
loop:
CRC32Q (R8), AX
CRC32Q (R9), CX
CRC32Q (R10), DX
CRC32Q 8(R8), AX
CRC32Q 8(R9), CX
CRC32Q 8(R10), DX
CRC32Q 16(R8), AX
CRC32Q 16(R9), CX
CRC32Q 16(R10), DX
ADDQ $24, R8
ADDQ $24, R9
ADDQ $24, R10
DECQ R11
JNZ loop
MOVL AX, retA+96(FP)
MOVL CX, retB+100(FP)
MOVL DX, retC+104(FP)
RET
// func haveSSE42() bool
TEXT ·haveSSE42(SB), NOSPLIT, $0
XORQ AX, AX
INCL AX
CPUID
SHRQ $20, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET
// func haveCLMUL() bool
TEXT ·haveCLMUL(SB), NOSPLIT, $0
XORQ AX, AX
INCL AX
CPUID
SHRQ $1, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET
// func haveSSE41() bool
TEXT ·haveSSE41(SB), NOSPLIT, $0
XORQ AX, AX
INCL AX
CPUID
SHRQ $19, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET
// CRC32 polynomial data
//
// These constants are lifted from the
// Linux kernel, since they avoid the costly
// PSHUFB 16 byte reversal proposed in the
// original Intel paper.
DATA r2r1kp<>+0(SB)/8, $0x154442bd4
DATA r2r1kp<>+8(SB)/8, $0x1c6e41596
DATA r4r3kp<>+0(SB)/8, $0x1751997d0
DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e
DATA rupolykp<>+0(SB)/8, $0x1db710641
DATA rupolykp<>+8(SB)/8, $0x1f7011641
DATA r5kp<>+0(SB)/8, $0x163cd6124
GLOBL r2r1kp<>(SB), RODATA, $16
GLOBL r4r3kp<>(SB), RODATA, $16
GLOBL rupolykp<>(SB), RODATA, $16
GLOBL r5kp<>(SB), RODATA, $8
// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
// len(p) must be at least 64, and must be a multiple of 16.
// func ieeeCLMUL(crc uint32, p []byte) uint32
TEXT ·ieeeCLMUL(SB), NOSPLIT, $0
MOVL crc+0(FP), X0 // Initial CRC value
MOVQ p+8(FP), SI // data pointer
MOVQ p_len+16(FP), CX // len(p)
MOVOU (SI), X1
MOVOU 16(SI), X2
MOVOU 32(SI), X3
MOVOU 48(SI), X4
PXOR X0, X1
ADDQ $64, SI // buf+=64
SUBQ $64, CX // len-=64
CMPQ CX, $64 // Less than 64 bytes left
JB remain64
MOVOA r2r1kp<>+0(SB), X0
loopback64:
MOVOA X1, X5
MOVOA X2, X6
MOVOA X3, X7
MOVOA X4, X8
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0, X0, X2
PCLMULQDQ $0, X0, X3
PCLMULQDQ $0, X0, X4
// Load next early
MOVOU (SI), X11
MOVOU 16(SI), X12
MOVOU 32(SI), X13
MOVOU 48(SI), X14
PCLMULQDQ $0x11, X0, X5
PCLMULQDQ $0x11, X0, X6
PCLMULQDQ $0x11, X0, X7
PCLMULQDQ $0x11, X0, X8
PXOR X5, X1
PXOR X6, X2
PXOR X7, X3
PXOR X8, X4
PXOR X11, X1
PXOR X12, X2
PXOR X13, X3
PXOR X14, X4
ADDQ $0x40, DI
ADDQ $64, SI // buf+=64
SUBQ $64, CX // len-=64
CMPQ CX, $64 // Less than 64 bytes left?
JGE loopback64
// Fold result into a single register (X1)
remain64:
MOVOA r4r3kp<>+0(SB), X0
MOVOA X1, X5
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0x11, X0, X5
PXOR X5, X1
PXOR X2, X1
MOVOA X1, X5
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0x11, X0, X5
PXOR X5, X1
PXOR X3, X1
MOVOA X1, X5
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0x11, X0, X5
PXOR X5, X1
PXOR X4, X1
// If there is less than 16 bytes left we are done
CMPQ CX, $16
JB finish
// Encode 16 bytes
remain16:
MOVOU (SI), X10
MOVOA X1, X5
PCLMULQDQ $0, X0, X1
PCLMULQDQ $0x11, X0, X5
PXOR X5, X1
PXOR X10, X1
SUBQ $16, CX
ADDQ $16, SI
CMPQ CX, $16
JGE remain16
finish:
// Fold final result into 32 bits and return it
PCMPEQB X3, X3
PCLMULQDQ $1, X1, X0
PSRLDQ $8, X1
PXOR X0, X1
MOVOA X1, X2
MOVQ r5kp<>+0(SB), X0
// Creates 32 bit mask. Note that we don't care about upper half.
PSRLQ $32, X3
PSRLDQ $4, X2
PAND X3, X1
PCLMULQDQ $0, X0, X1
PXOR X2, X1
MOVOA rupolykp<>+0(SB), X0
MOVOA X1, X2
PAND X3, X1
PCLMULQDQ $0x10, X0, X1
PAND X3, X1
PCLMULQDQ $0, X0, X1
PXOR X2, X1
// PEXTRD $1, X1, AX (SSE 4.1)
BYTE $0x66; BYTE $0x0f; BYTE $0x3a
BYTE $0x16; BYTE $0xc8; BYTE $0x01
MOVL AX, ret+32(FP)
RET

43
vendor/github.com/klauspost/crc32/crc32_amd64p32.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine,!gccgo
package crc32
// This file contains the code to call the SSE 4.2 version of the Castagnoli
// CRC.
// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2
// support.
func haveSSE42() bool
// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32
var sse42 = haveSSE42()
func archAvailableCastagnoli() bool {
return sse42
}
func archInitCastagnoli() {
if !sse42 {
panic("not available")
}
// No initialization necessary.
}
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
if !sse42 {
panic("not available")
}
return castagnoliSSE42(crc, p)
}
func archAvailableIEEE() bool { return false }
func archInitIEEE() { panic("not available") }
func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }

67
vendor/github.com/klauspost/crc32/crc32_amd64p32.s generated vendored Normal file
View File

@@ -0,0 +1,67 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gc
#define NOSPLIT 4
#define RODATA 8
// func castagnoliSSE42(crc uint32, p []byte) uint32
TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
MOVL crc+0(FP), AX // CRC value
MOVL p+4(FP), SI // data pointer
MOVL p_len+8(FP), CX // len(p)
NOTL AX
// If there's less than 8 bytes to process, we do it byte-by-byte.
CMPQ CX, $8
JL cleanup
// Process individual bytes until the input is 8-byte aligned.
startup:
MOVQ SI, BX
ANDQ $7, BX
JZ aligned
CRC32B (SI), AX
DECQ CX
INCQ SI
JMP startup
aligned:
// The input is now 8-byte aligned and we can process 8-byte chunks.
CMPQ CX, $8
JL cleanup
CRC32Q (SI), AX
ADDQ $8, SI
SUBQ $8, CX
JMP aligned
cleanup:
// We may have some bytes left over that we process one at a time.
CMPQ CX, $0
JE done
CRC32B (SI), AX
INCQ SI
DECQ CX
JMP cleanup
done:
NOTL AX
MOVL AX, ret+16(FP)
RET
// func haveSSE42() bool
TEXT ·haveSSE42(SB), NOSPLIT, $0
XORQ AX, AX
INCL AX
CPUID
SHRQ $20, CX
ANDQ $1, CX
MOVB CX, ret+0(FP)
RET

89
vendor/github.com/klauspost/crc32/crc32_generic.go generated vendored Normal file
View File

@@ -0,0 +1,89 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains CRC32 algorithms that are not specific to any architecture
// and don't use hardware acceleration.
//
// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table.
//
// The slicing-by-8 algorithm is a faster implementation that uses a bigger
// table (8*256*4 bytes).
package crc32
// simpleMakeTable allocates and constructs a Table for the specified
// polynomial. The table is suitable for use with the simple algorithm
// (simpleUpdate).
func simpleMakeTable(poly uint32) *Table {
t := new(Table)
simplePopulateTable(poly, t)
return t
}
// simplePopulateTable constructs a Table for the specified polynomial, suitable
// for use with simpleUpdate.
func simplePopulateTable(poly uint32, t *Table) {
for i := 0; i < 256; i++ {
crc := uint32(i)
for j := 0; j < 8; j++ {
if crc&1 == 1 {
crc = (crc >> 1) ^ poly
} else {
crc >>= 1
}
}
t[i] = crc
}
}
// simpleUpdate uses the simple algorithm to update the CRC, given a table that
// was previously computed using simpleMakeTable.
func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 {
crc = ^crc
for _, v := range p {
crc = tab[byte(crc)^v] ^ (crc >> 8)
}
return ^crc
}
// Use slicing-by-8 when payload >= this value.
const slicing8Cutoff = 16
// slicing8Table is an array of 8 Tables, used by the slicing-by-8 algorithm.
type slicing8Table [8]Table
// slicingMakeTable constructs a slicing8Table for the specified polynomial. The
// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate).
func slicingMakeTable(poly uint32) *slicing8Table {
t := new(slicing8Table)
simplePopulateTable(poly, &t[0])
for i := 0; i < 256; i++ {
crc := t[0][i]
for j := 1; j < 8; j++ {
crc = t[0][crc&0xFF] ^ (crc >> 8)
t[j][i] = crc
}
}
return t
}
// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a
// table that was previously computed using slicingMakeTable.
func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 {
if len(p) >= slicing8Cutoff {
crc = ^crc
for len(p) > 8 {
crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^
tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^
tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF]
p = p[8:]
}
crc = ^crc
}
if len(p) == 0 {
return crc
}
return simpleUpdate(crc, &tab[0], p)
}

15
vendor/github.com/klauspost/crc32/crc32_otherarch.go generated vendored Normal file
View File

@@ -0,0 +1,15 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!amd64p32,!s390x
package crc32
func archAvailableIEEE() bool { return false }
func archInitIEEE() { panic("not available") }
func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }
func archAvailableCastagnoli() bool { return false }
func archInitCastagnoli() { panic("not available") }
func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") }

91
vendor/github.com/klauspost/crc32/crc32_s390x.go generated vendored Normal file
View File

@@ -0,0 +1,91 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x
package crc32
const (
vxMinLen = 64
vxAlignMask = 15 // align to 16 bytes
)
// hasVectorFacility reports whether the machine has the z/Architecture
// vector facility installed and enabled.
func hasVectorFacility() bool
var hasVX = hasVectorFacility()
// vectorizedCastagnoli implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//go:noescape
func vectorizedCastagnoli(crc uint32, p []byte) uint32
// vectorizedIEEE implements CRC32 using vector instructions.
// It is defined in crc32_s390x.s.
//go:noescape
func vectorizedIEEE(crc uint32, p []byte) uint32
func archAvailableCastagnoli() bool {
return hasVX
}
var archCastagnoliTable8 *slicing8Table
func archInitCastagnoli() {
if !hasVX {
panic("not available")
}
// We still use slicing-by-8 for small buffers.
archCastagnoliTable8 = slicingMakeTable(Castagnoli)
}
// archUpdateCastagnoli calculates the checksum of p using
// vectorizedCastagnoli.
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
if !hasVX {
panic("not available")
}
// Use vectorized function if data length is above threshold.
if len(p) >= vxMinLen {
aligned := len(p) & ^vxAlignMask
crc = vectorizedCastagnoli(crc, p[:aligned])
p = p[aligned:]
}
if len(p) == 0 {
return crc
}
return slicingUpdate(crc, archCastagnoliTable8, p)
}
func archAvailableIEEE() bool {
return hasVX
}
var archIeeeTable8 *slicing8Table
func archInitIEEE() {
if !hasVX {
panic("not available")
}
// We still use slicing-by-8 for small buffers.
archIeeeTable8 = slicingMakeTable(IEEE)
}
// archUpdateIEEE calculates the checksum of p using vectorizedIEEE.
func archUpdateIEEE(crc uint32, p []byte) uint32 {
if !hasVX {
panic("not available")
}
// Use vectorized function if data length is above threshold.
if len(p) >= vxMinLen {
aligned := len(p) & ^vxAlignMask
crc = vectorizedIEEE(crc, p[:aligned])
p = p[aligned:]
}
if len(p) == 0 {
return crc
}
return slicingUpdate(crc, archIeeeTable8, p)
}

249
vendor/github.com/klauspost/crc32/crc32_s390x.s generated vendored Normal file
View File

@@ -0,0 +1,249 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x
#include "textflag.h"
// Vector register range containing CRC-32 constants
#define CONST_PERM_LE2BE V9
#define CONST_R2R1 V10
#define CONST_R4R3 V11
#define CONST_R5 V12
#define CONST_RU_POLY V13
#define CONST_CRC_POLY V14
// The CRC-32 constant block contains reduction constants to fold and
// process particular chunks of the input data stream in parallel.
//
// Note that the constant definitions below are extended in order to compute
// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
// The rightmost doubleword can be 0 to prevent contribution to the result or
// can be multiplied by 1 to perform an XOR without the need for a separate
// VECTOR EXCLUSIVE OR instruction.
//
// The polynomials used are bit-reflected:
//
// IEEE: P'(x) = 0x0edb88320
// Castagnoli: P'(x) = 0x082f63b78
// IEEE polynomial constants
DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
DATA ·crcleconskp+8(SB)/8, $0x0706050403020100
DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2
DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1
DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4
DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3
DATA ·crcleconskp+48(SB)/8, $0x0000000000000000
DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5
DATA ·crcleconskp+64(SB)/8, $0x0000000000000000
DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u'
DATA ·crcleconskp+80(SB)/8, $0x0000000000000000
DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1
GLOBL ·crcleconskp(SB), RODATA, $144
// Castagnoli polynomial constants
DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
DATA ·crccleconskp+8(SB)/8, $0x0706050403020100
DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2
DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1
DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4
DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3
DATA ·crccleconskp+48(SB)/8, $0x0000000000000000
DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5
DATA ·crccleconskp+64(SB)/8, $0x0000000000000000
DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u'
DATA ·crccleconskp+80(SB)/8, $0x0000000000000000
DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1
GLOBL ·crccleconskp(SB), RODATA, $144
// func hasVectorFacility() bool
TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
MOVD $x-24(SP), R1
XC $24, 0(R1), 0(R1) // clear the storage
MOVD $2, R0 // R0 is the number of double words stored -1
WORD $0xB2B01000 // STFLE 0(R1)
XOR R0, R0 // reset the value of R0
MOVBZ z-8(SP), R1
AND $0x40, R1
BEQ novector
vectorinstalled:
// check if the vector instruction has been enabled
VLEIB $0, $0xF, V16
VLGVB $0, V16, R1
CMPBNE R1, $0xF, novector
MOVB $1, ret+0(FP) // have vx
RET
novector:
MOVB $0, ret+0(FP) // no vx
RET
// The CRC-32 function(s) use these calling conventions:
//
// Parameters:
//
// R2: Initial CRC value, typically ~0; and final CRC (return) value.
// R3: Input buffer pointer, performance might be improved if the
// buffer is on a doubleword boundary.
// R4: Length of the buffer, must be 64 bytes or greater.
//
// Register usage:
//
// R5: CRC-32 constant pool base pointer.
// V0: Initial CRC value and intermediate constants and results.
// V1..V4: Data for CRC computation.
// V5..V8: Next data chunks that are fetched from the input buffer.
//
// V9..V14: CRC-32 constants.
// func vectorizedIEEE(crc uint32, p []byte) uint32
TEXT ·vectorizedIEEE(SB), NOSPLIT, $0
MOVWZ crc+0(FP), R2 // R2 stores the CRC value
MOVD p+8(FP), R3 // data pointer
MOVD p_len+16(FP), R4 // len(p)
MOVD $·crcleconskp(SB), R5
BR vectorizedBody<>(SB)
// func vectorizedCastagnoli(crc uint32, p []byte) uint32
TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0
MOVWZ crc+0(FP), R2 // R2 stores the CRC value
MOVD p+8(FP), R3 // data pointer
MOVD p_len+16(FP), R4 // len(p)
// R5: crc-32 constant pool base pointer, constant is used to reduce crc
MOVD $·crccleconskp(SB), R5
BR vectorizedBody<>(SB)
TEXT vectorizedBody<>(SB), NOSPLIT, $0
XOR $0xffffffff, R2 // NOTW R2
VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY
// Load the initial CRC value into the rightmost word of V0
VZERO V0
VLVGF $3, R2, V0
// Crash if the input size is less than 64-bytes.
CMP R4, $64
BLT crash
// Load a 64-byte data chunk and XOR with CRC
VLM 0(R3), V1, V4 // 64-bytes into V1..V4
// Reflect the data if the CRC operation is in the bit-reflected domain
VPERM V1, V1, CONST_PERM_LE2BE, V1
VPERM V2, V2, CONST_PERM_LE2BE, V2
VPERM V3, V3, CONST_PERM_LE2BE, V3
VPERM V4, V4, CONST_PERM_LE2BE, V4
VX V0, V1, V1 // V1 ^= CRC
ADD $64, R3 // BUF = BUF + 64
ADD $(-64), R4
// Check remaining buffer size and jump to proper folding method
CMP R4, $64
BLT less_than_64bytes
fold_64bytes_loop:
// Load the next 64-byte data chunk into V5 to V8
VLM 0(R3), V5, V8
VPERM V5, V5, CONST_PERM_LE2BE, V5
VPERM V6, V6, CONST_PERM_LE2BE, V6
VPERM V7, V7, CONST_PERM_LE2BE, V7
VPERM V8, V8, CONST_PERM_LE2BE, V8
// Perform a GF(2) multiplication of the doublewords in V1 with
// the reduction constants in V0. The intermediate result is
// then folded (accumulated) with the next data chunk in V5 and
// stored in V1. Repeat this step for the register contents
// in V2, V3, and V4 respectively.
VGFMAG CONST_R2R1, V1, V5, V1
VGFMAG CONST_R2R1, V2, V6, V2
VGFMAG CONST_R2R1, V3, V7, V3
VGFMAG CONST_R2R1, V4, V8, V4
// Adjust buffer pointer and length for next loop
ADD $64, R3 // BUF = BUF + 64
ADD $(-64), R4 // LEN = LEN - 64
CMP R4, $64
BGE fold_64bytes_loop
less_than_64bytes:
// Fold V1 to V4 into a single 128-bit value in V1
VGFMAG CONST_R4R3, V1, V2, V1
VGFMAG CONST_R4R3, V1, V3, V1
VGFMAG CONST_R4R3, V1, V4, V1
// Check whether to continue with 64-bit folding
CMP R4, $16
BLT final_fold
fold_16bytes_loop:
VL 0(R3), V2 // Load next data chunk
VPERM V2, V2, CONST_PERM_LE2BE, V2
VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk
// Adjust buffer pointer and size for folding next data chunk
ADD $16, R3
ADD $-16, R4
// Process remaining data chunks
CMP R4, $16
BGE fold_16bytes_loop
final_fold:
VLEIB $7, $0x40, V9
VSRLB V9, CONST_R4R3, V0
VLEIG $0, $1, V0
VGFMG V0, V1, V1
VLEIB $7, $0x20, V9 // Shift by words
VSRLB V9, V1, V2 // Store remaining bits in V2
VUPLLF V1, V1 // Split rightmost doubleword
VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2
// The input values to the Barret reduction are the degree-63 polynomial
// in V1 (R(x)), degree-32 generator polynomial, and the reduction
// constant u. The Barret reduction result is the CRC value of R(x) mod
// P(x).
//
// The Barret reduction algorithm is defined as:
//
// 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
// 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
// 3. C(x) = R(x) XOR T2(x) mod x^32
//
// Note: To compensate the division by x^32, use the vector unpack
// instruction to move the leftmost word into the leftmost doubleword
// of the vector register. The rightmost doubleword is multiplied
// with zero to not contribute to the intermediate results.
// T1(x) = floor( R(x) / x^32 ) GF2MUL u
VUPLLF V1, V2
VGFMG CONST_RU_POLY, V2, V2
// Compute the GF(2) product of the CRC polynomial in VO with T1(x) in
// V2 and XOR the intermediate result, T2(x), with the value in V1.
// The final result is in the rightmost word of V2.
VUPLLF V2, V2
VGFMAG CONST_CRC_POLY, V2, V1, V2
done:
VLGVF $2, V2, R2
XOR $0xffffffff, R2 // NOTW R2
MOVWZ R2, ret + 32(FP)
RET
crash:
MOVD $0, (R0) // input size is less than 64-bytes

284
vendor/github.com/klauspost/crc32/crc32_test.go generated vendored Normal file
View File

@@ -0,0 +1,284 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package crc32
import (
crand "crypto/rand"
"hash"
mrand "math/rand"
"testing"
)
type test struct {
ieee, castagnoli uint32
in string
}
var golden = []test{
{0x0, 0x0, ""},
{0xe8b7be43, 0xc1d04330, "a"},
{0x9e83486d, 0xe2a22936, "ab"},
{0x352441c2, 0x364b3fb7, "abc"},
{0xed82cd11, 0x92c80a31, "abcd"},
{0x8587d865, 0xc450d697, "abcde"},
{0x4b8e39ef, 0x53bceff1, "abcdef"},
{0x312a6aa6, 0xe627f441, "abcdefg"},
{0xaeef2a50, 0xa9421b7, "abcdefgh"},
{0x8da988af, 0x2ddc99fc, "abcdefghi"},
{0x3981703a, 0xe6599437, "abcdefghij"},
{0x6b9cdfe7, 0xb2cc01fe, "Discard medicine more than two years old."},
{0xc90ef73f, 0xe28207f, "He who has a shady past knows that nice guys finish last."},
{0xb902341f, 0xbe93f964, "I wouldn't marry him with a ten foot pole."},
{0x42080e8, 0x9e3be0c3, "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"},
{0x154c6d11, 0xf505ef04, "The days of the digital watch are numbered. -Tom Stoppard"},
{0x4c418325, 0x85d3dc82, "Nepal premier won't resign."},
{0x33955150, 0xc5142380, "For every action there is an equal and opposite government program."},
{0x26216a4b, 0x75eb77dd, "His money is twice tainted: 'taint yours and 'taint mine."},
{0x1abbe45e, 0x91ebe9f7, "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"},
{0xc89a94f7, 0xf0b1168e, "It's a tiny change to the code and not completely disgusting. - Bob Manchek"},
{0xab3abe14, 0x572b74e2, "size: a.out: bad magic"},
{0xbab102b6, 0x8a58a6d5, "The major problem is with sendmail. -Mark Horton"},
{0x999149d7, 0x9c426c50, "Give me a rock, paper and scissors and I will move the world. CCFestoon"},
{0x6d52a33c, 0x735400a4, "If the enemy is within range, then so are you."},
{0x90631e8d, 0xbec49c95, "It's well we cannot hear the screams/That we create in others' dreams."},
{0x78309130, 0xa95a2079, "You remind me of a TV show, but that's all right: I watch it anyway."},
{0x7d0a377f, 0xde2e65c5, "C is as portable as Stonehedge!!"},
{0x8c79fd79, 0x297a88ed, "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"},
{0xa20b7167, 0x66ed1d8b, "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"},
{0x8e0bb443, 0xdcded527, "How can you write a big system without C++? -Paul Glick"},
}
// testGoldenIEEE verifies that the given function returns
// correct IEEE checksums.
func testGoldenIEEE(t *testing.T, crcFunc func(b []byte) uint32) {
for _, g := range golden {
if crc := crcFunc([]byte(g.in)); crc != g.ieee {
t.Errorf("IEEE(%s) = 0x%x want 0x%x", g.in, crc, g.ieee)
}
}
}
// testGoldenCastagnoli verifies that the given function returns
// correct Castagnoli checksums.
func testGoldenCastagnoli(t *testing.T, crcFunc func(b []byte) uint32) {
for _, g := range golden {
if crc := crcFunc([]byte(g.in)); crc != g.castagnoli {
t.Errorf("Castagnoli(%s) = 0x%x want 0x%x", g.in, crc, g.castagnoli)
}
}
}
// testCrossCheck generates random buffers of various lengths and verifies that
// the two "update" functions return the same result.
func testCrossCheck(t *testing.T, crcFunc1, crcFunc2 func(crc uint32, b []byte) uint32) {
// The AMD64 implementation has some cutoffs at lengths 168*3=504 and
// 1344*3=4032. We should make sure lengths around these values are in the
// list.
lengths := []int{0, 1, 2, 3, 4, 5, 10, 16, 50, 100, 128,
500, 501, 502, 503, 504, 505, 512, 1000, 1024, 2000,
4030, 4031, 4032, 4033, 4036, 4040, 4048, 4096, 5000, 10000}
for _, length := range lengths {
p := make([]byte, length)
_, _ = crand.Read(p)
crcInit := uint32(mrand.Int63())
crc1 := crcFunc1(crcInit, p)
crc2 := crcFunc2(crcInit, p)
if crc1 != crc2 {
t.Errorf("mismatch: 0x%x vs 0x%x (buffer length %d)", crc1, crc2, length)
}
}
}
// TestSimple tests the simple generic algorithm.
func TestSimple(t *testing.T) {
tab := simpleMakeTable(IEEE)
testGoldenIEEE(t, func(b []byte) uint32 {
return simpleUpdate(0, tab, b)
})
tab = simpleMakeTable(Castagnoli)
testGoldenCastagnoli(t, func(b []byte) uint32 {
return simpleUpdate(0, tab, b)
})
}
// TestSlicing tests the slicing-by-8 algorithm.
func TestSlicing(t *testing.T) {
tab := slicingMakeTable(IEEE)
testGoldenIEEE(t, func(b []byte) uint32 {
return slicingUpdate(0, tab, b)
})
tab = slicingMakeTable(Castagnoli)
testGoldenCastagnoli(t, func(b []byte) uint32 {
return slicingUpdate(0, tab, b)
})
// Cross-check various polys against the simple algorithm.
for _, poly := range []uint32{IEEE, Castagnoli, Koopman, 0xD5828281} {
t1 := simpleMakeTable(poly)
f1 := func(crc uint32, b []byte) uint32 {
return simpleUpdate(crc, t1, b)
}
t2 := slicingMakeTable(poly)
f2 := func(crc uint32, b []byte) uint32 {
return slicingUpdate(crc, t2, b)
}
testCrossCheck(t, f1, f2)
}
}
func TestArchIEEE(t *testing.T) {
if !archAvailableIEEE() {
t.Skip("Arch-specific IEEE not available.")
}
archInitIEEE()
slicingTable := slicingMakeTable(IEEE)
testCrossCheck(t, archUpdateIEEE, func(crc uint32, b []byte) uint32 {
return slicingUpdate(crc, slicingTable, b)
})
}
func TestArchCastagnoli(t *testing.T) {
if !archAvailableCastagnoli() {
t.Skip("Arch-specific Castagnoli not available.")
}
archInitCastagnoli()
slicingTable := slicingMakeTable(Castagnoli)
testCrossCheck(t, archUpdateCastagnoli, func(crc uint32, b []byte) uint32 {
return slicingUpdate(crc, slicingTable, b)
})
}
func TestGolden(t *testing.T) {
testGoldenIEEE(t, ChecksumIEEE)
// Some implementations have special code to deal with misaligned
// data; test that as well.
for delta := 1; delta <= 7; delta++ {
testGoldenIEEE(t, func(b []byte) uint32 {
ieee := NewIEEE()
d := delta
if d >= len(b) {
d = len(b)
}
ieee.Write(b[:d])
ieee.Write(b[d:])
return ieee.Sum32()
})
}
castagnoliTab := MakeTable(Castagnoli)
if castagnoliTab == nil {
t.Errorf("nil Castagnoli Table")
}
testGoldenCastagnoli(t, func(b []byte) uint32 {
castagnoli := New(castagnoliTab)
castagnoli.Write(b)
return castagnoli.Sum32()
})
// Some implementations have special code to deal with misaligned
// data; test that as well.
for delta := 1; delta <= 7; delta++ {
testGoldenCastagnoli(t, func(b []byte) uint32 {
castagnoli := New(castagnoliTab)
d := delta
if d >= len(b) {
d = len(b)
}
castagnoli.Write(b[:d])
castagnoli.Write(b[d:])
return castagnoli.Sum32()
})
}
}
func BenchmarkIEEECrc40B(b *testing.B) {
benchmark(b, NewIEEE(), 40, 0)
}
func BenchmarkIEEECrc1KB(b *testing.B) {
benchmark(b, NewIEEE(), 1<<10, 0)
}
func BenchmarkIEEECrc4KB(b *testing.B) {
benchmark(b, NewIEEE(), 4<<10, 0)
}
func BenchmarkIEEECrc32KB(b *testing.B) {
benchmark(b, NewIEEE(), 32<<10, 0)
}
func BenchmarkCastagnoliCrc15B(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 15, 0)
}
func BenchmarkCastagnoliCrc15BMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 15, 1)
}
func BenchmarkCastagnoliCrc40B(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 40, 0)
}
func BenchmarkCastagnoliCrc40BMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 40, 1)
}
func BenchmarkCastagnoliCrc512(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 512, 0)
}
func BenchmarkCastagnoliCrc512Misaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 512, 1)
}
func BenchmarkCastagnoliCrc1KB(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 1<<10, 0)
}
func BenchmarkCastagnoliCrc1KBMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 1<<10, 1)
}
func BenchmarkCastagnoliCrc4KB(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 4<<10, 0)
}
func BenchmarkCastagnoliCrc4KBMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 4<<10, 1)
}
func BenchmarkCastagnoliCrc32KB(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 32<<10, 0)
}
func BenchmarkCastagnoliCrc32KBMisaligned(b *testing.B) {
benchmark(b, New(MakeTable(Castagnoli)), 32<<10, 1)
}
func benchmark(b *testing.B, h hash.Hash32, n, alignment int64) {
b.SetBytes(n)
data := make([]byte, n+alignment)
data = data[alignment:]
for i := range data {
data[i] = byte(i)
}
in := make([]byte, 0, h.Size())
// Warm up
h.Reset()
h.Write(data)
h.Sum(in)
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(data)
h.Sum(in)
}
}

28
vendor/github.com/klauspost/crc32/example_test.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package crc32_test
import (
"fmt"
"hash/crc32"
)
func ExampleMakeTable() {
// In this package, the CRC polynomial is represented in reversed notation,
// or LSB-first representation.
//
// LSB-first representation is a hexadecimal number with n bits, in which the
// most significant bit represents the coefficient of x⁰ and the least significant
// bit represents the coefficient of xⁿ⁻¹ (the coefficient for xⁿ is implicit).
//
// For example, CRC32-Q, as defined by the following polynomial,
// x³²+ x³¹+ x²⁴+ x²²+ x¹⁶+ x¹⁴+ x⁸+ x⁷+ x⁵+ x³+ x¹+ x⁰
// has the reversed notation 0b11010101100000101000001010000001, so the value
// that should be passed to MakeTable is 0xD5828281.
crc32q := crc32.MakeTable(0xD5828281)
fmt.Printf("%08x\n", crc32.Checksum([]byte("Hello world"), crc32q))
// Output:
// 2964d064
}

23
vendor/github.com/klauspost/reedsolomon/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,23 @@
The MIT License (MIT)
Copyright (c) 2015 Klaus Post
Copyright (c) 2015 Backblaze
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

204
vendor/github.com/klauspost/reedsolomon/README.md generated vendored Normal file
View File

@@ -0,0 +1,204 @@
# Reed-Solomon
[![GoDoc][1]][2] [![Build Status][3]][4]
[1]: https://godoc.org/github.com/klauspost/reedsolomon?status.svg
[2]: https://godoc.org/github.com/klauspost/reedsolomon
[3]: https://travis-ci.org/klauspost/reedsolomon.svg?branch=master
[4]: https://travis-ci.org/klauspost/reedsolomon
Reed-Solomon Erasure Coding in Go, with speeds exceeding 1GB/s/cpu core implemented in pure Go.
This is a golang port of the [JavaReedSolomon](https://github.com/Backblaze/JavaReedSolomon) library released by [Backblaze](http://backblaze.com), with some additional optimizations.
For an introduction on erasure coding, see the post on the [Backblaze blog](https://www.backblaze.com/blog/reed-solomon/).
Package home: https://github.com/klauspost/reedsolomon
Godoc: https://godoc.org/github.com/klauspost/reedsolomon
# Installation
To get the package use the standard:
```bash
go get github.com/klauspost/reedsolomon
```
# Usage
This section assumes you know the basics of Reed-Solomon encoding. A good start is this [Backblaze blog post](https://www.backblaze.com/blog/reed-solomon/).
This package performs the calculation of the parity sets. The usage is therefore relatively simple.
First of all, you need to choose your distribution of data and parity shards. A 'good' distribution is very subjective and will depend a lot on your usage scenario. A good starting point is above 5 and below 257 data shards (the maximum supported number); the number of parity shards should be 2 or above, and below the number of data shards.
To create an encoder with 10 data shards (where your data goes) and 3 parity shards (calculated):
```Go
enc, err := reedsolomon.New(10, 3)
```
This encoder will work for all parity sets with this distribution of data and parity shards. The error will only be set if you specify 0 or negative values in any of the parameters, or if you specify more than 256 data shards.
The data you send and receive is a simple slice of byte slices; `[][]byte`. In the example above, the top slice must have a length of 13.
```Go
data := make([][]byte, 13)
```
You should then fill the first 10 slices with *equally sized* data, and create parity shards that will be populated with parity data. In this case we create the data in memory, but you could for instance also use [mmap](https://github.com/edsrzf/mmap-go) to map files.
```Go
// Create all shards, size them at 50000 each
for i := range data {
data[i] = make([]byte, 50000)
}
// Fill some data into the data shards
for i, in := range data[:10] {
for j := range in {
in[j] = byte((i+j) & 0xff)
}
}
```
To populate the parity shards, you simply call `Encode()` with your data.
```Go
err = enc.Encode(data)
```
The only case where you should get an error is if the data shards aren't of equal size. The last 3 shards now contain parity data. You can verify this by calling `Verify()`:
```Go
ok, err = enc.Verify(data)
```
The final (and important) part is to be able to reconstruct missing shards. For this to work, you need to know which parts of your data are missing. The encoder *does not know which parts are invalid*, so if data corruption is a likely scenario, you need to implement a hash check for each shard. If a byte has changed in your set, and you don't know which it is, there is no way to reconstruct the data set.
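A rough sketch of such a check, using only the standard library's `crypto/sha256` (this is not part of this package; the names are illustrative):
```Go
// Record one hash per shard right after encoding.
hashes := make([][32]byte, len(data))
for i, shard := range data {
    hashes[i] = sha256.Sum256(shard)
}
// Later, before reconstructing, drop any shard that no longer matches.
for i, shard := range data {
    if shard != nil && sha256.Sum256(shard) != hashes[i] {
        data[i] = nil
    }
}
```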
To indicate missing data, you set the shard to nil before calling `Reconstruct()`:
```Go
// Delete two data shards
data[3] = nil
data[7] = nil
// Reconstruct the missing shards
err := enc.Reconstruct(data)
```
The missing data and parity shards will be recreated. If more than 3 shards are missing, the reconstruction will fail.
So to sum up reconstruction:
* The number of data/parity shards must match the numbers used for encoding.
* The order of shards must be the same as used when encoding.
* You may only supply data you know is valid.
* Invalid shards should be set to nil.
For complete examples of an encoder and decoder see the [examples folder](https://github.com/klauspost/reedsolomon/tree/master/examples).
# Splitting/Joining Data
You might have a large slice of data. To help you split this, there are some helper functions that can split and join a single byte slice.
```Go
bigfile, _ := ioutil.ReadFile("myfile.data")
// Split the file
split, err := enc.Split(bigfile)
```
This will split the file into the number of data shards set when creating the encoder and create empty parity shards.
An important thing to note is that you have to *keep track of the exact input size*. If the size of the input isn't divisible by the number of data shards, extra zeros will be inserted in the last shard.
To join a data set, use the `Join()` function, which will join the shards and write it to the `io.Writer` you supply:
```Go
// Join a data set and write it to io.Discard.
err = enc.Join(io.Discard, data, len(bigfile))
```
# Streaming/Merging
It might seem like a limitation that all data should be in memory, but an important property is that *as long as the number of data/parity shards is the same, you can merge/split data sets*, and they will remain valid as a separate set.
```Go
// Split the data set of 50000 elements into two of 25000
splitA := make([][]byte, 13)
splitB := make([][]byte, 13)
// Merge into a 100000 element set
merged := make([][]byte, 13)
for i := range data {
splitA[i] = data[i][:25000]
splitB[i] = data[i][25000:]
// Concatenate it to itself
merged[i] = append(make([]byte, 0, len(data[i])*2), data[i]...)
merged[i] = append(merged[i], data[i]...)
}
// Each part should still verify as ok.
ok, err := enc.Verify(splitA)
if ok && err == nil {
log.Println("splitA ok")
}
ok, err = enc.Verify(splitB)
if ok && err == nil {
log.Println("splitB ok")
}
ok, err = enc.Verify(merged)
if ok && err == nil {
log.Println("merge ok")
}
```
This means that if you have a data set that may not fit into memory, you can split processing into smaller blocks. For the best throughput, don't use blocks that are too small.
This also means that you can divide big input up into smaller blocks, and do reconstruction on parts of your data. This doesn't give the same flexibility as a higher number of data shards, but it will be much more performant.
# Streaming API
A fully streaming API has also been added, which lets you perform the same operations on streams instead of in-memory slices. To use the stream API, use the [`NewStream`](https://godoc.org/github.com/klauspost/reedsolomon#NewStream) function to create the encoding/decoding interfaces. You can use [`NewStreamC`](https://godoc.org/github.com/klauspost/reedsolomon#NewStreamC) to get an interface that reads/writes concurrently from the streams.
Input is delivered as `[]io.Reader`, output as `[]io.Writer`, and functionality corresponds to the in-memory API. Each stream must supply the same amount of data, similar to how each slice must be the same size with the in-memory API.
If an error occurs in relation to a stream, a [`StreamReadError`](https://godoc.org/github.com/klauspost/reedsolomon#StreamReadError) or [`StreamWriteError`](https://godoc.org/github.com/klauspost/reedsolomon#StreamWriteError) will help you determine which stream was the offender.
There is no buffering or timeouts/retry specified. If you want to add that, you need to add it to the Reader/Writer.
For complete examples of a streaming encoder and decoder see the [examples folder](https://github.com/klauspost/reedsolomon/tree/master/examples).
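A minimal sketch of encoding parity from streams, mirroring the 10+3 example above (the in-memory buffers and discard writers below are placeholders for your own streams):
```Go
// Create a stream encoder with 10 data and 3 parity shards.
stream, err := reedsolomon.NewStream(10, 3)
if err != nil {
    log.Fatal(err)
}
// Each data shard is delivered as an io.Reader; all must supply the same amount of data.
readers := make([]io.Reader, 10)
for i := range readers {
    readers[i] = bytes.NewReader(make([]byte, 50000))
}
// Parity shards are written to the supplied io.Writers.
writers := make([]io.Writer, 3)
for i := range writers {
    writers[i] = ioutil.Discard
}
err = stream.Encode(readers, writers)
```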
# Performance
Performance depends mainly on the number of parity shards. In rough terms, doubling the number of parity shards will double the encoding time.
Here are the throughput numbers with some different selections of data and parity shards. For reference each shard is 1MB random data, and 2 CPU cores are used for encoding.
| Data | Parity | Parity % | MB/s | SSSE3 MB/s | SSSE3 Speed | Rel. Speed |
|------|--------|--------|--------|-------------|-------------|------------|
| 5 | 2 | 40% | 576,11 | 2599,2 | 451% | 100,00% |
| 10 | 2 | 20% | 587,73 | 3100,28 | 528% | 102,02% |
| 10 | 4 | 40% | 298,38 | 2470,97 | 828% | 51,79% |
| 50 | 20 | 40% | 59,81 | 713,28 | 1193% | 10,38% |
If `runtime.GOMAXPROCS()` is set to a value higher than 1, the encoder will use multiple goroutines to perform the calculations in `Verify`, `Encode` and `Reconstruct`.
Example of performance scaling on Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz - 4 physical cores, 8 logical cores. The example uses 10 blocks with 16MB data each and 4 parity blocks.
| Threads | MB/s | Speed |
|---------|---------|-------|
| 1 | 1355,11 | 100% |
| 2 | 2339,78 | 172% |
| 4 | 3179,33 | 235% |
| 8 | 4346,18 | 321% |
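As a small illustration (`GOMAXPROCS` is a standard Go runtime knob, not something specific to this package), you could cap the number of cores used for the calculations before encoding:
```Go
// Let the encoder use up to 4 cores in Verify, Encode and Reconstruct.
runtime.GOMAXPROCS(4)
err = enc.Encode(data)
```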
# asm2plan9s
[asm2plan9s](https://github.com/fwessels/asm2plan9s) is used for assembling the AVX2 instructions into their BYTE/WORD/LONG equivalents.
# Links
* [Backblaze Open Sources Reed-Solomon Erasure Coding Source Code](https://www.backblaze.com/blog/reed-solomon/).
* [JavaReedSolomon](https://github.com/Backblaze/JavaReedSolomon). Compatible java library by Backblaze.
* [reedsolomon-c](https://github.com/jannson/reedsolomon-c). C version, compatible with output from this package.
* [Reed-Solomon Erasure Coding in Haskell](https://github.com/NicolasT/reedsolomon). Haskell port of the package with similar performance.
* [go-erasure](https://github.com/somethingnew2-0/go-erasure). A similar library using cgo, slower in my tests.
* [rsraid](https://github.com/goayame/rsraid). A similar library written in Go. Slower, but supports more shards.
* [Screaming Fast Galois Field Arithmetic](http://www.snia.org/sites/default/files2/SDC2013/presentations/NewThinking/EthanMiller_Screaming_Fast_Galois_Field%20Arithmetic_SIMD%20Instructions.pdf). Basis for SSE3 optimizations.
# License
This code, as the original [JavaReedSolomon](https://github.com/Backblaze/JavaReedSolomon) is published under an MIT license. See LICENSE file for more information.

20
vendor/github.com/klauspost/reedsolomon/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,20 @@
os: Visual Studio 2015
platform: x64
clone_folder: c:\gopath\src\github.com\klauspost\reedsolomon
# environment variables
environment:
GOPATH: c:\gopath
install:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go get -d ./...
build_script:
- go test -v -cpu=2 ./...
- go test -cpu=1,2,4 -short -race ./...

View File

@@ -0,0 +1,45 @@
# Examples
This folder contains usage examples of the Reed-Solomon encoder.
# Simple Encoder/Decoder
Shows basic use of the encoder, and will encode a single file into a number of
data and parity shards. This is meant as an example and is not meant for production use
since there are a number of shortcomings noted below.
To build an executable use:
```bash
go build simple-decoder.go
go build simple-encoder.go
```
# Streaming API examples
There are streaming examples of the same functionality, which stream data instead of keeping it in memory.
To build the executables use:
```bash
go build stream-decoder.go
go build stream-encoder.go
```
## Shortcomings
* If the file size of the input isn't divisible by the number of data shards
the output will contain extra zeroes
* If the shard numbers aren't the same for the decoder as for the
encoder, invalid output will be generated.
* If values have changed in a shard, it cannot be reconstructed.
* If two shards have been swapped, reconstruction will always fail.
You need to supply the shards in the same order as they were given to you.
The solution for this is to save a metadata file containing:
* File size.
* The number of data/parity shards.
* HASH of each shard.
* Order of the shards.
If you save these properties, you should be able to detect file corruption in a shard and be able to reconstruct your data if you have the needed number of shards left.
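One possible shape for such a metadata record (the type and field names below are illustrative only; the examples in this folder do not implement it):
```Go
// ShardMeta is a hypothetical sidecar record saved next to the shards.
type ShardMeta struct {
    FileSize     int64    // exact size of the original file
    DataShards   int      // number of data shards used when encoding
    ParityShards int      // number of parity shards
    ShardHashes  []string // e.g. hex-encoded SHA-256 of each shard, in shard order
}
```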

View File

@@ -0,0 +1,125 @@
//+build ignore
// Copyright 2015, Klaus Post, see LICENSE for details.
//
// Simple decoder example.
//
// The decoder reverses the process of "simple-encoder.go"
//
// To build an executable use:
//
// go build simple-decoder.go
//
// Simple Encoder/Decoder Shortcomings:
// * If the file size of the input isn't divisible by the number of data shards
// the output will contain extra zeroes
//
// * If the shard numbers aren't the same for the decoder as for the
// encoder, invalid output will be generated.
//
// * If values have changed in a shard, it cannot be reconstructed.
//
// * If two shards have been swapped, reconstruction will always fail.
// You need to supply the shards in the same order as they were given to you.
//
// The solution for this is to save a metadata file containing:
//
// * File size.
// * The number of data/parity shards.
// * HASH of each shard.
// * Order of the shards.
//
// If you save these properties, you should be able to detect file corruption
// in a shard and be able to reconstruct your data if you have the needed number of shards left.
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"github.com/klauspost/reedsolomon"
)
var dataShards = flag.Int("data", 4, "Number of shards to split the data into")
var parShards = flag.Int("par", 2, "Number of parity shards")
var outFile = flag.String("out", "", "Alternative output path/file")
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fmt.Fprintf(os.Stderr, " simple-decoder [-flags] basefile.ext\nDo not add the number to the filename.\n")
fmt.Fprintf(os.Stderr, "Valid flags:\n")
flag.PrintDefaults()
}
}
func main() {
// Parse flags
flag.Parse()
args := flag.Args()
if len(args) != 1 {
fmt.Fprintf(os.Stderr, "Error: No filenames given\n")
flag.Usage()
os.Exit(1)
}
fname := args[0]
// Create matrix
enc, err := reedsolomon.New(*dataShards, *parShards)
checkErr(err)
// Create shards and load the data.
shards := make([][]byte, *dataShards+*parShards)
for i := range shards {
infn := fmt.Sprintf("%s.%d", fname, i)
fmt.Println("Opening", infn)
shards[i], err = ioutil.ReadFile(infn)
if err != nil {
fmt.Println("Error reading file", err)
shards[i] = nil
}
}
// Verify the shards
ok, err := enc.Verify(shards)
if ok {
fmt.Println("No reconstruction needed")
} else {
fmt.Println("Verification failed. Reconstructing data")
err = enc.Reconstruct(shards)
if err != nil {
fmt.Println("Reconstruct failed -", err)
os.Exit(1)
}
ok, err = enc.Verify(shards)
if !ok {
fmt.Println("Verification failed after reconstruction, data likely corrupted.")
os.Exit(1)
}
checkErr(err)
}
// Join the shards and write them
outfn := *outFile
if outfn == "" {
outfn = fname
}
fmt.Println("Writing data to", outfn)
f, err := os.Create(outfn)
checkErr(err)
// We don't know the exact filesize.
err = enc.Join(f, shards, len(shards[0])**dataShards)
checkErr(err)
}
func checkErr(err error) {
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
os.Exit(2)
}
}

View File

@@ -0,0 +1,112 @@
//+build ignore
// Copyright 2015, Klaus Post, see LICENSE for details.
//
// Simple encoder example
//
// The encoder encodes a single file into a number of shards
// To reverse the process see "simple-decoder.go"
//
// To build an executable use:
//
// go build simple-encoder.go
//
// Simple Encoder/Decoder Shortcomings:
// * If the file size of the input isn't divisible by the number of data shards
// the output will contain extra zeroes
//
// * If the shard numbers aren't the same for the decoder as for the
// encoder, invalid output will be generated.
//
// * If values have changed in a shard, it cannot be reconstructed.
//
// * If two shards have been swapped, reconstruction will always fail.
// You need to supply the shards in the same order as they were given to you.
//
// The solution for this is to save a metadata file containing:
//
// * File size.
// * The number of data/parity shards.
// * HASH of each shard.
// * Order of the shards.
//
// If you save these properties, you should be able to detect file corruption
// in a shard and be able to reconstruct your data if you have the needed number of shards left.
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/klauspost/reedsolomon"
)
var dataShards = flag.Int("data", 4, "Number of shards to split the data into, must be below 257.")
var parShards = flag.Int("par", 2, "Number of parity shards")
var outDir = flag.String("out", "", "Alternative output directory")
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fmt.Fprintf(os.Stderr, " simple-encoder [-flags] filename.ext\n\n")
fmt.Fprintf(os.Stderr, "Valid flags:\n")
flag.PrintDefaults()
}
}
func main() {
// Parse command line parameters.
flag.Parse()
args := flag.Args()
if len(args) != 1 {
fmt.Fprintf(os.Stderr, "Error: No input filename given\n")
flag.Usage()
os.Exit(1)
}
if *dataShards > 257 {
fmt.Fprintf(os.Stderr, "Error: Too many data shards\n")
os.Exit(1)
}
fname := args[0]
// Create encoding matrix.
enc, err := reedsolomon.New(*dataShards, *parShards)
checkErr(err)
fmt.Println("Opening", fname)
b, err := ioutil.ReadFile(fname)
checkErr(err)
// Split the file into equally sized shards.
shards, err := enc.Split(b)
checkErr(err)
fmt.Printf("File split into %d data+parity shards with %d bytes/shard.\n", len(shards), len(shards[0]))
// Encode parity
err = enc.Encode(shards)
checkErr(err)
// Write out the resulting files.
dir, file := filepath.Split(fname)
if *outDir != "" {
dir = *outDir
}
for i, shard := range shards {
outfn := fmt.Sprintf("%s.%d", file, i)
fmt.Println("Writing to", outfn)
err = ioutil.WriteFile(filepath.Join(dir, outfn), shard, os.ModePerm)
checkErr(err)
}
}
func checkErr(err error) {
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
os.Exit(2)
}
}

View File

@@ -0,0 +1,167 @@
//+build ignore
// Copyright 2015, Klaus Post, see LICENSE for details.
//
// Stream decoder example.
//
// The decoder reverses the process of "stream-encoder.go"
//
// To build an executable use:
//
// go build stream-decoder.go
//
// Simple Encoder/Decoder Shortcomings:
// * If the file size of the input isn't divisible by the number of data shards
// the output will contain extra zeroes
//
// * If the shard numbers aren't the same for the decoder as for the
// encoder, invalid output will be generated.
//
// * If values have changed in a shard, it cannot be reconstructed.
//
// * If two shards have been swapped, reconstruction will always fail.
// You need to supply the shards in the same order as they were given to you.
//
// The solution for this is to save a metadata file containing:
//
// * File size.
// * The number of data/parity shards.
// * HASH of each shard.
// * Order of the shards.
//
// If you save these properties, you should be able to detect file corruption
// in a shard and be able to reconstruct your data if you have the needed number of shards left.
package main
import (
"flag"
"fmt"
"io"
"os"
"path/filepath"
"github.com/klauspost/reedsolomon"
)
var dataShards = flag.Int("data", 4, "Number of shards to split the data into")
var parShards = flag.Int("par", 2, "Number of parity shards")
var outFile = flag.String("out", "", "Alternative output path/file")
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fmt.Fprintf(os.Stderr, " %s [-flags] basefile.ext\nDo not add the number to the filename.\n", os.Args[0])
fmt.Fprintf(os.Stderr, "Valid flags:\n")
flag.PrintDefaults()
}
}
func main() {
// Parse flags
flag.Parse()
args := flag.Args()
if len(args) != 1 {
fmt.Fprintf(os.Stderr, "Error: No filenames given\n")
flag.Usage()
os.Exit(1)
}
fname := args[0]
// Create matrix
enc, err := reedsolomon.NewStream(*dataShards, *parShards)
checkErr(err)
// Open the inputs
shards, size, err := openInput(*dataShards, *parShards, fname)
checkErr(err)
// Verify the shards
ok, err := enc.Verify(shards)
if ok {
fmt.Println("No reconstruction needed")
} else {
fmt.Println("Verification failed. Reconstructing data")
shards, size, err = openInput(*dataShards, *parShards, fname)
checkErr(err)
// Create out destination writers
out := make([]io.Writer, len(shards))
for i := range out {
if shards[i] == nil {
dir, _ := filepath.Split(fname)
outfn := fmt.Sprintf("%s.%d", fname, i)
fmt.Println("Creating", outfn)
out[i], err = os.Create(filepath.Join(dir, outfn))
checkErr(err)
}
}
err = enc.Reconstruct(shards, out)
if err != nil {
fmt.Println("Reconstruct failed -", err)
os.Exit(1)
}
// Close output.
for i := range out {
if out[i] != nil {
err := out[i].(*os.File).Close()
checkErr(err)
}
}
shards, size, err = openInput(*dataShards, *parShards, fname)
ok, err = enc.Verify(shards)
if !ok {
fmt.Println("Verification failed after reconstruction, data likely corrupted:", err)
os.Exit(1)
}
checkErr(err)
}
// Join the shards and write them
outfn := *outFile
if outfn == "" {
outfn = fname
}
fmt.Println("Writing data to", outfn)
f, err := os.Create(outfn)
checkErr(err)
shards, size, err = openInput(*dataShards, *parShards, fname)
checkErr(err)
// We don't know the exact filesize.
err = enc.Join(f, shards, int64(*dataShards)*size)
checkErr(err)
}
func openInput(dataShards, parShards int, fname string) (r []io.Reader, size int64, err error) {
// Create shards and load the data.
shards := make([]io.Reader, dataShards+parShards)
for i := range shards {
infn := fmt.Sprintf("%s.%d", fname, i)
fmt.Println("Opening", infn)
f, err := os.Open(infn)
if err != nil {
fmt.Println("Error reading file", err)
shards[i] = nil
continue
} else {
shards[i] = f
}
stat, err := f.Stat()
checkErr(err)
if stat.Size() > 0 {
size = stat.Size()
} else {
shards[i] = nil
}
}
return shards, size, nil
}
func checkErr(err error) {
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
os.Exit(2)
}
}

View File

@@ -0,0 +1,142 @@
//+build ignore
// Copyright 2015, Klaus Post, see LICENSE for details.
//
// Simple stream encoder example
//
// The encoder encodes a single file into a number of shards
// To reverse the process see "stream-decoder.go"
//
// To build an executable use:
//
// go build stream-encoder.go
//
// Simple Encoder/Decoder Shortcomings:
// * If the file size of the input isn't divisible by the number of data shards
// the output will contain extra zeroes
//
// * If the shard numbers aren't the same for the decoder as for the
// encoder, invalid output will be generated.
//
// * If values have changed in a shard, it cannot be reconstructed.
//
// * If two shards have been swapped, reconstruction will always fail.
// You need to supply the shards in the same order as they were given to you.
//
// The solution for this is to save a metadata file containing:
//
// * File size.
// * The number of data/parity shards.
// * HASH of each shard.
// * Order of the shards.
//
// If you save these properties, you should be able to detect file corruption
// in a shard and be able to reconstruct your data if you have the needed number of shards left.
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"io"
"github.com/klauspost/reedsolomon"
)
var dataShards = flag.Int("data", 4, "Number of shards to split the data into, must be below 257.")
var parShards = flag.Int("par", 2, "Number of parity shards")
var outDir = flag.String("out", "", "Alternative output directory")
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fmt.Fprintf(os.Stderr, " %s [-flags] filename.ext\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "Valid flags:\n")
flag.PrintDefaults()
}
}
func main() {
// Parse command line parameters.
flag.Parse()
args := flag.Args()
if len(args) != 1 {
fmt.Fprintf(os.Stderr, "Error: No input filename given\n")
flag.Usage()
os.Exit(1)
}
if *dataShards > 257 {
fmt.Fprintf(os.Stderr, "Error: Too many data shards\n")
os.Exit(1)
}
fname := args[0]
// Create encoding matrix.
enc, err := reedsolomon.NewStream(*dataShards, *parShards)
checkErr(err)
fmt.Println("Opening", fname)
f, err := os.Open(fname)
checkErr(err)
instat, err := f.Stat()
checkErr(err)
shards := *dataShards + *parShards
out := make([]*os.File, shards)
// Create the resulting files.
dir, file := filepath.Split(fname)
if *outDir != "" {
dir = *outDir
}
for i := range out {
outfn := fmt.Sprintf("%s.%d", file, i)
fmt.Println("Creating", outfn)
out[i], err = os.Create(filepath.Join(dir, outfn))
checkErr(err)
}
// Split into files.
data := make([]io.Writer, *dataShards)
for i := range data {
data[i] = out[i]
}
// Do the split
err = enc.Split(f, data, instat.Size())
checkErr(err)
// Close and re-open the files.
input := make([]io.Reader, *dataShards)
for i := range data {
out[i].Close()
f, err := os.Open(out[i].Name())
checkErr(err)
input[i] = f
defer f.Close()
}
// Create parity output writers
parity := make([]io.Writer, *parShards)
for i := range parity {
parity[i] = out[*dataShards+i]
defer out[*dataShards+i].Close()
}
// Encode parity
err = enc.Encode(input, parity)
checkErr(err)
fmt.Printf("File split into %d data + %d parity shards.\n", *dataShards, *parShards)
}
func checkErr(err error) {
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
os.Exit(2)
}
}

View File

@@ -0,0 +1,209 @@
package reedsolomon_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"github.com/klauspost/reedsolomon"
)
func fillRandom(p []byte) {
for i := 0; i < len(p); i += 7 {
val := rand.Int63()
for j := 0; i+j < len(p) && j < 7; j++ {
p[i+j] = byte(val)
val >>= 8
}
}
}
// Simple example of how to use all functions of the Encoder.
// Note that all error checks have been removed to keep it short.
func ExampleEncoder() {
// Create some sample data
var data = make([]byte, 250000)
fillRandom(data)
// Create an encoder with 17 data and 3 parity slices.
enc, _ := reedsolomon.New(17, 3)
// Split the data into shards
shards, _ := enc.Split(data)
// Encode the parity set
_ = enc.Encode(shards)
// Verify the parity set
ok, _ := enc.Verify(shards)
if ok {
fmt.Println("ok")
}
// Delete two shards
shards[10], shards[11] = nil, nil
// Reconstruct the shards
_ = enc.Reconstruct(shards)
// Verify the data set
ok, _ = enc.Verify(shards)
if ok {
fmt.Println("ok")
}
// Output: ok
// ok
}
// This demonstrates that shards can be arbitrarily sliced and
// merged and still remain valid.
func ExampleEncoder_slicing() {
// Create some sample data
var data = make([]byte, 250000)
fillRandom(data)
// Create 5 data slices of 50000 elements each
enc, _ := reedsolomon.New(5, 3)
shards, _ := enc.Split(data)
err := enc.Encode(shards)
if err != nil {
panic(err)
}
// Check that it verifies
ok, err := enc.Verify(shards)
if ok && err == nil {
fmt.Println("encode ok")
}
// Split the data set of 50000 elements into two of 25000
splitA := make([][]byte, 8)
splitB := make([][]byte, 8)
// Merge into a 100000 element set
merged := make([][]byte, 8)
// Split/merge the shards
for i := range shards {
splitA[i] = shards[i][:25000]
splitB[i] = shards[i][25000:]
// Concatenate it to itself
merged[i] = append(make([]byte, 0, len(shards[i])*2), shards[i]...)
merged[i] = append(merged[i], shards[i]...)
}
// Each part should still verify as ok.
ok, err = enc.Verify(splitA)
if ok && err == nil {
fmt.Println("splitA ok")
}
ok, err = enc.Verify(splitB)
if ok && err == nil {
fmt.Println("splitB ok")
}
ok, err = enc.Verify(merged)
if ok && err == nil {
fmt.Println("merge ok")
}
// Output: encode ok
// splitA ok
// splitB ok
// merge ok
}
// This demonstrates that shards can be xor'ed and
// still remain a valid set.
//
// The xor value must be the same for element 'n' in each shard,
// except if you xor with a similar sized encoded shard set.
func ExampleEncoder_xor() {
// Create some sample data
var data = make([]byte, 25000)
fillRandom(data)
// Create 5 data slices of 5000 elements each
enc, _ := reedsolomon.New(5, 3)
shards, _ := enc.Split(data)
err := enc.Encode(shards)
if err != nil {
panic(err)
}
// Check that it verifies
ok, err := enc.Verify(shards)
if !ok || err != nil {
fmt.Println("falied initial verify", err)
}
// Create an xor'ed set
xored := make([][]byte, 8)
// We xor by the index, so you can see that the xor can change.
// It should, however, be constant vertically through your slices.
for i := range shards {
xored[i] = make([]byte, len(shards[i]))
for j := range xored[i] {
xored[i][j] = shards[i][j] ^ byte(j&0xff)
}
}
// Each part should still verify as ok.
ok, err = enc.Verify(xored)
if ok && err == nil {
fmt.Println("verified ok after xor")
}
// Output: verified ok after xor
}
// This will show a simple stream encoder where we encode from
// a []io.Reader which contain a reader for each shard.
//
// Input and output can be exchanged with files, network streams
// or what may suit your needs.
func ExampleStreamEncoder() {
dataShards := 5
parityShards := 2
// Create a StreamEncoder with the number of data and
// parity shards.
rs, err := reedsolomon.NewStream(dataShards, parityShards)
if err != nil {
log.Fatal(err)
}
shardSize := 50000
// Create input data shards.
input := make([][]byte, dataShards)
for s := range input {
input[s] = make([]byte, shardSize)
fillRandom(input[s])
}
// Convert our buffers to io.Readers
readers := make([]io.Reader, dataShards)
for i := range readers {
readers[i] = io.Reader(bytes.NewBuffer(input[i]))
}
// Create our output io.Writers
out := make([]io.Writer, parityShards)
for i := range out {
out[i] = ioutil.Discard
}
// Encode from input to output.
err = rs.Encode(readers, out)
if err != nil {
log.Fatal(err)
}
fmt.Println("ok")
// OUTPUT: ok
}

134
vendor/github.com/klauspost/reedsolomon/galois.go generated vendored Normal file

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,77 @@
//+build !noasm
//+build !appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
package reedsolomon
import (
"github.com/klauspost/cpuid"
)
//go:noescape
func galMulSSSE3(low, high, in, out []byte)
//go:noescape
func galMulSSSE3Xor(low, high, in, out []byte)
//go:noescape
func galMulAVX2Xor(low, high, in, out []byte)
//go:noescape
func galMulAVX2(low, high, in, out []byte)
// This is what the assembler routines do in blocks of 16 bytes:
/*
func galMulSSSE3(low, high, in, out []byte) {
for n, input := range in {
l := input & 0xf
h := input >> 4
out[n] = low[l] ^ high[h]
}
}
func galMulSSSE3Xor(low, high, in, out []byte) {
for n, input := range in {
l := input & 0xf
h := input >> 4
out[n] ^= low[l] ^ high[h]
}
}
*/
func galMulSlice(c byte, in, out []byte) {
var done int
if cpuid.CPU.AVX2() {
galMulAVX2(mulTableLow[c][:], mulTableHigh[c][:], in, out)
done = (len(in) >> 5) << 5
} else if cpuid.CPU.SSSE3() {
galMulSSSE3(mulTableLow[c][:], mulTableHigh[c][:], in, out)
done = (len(in) >> 4) << 4
}
remain := len(in) - done
if remain > 0 {
mt := mulTable[c]
for i := done; i < len(in); i++ {
out[i] = mt[in[i]]
}
}
}
func galMulSliceXor(c byte, in, out []byte) {
var done int
if cpuid.CPU.AVX2() {
galMulAVX2Xor(mulTableLow[c][:], mulTableHigh[c][:], in, out)
done = (len(in) >> 5) << 5
} else if cpuid.CPU.SSSE3() {
galMulSSSE3Xor(mulTableLow[c][:], mulTableHigh[c][:], in, out)
done = (len(in) >> 4) << 4
}
remain := len(in) - done
if remain > 0 {
mt := mulTable[c]
for i := done; i < len(in); i++ {
out[i] ^= mt[in[i]]
}
}
}

164
vendor/github.com/klauspost/reedsolomon/galois_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,164 @@
//+build !noasm !appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
// Based on http://www.snia.org/sites/default/files2/SDC2013/presentations/NewThinking/EthanMiller_Screaming_Fast_Galois_Field%20Arithmetic_SIMD%20Instructions.pdf
// and http://jerasure.org/jerasure/gf-complete/tree/master
// func galMulSSSE3Xor(low, high, in, out []byte)
TEXT ·galMulSSSE3Xor(SB), 7, $0
MOVQ low+0(FP), SI // SI: &low
MOVQ high+24(FP), DX // DX: &high
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ $15, BX // BX: low mask
MOVQ BX, X8
PXOR X5, X5
MOVQ in+48(FP), SI // R11: &in
MOVQ in_len+56(FP), R9 // R9: len(in)
MOVQ out+72(FP), DX // DX: &out
PSHUFB X5, X8 // X8: lomask (unpacked)
SHRQ $4, R9 // len(in) / 16
CMPQ R9, $0
JEQ done_xor
loopback_xor:
MOVOU (SI), X0 // in[x]
MOVOU (DX), X4 // out[x]
MOVOU X0, X1 // in[x]
MOVOU X6, X2 // low copy
MOVOU X7, X3 // high copy
PSRLQ $4, X1 // X1: high input
PAND X8, X0 // X0: low input
PAND X8, X1 // X1: high input
PSHUFB X0, X2 // X2: mul low part
PSHUFB X1, X3 // X3: mul high part
PXOR X2, X3 // X3: Result
PXOR X4, X3 // X3: Result xor existing out
MOVOU X3, (DX) // Store
ADDQ $16, SI // in+=16
ADDQ $16, DX // out+=16
SUBQ $1, R9
JNZ loopback_xor
done_xor:
RET
// func galMulSSSE3(low, high, in, out []byte)
TEXT ·galMulSSSE3(SB), 7, $0
MOVQ low+0(FP), SI // SI: &low
MOVQ high+24(FP), DX // DX: &high
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ $15, BX // BX: low mask
MOVQ BX, X8
PXOR X5, X5
MOVQ in+48(FP), SI // R11: &in
MOVQ in_len+56(FP), R9 // R9: len(in)
MOVQ out+72(FP), DX // DX: &out
PSHUFB X5, X8 // X8: lomask (unpacked)
SHRQ $4, R9 // len(in) / 16
CMPQ R9, $0
JEQ done
loopback:
MOVOU (SI), X0 // in[x]
MOVOU X0, X1 // in[x]
MOVOU X6, X2 // low copy
MOVOU X7, X3 // high copy
PSRLQ $4, X1 // X1: high input
PAND X8, X0 // X0: low input
PAND X8, X1 // X1: high input
PSHUFB X0, X2 // X2: mul low part
PSHUFB X1, X3 // X3: mul high part
PXOR X2, X3 // X3: Result
MOVOU X3, (DX) // Store
ADDQ $16, SI // in+=16
ADDQ $16, DX // out+=16
SUBQ $1, R9
JNZ loopback
done:
RET
// func galMulAVX2Xor(low, high, in, out []byte)
TEXT ·galMulAVX2Xor(SB), 7, $0
MOVQ low+0(FP), SI // SI: &low
MOVQ high+24(FP), DX // DX: &high
MOVQ $15, BX // BX: low mask
MOVQ BX, X5
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ in_len+56(FP), R9 // R9: len(in)
LONG $0x384de3c4; WORD $0x01f6 // VINSERTI128 YMM6, YMM6, XMM6, 1 ; low
LONG $0x3845e3c4; WORD $0x01ff // VINSERTI128 YMM7, YMM7, XMM7, 1 ; high
LONG $0x787d62c4; BYTE $0xc5 // VPBROADCASTB YMM8, XMM5 ; X8: lomask (unpacked)
SHRQ $5, R9 // len(in) /32
MOVQ out+72(FP), DX // DX: &out
MOVQ in+48(FP), SI // R11: &in
TESTQ R9, R9
JZ done_xor_avx2
loopback_xor_avx2:
LONG $0x066ffec5 // VMOVDQU YMM0, [rsi]
LONG $0x226ffec5 // VMOVDQU YMM4, [rdx]
LONG $0xd073f5c5; BYTE $0x04 // VPSRLQ YMM1, YMM0, 4 ; X1: high input
LONG $0xdb7dc1c4; BYTE $0xc0 // VPAND YMM0, YMM0, YMM8 ; X0: low input
LONG $0xdb75c1c4; BYTE $0xc8 // VPAND YMM1, YMM1, YMM8 ; X1: high input
LONG $0x004de2c4; BYTE $0xd0 // VPSHUFB YMM2, YMM6, YMM0 ; X2: mul low part
LONG $0x0045e2c4; BYTE $0xd9 // VPSHUFB YMM3, YMM7, YMM1 ; X2: mul high part
LONG $0xdbefedc5 // VPXOR YMM3, YMM2, YMM3 ; X3: Result
LONG $0xe4efe5c5 // VPXOR YMM4, YMM3, YMM4 ; X4: Result
LONG $0x227ffec5 // VMOVDQU [rdx], YMM4
ADDQ $32, SI // in+=32
ADDQ $32, DX // out+=32
SUBQ $1, R9
JNZ loopback_xor_avx2
done_xor_avx2:
// VZEROUPPER
BYTE $0xc5; BYTE $0xf8; BYTE $0x77
RET
// func galMulAVX2(low, high, in, out []byte)
TEXT ·galMulAVX2(SB), 7, $0
MOVQ low+0(FP), SI // SI: &low
MOVQ high+24(FP), DX // DX: &high
MOVQ $15, BX // BX: low mask
MOVQ BX, X5
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ in_len+56(FP), R9 // R9: len(in)
LONG $0x384de3c4; WORD $0x01f6 // VINSERTI128 YMM6, YMM6, XMM6, 1 ; low
LONG $0x3845e3c4; WORD $0x01ff // VINSERTI128 YMM7, YMM7, XMM7, 1 ; high
LONG $0x787d62c4; BYTE $0xc5 // VPBROADCASTB YMM8, XMM5 ; X8: lomask (unpacked)
SHRQ $5, R9 // len(in) /32
MOVQ out+72(FP), DX // DX: &out
MOVQ in+48(FP), SI // SI: &in
TESTQ R9, R9
JZ done_avx2
loopback_avx2:
LONG $0x066ffec5 // VMOVDQU YMM0, [rsi]
LONG $0xd073f5c5; BYTE $0x04 // VPSRLQ YMM1, YMM0, 4 ; X1: high input
LONG $0xdb7dc1c4; BYTE $0xc0 // VPAND YMM0, YMM0, YMM8 ; X0: low input
LONG $0xdb75c1c4; BYTE $0xc8 // VPAND YMM1, YMM1, YMM8 ; X1: high input
LONG $0x004de2c4; BYTE $0xd0 // VPSHUFB YMM2, YMM6, YMM0 ; X2: mul low part
LONG $0x0045e2c4; BYTE $0xd9 // VPSHUFB YMM3, YMM7, YMM1 ; X2: mul high part
LONG $0xe3efedc5 // VPXOR YMM4, YMM2, YMM3 ; X4: Result
LONG $0x227ffec5 // VMOVDQU [rdx], YMM4
ADDQ $32, SI // in+=32
ADDQ $32, DX // out+=32
SUBQ $1, R9
JNZ loopback_avx2
done_avx2:
BYTE $0xc5; BYTE $0xf8; BYTE $0x77 // VZEROUPPER
RET
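
The four kernels above all implement the same trick: split each input byte into its low and high nibble, look up the two partial products in 16-entry tables (PSHUFB/VPSHUFB against mulTableLow and mulTableHigh), and XOR the halves together. A minimal pure-Go sketch of that per-byte step, assuming low[x] holds c*x and high[x] holds c*(x<<4) in GF(2^8); mulByNibbles is a name made up for illustration and is not part of the library:

// mulByNibbles mirrors what one PSHUFB round of galMulSSSE3 computes.
// Multiplication by c distributes over XOR, so c*v == c*(v&0x0f) ^ c*(v&0xf0).
func mulByNibbles(low, high *[16]byte, in, out []byte) {
	for i, v := range in {
		out[i] = low[v&0x0f] ^ high[v>>4]
	}
}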

View File

@@ -0,0 +1,19 @@
//+build !amd64 noasm appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
package reedsolomon
func galMulSlice(c byte, in, out []byte) {
mt := mulTable[c]
for n, input := range in {
out[n] = mt[input]
}
}
func galMulSliceXor(c byte, in, out []byte) {
mt := mulTable[c]
for n, input := range in {
out[n] ^= mt[input]
}
}

155
vendor/github.com/klauspost/reedsolomon/galois_test.go generated vendored Normal file
View File

@@ -0,0 +1,155 @@
/**
* Unit tests for Galois
*
* Copyright 2015, Klaus Post
* Copyright 2015, Backblaze, Inc.
*/
package reedsolomon
import (
"bytes"
"testing"
)
func TestAssociativity(t *testing.T) {
for i := 0; i < 256; i++ {
a := byte(i)
for j := 0; j < 256; j++ {
b := byte(j)
for k := 0; k < 256; k++ {
c := byte(k)
x := galAdd(a, galAdd(b, c))
y := galAdd(galAdd(a, b), c)
if x != y {
t.Fatal("add does not match:", x, "!=", y)
}
x = galMultiply(a, galMultiply(b, c))
y = galMultiply(galMultiply(a, b), c)
if x != y {
t.Fatal("multiply does not match:", x, "!=", y)
}
}
}
}
}
func TestIdentity(t *testing.T) {
for i := 0; i < 256; i++ {
a := byte(i)
b := galAdd(a, 0)
if a != b {
t.Fatal("Add zero should yield same result", a, "!=", b)
}
b = galMultiply(a, 1)
if a != b {
t.Fatal("Mul by one should yield same result", a, "!=", b)
}
}
}
func TestInverse(t *testing.T) {
for i := 0; i < 256; i++ {
a := byte(i)
b := galSub(0, a)
c := galAdd(a, b)
if c != 0 {
t.Fatal("inverse sub/add", c, "!=", 0)
}
if a != 0 {
b = galDivide(1, a)
c = galMultiply(a, b)
if c != 1 {
t.Fatal("inverse div/mul", c, "!=", 1)
}
}
}
}
func TestCommutativity(t *testing.T) {
for i := 0; i < 256; i++ {
a := byte(i)
for j := 0; j < 256; j++ {
b := byte(j)
x := galAdd(a, b)
y := galAdd(b, a)
if x != y {
t.Fatal(x, "!= ", y)
}
x = galMultiply(a, b)
y = galMultiply(b, a)
if x != y {
t.Fatal(x, "!= ", y)
}
}
}
}
func TestDistributivity(t *testing.T) {
for i := 0; i < 256; i++ {
a := byte(i)
for j := 0; j < 256; j++ {
b := byte(j)
for k := 0; k < 256; k++ {
c := byte(k)
x := galMultiply(a, galAdd(b, c))
y := galAdd(galMultiply(a, b), galMultiply(a, c))
if x != y {
t.Fatal(x, "!= ", y)
}
}
}
}
}
func TestExp(t *testing.T) {
for i := 0; i < 256; i++ {
a := byte(i)
power := byte(1)
for j := 0; j < 256; j++ {
x := galExp(a, j)
if x != power {
t.Fatal(x, "!=", power)
}
power = galMultiply(power, a)
}
}
}
func TestGalois(t *testing.T) {
// These values were copied from the output of the Python code.
if galMultiply(3, 4) != 12 {
t.Fatal("galMultiply(3, 4) != 12")
}
if galMultiply(7, 7) != 21 {
t.Fatal("galMultiply(7, 7) != 21")
}
if galMultiply(23, 45) != 41 {
t.Fatal("galMultiply(23, 45) != 41")
}
// Test slices (>16 entries to test assembler)
in := []byte{0, 1, 2, 3, 4, 5, 6, 10, 50, 100, 150, 174, 201, 255, 99, 32, 67, 85}
out := make([]byte, len(in))
galMulSlice(25, in, out)
expect := []byte{0x0, 0x19, 0x32, 0x2b, 0x64, 0x7d, 0x56, 0xfa, 0xb8, 0x6d, 0xc7, 0x85, 0xc3, 0x1f, 0x22, 0x7, 0x25, 0xfe}
if 0 != bytes.Compare(out, expect) {
t.Errorf("got %#v, expected %#v", out, expect)
}
galMulSlice(177, in, out)
expect = []byte{0x0, 0xb1, 0x7f, 0xce, 0xfe, 0x4f, 0x81, 0x9e, 0x3, 0x6, 0xe8, 0x75, 0xbd, 0x40, 0x36, 0xa3, 0x95, 0xcb}
if 0 != bytes.Compare(out, expect) {
t.Errorf("got %#v, expected %#v", out, expect)
}
if galExp(2, 2) != 4 {
t.Fatal("galExp(2, 2) != 4")
}
if galExp(5, 20) != 235 {
t.Fatal("galExp(5, 20) != 235")
}
if galExp(13, 7) != 43 {
t.Fatal("galExp(13, 7) != 43")
}
}

132
vendor/github.com/klauspost/reedsolomon/gentables.go generated vendored Normal file
View File

@@ -0,0 +1,132 @@
//+build ignore
package main
import (
"fmt"
)
var logTable = [fieldSize]int16{
-1, 0, 1, 25, 2, 50, 26, 198,
3, 223, 51, 238, 27, 104, 199, 75,
4, 100, 224, 14, 52, 141, 239, 129,
28, 193, 105, 248, 200, 8, 76, 113,
5, 138, 101, 47, 225, 36, 15, 33,
53, 147, 142, 218, 240, 18, 130, 69,
29, 181, 194, 125, 106, 39, 249, 185,
201, 154, 9, 120, 77, 228, 114, 166,
6, 191, 139, 98, 102, 221, 48, 253,
226, 152, 37, 179, 16, 145, 34, 136,
54, 208, 148, 206, 143, 150, 219, 189,
241, 210, 19, 92, 131, 56, 70, 64,
30, 66, 182, 163, 195, 72, 126, 110,
107, 58, 40, 84, 250, 133, 186, 61,
202, 94, 155, 159, 10, 21, 121, 43,
78, 212, 229, 172, 115, 243, 167, 87,
7, 112, 192, 247, 140, 128, 99, 13,
103, 74, 222, 237, 49, 197, 254, 24,
227, 165, 153, 119, 38, 184, 180, 124,
17, 68, 146, 217, 35, 32, 137, 46,
55, 63, 209, 91, 149, 188, 207, 205,
144, 135, 151, 178, 220, 252, 190, 97,
242, 86, 211, 171, 20, 42, 93, 158,
132, 60, 57, 83, 71, 109, 65, 162,
31, 45, 67, 216, 183, 123, 164, 118,
196, 23, 73, 236, 127, 12, 111, 246,
108, 161, 59, 82, 41, 157, 85, 170,
251, 96, 134, 177, 187, 204, 62, 90,
203, 89, 95, 176, 156, 169, 160, 81,
11, 245, 22, 235, 122, 117, 44, 215,
79, 174, 213, 233, 230, 231, 173, 232,
116, 214, 244, 234, 168, 80, 88, 175,
}
const (
// The number of elements in the field.
fieldSize = 256
// The polynomial used to generate the logarithm table.
//
// There are a number of polynomials that work to generate
// a Galois field of 256 elements. The choice is arbitrary,
// and we just use the first one.
//
// The possibilities are: 29, 43, 45, 77, 95, 99, 101, 105,
// 113, 135, 141, 169, 195, 207, 231, and 245.
generatingPolynomial = 29
)
func main() {
t := generateExpTable()
fmt.Printf("var expTable = %#v\n", t)
//t2 := generateMulTableSplit(t)
//fmt.Printf("var mulTable = %#v\n", t2)
low, high := generateMulTableHalf(t)
fmt.Printf("var mulTableLow = %#v\n", low)
fmt.Printf("var mulTableHigh = %#v\n", high)
}
/**
* Generates the inverse log table.
*/
func generateExpTable() []byte {
result := make([]byte, fieldSize*2-2)
for i := 1; i < fieldSize; i++ {
log := logTable[i]
result[log] = byte(i)
result[log+fieldSize-1] = byte(i)
}
return result
}
func generateMulTable(expTable []byte) []byte {
result := make([]byte, 256*256)
for v := range result {
a := byte(v & 0xff)
b := byte(v >> 8)
if a == 0 || b == 0 {
result[v] = 0
continue
}
logA := int(logTable[a])
logB := int(logTable[b])
result[v] = expTable[logA+logB]
}
return result
}
func generateMulTableSplit(expTable []byte) [256][256]byte {
var result [256][256]byte
for a := range result {
for b := range result[a] {
if a == 0 || b == 0 {
result[a][b] = 0
continue
}
logA := int(logTable[a])
logB := int(logTable[b])
result[a][b] = expTable[logA+logB]
}
}
return result
}
func generateMulTableHalf(expTable []byte) (low [256][16]byte, high [256][16]byte) {
for a := range low {
for b := range low {
result := 0
if !(a == 0 || b == 0) {
logA := int(logTable[a])
logB := int(logTable[b])
result = int(expTable[logA+logB])
}
if (b & 0xf) == b {
low[a][b] = byte(result)
}
if (b & 0xf0) == b {
high[a][b>>4] = byte(result)
}
}
}
return
}
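
For reference, the arithmetic that expTable, mulTableLow and mulTableHigh encode can also be computed bit by bit. A small standalone sketch (not part of the package) multiplying in GF(2^8) with the same generating polynomial 29 (0x1d), reproducing the spot checks from galois_test.go above:

package main

import "fmt"

// gfMul multiplies a and b in GF(2^8) by Russian-peasant multiplication,
// reducing by x^8 + x^4 + x^3 + x^2 + 1 (low byte 0x1d, i.e. 29) on overflow.
func gfMul(a, b byte) byte {
	var p byte
	for b != 0 {
		if b&1 != 0 {
			p ^= a
		}
		carry := a&0x80 != 0
		a <<= 1
		if carry {
			a ^= 0x1d
		}
		b >>= 1
	}
	return p
}

func main() {
	fmt.Println(gfMul(3, 4), gfMul(7, 7), gfMul(23, 45)) // 12 21 41
}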

View File

@@ -0,0 +1,160 @@
/**
* A thread-safe tree which caches inverted matrices.
*
* Copyright 2016, Peter Collins
*/
package reedsolomon
import (
"errors"
"sync"
)
// The tree uses a Reader-Writer mutex to make it thread-safe
// when accessing cached matrices and inserting new ones.
type inversionTree struct {
mutex *sync.RWMutex
root inversionNode
}
type inversionNode struct {
matrix matrix
children []*inversionNode
}
// newInversionTree initializes a tree for storing inverted matrices.
// Note that the root node is the identity matrix as it implies
// there were no errors with the original data.
func newInversionTree(dataShards, parityShards int) inversionTree {
identity, _ := identityMatrix(dataShards)
root := inversionNode{
matrix: identity,
children: make([]*inversionNode, dataShards+parityShards),
}
return inversionTree{
mutex: &sync.RWMutex{},
root: root,
}
}
// GetInvertedMatrix returns the cached inverted matrix or nil if it
// is not found in the tree keyed on the indices of invalid rows.
func (t inversionTree) GetInvertedMatrix(invalidIndices []int) matrix {
// Lock the tree for reading before accessing the tree.
t.mutex.RLock()
defer t.mutex.RUnlock()
// If no invalid indices were given, we should return the root
// identity matrix.
if len(invalidIndices) == 0 {
return t.root.matrix
}
// Recursively search for the inverted matrix in the tree, passing in
// 0 as the parent index as we start at the root of the tree.
return t.root.getInvertedMatrix(invalidIndices, 0)
}
// errAlreadySet is returned if the root node matrix is overwritten
var errAlreadySet = errors.New("the root node identity matrix is already set")
// InsertInvertedMatrix inserts a new inverted matrix into the tree
// keyed by the indices of invalid rows. The total number of shards
// is required for creating the proper length lists of child nodes for
// each node.
func (t inversionTree) InsertInvertedMatrix(invalidIndices []int, matrix matrix, shards int) error {
// If no invalid indices were given then we are done because the
// root node is already set with the identity matrix.
if len(invalidIndices) == 0 {
return errAlreadySet
}
if !matrix.IsSquare() {
return errNotSquare
}
// Lock the tree for writing and reading before accessing the tree.
t.mutex.Lock()
defer t.mutex.Unlock()
// Recursively create nodes for the inverted matrix in the tree until
// we reach the node to insert the matrix to. We start by passing in
// 0 as the parent index as we start at the root of the tree.
t.root.insertInvertedMatrix(invalidIndices, matrix, shards, 0)
return nil
}
func (n inversionNode) getInvertedMatrix(invalidIndices []int, parent int) matrix {
// Get the child node to search next from the list of children. The
// list of children starts relative to the parent index passed in
// because the indices of invalid rows are sorted (by default). As we
// search recursively, the first invalid index gets popped off the list,
// so when searching through the list of children, use that first invalid
// index to find the child node.
firstIndex := invalidIndices[0]
node := n.children[firstIndex-parent]
// If the child node doesn't exist in the list yet, fail fast by
// returning, so we can construct and insert the proper inverted matrix.
if node == nil {
return nil
}
// If there's more than one invalid index left in the list we should
// keep searching recursively.
if len(invalidIndices) > 1 {
// Search recursively on the child node by passing in the invalid indices
// with the first index popped off the front. Also the parent index to
// pass down is the first index plus one.
return node.getInvertedMatrix(invalidIndices[1:], firstIndex+1)
}
// If there aren't any more invalid indices to search, we've found our
// node. Return it, however keep in mind that the matrix could still be
// nil because intermediary nodes in the tree are created sometimes with
// their inversion matrices uninitialized.
return node.matrix
}
func (n inversionNode) insertInvertedMatrix(invalidIndices []int, matrix matrix, shards, parent int) {
// As above, get the child node to search next from the list of children.
// The list of children starts relative to the parent index passed in
// because the indices of invalid rows is sorted (by default). As we
// search recursively, the first invalid index gets popped off the list,
// so when searching through the list of children, use that first invalid
// index to find the child node.
firstIndex := invalidIndices[0]
node := n.children[firstIndex-parent]
// If the child node doesn't exist in the list yet, create a new
// node because we have the writer lock and add it to the list
// of children.
if node == nil {
// Make the length of the list of children equal to the number
// of shards minus the first invalid index because the list of
// invalid indices is sorted, so only this length of errors
// are possible in the tree.
node = &inversionNode{
children: make([]*inversionNode, shards-firstIndex),
}
// Insert the new node into the tree at the first index relative
// to the parent index that was given in this recursive call.
n.children[firstIndex-parent] = node
}
// If there's more than one invalid index left in the list we should
// keep searching recursively in order to find the node to add our
// matrix.
if len(invalidIndices) > 1 {
// As above, search recursively on the child node by passing in
// the invalid indices with the first index popped off the front.
// Also the total number of shards and parent index are passed down
// which is equal to the first index plus one.
node.insertInvertedMatrix(invalidIndices[1:], matrix, shards, firstIndex+1)
} else {
// If there aren't any more invalid indices to search, we've found our
// node. Cache the inverted matrix in this node.
node.matrix = matrix
}
}
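
To make the indexing concrete: with 3 data and 2 parity shards, caching an inverse for invalid rows {1, 3} descends root.children[1-0] into a node with 5-1 = 4 children, then stores the matrix at that node's children[3-2]. A hedged sketch of how this could be exercised from a test inside package reedsolomon (the types are unexported; the test name and the use of identityMatrix as a stand-in inverse are illustrative only):

func TestInversionTreeWalkthrough(t *testing.T) {
	tree := newInversionTree(3, 2) // 5 shards in total
	inv, _ := identityMatrix(3)    // stand-in for a real inverted sub-matrix
	if err := tree.InsertInvertedMatrix([]int{1, 3}, inv, 5); err != nil {
		t.Fatal(err)
	}
	// Lookups key on the same sorted invalid indices.
	if got := tree.GetInvertedMatrix([]int{1, 3}); got == nil {
		t.Fatal("expected the cached matrix back")
	}
}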

View File

@@ -0,0 +1,125 @@
/**
* Unit tests for inversion tree.
*
* Copyright 2016, Peter Collins
*/
package reedsolomon
import (
"testing"
)
func TestNewInversionTree(t *testing.T) {
tree := newInversionTree(3, 2)
children := len(tree.root.children)
if children != 5 {
t.Fatal("Root node children list length", children, "!=", 5)
}
str := tree.root.matrix.String()
expect := "[[1, 0, 0], [0, 1, 0], [0, 0, 1]]"
if str != expect {
t.Fatal(str, "!=", expect)
}
}
func TestGetInvertedMatrix(t *testing.T) {
tree := newInversionTree(3, 2)
matrix := tree.GetInvertedMatrix([]int{})
str := matrix.String()
expect := "[[1, 0, 0], [0, 1, 0], [0, 0, 1]]"
if str != expect {
t.Fatal(str, "!=", expect)
}
matrix = tree.GetInvertedMatrix([]int{1})
if matrix != nil {
t.Fatal(matrix, "!= nil")
}
matrix = tree.GetInvertedMatrix([]int{1, 2})
if matrix != nil {
t.Fatal(matrix, "!= nil")
}
matrix, err := newMatrix(3, 3)
if err != nil {
t.Fatalf("Failed initializing new Matrix : %s", err)
}
err = tree.InsertInvertedMatrix([]int{1}, matrix, 5)
if err != nil {
t.Fatalf("Failed inserting new Matrix : %s", err)
}
cachedMatrix := tree.GetInvertedMatrix([]int{1})
if cachedMatrix == nil {
t.Fatal(cachedMatrix, "== nil")
}
if matrix.String() != cachedMatrix.String() {
t.Fatal(matrix.String(), "!=", cachedMatrix.String())
}
}
func TestInsertInvertedMatrix(t *testing.T) {
tree := newInversionTree(3, 2)
matrix, err := newMatrix(3, 3)
if err != nil {
t.Fatalf("Failed initializing new Matrix : %s", err)
}
err = tree.InsertInvertedMatrix([]int{1}, matrix, 5)
if err != nil {
t.Fatalf("Failed inserting new Matrix : %s", err)
}
err = tree.InsertInvertedMatrix([]int{}, matrix, 5)
if err == nil {
t.Fatal("Should have failed inserting the root node matrix", matrix)
}
matrix, err = newMatrix(3, 2)
if err != nil {
t.Fatalf("Failed initializing new Matrix : %s", err)
}
err = tree.InsertInvertedMatrix([]int{2}, matrix, 5)
if err == nil {
t.Fatal("Should have failed inserting a non-square matrix", matrix)
}
matrix, err = newMatrix(3, 3)
if err != nil {
t.Fatalf("Failed initializing new Matrix : %s", err)
}
err = tree.InsertInvertedMatrix([]int{0, 1}, matrix, 5)
if err != nil {
t.Fatalf("Failed inserting new Matrix : %s", err)
}
}
func TestDoubleInsertInvertedMatrix(t *testing.T) {
tree := newInversionTree(3, 2)
matrix, err := newMatrix(3, 3)
if err != nil {
t.Fatalf("Failed initializing new Matrix : %s", err)
}
err = tree.InsertInvertedMatrix([]int{1}, matrix, 5)
if err != nil {
t.Fatalf("Failed inserting new Matrix : %s", err)
}
err = tree.InsertInvertedMatrix([]int{1}, matrix, 5)
if err != nil {
t.Fatalf("Failed inserting new Matrix : %s", err)
}
cachedMatrix := tree.GetInvertedMatrix([]int{1})
if cachedMatrix == nil {
t.Fatal(cachedMatrix, "== nil")
}
if matrix.String() != cachedMatrix.String() {
t.Fatal(matrix.String(), "!=", cachedMatrix.String())
}
}

279
vendor/github.com/klauspost/reedsolomon/matrix.go generated vendored Normal file
View File

@@ -0,0 +1,279 @@
/**
* Matrix Algebra over an 8-bit Galois Field
*
* Copyright 2015, Klaus Post
* Copyright 2015, Backblaze, Inc.
*/
package reedsolomon
import (
"errors"
"fmt"
"strconv"
"strings"
)
// byte[row][col]
type matrix [][]byte
// newMatrix returns a matrix of zeros.
func newMatrix(rows, cols int) (matrix, error) {
if rows <= 0 {
return nil, errInvalidRowSize
}
if cols <= 0 {
return nil, errInvalidColSize
}
m := matrix(make([][]byte, rows))
for i := range m {
m[i] = make([]byte, cols)
}
return m, nil
}
// NewMatrixData initializes a matrix with the given row-major data.
// Note that data is not copied from input.
func newMatrixData(data [][]byte) (matrix, error) {
m := matrix(data)
err := m.Check()
if err != nil {
return nil, err
}
return m, nil
}
// IdentityMatrix returns an identity matrix of the given size.
func identityMatrix(size int) (matrix, error) {
m, err := newMatrix(size, size)
if err != nil {
return nil, err
}
for i := range m {
m[i][i] = 1
}
return m, nil
}
// errInvalidRowSize will be returned if attempting to create a matrix with negative or zero row number.
var errInvalidRowSize = errors.New("invalid row size")
// errInvalidColSize will be returned if attempting to create a matrix with negative or zero column number.
var errInvalidColSize = errors.New("invalid column size")
// errColSizeMismatch is returned if the size of matrix columns mismatch.
var errColSizeMismatch = errors.New("column size is not the same for all rows")
func (m matrix) Check() error {
rows := len(m)
if rows <= 0 {
return errInvalidRowSize
}
cols := len(m[0])
if cols <= 0 {
return errInvalidColSize
}
for _, col := range m {
if len(col) != cols {
return errColSizeMismatch
}
}
return nil
}
// String returns a human-readable string of the matrix contents.
//
// Example: [[1, 2], [3, 4]]
func (m matrix) String() string {
rowOut := make([]string, 0, len(m))
for _, row := range m {
colOut := make([]string, 0, len(row))
for _, col := range row {
colOut = append(colOut, strconv.Itoa(int(col)))
}
rowOut = append(rowOut, "["+strings.Join(colOut, ", ")+"]")
}
return "[" + strings.Join(rowOut, ", ") + "]"
}
// Multiply multiplies this matrix (the one on the left) by another
// matrix (the one on the right) and returns a new matrix with the result.
func (m matrix) Multiply(right matrix) (matrix, error) {
if len(m[0]) != len(right) {
return nil, fmt.Errorf("columns on left (%d) is different than rows on right (%d)", len(m[0]), len(right))
}
result, _ := newMatrix(len(m), len(right[0]))
for r, row := range result {
for c := range row {
var value byte
for i := range m[0] {
value ^= galMultiply(m[r][i], right[i][c])
}
result[r][c] = value
}
}
return result, nil
}
// Augment returns the concatenation of this matrix and the matrix on the right.
func (m matrix) Augment(right matrix) (matrix, error) {
if len(m) != len(right) {
return nil, errMatrixSize
}
result, _ := newMatrix(len(m), len(m[0])+len(right[0]))
for r, row := range m {
for c := range row {
result[r][c] = m[r][c]
}
cols := len(m[0])
for c := range right[0] {
result[r][cols+c] = right[r][c]
}
}
return result, nil
}
// errMatrixSize is returned if the matrix dimensions don't match.
var errMatrixSize = errors.New("matrix sizes does not match")
func (m matrix) SameSize(n matrix) error {
if len(m) != len(n) {
return errMatrixSize
}
for i := range m {
if len(m[i]) != len(n[i]) {
return errMatrixSize
}
}
return nil
}
// Returns a part of this matrix. Data is copied.
func (m matrix) SubMatrix(rmin, cmin, rmax, cmax int) (matrix, error) {
result, err := newMatrix(rmax-rmin, cmax-cmin)
if err != nil {
return nil, err
}
// OPTME: If used heavily, use copy function to copy slice
for r := rmin; r < rmax; r++ {
for c := cmin; c < cmax; c++ {
result[r-rmin][c-cmin] = m[r][c]
}
}
return result, nil
}
// SwapRows exchanges two rows in the matrix.
func (m matrix) SwapRows(r1, r2 int) error {
if r1 < 0 || len(m) <= r1 || r2 < 0 || len(m) <= r2 {
return errInvalidRowSize
}
m[r2], m[r1] = m[r1], m[r2]
return nil
}
// IsSquare will return true if the matrix is square
// and false if it is not.
func (m matrix) IsSquare() bool {
return len(m) == len(m[0])
}
// errSingular is returned if the matrix is singular and cannot be inversed
var errSingular = errors.New("matrix is singular")
// errNotSquare is returned if attempting to inverse a non-square matrix.
var errNotSquare = errors.New("only square matrices can be inverted")
// Invert returns the inverse of this matrix.
// Returns errSingular when the matrix is singular and doesn't have an inverse.
// The matrix must be square, otherwise errNotSquare is returned.
func (m matrix) Invert() (matrix, error) {
if !m.IsSquare() {
return nil, errNotSquare
}
size := len(m)
work, _ := identityMatrix(size)
work, _ = m.Augment(work)
err := work.gaussianElimination()
if err != nil {
return nil, err
}
return work.SubMatrix(0, size, size, size*2)
}
func (m matrix) gaussianElimination() error {
rows := len(m)
columns := len(m[0])
// Clear out the part below the main diagonal and scale the main
// diagonal to be 1.
for r := 0; r < rows; r++ {
// If the element on the diagonal is 0, find a row below
// that has a non-zero and swap them.
if m[r][r] == 0 {
for rowBelow := r + 1; rowBelow < rows; rowBelow++ {
if m[rowBelow][r] != 0 {
m.SwapRows(r, rowBelow)
break
}
}
}
// If we couldn't find one, the matrix is singular.
if m[r][r] == 0 {
return errSingular
}
// Scale to 1.
if m[r][r] != 1 {
scale := galDivide(1, m[r][r])
for c := 0; c < columns; c++ {
m[r][c] = galMultiply(m[r][c], scale)
}
}
// Make everything below the 1 be a 0 by subtracting
// a multiple of it. (Subtraction and addition are
// both exclusive or in the Galois field.)
for rowBelow := r + 1; rowBelow < rows; rowBelow++ {
if m[rowBelow][r] != 0 {
scale := m[rowBelow][r]
for c := 0; c < columns; c++ {
m[rowBelow][c] ^= galMultiply(scale, m[r][c])
}
}
}
}
// Now clear the part above the main diagonal.
for d := 0; d < rows; d++ {
for rowAbove := 0; rowAbove < d; rowAbove++ {
if m[rowAbove][d] != 0 {
scale := m[rowAbove][d]
for c := 0; c < columns; c++ {
m[rowAbove][c] ^= galMultiply(scale, m[d][c])
}
}
}
}
return nil
}
// Create a Vandermonde matrix, which is guaranteed to have the
// property that any subset of rows that forms a square matrix
// is invertible.
func vandermonde(rows, cols int) (matrix, error) {
result, err := newMatrix(rows, cols)
if err != nil {
return nil, err
}
for r, row := range result {
for c := range row {
result[r][c] = galExp(byte(r), c)
}
}
return result, nil
}
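
These helpers are combined by New() in reedsolomon.go below: build a Vandermonde matrix, invert its top square, and multiply so that the top square becomes the identity (data shards pass through unchanged) while every square subset of rows stays invertible. A sketch for a 4+2 code, assuming it runs inside package reedsolomon since all of these functions are unexported (the test name is illustrative only):

func TestEncodingMatrixSketch(t *testing.T) {
	vm, _ := vandermonde(6, 4)         // 6 total rows, 4 data columns
	top, _ := vm.SubMatrix(0, 0, 4, 4) // the rows that map data onto itself
	topInv, err := top.Invert()
	if err != nil {
		t.Fatal(err)
	}
	enc, _ := vm.Multiply(topInv)
	// The top 4x4 of enc is now the identity; rows 4 and 5 generate parity.
	t.Log(enc)
}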

217
vendor/github.com/klauspost/reedsolomon/matrix_test.go generated vendored Normal file
View File

@@ -0,0 +1,217 @@
/**
* Unit tests for Matrix
*
* Copyright 2015, Klaus Post
* Copyright 2015, Backblaze, Inc. All rights reserved.
*/
package reedsolomon
import (
"testing"
)
// TestNewMatrix - Tests validate the result for invalid input and the allocations made by newMatrix method.
func TestNewMatrix(t *testing.T) {
testCases := []struct {
rows int
columns int
// flag to indicate whether the test should pass.
shouldPass bool
expectedResult matrix
expectedErr error
}{
// Test case - 1.
// Test case with a negative row size.
{-1, 10, false, nil, errInvalidRowSize},
// Test case - 2.
// Test case with a negative column size.
{10, -1, false, nil, errInvalidColSize},
// Test case - 3.
// Test case with negative value for both row and column size.
{-1, -1, false, nil, errInvalidRowSize},
// Test case - 4.
// Test case with 0 value for row size.
{0, 10, false, nil, errInvalidRowSize},
// Test case - 5.
// Test case with a negative row size and 0 column size.
{-1, 0, false, nil, errInvalidRowSize},
// Test case - 6.
// Test case with 0 value for both row and column size.
{0, 0, false, nil, errInvalidRowSize},
}
for i, testCase := range testCases {
actualResult, actualErr := newMatrix(testCase.rows, testCase.columns)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, actualErr.Error())
}
if actualErr == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, testCase.expectedErr)
}
// Failed as expected, but does it fail for the expected reason.
if actualErr != nil && !testCase.shouldPass {
if testCase.expectedErr != actualErr {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, testCase.expectedErr, actualErr)
}
}
// Test passes as expected, but the output values
// are verified for correctness here.
if actualErr == nil && testCase.shouldPass {
if testCase.rows != len(actualResult) {
// End the tests here if the size doesn't match the number of rows.
t.Fatalf("Test %d: Expected the size of the row of the new matrix to be `%d`, but instead found `%d`", i+1, testCase.rows, len(actualResult))
}
// Iterating over each row and validating the size of the column.
for j, row := range actualResult {
// If the row check passes, verify the size of each column.
if testCase.columns != len(row) {
t.Errorf("Test %d: Row %d: Expected the size of the column of the new matrix to be `%d`, but instead found `%d`", i+1, j+1, testCase.columns, len(row))
}
}
}
}
}
// TestMatrixIdentity - validates the method for returning identity matrix of given size.
func TestMatrixIdentity(t *testing.T) {
m, err := identityMatrix(3)
if err != nil {
t.Fatal(err)
}
str := m.String()
expect := "[[1, 0, 0], [0, 1, 0], [0, 0, 1]]"
if str != expect {
t.Fatal(str, "!=", expect)
}
}
// Tests validate the output of the matrix multiplication method.
func TestMatrixMultiply(t *testing.T) {
m1, err := newMatrixData(
[][]byte{
[]byte{1, 2},
[]byte{3, 4},
})
if err != nil {
t.Fatal(err)
}
m2, err := newMatrixData(
[][]byte{
[]byte{5, 6},
[]byte{7, 8},
})
if err != nil {
t.Fatal(err)
}
actual, err := m1.Multiply(m2)
if err != nil {
t.Fatal(err)
}
str := actual.String()
expect := "[[11, 22], [19, 42]]"
if str != expect {
t.Fatal(str, "!=", expect)
}
}
// Tests validate the output of the method which computes the inverse of a matrix.
func TestMatrixInverse(t *testing.T) {
testCases := []struct {
matrixData [][]byte
// expected inverse matrix.
expectedResult string
// flag indicating whether the test should pass.
shouldPass bool
expectedErr error
}{
// Test case - 1.
// Test case validating inverse of the input Matrix.
{
// input data to construct the matrix.
[][]byte{
[]byte{56, 23, 98},
[]byte{3, 100, 200},
[]byte{45, 201, 123},
},
// expected Inverse matrix.
"[[175, 133, 33], [130, 13, 245], [112, 35, 126]]",
// test is expected to pass.
true,
nil,
},
// Test case - 2.
// Test case validating inverse of the input Matrix.
{
// input data to construct the matrix.
[][]byte{
[]byte{1, 0, 0, 0, 0},
[]byte{0, 1, 0, 0, 0},
[]byte{0, 0, 0, 1, 0},
[]byte{0, 0, 0, 0, 1},
[]byte{7, 7, 6, 6, 1},
},
// expectedInverse matrix.
"[[1, 0, 0, 0, 0]," +
" [0, 1, 0, 0, 0]," +
" [123, 123, 1, 122, 122]," +
" [0, 0, 1, 0, 0]," +
" [0, 0, 0, 1, 0]]",
// test is expected to pass.
true,
nil,
},
// Test case with a non-square matrix.
// expected to fail with errNotSquare.
{
[][]byte{
[]byte{56, 23},
[]byte{3, 100},
[]byte{45, 201},
},
"",
false,
errNotSquare,
},
// Test case with singular matrix.
// expected to fail with error errSingular.
{
[][]byte{
[]byte{4, 2},
[]byte{12, 6},
},
"",
false,
errSingular,
},
}
for i, testCase := range testCases {
m, err := newMatrixData(testCase.matrixData)
if err != nil {
t.Fatalf("Test %d: Failed initializing new Matrix : %s", i+1, err)
}
actualResult, actualErr := m.Invert()
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, actualErr.Error())
}
if actualErr == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, testCase.expectedErr)
}
// Failed as expected, but does it fail for the expected reason.
if actualErr != nil && !testCase.shouldPass {
if testCase.expectedErr != actualErr {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, testCase.expectedErr, actualErr)
}
}
// Test passes as expected, but the output values
// are verified for correctness here.
if actualErr == nil && testCase.shouldPass {
if testCase.expectedResult != actualResult.String() {
t.Errorf("Test %d: The inverse matrix doesnt't match the expected result", i+1)
}
}
}
}

573
vendor/github.com/klauspost/reedsolomon/reedsolomon.go generated vendored Normal file
View File

@@ -0,0 +1,573 @@
/**
* Reed-Solomon Coding over 8-bit values.
*
* Copyright 2015, Klaus Post
* Copyright 2015, Backblaze, Inc.
*/
// Package reedsolomon enables Erasure Coding in Go
//
// For usage and examples, see https://github.com/klauspost/reedsolomon
//
package reedsolomon
import (
"bytes"
"errors"
"io"
"runtime"
"sync"
)
// Encoder is an interface to encode Reed-Solomon parity sets for your data.
type Encoder interface {
// Encodes parity for a set of data shards.
// Input is 'shards' containing data shards followed by parity shards.
// The number of shards must match the number given to New().
// Each shard is a byte array, and they must all be the same size.
// The parity shards will always be overwritten and the data shards
// will remain the same, so it is safe for you to read from the
// data shards while this is running.
Encode(shards [][]byte) error
// Verify returns true if the parity shards contain correct data.
// The data is the same format as Encode. No data is modified, so
// you are allowed to read from data while this is running.
Verify(shards [][]byte) (bool, error)
// Reconstruct will recreate the missing shards if possible.
//
// Given a list of shards, some of which contain data, fills in the
// ones that don't have data.
//
// The length of the array must be equal to the total number of shards.
// You indicate that a shard is missing by setting it to nil.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if data set is ok.
Reconstruct(shards [][]byte) error
// Split a data slice into the number of shards given to the encoder,
// and create empty parity shards.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// There must be at least 1 byte otherwise ErrShortData will be
// returned.
//
// The data will not be copied, except for the last shard, so you
// should not modify the data of the input slice afterwards.
Split(data []byte) ([][]byte, error)
// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
// You must supply the exact output size you want.
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
Join(dst io.Writer, shards [][]byte, outSize int) error
}
// reedSolomon contains a matrix for a specific
// distribution of data shards and parity shards.
// Construct it using New().
type reedSolomon struct {
DataShards int // Number of data shards, should not be modified.
ParityShards int // Number of parity shards, should not be modified.
Shards int // Total number of shards. Calculated, and should not be modified.
m matrix
tree inversionTree
parity [][]byte
}
// ErrInvShardNum will be returned by New, if you attempt to create
// an Encoder where either data or parity shards is zero or less.
var ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")
// ErrMaxShardNum will be returned by New, if you attempt to create
// an Encoder where the total number of data and parity shards is bigger
// than the Galois field GF(2^8) - 1 (i.e. more than 255).
var ErrMaxShardNum = errors.New("cannot create Encoder with 255 or more data+parity shards")
// New creates a new encoder and initializes it to
// the number of data shards and parity shards that
// you want to use. You can reuse this encoder.
// Note that the maximum total number of data and parity shards is 255.
func New(dataShards, parityShards int) (Encoder, error) {
r := reedSolomon{
DataShards: dataShards,
ParityShards: parityShards,
Shards: dataShards + parityShards,
}
if dataShards <= 0 || parityShards <= 0 {
return nil, ErrInvShardNum
}
if dataShards+parityShards > 255 {
return nil, ErrMaxShardNum
}
// Start with a Vandermonde matrix. This matrix would work,
// in theory, but doesn't have the property that the data
// shards are unchanged after encoding.
vm, err := vandermonde(r.Shards, dataShards)
if err != nil {
return nil, err
}
// Multiply by the inverse of the top square of the matrix.
// This will make the top square be the identity matrix, but
// preserve the property that any square subset of rows is
// invertible.
top, _ := vm.SubMatrix(0, 0, dataShards, dataShards)
top, _ = top.Invert()
r.m, _ = vm.Multiply(top)
// Inverted matrices are cached in a tree keyed by the indices
// of the invalid rows of the data to reconstruct.
// The inversion root node will have the identity matrix as
// its inversion matrix because it implies there are no errors
// with the original data.
r.tree = newInversionTree(dataShards, parityShards)
r.parity = make([][]byte, parityShards)
for i := range r.parity {
r.parity[i] = r.m[dataShards+i]
}
return &r, err
}
// ErrTooFewShards is returned if too few shards were given to
// Encode/Verify/Reconstruct. It will also be returned from Reconstruct
// if there were too few shards to reconstruct the missing data.
var ErrTooFewShards = errors.New("too few shards given")
// Encodes parity for a set of data shards.
// An array 'shards' containing data shards followed by parity shards.
// The number of shards must match the number given to New.
// Each shard is a byte array, and they must all be the same size.
// The parity shards will always be overwritten and the data shards
// will remain the same.
func (r reedSolomon) Encode(shards [][]byte) error {
if len(shards) != r.Shards {
return ErrTooFewShards
}
err := checkShards(shards, false)
if err != nil {
return err
}
// Get the slice of output buffers.
output := shards[r.DataShards:]
// Do the coding.
r.codeSomeShards(r.parity, shards[0:r.DataShards], output, r.ParityShards, len(shards[0]))
return nil
}
// Verify returns true if the parity shards contain the right data.
// The data is the same format as Encode. No data is modified.
func (r reedSolomon) Verify(shards [][]byte) (bool, error) {
if len(shards) != r.Shards {
return false, ErrTooFewShards
}
err := checkShards(shards, false)
if err != nil {
return false, err
}
// Slice of buffers being checked.
toCheck := shards[r.DataShards:]
// Do the checking.
return r.checkSomeShards(r.parity, shards[0:r.DataShards], toCheck, r.ParityShards, len(shards[0])), nil
}
// Multiplies a subset of rows from a coding matrix by a full set of
// input shards to produce some output shards.
// 'matrixRows' is the rows from the matrix to use.
// 'inputs' is an array of byte arrays, each of which is one input shard.
// The number of inputs used is determined by the length of each matrix row.
// 'outputs' is an array of byte arrays where the computed shards are stored.
// The number of outputs computed, and the
// number of matrix rows used, is determined by
// outputCount, which is the number of outputs to compute.
func (r reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
if runtime.GOMAXPROCS(0) > 1 && len(inputs[0]) > minSplitSize {
r.codeSomeShardsP(matrixRows, inputs, outputs, outputCount, byteCount)
return
}
for c := 0; c < r.DataShards; c++ {
in := inputs[c]
for iRow := 0; iRow < outputCount; iRow++ {
if c == 0 {
galMulSlice(matrixRows[iRow][c], in, outputs[iRow])
} else {
galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow])
}
}
}
}
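// For illustration: element-wise, codeSomeShards computes, for every output
// row p and byte offset i,
//
//	outputs[p][i] = matrixRows[p][0]*inputs[0][i] ^ ... ^ matrixRows[p][r.DataShards-1]*inputs[r.DataShards-1][i]
//
// with * and ^ taken in GF(2^8). Every byte offset is independent, which is
// why codeSomeShardsP below can safely split the work by byte range.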
const (
minSplitSize = 512 // min split size per goroutine
maxGoroutines = 50 // max goroutines number for encoding & decoding
)
// Perform the same as codeSomeShards, but split the workload into
// several goroutines.
func (r reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
var wg sync.WaitGroup
do := byteCount / maxGoroutines
if do < minSplitSize {
do = minSplitSize
}
start := 0
for start < byteCount {
if start+do > byteCount {
do = byteCount - start
}
wg.Add(1)
go func(start, stop int) {
for c := 0; c < r.DataShards; c++ {
in := inputs[c]
for iRow := 0; iRow < outputCount; iRow++ {
if c == 0 {
galMulSlice(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop])
} else {
galMulSliceXor(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop])
}
}
}
wg.Done()
}(start, start+do)
start += do
}
wg.Wait()
}
// checkSomeShards is mostly the same as codeSomeShards,
// except this will check values and return
// as soon as a difference is found.
func (r reedSolomon) checkSomeShards(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
same := true
var mu sync.RWMutex // For above
var wg sync.WaitGroup
do := byteCount / maxGoroutines
if do < minSplitSize {
do = minSplitSize
}
start := 0
for start < byteCount {
if start+do > byteCount {
do = byteCount - start
}
wg.Add(1)
go func(start, do int) {
defer wg.Done()
outputs := make([][]byte, len(toCheck))
for i := range outputs {
outputs[i] = make([]byte, do)
}
for c := 0; c < r.DataShards; c++ {
mu.RLock()
if !same {
mu.RUnlock()
return
}
mu.RUnlock()
in := inputs[c][start : start+do]
for iRow := 0; iRow < outputCount; iRow++ {
galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow])
}
}
for i, calc := range outputs {
if !bytes.Equal(calc, toCheck[i][start:start+do]) {
mu.Lock()
same = false
mu.Unlock()
return
}
}
}(start, do)
start += do
}
wg.Wait()
return same
}
// ErrShardNoData will be returned if there are no shards,
// or if the length of all shards is zero.
var ErrShardNoData = errors.New("no shard data")
// ErrShardSize is returned if shard length isn't the same for all
// shards.
var ErrShardSize = errors.New("shard sizes does not match")
// checkShards will check if shards are the same size
// or 0, if allowed. An error is returned if this fails.
// An error is also returned if all shards are size 0.
func checkShards(shards [][]byte, nilok bool) error {
size := shardSize(shards)
if size == 0 {
return ErrShardNoData
}
for _, shard := range shards {
if len(shard) != size {
if len(shard) != 0 || !nilok {
return ErrShardSize
}
}
}
return nil
}
// shardSize returns the size of a single shard.
// The first non-zero size is returned,
// or 0 if all shards are size 0.
func shardSize(shards [][]byte) int {
for _, shard := range shards {
if len(shard) != 0 {
return len(shard)
}
}
return 0
}
// Reconstruct will recreate the missing shards, if possible.
//
// Given a list of shards, some of which contain data, fills in the
// ones that don't have data.
//
// The length of the array must be equal to Shards.
// You indicate that a shard is missing by setting it to nil.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if data set is ok.
func (r reedSolomon) Reconstruct(shards [][]byte) error {
if len(shards) != r.Shards {
return ErrTooFewShards
}
// Check arguments.
err := checkShards(shards, true)
if err != nil {
return err
}
shardSize := shardSize(shards)
// Quick check: are all of the shards present? If so, there's
// nothing to do.
numberPresent := 0
for i := 0; i < r.Shards; i++ {
if len(shards[i]) != 0 {
numberPresent++
}
}
if numberPresent == r.Shards {
// Cool. All of the shards have data. We don't
// need to do anything.
return nil
}
// More complete sanity check
if numberPresent < r.DataShards {
return ErrTooFewShards
}
// Pull out an array holding just the shards that
// correspond to the rows of the submatrix. These shards
// will be the input to the decoding process that re-creates
// the missing data shards.
//
// Also, create an array of indices of the valid rows we do have
// and the invalid rows we don't have up until we have enough valid rows.
subShards := make([][]byte, r.DataShards)
validIndices := make([]int, r.DataShards)
invalidIndices := make([]int, 0)
subMatrixRow := 0
for matrixRow := 0; matrixRow < r.Shards && subMatrixRow < r.DataShards; matrixRow++ {
if len(shards[matrixRow]) != 0 {
subShards[subMatrixRow] = shards[matrixRow]
validIndices[subMatrixRow] = matrixRow
subMatrixRow++
} else {
invalidIndices = append(invalidIndices, matrixRow)
}
}
// Attempt to get the cached inverted matrix out of the tree
// based on the indices of the invalid rows.
dataDecodeMatrix := r.tree.GetInvertedMatrix(invalidIndices)
// If the inverted matrix isn't cached in the tree yet we must
// construct it ourselves and insert it into the tree for the
// future. In this way the inversion tree is lazily loaded.
if dataDecodeMatrix == nil {
// Pull out the rows of the matrix that correspond to the
// shards that we have and build a square matrix. This
// matrix could be used to generate the shards that we have
// from the original data.
subMatrix, _ := newMatrix(r.DataShards, r.DataShards)
for subMatrixRow, validIndex := range validIndices {
for c := 0; c < r.DataShards; c++ {
subMatrix[subMatrixRow][c] = r.m[validIndex][c]
}
}
// Invert the matrix, so we can go from the encoded shards
// back to the original data. Then pull out the row that
// generates the shard that we want to decode. Note that
// since this matrix maps back to the original data, it can
// be used to create a data shard, but not a parity shard.
dataDecodeMatrix, err = subMatrix.Invert()
if err != nil {
return err
}
// Cache the inverted matrix in the tree for future use keyed on the
// indices of the invalid rows.
err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.Shards)
if err != nil {
return err
}
}
// Re-create any data shards that were missing.
//
// The input to the coding is all of the shards we actually
// have, and the output is the missing data shards. The computation
// is done using the special decode matrix we just built.
outputs := make([][]byte, r.ParityShards)
matrixRows := make([][]byte, r.ParityShards)
outputCount := 0
for iShard := 0; iShard < r.DataShards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = make([]byte, shardSize)
outputs[outputCount] = shards[iShard]
matrixRows[outputCount] = dataDecodeMatrix[iShard]
outputCount++
}
}
r.codeSomeShards(matrixRows, subShards, outputs[:outputCount], outputCount, shardSize)
// Now that we have all of the data shards intact, we can
// compute any of the parity that is missing.
//
// The input to the coding is ALL of the data shards, including
// any that we just calculated. The output is whichever of the
// parity shards were missing.
outputCount = 0
for iShard := r.DataShards; iShard < r.Shards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = make([]byte, shardSize)
outputs[outputCount] = shards[iShard]
matrixRows[outputCount] = r.parity[iShard-r.DataShards]
outputCount++
}
}
r.codeSomeShards(matrixRows, shards[:r.DataShards], outputs[:outputCount], outputCount, shardSize)
return nil
}
// ErrShortData will be returned by Split(), if there isn't enough data
// to fill the number of shards.
var ErrShortData = errors.New("not enough data to fill the number of requested shards")
// Split a data slice into the number of shards given to the encoder,
// and create empty parity shards.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// There must be at least 1 byte otherwise ErrShortData will be
// returned.
//
// The data will not be copied, except for the last shard, so you
// should not modify the data of the input slice afterwards.
func (r reedSolomon) Split(data []byte) ([][]byte, error) {
if len(data) == 0 {
return nil, ErrShortData
}
// Calculate number of bytes per shard.
perShard := (len(data) + r.DataShards - 1) / r.DataShards
// Pad data to r.Shards*perShard.
padding := make([]byte, (r.Shards*perShard)-len(data))
data = append(data, padding...)
// Split into equal-length shards.
dst := make([][]byte, r.Shards)
for i := range dst {
dst[i] = data[:perShard]
data = data[perShard:]
}
return dst, nil
}
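// For illustration, the arithmetic above with 250000 bytes and New(5, 3):
// perShard = ceil(250000/5) = 50000, the input is padded to 8*50000 bytes,
// and eight 50000-byte shards are returned; the last three are zero-filled
// and will be overwritten by Encode.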
// ErrReconstructRequired is returned if too few data shards are intact and a
// reconstruction is required before you can successfully join the shards.
var ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")
// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
// You must supply the exact output size you want.
//
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
// If one or more required data shards are nil, ErrReconstructRequired will be returned.
func (r reedSolomon) Join(dst io.Writer, shards [][]byte, outSize int) error {
// Do we have enough shards?
if len(shards) < r.DataShards {
return ErrTooFewShards
}
shards = shards[:r.DataShards]
// Do we have enough data?
size := 0
for _, shard := range shards {
if shard == nil {
return ErrReconstructRequired
}
size += len(shard)
// Do we have enough data already?
if size >= outSize {
break
}
}
if size < outSize {
return ErrShortData
}
// Copy data to dst
write := outSize
for _, shard := range shards {
if write < len(shard) {
_, err := dst.Write(shard[:write])
return err
}
n, err := dst.Write(shard)
if err != nil {
return err
}
write -= n
}
return nil
}
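
Putting the exported API together, a minimal standalone usage sketch (error handling elided; the import path is the one this vendor tree mirrors):

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, _ := reedsolomon.New(10, 3) // 10 data shards + 3 parity shards
	data := bytes.Repeat([]byte("syncthing"), 1000)

	shards, _ := enc.Split(data) // 13 shards; the data itself is not copied
	_ = enc.Encode(shards)       // fill in the 3 parity shards

	// Lose up to 3 shards; nil marks a shard as missing.
	shards[0], shards[4], shards[12] = nil, nil, nil
	_ = enc.Reconstruct(shards)

	ok, _ := enc.Verify(shards)
	buf := new(bytes.Buffer)
	_ = enc.Join(buf, shards, len(data)) // write back exactly len(data) bytes
	fmt.Println(ok, bytes.Equal(buf.Bytes(), data)) // true true
}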

View File

@@ -0,0 +1,700 @@
/**
* Unit tests for ReedSolomon
*
* Copyright 2015, Klaus Post
* Copyright 2015, Backblaze, Inc. All rights reserved.
*/
package reedsolomon
import (
"bytes"
"math/rand"
"runtime"
"testing"
)
func TestEncoding(t *testing.T) {
perShard := 50000
r, err := New(10, 3)
if err != nil {
t.Fatal(err)
}
shards := make([][]byte, 13)
for s := range shards {
shards[s] = make([]byte, perShard)
}
rand.Seed(0)
for s := 0; s < 13; s++ {
fillRandom(shards[s])
}
err = r.Encode(shards)
if err != nil {
t.Fatal(err)
}
ok, err := r.Verify(shards)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("Verification failed")
}
err = r.Encode(make([][]byte, 1))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
badShards := make([][]byte, 13)
badShards[0] = make([]byte, 1)
err = r.Encode(badShards)
if err != ErrShardSize {
t.Errorf("expected %v, got %v", ErrShardSize, err)
}
}
func TestReconstruct(t *testing.T) {
perShard := 50000
r, err := New(10, 3)
if err != nil {
t.Fatal(err)
}
shards := make([][]byte, 13)
for s := range shards {
shards[s] = make([]byte, perShard)
}
rand.Seed(0)
for s := 0; s < 13; s++ {
fillRandom(shards[s])
}
err = r.Encode(shards)
if err != nil {
t.Fatal(err)
}
// Reconstruct with all shards present
err = r.Reconstruct(shards)
if err != nil {
t.Fatal(err)
}
// Reconstruct with 10 shards present
shards[0] = nil
shards[7] = nil
shards[11] = nil
err = r.Reconstruct(shards)
if err != nil {
t.Fatal(err)
}
ok, err := r.Verify(shards)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("Verification failed")
}
// Reconstruct with 9 shards present (should fail)
shards[0] = nil
shards[4] = nil
shards[7] = nil
shards[11] = nil
err = r.Reconstruct(shards)
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = r.Reconstruct(make([][]byte, 1))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = r.Reconstruct(make([][]byte, 13))
if err != ErrShardNoData {
t.Errorf("expected %v, got %v", ErrShardNoData, err)
}
}
func TestVerify(t *testing.T) {
perShard := 33333
r, err := New(10, 4)
if err != nil {
t.Fatal(err)
}
shards := make([][]byte, 14)
for s := range shards {
shards[s] = make([]byte, perShard)
}
rand.Seed(0)
for s := 0; s < 10; s++ {
fillRandom(shards[s])
}
err = r.Encode(shards)
if err != nil {
t.Fatal(err)
}
ok, err := r.Verify(shards)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("Verification failed")
}
// Put in random data. Verification should fail
fillRandom(shards[10])
ok, err = r.Verify(shards)
if err != nil {
t.Fatal(err)
}
if ok {
t.Fatal("Verification did not fail")
}
// Re-encode
err = r.Encode(shards)
if err != nil {
t.Fatal(err)
}
// Fill a data segment with random data
fillRandom(shards[0])
ok, err = r.Verify(shards)
if err != nil {
t.Fatal(err)
}
if ok {
t.Fatal("Verification did not fail")
}
_, err = r.Verify(make([][]byte, 1))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
_, err = r.Verify(make([][]byte, 14))
if err != ErrShardNoData {
t.Errorf("expected %v, got %v", ErrShardNoData, err)
}
}
func TestOneEncode(t *testing.T) {
codec, err := New(5, 5)
if err != nil {
t.Fatal(err)
}
shards := [][]byte{
{0, 1},
{4, 5},
{2, 3},
{6, 7},
{8, 9},
{0, 0},
{0, 0},
{0, 0},
{0, 0},
{0, 0},
}
codec.Encode(shards)
if shards[5][0] != 12 || shards[5][1] != 13 {
t.Fatal("shard 5 mismatch")
}
if shards[6][0] != 10 || shards[6][1] != 11 {
t.Fatal("shard 6 mismatch")
}
if shards[7][0] != 14 || shards[7][1] != 15 {
t.Fatal("shard 7 mismatch")
}
if shards[8][0] != 90 || shards[8][1] != 91 {
t.Fatal("shard 8 mismatch")
}
if shards[9][0] != 94 || shards[9][1] != 95 {
t.Fatal("shard 9 mismatch")
}
ok, err := codec.Verify(shards)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not verify")
}
shards[8][0]++
ok, err = codec.Verify(shards)
if err != nil {
t.Fatal(err)
}
if ok {
t.Fatal("verify did not fail as expected")
}
}
func fillRandom(p []byte) {
for i := 0; i < len(p); i += 7 {
val := rand.Int63()
for j := 0; i+j < len(p) && j < 7; j++ {
p[i+j] = byte(val)
val >>= 8
}
}
}
func benchmarkEncode(b *testing.B, dataShards, parityShards, shardSize int) {
r, err := New(dataShards, parityShards)
if err != nil {
b.Fatal(err)
}
shards := make([][]byte, dataShards+parityShards)
for s := range shards {
shards[s] = make([]byte, shardSize)
}
rand.Seed(0)
for s := 0; s < dataShards; s++ {
fillRandom(shards[s])
}
b.SetBytes(int64(shardSize * dataShards))
b.ResetTimer()
for i := 0; i < b.N; i++ {
err = r.Encode(shards)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkEncode10x2x10000(b *testing.B) {
benchmarkEncode(b, 10, 2, 10000)
}
func BenchmarkEncode100x20x10000(b *testing.B) {
benchmarkEncode(b, 100, 20, 10000)
}
func BenchmarkEncode17x3x1M(b *testing.B) {
benchmarkEncode(b, 17, 3, 1024*1024)
}
// Benchmark 10 data shards and 4 parity shards with 16MB each.
func BenchmarkEncode10x4x16M(b *testing.B) {
benchmarkEncode(b, 10, 4, 16*1024*1024)
}
// Benchmark 5 data shards and 2 parity shards with 1MB each.
func BenchmarkEncode5x2x1M(b *testing.B) {
benchmarkEncode(b, 5, 2, 1024*1024)
}
// Benchmark 10 data shards and 2 parity shards with 1MB each.
func BenchmarkEncode10x2x1M(b *testing.B) {
benchmarkEncode(b, 10, 2, 1024*1024)
}
// Benchmark 10 data shards and 4 parity shards with 1MB each.
func BenchmarkEncode10x4x1M(b *testing.B) {
benchmarkEncode(b, 10, 4, 1024*1024)
}
// Benchmark 50 data shards and 20 parity shards with 1MB each.
func BenchmarkEncode50x20x1M(b *testing.B) {
benchmarkEncode(b, 50, 20, 1024*1024)
}
// Benchmark 17 data shards and 3 parity shards with 16MB each.
func BenchmarkEncode17x3x16M(b *testing.B) {
benchmarkEncode(b, 17, 3, 16*1024*1024)
}
func benchmarkVerify(b *testing.B, dataShards, parityShards, shardSize int) {
r, err := New(dataShards, parityShards)
if err != nil {
b.Fatal(err)
}
shards := make([][]byte, parityShards+dataShards)
for s := range shards {
shards[s] = make([]byte, shardSize)
}
rand.Seed(0)
for s := 0; s < dataShards; s++ {
fillRandom(shards[s])
}
err = r.Encode(shards)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(shardSize * dataShards))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err = r.Verify(shards)
if err != nil {
b.Fatal(err)
}
}
}
// Benchmark 10 data slices with 2 parity slices holding 10000 bytes each
func BenchmarkVerify10x2x10000(b *testing.B) {
benchmarkVerify(b, 10, 2, 10000)
}
// Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
func BenchmarkVerify50x5x50000(b *testing.B) {
benchmarkVerify(b, 50, 5, 100000)
}
// Benchmark 10 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkVerify10x2x1M(b *testing.B) {
benchmarkVerify(b, 10, 2, 1024*1024)
}
// Benchmark 5 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkVerify5x2x1M(b *testing.B) {
benchmarkVerify(b, 5, 2, 1024*1024)
}
// Benchmark 10 data slices with 4 parity slices holding 1MB bytes each
func BenchmarkVerify10x4x1M(b *testing.B) {
benchmarkVerify(b, 10, 4, 1024*1024)
}
// Benchmark 50 data slices with 20 parity slices holding 1MB bytes each
func BenchmarkVerify50x20x1M(b *testing.B) {
benchmarkVerify(b, 50, 20, 1024*1024)
}
// Benchmark 10 data slices with 4 parity slices holding 16MB bytes each
func BenchmarkVerify10x4x16M(b *testing.B) {
benchmarkVerify(b, 10, 4, 16*1024*1024)
}
func corruptRandom(shards [][]byte, dataShards, parityShards int) {
shardsToCorrupt := rand.Intn(parityShards)
for i := 1; i <= shardsToCorrupt; i++ {
shards[rand.Intn(dataShards+parityShards)] = nil
}
}
func benchmarkReconstruct(b *testing.B, dataShards, parityShards, shardSize int) {
r, err := New(dataShards, parityShards)
if err != nil {
b.Fatal(err)
}
shards := make([][]byte, parityShards+dataShards)
for s := range shards {
shards[s] = make([]byte, shardSize)
}
rand.Seed(0)
for s := 0; s < dataShards; s++ {
fillRandom(shards[s])
}
err = r.Encode(shards)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(shardSize * dataShards))
b.ResetTimer()
for i := 0; i < b.N; i++ {
corruptRandom(shards, dataShards, parityShards)
err = r.Reconstruct(shards)
if err != nil {
b.Fatal(err)
}
ok, err := r.Verify(shards)
if err != nil {
b.Fatal(err)
}
if !ok {
b.Fatal("Verification failed")
}
}
}
// Benchmark 10 data slices with 2 parity slices holding 10000 bytes each
func BenchmarkReconstruct10x2x10000(b *testing.B) {
benchmarkReconstruct(b, 10, 2, 10000)
}
// Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
func BenchmarkReconstruct50x5x50000(b *testing.B) {
benchmarkReconstruct(b, 50, 5, 100000)
}
// Benchmark 10 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkReconstruct10x2x1M(b *testing.B) {
benchmarkReconstruct(b, 10, 2, 1024*1024)
}
// Benchmark 5 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkReconstruct5x2x1M(b *testing.B) {
benchmarkReconstruct(b, 5, 2, 1024*1024)
}
// Benchmark 10 data slices with 4 parity slices holding 1MB bytes each
func BenchmarkReconstruct10x4x1M(b *testing.B) {
benchmarkReconstruct(b, 10, 4, 1024*1024)
}
// Benchmark 50 data slices with 20 parity slices holding 1MB bytes each
func BenchmarkReconstruct50x20x1M(b *testing.B) {
benchmarkReconstruct(b, 50, 20, 1024*1024)
}
// Benchmark 10 data slices with 4 parity slices holding 16MB bytes each
func BenchmarkReconstruct10x4x16M(b *testing.B) {
benchmarkReconstruct(b, 10, 4, 16*1024*1024)
}
func benchmarkReconstructP(b *testing.B, dataShards, parityShards, shardSize int) {
r, err := New(dataShards, parityShards)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(shardSize * dataShards))
runtime.GOMAXPROCS(runtime.NumCPU())
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
shards := make([][]byte, parityShards+dataShards)
for s := range shards {
shards[s] = make([]byte, shardSize)
}
rand.Seed(0)
for s := 0; s < dataShards; s++ {
fillRandom(shards[s])
}
err = r.Encode(shards)
if err != nil {
b.Fatal(err)
}
for pb.Next() {
corruptRandom(shards, dataShards, parityShards)
err = r.Reconstruct(shards)
if err != nil {
b.Fatal(err)
}
ok, err := r.Verify(shards)
if err != nil {
b.Fatal(err)
}
if !ok {
b.Fatal("Verification failed")
}
}
})
}
// Benchmark 10 data slices with 2 parity slices holding 10000 bytes each
func BenchmarkReconstructP10x2x10000(b *testing.B) {
benchmarkReconstructP(b, 10, 2, 10000)
}
// Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
func BenchmarkReconstructP50x5x50000(b *testing.B) {
benchmarkReconstructP(b, 50, 5, 100000)
}
// Benchmark 10 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkReconstructP10x2x1M(b *testing.B) {
benchmarkReconstructP(b, 10, 2, 1024*1024)
}
// Benchmark 5 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkReconstructP5x2x1M(b *testing.B) {
benchmarkReconstructP(b, 5, 2, 1024*1024)
}
// Benchmark 10 data slices with 4 parity slices holding 1MB bytes each
func BenchmarkReconstructP10x4x1M(b *testing.B) {
benchmarkReconstructP(b, 10, 4, 1024*1024)
}
// Benchmark 50 data slices with 20 parity slices holding 1MB bytes each
func BenchmarkReconstructP50x20x1M(b *testing.B) {
benchmarkReconstructP(b, 50, 20, 1024*1024)
}
// Benchmark 10 data slices with 4 parity slices holding 16MB bytes each
func BenchmarkReconstructP10x4x16M(b *testing.B) {
benchmarkReconstructP(b, 10, 4, 16*1024*1024)
}
func TestEncoderReconstruct(t *testing.T) {
// Create some sample data
var data = make([]byte, 250000)
fillRandom(data)
// Create 5 data slices of 50000 elements each
enc, _ := New(5, 3)
shards, _ := enc.Split(data)
err := enc.Encode(shards)
if err != nil {
t.Fatal(err)
}
// Check that it verifies
ok, err := enc.Verify(shards)
if !ok || err != nil {
t.Fatal("not ok:", ok, "err:", err)
}
// Delete a shard
shards[0] = nil
// Should reconstruct
err = enc.Reconstruct(shards)
if err != nil {
t.Fatal(err)
}
// Check that it verifies
ok, err = enc.Verify(shards)
if !ok || err != nil {
t.Fatal("not ok:", ok, "err:", err)
}
// Recover original bytes
buf := new(bytes.Buffer)
err = enc.Join(buf, shards, len(data))
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf.Bytes(), data) {
t.Fatal("recovered bytes do not match")
}
// Corrupt a shard
shards[0] = nil
shards[1][0], shards[1][500] = 75, 75
// Should reconstruct (but with corrupted data)
err = enc.Reconstruct(shards)
if err != nil {
t.Fatal(err)
}
// Check that it verifies
ok, err = enc.Verify(shards)
if ok || err != nil {
t.Fatal("error or ok:", ok, "err:", err)
}
// Recovered data should not match original
buf.Reset()
err = enc.Join(buf, shards, len(data))
if err != nil {
t.Fatal(err)
}
if bytes.Equal(buf.Bytes(), data) {
t.Fatal("corrupted data matches original")
}
}
func TestSplitJoin(t *testing.T) {
var data = make([]byte, 250000)
rand.Seed(0)
fillRandom(data)
enc, _ := New(5, 3)
shards, err := enc.Split(data)
if err != nil {
t.Fatal(err)
}
_, err = enc.Split([]byte{})
if err != ErrShortData {
t.Errorf("expected %v, got %v", ErrShortData, err)
}
buf := new(bytes.Buffer)
err = enc.Join(buf, shards, 50)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf.Bytes(), data[:50]) {
t.Fatal("recovered data does match original")
}
err = enc.Join(buf, [][]byte{}, 0)
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = enc.Join(buf, shards, len(data)+1)
if err != ErrShortData {
t.Errorf("expected %v, got %v", ErrShortData, err)
}
shards[0] = nil
err = enc.Join(buf, shards, len(data))
if err != ErrReconstructRequired {
t.Errorf("expected %v, got %v", ErrReconstructRequired, err)
}
}
func TestCodeSomeShards(t *testing.T) {
var data = make([]byte, 250000)
fillRandom(data)
enc, _ := New(5, 3)
r := enc.(*reedSolomon) // need to access private methods
shards, _ := enc.Split(data)
old := runtime.GOMAXPROCS(1)
r.codeSomeShards(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))
// hopefully more than 1 CPU
runtime.GOMAXPROCS(runtime.NumCPU())
r.codeSomeShards(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))
// reset MAXPROCS, otherwise testing complains
runtime.GOMAXPROCS(old)
}
func TestAllMatrices(t *testing.T) {
t.Skip("Skipping slow matrix check")
for i := 1; i < 257; i++ {
_, err := New(i, i)
if err != nil {
t.Fatal("creating matrix size", i, i, ":", err)
}
}
}
func TestNew(t *testing.T) {
tests := []struct {
data, parity int
err error
}{
{127, 127, nil},
{256, 256, ErrMaxShardNum},
{0, 1, ErrInvShardNum},
{1, 0, ErrInvShardNum},
{257, 1, ErrMaxShardNum},
// overflow causes r.Shards to be negative
{256, int(^uint(0) >> 1), errInvalidRowSize},
}
for _, test := range tests {
_, err := New(test.data, test.parity)
if err != test.err {
t.Errorf("New(%v, %v): expected %v, got %v", test.data, test.parity, test.err, err)
}
}
}

575
vendor/github.com/klauspost/reedsolomon/streaming.go generated vendored Normal file
View File

@@ -0,0 +1,575 @@
/**
* Reed-Solomon Coding over 8-bit values.
*
* Copyright 2015, Klaus Post
* Copyright 2015, Backblaze, Inc.
*/
package reedsolomon
import (
"bytes"
"errors"
"fmt"
"io"
"sync"
)
// StreamEncoder is an interface to encode Reed-Solomon parity sets for your data.
// It provides a fully streaming interface, and processes data in blocks of up to 4MB.
//
// For small shard sizes, 10MB and below, it is recommended to use the in-memory interface,
// since the streaming interface has a start up overhead.
//
// For all operations, readers and writers should not assume any order/size of
// individual reads/writes.
//
// For usage examples, see "stream-encoder.go" and "streamdecoder.go" in the examples
// folder.
type StreamEncoder interface {
// Encodes parity shards for a set of data shards.
//
// Input is 'data' containing readers for the data shards, followed by
// 'parity' containing io.Writers that receive the parity shards.
//
// The number of shards must match the number given to NewStream().
//
// Each reader must supply the same number of bytes.
//
// The parity shards will be written to the writer.
// The number of bytes written will match the input size.
//
// If a data stream returns an error, a StreamReadError type error
// will be returned. If a parity writer returns an error, a
// StreamWriteError will be returned.
Encode(data []io.Reader, parity []io.Writer) error
// Verify returns true if the parity shards contain correct data.
//
// The number of shards must match the total number of data+parity shards
// given to NewStream().
//
// Each reader must supply the same number of bytes.
// If a shard stream returns an error, a StreamReadError type error
// will be returned.
Verify(shards []io.Reader) (bool, error)
// Reconstruct will recreate the missing shards if possible.
//
// Given a list of valid shards (to read) and invalid shards (to write)
//
// You indicate that a shard is missing by setting it to nil in the 'valid'
// slice and at the same time setting a non-nil writer in "fill".
// An index cannot contain both non-nil 'valid' and 'fill' entry.
// If both are provided 'ErrReconstructMismatch' is returned.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if data set is ok.
Reconstruct(valid []io.Reader, fill []io.Writer) error
// Split an input stream into the number of shards given to the encoder.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// You must supply the total size of your input.
// 'ErrShortData' will be returned if it is unable to retrieve the
// number of bytes indicated.
Split(data io.Reader, dst []io.Writer, size int64) (err error)
// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
//
// You must supply the exact output size you want.
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
Join(dst io.Writer, shards []io.Reader, outSize int64) error
}
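// exampleStreamRoundTrip is an illustrative sketch only, not part of the
// upstream library: it shows one plausible way to drive the StreamEncoder
// interface documented above using in-memory buffers. The shard contents
// are hypothetical.
func exampleStreamRoundTrip() (bool, error) {
	enc, err := NewStream(2, 1) // 2 data shards, 1 parity shard
	if err != nil {
		return false, err
	}
	// Each data shard must supply the same number of bytes.
	d0, d1 := []byte{1, 2, 3, 4}, []byte{5, 6, 7, 8}
	parity := &bytes.Buffer{}
	err = enc.Encode(
		[]io.Reader{bytes.NewReader(d0), bytes.NewReader(d1)},
		[]io.Writer{parity},
	)
	if err != nil {
		return false, err
	}
	// Verify expects readers for the data shards followed by the parity shards.
	return enc.Verify([]io.Reader{
		bytes.NewReader(d0),
		bytes.NewReader(d1),
		bytes.NewReader(parity.Bytes()),
	})
}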
// StreamReadError is returned when a read error is encountered
// that relates to a supplied stream.
// This will allow you to find out which reader has failed.
type StreamReadError struct {
Err error // The error
Stream int // The stream number on which the error occurred
}
// Error returns the error as a string
func (s StreamReadError) Error() string {
return fmt.Sprintf("error reading stream %d: %s", s.Stream, s.Err)
}
// String returns the error as a string
func (s StreamReadError) String() string {
return s.Error()
}
// StreamWriteError is returned when a write error is encountered
// that relates to a supplied stream. This will allow you to
// find out which reader has failed.
type StreamWriteError struct {
Err error // The error
Stream int // The stream number on which the error occurred
}
// Error returns the error as a string
func (s StreamWriteError) Error() string {
return fmt.Sprintf("error writing stream %d: %s", s.Stream, s.Err)
}
// String returns the error as a string
func (s StreamWriteError) String() string {
return s.Error()
}
// rsStream contains a matrix for a specific
// distribution of data shards and parity shards.
// Construct it using NewStream().
type rsStream struct {
r *reedSolomon
bs int // Block size
// Shard reader
readShards func(dst [][]byte, in []io.Reader) error
// Shard writer
writeShards func(out []io.Writer, in [][]byte) error
creads bool
cwrites bool
}
// NewStream creates a new encoder and initializes it to
// the number of data shards and parity shards that
// you want to use. You can reuse this encoder.
// Note that the maximum number of data shards is 256.
func NewStream(dataShards, parityShards int) (StreamEncoder, error) {
enc, err := New(dataShards, parityShards)
if err != nil {
return nil, err
}
rs := enc.(*reedSolomon)
r := rsStream{r: rs, bs: 4 << 20}
r.readShards = readShards
r.writeShards = writeShards
return &r, err
}
// NewStreamC creates a new encoder and initializes it to
// the number of data shards and parity shards given.
//
// This functions as 'NewStream', but allows you to enable CONCURRENT reads and writes.
func NewStreamC(dataShards, parityShards int, conReads, conWrites bool) (StreamEncoder, error) {
enc, err := New(dataShards, parityShards)
if err != nil {
return nil, err
}
rs := enc.(*reedSolomon)
r := rsStream{r: rs, bs: 4 << 20}
r.readShards = readShards
r.writeShards = writeShards
if conReads {
r.readShards = cReadShards
}
if conWrites {
r.writeShards = cWriteShards
}
return &r, err
}
func createSlice(n, length int) [][]byte {
out := make([][]byte, n)
for i := range out {
out[i] = make([]byte, length)
}
return out
}
// Encodes parity shards for a set of data shards.
//
// Input is 'data' containing readers for the data shards, followed by
// 'parity' containing io.Writers that receive the parity shards.
//
// The number of shards must match the number given to NewStream().
//
// Each reader must supply the same number of bytes.
//
// The parity shards will be written to the writer.
// The number of bytes written will match the input size.
//
// If a data stream returns an error, a StreamReadError type error
// will be returned. If a parity writer returns an error, a
// StreamWriteError will be returned.
func (r rsStream) Encode(data []io.Reader, parity []io.Writer) error {
if len(data) != r.r.DataShards {
return ErrTooFewShards
}
if len(parity) != r.r.ParityShards {
return ErrTooFewShards
}
all := createSlice(r.r.Shards, r.bs)
in := all[:r.r.DataShards]
out := all[r.r.DataShards:]
read := 0
for {
err := r.readShards(in, data)
switch err {
case nil:
case io.EOF:
if read == 0 {
return ErrShardNoData
}
return nil
default:
return err
}
out = trimShards(out, shardSize(in))
read += shardSize(in)
err = r.r.Encode(all)
if err != nil {
return err
}
err = r.writeShards(parity, out)
if err != nil {
return err
}
}
}
// Trim the shards so they are all the same size
func trimShards(in [][]byte, size int) [][]byte {
for i := range in {
if in[i] != nil {
in[i] = in[i][0:size]
}
if len(in[i]) < size {
in[i] = nil
}
}
return in
}
func readShards(dst [][]byte, in []io.Reader) error {
if len(in) != len(dst) {
panic("internal error: in and dst size does not match")
}
size := -1
for i := range in {
if in[i] == nil {
dst[i] = nil
continue
}
n, err := io.ReadFull(in[i], dst[i])
// The error is EOF only if no bytes were read.
// If an EOF happens after reading some but not all the bytes,
// ReadFull returns ErrUnexpectedEOF.
switch err {
case io.ErrUnexpectedEOF, io.EOF:
if size < 0 {
size = n
} else if n != size {
// Shard sizes must match.
return ErrShardSize
}
dst[i] = dst[i][0:n]
case nil:
continue
default:
return StreamReadError{Err: err, Stream: i}
}
}
if size == 0 {
return io.EOF
}
return nil
}
func writeShards(out []io.Writer, in [][]byte) error {
if len(out) != len(in) {
panic("internal error: in and out size does not match")
}
for i := range in {
if out[i] == nil {
continue
}
n, err := out[i].Write(in[i])
if err != nil {
return StreamWriteError{Err: err, Stream: i}
}
// Treat a short write without an error as a failure as well.
if n != len(in[i]) {
return StreamWriteError{Err: io.ErrShortWrite, Stream: i}
}
}
return nil
}
type readResult struct {
n int
size int
err error
}
// cReadShards reads shards concurrently
func cReadShards(dst [][]byte, in []io.Reader) error {
if len(in) != len(dst) {
panic("internal error: in and dst size does not match")
}
var wg sync.WaitGroup
wg.Add(len(in))
res := make(chan readResult, len(in))
for i := range in {
if in[i] == nil {
dst[i] = nil
wg.Done()
continue
}
go func(i int) {
defer wg.Done()
n, err := io.ReadFull(in[i], dst[i])
// The error is EOF only if no bytes were read.
// If an EOF happens after reading some but not all the bytes,
// ReadFull returns ErrUnexpectedEOF.
res <- readResult{size: n, err: err, n: i}
}(i)
}
wg.Wait()
close(res)
size := -1
for r := range res {
switch r.err {
case io.ErrUnexpectedEOF, io.EOF:
if size < 0 {
size = r.size
} else if r.size != size {
// Shard sizes must match.
return ErrShardSize
}
dst[r.n] = dst[r.n][0:r.size]
case nil:
default:
return StreamReadError{Err: r.err, Stream: r.n}
}
}
if size == 0 {
return io.EOF
}
return nil
}
// cWriteShards writes shards concurrently
func cWriteShards(out []io.Writer, in [][]byte) error {
if len(out) != len(in) {
panic("internal error: in and out size does not match")
}
var errs = make(chan error, len(out))
var wg sync.WaitGroup
wg.Add(len(out))
for i := range in {
go func(i int) {
defer wg.Done()
if out[i] == nil {
errs <- nil
return
}
n, err := out[i].Write(in[i])
if err != nil {
errs <- StreamWriteError{Err: err, Stream: i}
return
}
if n != len(in[i]) {
errs <- StreamWriteError{Err: io.ErrShortWrite, Stream: i}
}
}(i)
}
wg.Wait()
close(errs)
for err := range errs {
if err != nil {
return err
}
}
return nil
}
// Verify returns true if the parity shards contain correct data.
//
// The number of shards must match the total number of data+parity shards
// given to NewStream().
//
// Each reader must supply the same number of bytes.
// If a shard stream returns an error, a StreamReadError type error
// will be returned.
func (r rsStream) Verify(shards []io.Reader) (bool, error) {
if len(shards) != r.r.Shards {
return false, ErrTooFewShards
}
read := 0
all := createSlice(r.r.Shards, r.bs)
for {
err := r.readShards(all, shards)
if err == io.EOF {
if read == 0 {
return false, ErrShardNoData
}
return true, nil
}
if err != nil {
return false, err
}
read += shardSize(all)
ok, err := r.r.Verify(all)
if !ok || err != nil {
return ok, err
}
}
}
// ErrReconstructMismatch is returned by the StreamEncoder, if you supply
// "valid" and "fill" streams on the same index.
// It is then impossible to tell whether you consider the shard valid
// or want it reconstructed.
var ErrReconstructMismatch = errors.New("valid shards and fill shards are mutually exclusive")
// Reconstruct will recreate the missing shards if possible.
//
// Given a list of valid shards (to read) and invalid shards (to write)
//
// You indicate that a shard is missing by setting it to nil in the 'valid'
// slice and at the same time setting a non-nil writer in "fill".
// An index cannot contain both non-nil 'valid' and 'fill' entry.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if data set is ok.
func (r rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error {
if len(valid) != r.r.Shards {
return ErrTooFewShards
}
if len(fill) != r.r.Shards {
return ErrTooFewShards
}
all := createSlice(r.r.Shards, r.bs)
for i := range valid {
if valid[i] != nil && fill[i] != nil {
return ErrReconstructMismatch
}
}
read := 0
for {
err := r.readShards(all, valid)
if err == io.EOF {
if read == 0 {
return ErrShardNoData
}
return nil
}
if err != nil {
return err
}
read += shardSize(all)
all = trimShards(all, shardSize(all))
err = r.r.Reconstruct(all)
if err != nil {
return err
}
err = r.writeShards(fill, all)
if err != nil {
return err
}
}
}
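// exampleStreamReconstruct is an illustrative sketch only, not part of the
// upstream library: it shows the valid/fill convention documented above. A
// missing shard is nil in 'valid' and gets a non-nil writer in 'fill'; every
// other index leaves 'fill' nil. The shard sources are hypothetical.
func exampleStreamReconstruct(enc StreamEncoder, shards []io.Reader, missing int) (*bytes.Buffer, error) {
	valid := make([]io.Reader, len(shards))
	fill := make([]io.Writer, len(shards))
	copy(valid, shards)
	// Mark one shard as missing and supply a writer to receive the rebuilt data.
	valid[missing] = nil
	rebuilt := &bytes.Buffer{}
	fill[missing] = rebuilt
	if err := enc.Reconstruct(valid, fill); err != nil {
		return nil, err
	}
	// Integrity is not checked here; call Verify on the full shard set for that.
	return rebuilt, nil
}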
// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
//
// You must supply the exact output size you want.
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
func (r rsStream) Join(dst io.Writer, shards []io.Reader, outSize int64) error {
// Do we have enough shards?
if len(shards) < r.r.DataShards {
return ErrTooFewShards
}
// Trim off parity shards if any
shards = shards[:r.r.DataShards]
for i := range shards {
if shards[i] == nil {
return StreamReadError{Err: ErrShardNoData, Stream: i}
}
}
// Join all shards
src := io.MultiReader(shards...)
// Copy data to dst
n, err := io.CopyN(dst, src, outSize)
if err == io.EOF {
return ErrShortData
}
if err != nil {
return err
}
if n != outSize {
return ErrShortData
}
return nil
}
// Split an input stream into the number of shards given to the encoder.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// You must supply the total size of your input.
// 'ErrShortData' will be returned if it is unable to retrieve the
// number of bytes indicated.
func (r rsStream) Split(data io.Reader, dst []io.Writer, size int64) error {
if size == 0 {
return ErrShortData
}
if len(dst) != r.r.DataShards {
return ErrInvShardNum
}
for i := range dst {
if dst[i] == nil {
return StreamWriteError{Err: ErrShardNoData, Stream: i}
}
}
// Calculate number of bytes per shard.
perShard := (size + int64(r.r.DataShards) - 1) / int64(r.r.DataShards)
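// This is ceil(size / DataShards); e.g. 10 bytes across 3 data shards gives perShard = 4.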
// Pad data to r.Shards*perShard.
padding := make([]byte, (int64(r.r.Shards)*perShard)-size)
data = io.MultiReader(data, bytes.NewBuffer(padding))
// Split into equal-length shards and copy.
for i := range dst {
n, err := io.CopyN(dst[i], data, perShard)
if err != io.EOF && err != nil {
return err
}
if n != perShard {
return ErrShortData
}
}
return nil
}

View File

@@ -0,0 +1,604 @@
/**
* Unit tests for ReedSolomon Streaming API
*
* Copyright 2015, Klaus Post
*/
package reedsolomon
import (
"bytes"
"io"
"io/ioutil"
"math/rand"
"testing"
)
func TestStreamEncoding(t *testing.T) {
perShard := 10 << 20
if testing.Short() {
perShard = 50000
}
r, err := NewStream(10, 3)
if err != nil {
t.Fatal(err)
}
rand.Seed(0)
input := randomBytes(10, perShard)
data := toBuffers(input)
par := emptyBuffers(3)
err = r.Encode(toReaders(data), toWriters(par))
if err != nil {
t.Fatal(err)
}
// Reset Data
data = toBuffers(input)
all := append(toReaders(data), toReaders(par)...)
ok, err := r.Verify(all)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("Verification failed")
}
err = r.Encode(toReaders(emptyBuffers(1)), toWriters(emptyBuffers(1)))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = r.Encode(toReaders(emptyBuffers(10)), toWriters(emptyBuffers(1)))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = r.Encode(toReaders(emptyBuffers(10)), toWriters(emptyBuffers(3)))
if err != ErrShardNoData {
t.Errorf("expected %v, got %v", ErrShardNoData, err)
}
badShards := emptyBuffers(10)
badShards[0] = randomBuffer(123)
err = r.Encode(toReaders(badShards), toWriters(emptyBuffers(3)))
if err != ErrShardSize {
t.Errorf("expected %v, got %v", ErrShardSize, err)
}
}
func TestStreamEncodingConcurrent(t *testing.T) {
perShard := 10 << 20
if testing.Short() {
perShard = 50000
}
r, err := NewStreamC(10, 3, true, true)
if err != nil {
t.Fatal(err)
}
rand.Seed(0)
input := randomBytes(10, perShard)
data := toBuffers(input)
par := emptyBuffers(3)
err = r.Encode(toReaders(data), toWriters(par))
if err != nil {
t.Fatal(err)
}
// Reset Data
data = toBuffers(input)
all := append(toReaders(data), toReaders(par)...)
ok, err := r.Verify(all)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("Verification failed")
}
err = r.Encode(toReaders(emptyBuffers(1)), toWriters(emptyBuffers(1)))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = r.Encode(toReaders(emptyBuffers(10)), toWriters(emptyBuffers(1)))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = r.Encode(toReaders(emptyBuffers(10)), toWriters(emptyBuffers(3)))
if err != ErrShardNoData {
t.Errorf("expected %v, got %v", ErrShardNoData, err)
}
badShards := emptyBuffers(10)
badShards[0] = randomBuffer(123)
badShards[1] = randomBuffer(123)
err = r.Encode(toReaders(badShards), toWriters(emptyBuffers(3)))
if err != ErrShardSize {
t.Errorf("expected %v, got %v", ErrShardSize, err)
}
}
func randomBuffer(length int) *bytes.Buffer {
b := make([]byte, length)
fillRandom(b)
return bytes.NewBuffer(b)
}
func randomBytes(n, length int) [][]byte {
bufs := make([][]byte, n)
for j := range bufs {
bufs[j] = make([]byte, length)
fillRandom(bufs[j])
}
return bufs
}
func toBuffers(in [][]byte) []*bytes.Buffer {
out := make([]*bytes.Buffer, len(in))
for i := range in {
out[i] = bytes.NewBuffer(in[i])
}
return out
}
func toReaders(in []*bytes.Buffer) []io.Reader {
out := make([]io.Reader, len(in))
for i := range in {
out[i] = in[i]
}
return out
}
func toWriters(in []*bytes.Buffer) []io.Writer {
out := make([]io.Writer, len(in))
for i := range in {
out[i] = in[i]
}
return out
}
func nilWriters(n int) []io.Writer {
out := make([]io.Writer, n)
for i := range out {
out[i] = nil
}
return out
}
func emptyBuffers(n int) []*bytes.Buffer {
b := make([]*bytes.Buffer, n)
for i := range b {
b[i] = &bytes.Buffer{}
}
return b
}
func toBytes(in []*bytes.Buffer) [][]byte {
b := make([][]byte, len(in))
for i := range in {
b[i] = in[i].Bytes()
}
return b
}
func TestStreamReconstruct(t *testing.T) {
perShard := 10 << 20
if testing.Short() {
perShard = 50000
}
r, err := NewStream(10, 3)
if err != nil {
t.Fatal(err)
}
rand.Seed(0)
shards := randomBytes(10, perShard)
parb := emptyBuffers(3)
err = r.Encode(toReaders(toBuffers(shards)), toWriters(parb))
if err != nil {
t.Fatal(err)
}
parity := toBytes(parb)
all := append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
fill := make([]io.Writer, 13)
// Reconstruct with all shards present, all fill nil
err = r.Reconstruct(all, fill)
if err != nil {
t.Fatal(err)
}
all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
// Reconstruct with 10 shards present
all[0] = nil
fill[0] = emptyBuffers(1)[0]
all[7] = nil
fill[7] = emptyBuffers(1)[0]
all[11] = nil
fill[11] = emptyBuffers(1)[0]
err = r.Reconstruct(all, fill)
if err != nil {
t.Fatal(err)
}
shards[0] = fill[0].(*bytes.Buffer).Bytes()
shards[7] = fill[7].(*bytes.Buffer).Bytes()
parity[1] = fill[11].(*bytes.Buffer).Bytes()
all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
ok, err := r.Verify(all)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("Verification failed")
}
all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
// Reconstruct with 9 shards present (should fail)
all[0] = nil
fill[0] = emptyBuffers(1)[0]
all[4] = nil
fill[4] = emptyBuffers(1)[0]
all[7] = nil
fill[7] = emptyBuffers(1)[0]
all[11] = nil
fill[11] = emptyBuffers(1)[0]
err = r.Reconstruct(all, fill)
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = r.Reconstruct(toReaders(emptyBuffers(3)), toWriters(emptyBuffers(3)))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = r.Reconstruct(toReaders(emptyBuffers(13)), toWriters(emptyBuffers(3)))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
err = r.Reconstruct(toReaders(emptyBuffers(13)), toWriters(emptyBuffers(13)))
if err != ErrReconstructMismatch {
t.Errorf("expected %v, got %v", ErrReconstructMismatch, err)
}
err = r.Reconstruct(toReaders(emptyBuffers(13)), nilWriters(13))
if err != ErrShardNoData {
t.Errorf("expected %v, got %v", ErrShardNoData, err)
}
}
func TestStreamVerify(t *testing.T) {
perShard := 10 << 20
if testing.Short() {
perShard = 50000
}
r, err := NewStream(10, 4)
if err != nil {
t.Fatal(err)
}
shards := randomBytes(10, perShard)
parb := emptyBuffers(4)
err = r.Encode(toReaders(toBuffers(shards)), toWriters(parb))
if err != nil {
t.Fatal(err)
}
parity := toBytes(parb)
all := append(toReaders(toBuffers(shards)), toReaders(parb)...)
ok, err := r.Verify(all)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("Verification failed")
}
// Flip bits in a random byte
parity[0][len(parity[0])-20000] = parity[0][len(parity[0])-20000] ^ 0xff
all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
ok, err = r.Verify(all)
if err != nil {
t.Fatal(err)
}
if ok {
t.Fatal("Verification did not fail")
}
// Re-encode
err = r.Encode(toReaders(toBuffers(shards)), toWriters(parb))
if err != nil {
t.Fatal(err)
}
// Flip the bits of a byte in a data shard
shards[0][len(shards[0])-30000] = shards[0][len(shards[0])-30000] ^ 0xff
all = append(toReaders(toBuffers(shards)), toReaders(parb)...)
ok, err = r.Verify(all)
if err != nil {
t.Fatal(err)
}
if ok {
t.Fatal("Verification did not fail")
}
_, err = r.Verify(toReaders(emptyBuffers(10)))
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
_, err = r.Verify(toReaders(emptyBuffers(14)))
if err != ErrShardNoData {
t.Errorf("expected %v, got %v", ErrShardNoData, err)
}
}
func TestStreamOneEncode(t *testing.T) {
codec, err := NewStream(5, 5)
if err != nil {
t.Fatal(err)
}
shards := [][]byte{
{0, 1},
{4, 5},
{2, 3},
{6, 7},
{8, 9},
}
parb := emptyBuffers(5)
codec.Encode(toReaders(toBuffers(shards)), toWriters(parb))
parity := toBytes(parb)
if parity[0][0] != 12 || parity[0][1] != 13 {
t.Fatal("shard 5 mismatch")
}
if parity[1][0] != 10 || parity[1][1] != 11 {
t.Fatal("shard 6 mismatch")
}
if parity[2][0] != 14 || parity[2][1] != 15 {
t.Fatal("shard 7 mismatch")
}
if parity[3][0] != 90 || parity[3][1] != 91 {
t.Fatal("shard 8 mismatch")
}
if parity[4][0] != 94 || parity[4][1] != 95 {
t.Fatal("shard 9 mismatch")
}
all := append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
ok, err := codec.Verify(all)
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("did not verify")
}
shards[3][0]++
all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
ok, err = codec.Verify(all)
if err != nil {
t.Fatal(err)
}
if ok {
t.Fatal("verify did not fail as expected")
}
}
func benchmarkStreamEncode(b *testing.B, dataShards, parityShards, shardSize int) {
r, err := NewStream(dataShards, parityShards)
if err != nil {
b.Fatal(err)
}
shards := make([][]byte, dataShards)
for s := range shards {
shards[s] = make([]byte, shardSize)
}
rand.Seed(0)
for s := 0; s < dataShards; s++ {
fillRandom(shards[s])
}
b.SetBytes(int64(shardSize * dataShards))
b.ResetTimer()
out := make([]io.Writer, parityShards)
for i := range out {
out[i] = ioutil.Discard
}
for i := 0; i < b.N; i++ {
err = r.Encode(toReaders(toBuffers(shards)), out)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkStreamEncode10x2x10000(b *testing.B) {
benchmarkStreamEncode(b, 10, 2, 10000)
}
func BenchmarkStreamEncode100x20x10000(b *testing.B) {
benchmarkStreamEncode(b, 100, 20, 10000)
}
func BenchmarkStreamEncode17x3x1M(b *testing.B) {
benchmarkStreamEncode(b, 17, 3, 1024*1024)
}
// Benchmark 10 data shards and 4 parity shards with 16MB each.
func BenchmarkStreamEncode10x4x16M(b *testing.B) {
benchmarkStreamEncode(b, 10, 4, 16*1024*1024)
}
// Benchmark 5 data shards and 2 parity shards with 1MB each.
func BenchmarkStreamEncode5x2x1M(b *testing.B) {
benchmarkStreamEncode(b, 5, 2, 1024*1024)
}
// Benchmark 10 data shards and 2 parity shards with 1MB each.
func BenchmarkStreamEncode10x2x1M(b *testing.B) {
benchmarkStreamEncode(b, 10, 2, 1024*1024)
}
// Benchmark 10 data shards and 4 parity shards with 1MB each.
func BenchmarkStreamEncode10x4x1M(b *testing.B) {
benchmarkStreamEncode(b, 10, 4, 1024*1024)
}
// Benchmark 50 data shards and 20 parity shards with 1MB each.
func BenchmarkStreamEncode50x20x1M(b *testing.B) {
benchmarkStreamEncode(b, 50, 20, 1024*1024)
}
// Benchmark 17 data shards and 3 parity shards with 16MB each.
func BenchmarkStreamEncode17x3x16M(b *testing.B) {
benchmarkStreamEncode(b, 17, 3, 16*1024*1024)
}
func benchmarkStreamVerify(b *testing.B, dataShards, parityShards, shardSize int) {
r, err := NewStream(dataShards, parityShards)
if err != nil {
b.Fatal(err)
}
shards := make([][]byte, parityShards+dataShards)
for s := range shards {
shards[s] = make([]byte, shardSize)
}
rand.Seed(0)
for s := 0; s < dataShards; s++ {
fillRandom(shards[s])
}
err = r.Encode(toReaders(toBuffers(shards[:dataShards])), toWriters(toBuffers(shards[dataShards:])))
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(shardSize * dataShards))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err = r.Verify(toReaders(toBuffers(shards)))
if err != nil {
b.Fatal(err)
}
}
}
// Benchmark 10 data slices with 2 parity slices holding 10000 bytes each
func BenchmarkStreamVerify10x2x10000(b *testing.B) {
benchmarkStreamVerify(b, 10, 2, 10000)
}
// Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
func BenchmarkStreamVerify50x5x50000(b *testing.B) {
benchmarkStreamVerify(b, 50, 5, 100000)
}
// Benchmark 10 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkStreamVerify10x2x1M(b *testing.B) {
benchmarkStreamVerify(b, 10, 2, 1024*1024)
}
// Benchmark 5 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkStreamVerify5x2x1M(b *testing.B) {
benchmarkStreamVerify(b, 5, 2, 1024*1024)
}
// Benchmark 10 data slices with 4 parity slices holding 1MB bytes each
func BenchmarkStreamVerify10x4x1M(b *testing.B) {
benchmarkStreamVerify(b, 10, 4, 1024*1024)
}
// Benchmark 50 data slices with 20 parity slices holding 1MB bytes each
func BenchmarkStreamVerify50x20x1M(b *testing.B) {
benchmarkStreamVerify(b, 50, 20, 1024*1024)
}
// Benchmark 10 data slices with 4 parity slices holding 16MB bytes each
func BenchmarkStreamVerify10x4x16M(b *testing.B) {
benchmarkStreamVerify(b, 10, 4, 16*1024*1024)
}
func TestStreamSplitJoin(t *testing.T) {
var data = make([]byte, 250000)
rand.Seed(0)
fillRandom(data)
enc, _ := NewStream(5, 3)
split := emptyBuffers(5)
err := enc.Split(bytes.NewBuffer(data), toWriters(split), int64(len(data)))
if err != nil {
t.Fatal(err)
}
splits := toBytes(split)
expect := len(data) / 5
// Beware if changing the data size: the expected length assumes it divides evenly
if split[0].Len() != expect {
t.Errorf("unexpected size. expected %d, got %d", expect, split[0].Len())
}
err = enc.Split(bytes.NewBuffer([]byte{}), toWriters(emptyBuffers(3)), 0)
if err != ErrShortData {
t.Errorf("expected %v, got %v", ErrShortData, err)
}
buf := new(bytes.Buffer)
err = enc.Join(buf, toReaders(toBuffers(splits)), int64(len(data)))
if err != nil {
t.Fatal(err)
}
joined := buf.Bytes()
if !bytes.Equal(joined, data) {
t.Fatal("recovered data does match original", joined[:8], data[:8], "... lengths:", len(joined), len(data))
}
err = enc.Join(buf, toReaders(emptyBuffers(2)), 0)
if err != ErrTooFewShards {
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
}
bufs := toReaders(emptyBuffers(5))
bufs[2] = nil
err = enc.Join(buf, bufs, 0)
if se, ok := err.(StreamReadError); ok {
if se.Err != ErrShardNoData {
t.Errorf("expected %v, got %v", ErrShardNoData, se.Err)
}
if se.Stream != 2 {
t.Errorf("Expected error on stream 2, got %d", se.Stream)
}
} else {
t.Errorf("expected error type %T, got %T", StreamReadError{}, err)
}
err = enc.Join(buf, toReaders(toBuffers(splits)), int64(len(data)+1))
if err != ErrShortData {
t.Errorf("expected %v, got %v", ErrShortData, err)
}
}
func TestNewStream(t *testing.T) {
tests := []struct {
data, parity int
err error
}{
{127, 127, nil},
{256, 256, ErrMaxShardNum},
{0, 1, ErrInvShardNum},
{1, 0, ErrInvShardNum},
{257, 1, ErrMaxShardNum},
// overflow causes r.Shards to be negative
{256, int(^uint(0) >> 1), errInvalidRowSize},
}
for _, test := range tests {
_, err := NewStream(test.data, test.parity)
if err != test.err {
t.Errorf("New(%v, %v): expected %v, got %v", test.data, test.parity, test.err, err)
}
}
}

23
vendor/github.com/pkg/errors/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,23 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

52
vendor/github.com/pkg/errors/README.md generated vendored Normal file
View File

@@ -0,0 +1,52 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
Package errors provides simple error handling primitives.
`go get github.com/pkg/errors`
The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
## Adding context to an error
The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrap(err, "read failed")
}
```
## Retrieving the cause of an error
Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
// handle specifically
default:
// unknown error
}
```
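As a rough end-to-end sketch (not taken from the upstream docs; the file name `missing.conf` and helper `readConfig` are hypothetical), wrapping at the point of failure and unwrapping at the top might look like:
```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pkg/errors"
)

func readConfig(path string) ([]byte, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, errors.Wrap(err, "read config failed")
	}
	return b, nil
}

func main() {
	if _, err := readConfig("missing.conf"); err != nil {
		fmt.Println(err)               // wrapped message: "read config failed: ..."
		fmt.Println(errors.Cause(err)) // the underlying *os.PathError
	}
}
```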
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
## Contributing
We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
Before proposing a change, please discuss your change by raising an issue.
## Licence
BSD-2-Clause

32
vendor/github.com/pkg/errors/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,32 @@
version: build-{build}.{branch}
clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed
environment:
GOPATH: C:\gopath
platform:
- x64
# http://www.appveyor.com/docs/installed-software
install:
# some helpful output for debugging builds
- go version
- go env
# pre-installed MinGW at C:\MinGW is 32bit only
# but MSYS2 at C:\msys64 has mingw64
- set PATH=C:\msys64\mingw64\bin;%PATH%
- gcc --version
- g++ --version
build_script:
- go install -v ./...
test_script:
- set PATH=C:\gopath\bin;%PATH%
- go test -v ./...
#artifacts:
# - path: '%GOPATH%\bin\*.exe'
deploy: off

60
vendor/github.com/pkg/errors/bench_test.go generated vendored Normal file
View File

@@ -0,0 +1,60 @@
// +build go1.7
package errors
import (
"fmt"
"testing"
stderrors "errors"
)
func noErrors(at, depth int) error {
if at >= depth {
return stderrors.New("no error")
}
return noErrors(at+1, depth)
}
func yesErrors(at, depth int) error {
if at >= depth {
return New("ye error")
}
return yesErrors(at+1, depth)
}
func BenchmarkErrors(b *testing.B) {
var toperr error
type run struct {
stack int
std bool
}
runs := []run{
{10, false},
{10, true},
{100, false},
{100, true},
{1000, false},
{1000, true},
}
for _, r := range runs {
part := "pkg/errors"
if r.std {
part = "errors"
}
name := fmt.Sprintf("%s-stack-%d", part, r.stack)
b.Run(name, func(b *testing.B) {
var err error
f := yesErrors
if r.std {
f = noErrors
}
b.ReportAllocs()
for i := 0; i < b.N; i++ {
err = f(0, r.stack)
}
b.StopTimer()
toperr = err
})
}
}

269
vendor/github.com/pkg/errors/errors.go generated vendored Normal file
View File

@@ -0,0 +1,269 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
// if err != nil {
// return err
// }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// and the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// If additional control is required the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and a message, respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
// type causer interface {
// Cause() error
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
// case *MyError:
// // handle specifically
// default:
// // unknown error
// }
//
// causer interface is not exported by this package, but is considered a part
// of stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// }
// }
//
// stackTracer interface is not exported by this package, but is considered a part
// of stable public API.
//
// See the documentation for Frame.Format for more details.
package errors
import (
"fmt"
"io"
)
// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
return &fundamental{
msg: message,
stack: callers(),
}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
return &fundamental{
msg: fmt.Sprintf(format, args...),
stack: callers(),
}
}
// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
msg string
*stack
}
func (f *fundamental) Error() string { return f.msg }
func (f *fundamental) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
io.WriteString(s, f.msg)
f.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, f.msg)
case 'q':
fmt.Fprintf(s, "%q", f.msg)
}
}
// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
if err == nil {
return nil
}
return &withStack{
err,
callers(),
}
}
type withStack struct {
error
*stack
}
func (w *withStack) Cause() error { return w.error }
func (w *withStack) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v", w.Cause())
w.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, w.Error())
case 'q':
fmt.Fprintf(s, "%q", w.Error())
}
}
// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: message,
}
return &withStack{
err,
callers(),
}
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: fmt.Sprintf(format, args...),
}
return &withStack{
err,
callers(),
}
}
// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
if err == nil {
return nil
}
return &withMessage{
cause: err,
msg: message,
}
}
type withMessage struct {
cause error
msg string
}
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
func (w *withMessage) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v\n", w.Cause())
io.WriteString(s, w.msg)
return
}
fallthrough
case 's', 'q':
io.WriteString(s, w.Error())
}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
// type causer interface {
// Cause() error
// }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
type causer interface {
Cause() error
}
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
}

225
vendor/github.com/pkg/errors/errors_test.go generated vendored Normal file
View File

@@ -0,0 +1,225 @@
package errors
import (
"errors"
"fmt"
"io"
"reflect"
"testing"
)
func TestNew(t *testing.T) {
tests := []struct {
err string
want error
}{
{"", fmt.Errorf("")},
{"foo", fmt.Errorf("foo")},
{"foo", New("foo")},
{"string with format specifiers: %v", errors.New("string with format specifiers: %v")},
}
for _, tt := range tests {
got := New(tt.err)
if got.Error() != tt.want.Error() {
t.Errorf("New.Error(): got: %q, want %q", got, tt.want)
}
}
}
func TestWrapNil(t *testing.T) {
got := Wrap(nil, "no error")
if got != nil {
t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got)
}
}
func TestWrap(t *testing.T) {
tests := []struct {
err error
message string
want string
}{
{io.EOF, "read error", "read error: EOF"},
{Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"},
}
for _, tt := range tests {
got := Wrap(tt.err, tt.message).Error()
if got != tt.want {
t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want)
}
}
}
type nilError struct{}
func (nilError) Error() string { return "nil error" }
func TestCause(t *testing.T) {
x := New("error")
tests := []struct {
err error
want error
}{{
// nil error is nil
err: nil,
want: nil,
}, {
// explicit nil error is nil
err: (error)(nil),
want: nil,
}, {
// typed nil is nil
err: (*nilError)(nil),
want: (*nilError)(nil),
}, {
// uncaused error is unaffected
err: io.EOF,
want: io.EOF,
}, {
// caused error returns cause
err: Wrap(io.EOF, "ignored"),
want: io.EOF,
}, {
err: x, // return from errors.New
want: x,
}, {
WithMessage(nil, "whoops"),
nil,
}, {
WithMessage(io.EOF, "whoops"),
io.EOF,
}, {
WithStack(nil),
nil,
}, {
WithStack(io.EOF),
io.EOF,
}}
for i, tt := range tests {
got := Cause(tt.err)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want)
}
}
}
func TestWrapfNil(t *testing.T) {
got := Wrapf(nil, "no error")
if got != nil {
t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got)
}
}
func TestWrapf(t *testing.T) {
tests := []struct {
err error
message string
want string
}{
{io.EOF, "read error", "read error: EOF"},
{Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"},
{Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"},
}
for _, tt := range tests {
got := Wrapf(tt.err, tt.message).Error()
if got != tt.want {
t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want)
}
}
}
func TestErrorf(t *testing.T) {
tests := []struct {
err error
want string
}{
{Errorf("read error without format specifiers"), "read error without format specifiers"},
{Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"},
}
for _, tt := range tests {
got := tt.err.Error()
if got != tt.want {
t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want)
}
}
}
func TestWithStackNil(t *testing.T) {
got := WithStack(nil)
if got != nil {
t.Errorf("WithStack(nil): got %#v, expected nil", got)
}
}
func TestWithStack(t *testing.T) {
tests := []struct {
err error
want string
}{
{io.EOF, "EOF"},
{WithStack(io.EOF), "EOF"},
}
for _, tt := range tests {
got := WithStack(tt.err).Error()
if got != tt.want {
t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want)
}
}
}
func TestWithMessageNil(t *testing.T) {
got := WithMessage(nil, "no error")
if got != nil {
t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got)
}
}
func TestWithMessage(t *testing.T) {
tests := []struct {
err error
message string
want string
}{
{io.EOF, "read error", "read error: EOF"},
{WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"},
}
for _, tt := range tests {
got := WithMessage(tt.err, tt.message).Error()
if got != tt.want {
t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want)
}
}
}
// errors.New, etc values are not expected to be compared by value
// but the change in errors#27 made them incomparable. Assert that
// various kinds of errors have a functional equality operator, even
// if the result of that equality is always false.
func TestErrorEquality(t *testing.T) {
vals := []error{
nil,
io.EOF,
errors.New("EOF"),
New("EOF"),
Errorf("EOF"),
Wrap(io.EOF, "EOF"),
Wrapf(io.EOF, "EOF%d", 2),
WithMessage(nil, "whoops"),
WithMessage(io.EOF, "whoops"),
WithStack(io.EOF),
WithStack(nil),
}
for i := range vals {
for j := range vals {
_ = vals[i] == vals[j] // mustn't panic
}
}
}

205
vendor/github.com/pkg/errors/example_test.go generated vendored Normal file
View File

@@ -0,0 +1,205 @@
package errors_test
import (
"fmt"
"github.com/pkg/errors"
)
func ExampleNew() {
err := errors.New("whoops")
fmt.Println(err)
// Output: whoops
}
func ExampleNew_printf() {
err := errors.New("whoops")
fmt.Printf("%+v", err)
// Example output:
// whoops
// github.com/pkg/errors_test.ExampleNew_printf
// /home/dfc/src/github.com/pkg/errors/example_test.go:17
// testing.runExample
// /home/dfc/go/src/testing/example.go:114
// testing.RunExamples
// /home/dfc/go/src/testing/example.go:38
// testing.(*M).Run
// /home/dfc/go/src/testing/testing.go:744
// main.main
// /github.com/pkg/errors/_test/_testmain.go:106
// runtime.main
// /home/dfc/go/src/runtime/proc.go:183
// runtime.goexit
// /home/dfc/go/src/runtime/asm_amd64.s:2059
}
func ExampleWithMessage() {
cause := errors.New("whoops")
err := errors.WithMessage(cause, "oh noes")
fmt.Println(err)
// Output: oh noes: whoops
}
func ExampleWithStack() {
cause := errors.New("whoops")
err := errors.WithStack(cause)
fmt.Println(err)
// Output: whoops
}
func ExampleWithStack_printf() {
cause := errors.New("whoops")
err := errors.WithStack(cause)
fmt.Printf("%+v", err)
// Example Output:
// whoops
// github.com/pkg/errors_test.ExampleWithStack_printf
// /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55
// testing.runExample
// /usr/lib/go/src/testing/example.go:114
// testing.RunExamples
// /usr/lib/go/src/testing/example.go:38
// testing.(*M).Run
// /usr/lib/go/src/testing/testing.go:744
// main.main
// github.com/pkg/errors/_test/_testmain.go:106
// runtime.main
// /usr/lib/go/src/runtime/proc.go:183
// runtime.goexit
// /usr/lib/go/src/runtime/asm_amd64.s:2086
// github.com/pkg/errors_test.ExampleWithStack_printf
// /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56
// testing.runExample
// /usr/lib/go/src/testing/example.go:114
// testing.RunExamples
// /usr/lib/go/src/testing/example.go:38
// testing.(*M).Run
// /usr/lib/go/src/testing/testing.go:744
// main.main
// github.com/pkg/errors/_test/_testmain.go:106
// runtime.main
// /usr/lib/go/src/runtime/proc.go:183
// runtime.goexit
// /usr/lib/go/src/runtime/asm_amd64.s:2086
}
func ExampleWrap() {
cause := errors.New("whoops")
err := errors.Wrap(cause, "oh noes")
fmt.Println(err)
// Output: oh noes: whoops
}
func fn() error {
e1 := errors.New("error")
e2 := errors.Wrap(e1, "inner")
e3 := errors.Wrap(e2, "middle")
return errors.Wrap(e3, "outer")
}
func ExampleCause() {
err := fn()
fmt.Println(err)
fmt.Println(errors.Cause(err))
// Output: outer: middle: inner: error
// error
}
func ExampleWrap_extended() {
err := fn()
fmt.Printf("%+v\n", err)
// Example output:
// error
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:47
// github.com/pkg/errors_test.ExampleCause_printf
// /home/dfc/src/github.com/pkg/errors/example_test.go:63
// testing.runExample
// /home/dfc/go/src/testing/example.go:114
// testing.RunExamples
// /home/dfc/go/src/testing/example.go:38
// testing.(*M).Run
// /home/dfc/go/src/testing/testing.go:744
// main.main
// /github.com/pkg/errors/_test/_testmain.go:104
// runtime.main
// /home/dfc/go/src/runtime/proc.go:183
// runtime.goexit
// /home/dfc/go/src/runtime/asm_amd64.s:2059
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer
}
func ExampleWrapf() {
cause := errors.New("whoops")
err := errors.Wrapf(cause, "oh noes #%d", 2)
fmt.Println(err)
// Output: oh noes #2: whoops
}
func ExampleErrorf_extended() {
err := errors.Errorf("whoops: %s", "foo")
fmt.Printf("%+v", err)
// Example output:
// whoops: foo
// github.com/pkg/errors_test.ExampleErrorf
// /home/dfc/src/github.com/pkg/errors/example_test.go:101
// testing.runExample
// /home/dfc/go/src/testing/example.go:114
// testing.RunExamples
// /home/dfc/go/src/testing/example.go:38
// testing.(*M).Run
// /home/dfc/go/src/testing/testing.go:744
// main.main
// /github.com/pkg/errors/_test/_testmain.go:102
// runtime.main
// /home/dfc/go/src/runtime/proc.go:183
// runtime.goexit
// /home/dfc/go/src/runtime/asm_amd64.s:2059
}
func Example_stackTrace() {
type stackTracer interface {
StackTrace() errors.StackTrace
}
err, ok := errors.Cause(fn()).(stackTracer)
if !ok {
panic("oops, err does not implement stackTracer")
}
st := err.StackTrace()
fmt.Printf("%+v", st[0:2]) // top two frames
// Example output:
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:47
// github.com/pkg/errors_test.Example_stackTrace
// /home/dfc/src/github.com/pkg/errors/example_test.go:127
}
func ExampleCause_printf() {
err := errors.Wrap(func() error {
return func() error {
return errors.Errorf("hello %s", fmt.Sprintf("world"))
}()
}(), "failed")
fmt.Printf("%v", err)
// Output: failed: hello world
}

535
vendor/github.com/pkg/errors/format_test.go generated vendored Normal file
View File

@@ -0,0 +1,535 @@
package errors
import (
"errors"
"fmt"
"io"
"regexp"
"strings"
"testing"
)
func TestFormatNew(t *testing.T) {
tests := []struct {
error
format string
want string
}{{
New("error"),
"%s",
"error",
}, {
New("error"),
"%v",
"error",
}, {
New("error"),
"%+v",
"error\n" +
"github.com/pkg/errors.TestFormatNew\n" +
"\t.+/github.com/pkg/errors/format_test.go:26",
}, {
New("error"),
"%q",
`"error"`,
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.error, tt.format, tt.want)
}
}
func TestFormatErrorf(t *testing.T) {
tests := []struct {
error
format string
want string
}{{
Errorf("%s", "error"),
"%s",
"error",
}, {
Errorf("%s", "error"),
"%v",
"error",
}, {
Errorf("%s", "error"),
"%+v",
"error\n" +
"github.com/pkg/errors.TestFormatErrorf\n" +
"\t.+/github.com/pkg/errors/format_test.go:56",
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.error, tt.format, tt.want)
}
}
func TestFormatWrap(t *testing.T) {
tests := []struct {
error
format string
want string
}{{
Wrap(New("error"), "error2"),
"%s",
"error2: error",
}, {
Wrap(New("error"), "error2"),
"%v",
"error2: error",
}, {
Wrap(New("error"), "error2"),
"%+v",
"error\n" +
"github.com/pkg/errors.TestFormatWrap\n" +
"\t.+/github.com/pkg/errors/format_test.go:82",
}, {
Wrap(io.EOF, "error"),
"%s",
"error: EOF",
}, {
Wrap(io.EOF, "error"),
"%v",
"error: EOF",
}, {
Wrap(io.EOF, "error"),
"%+v",
"EOF\n" +
"error\n" +
"github.com/pkg/errors.TestFormatWrap\n" +
"\t.+/github.com/pkg/errors/format_test.go:96",
}, {
Wrap(Wrap(io.EOF, "error1"), "error2"),
"%+v",
"EOF\n" +
"error1\n" +
"github.com/pkg/errors.TestFormatWrap\n" +
"\t.+/github.com/pkg/errors/format_test.go:103\n",
}, {
Wrap(New("error with space"), "context"),
"%q",
`"context: error with space"`,
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.error, tt.format, tt.want)
}
}
func TestFormatWrapf(t *testing.T) {
tests := []struct {
error
format string
want string
}{{
Wrapf(io.EOF, "error%d", 2),
"%s",
"error2: EOF",
}, {
Wrapf(io.EOF, "error%d", 2),
"%v",
"error2: EOF",
}, {
Wrapf(io.EOF, "error%d", 2),
"%+v",
"EOF\n" +
"error2\n" +
"github.com/pkg/errors.TestFormatWrapf\n" +
"\t.+/github.com/pkg/errors/format_test.go:134",
}, {
Wrapf(New("error"), "error%d", 2),
"%s",
"error2: error",
}, {
Wrapf(New("error"), "error%d", 2),
"%v",
"error2: error",
}, {
Wrapf(New("error"), "error%d", 2),
"%+v",
"error\n" +
"github.com/pkg/errors.TestFormatWrapf\n" +
"\t.+/github.com/pkg/errors/format_test.go:149",
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.error, tt.format, tt.want)
}
}
func TestFormatWithStack(t *testing.T) {
tests := []struct {
error
format string
want []string
}{{
WithStack(io.EOF),
"%s",
[]string{"EOF"},
}, {
WithStack(io.EOF),
"%v",
[]string{"EOF"},
}, {
WithStack(io.EOF),
"%+v",
[]string{"EOF",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:175"},
}, {
WithStack(New("error")),
"%s",
[]string{"error"},
}, {
WithStack(New("error")),
"%v",
[]string{"error"},
}, {
WithStack(New("error")),
"%+v",
[]string{"error",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:189",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:189"},
}, {
WithStack(WithStack(io.EOF)),
"%+v",
[]string{"EOF",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:197",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:197"},
}, {
WithStack(WithStack(Wrapf(io.EOF, "message"))),
"%+v",
[]string{"EOF",
"message",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:205",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:205",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:205"},
}, {
WithStack(Errorf("error%d", 1)),
"%+v",
[]string{"error1",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:216",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:216"},
}}
for i, tt := range tests {
testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
}
}
func TestFormatWithMessage(t *testing.T) {
tests := []struct {
error
format string
want []string
}{{
WithMessage(New("error"), "error2"),
"%s",
[]string{"error2: error"},
}, {
WithMessage(New("error"), "error2"),
"%v",
[]string{"error2: error"},
}, {
WithMessage(New("error"), "error2"),
"%+v",
[]string{
"error",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:244",
"error2"},
}, {
WithMessage(io.EOF, "addition1"),
"%s",
[]string{"addition1: EOF"},
}, {
WithMessage(io.EOF, "addition1"),
"%v",
[]string{"addition1: EOF"},
}, {
WithMessage(io.EOF, "addition1"),
"%+v",
[]string{"EOF", "addition1"},
}, {
WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
"%v",
[]string{"addition2: addition1: EOF"},
}, {
WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
"%+v",
[]string{"EOF", "addition1", "addition2"},
}, {
Wrap(WithMessage(io.EOF, "error1"), "error2"),
"%+v",
[]string{"EOF", "error1", "error2",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:272"},
}, {
WithMessage(Errorf("error%d", 1), "error2"),
"%+v",
[]string{"error1",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:278",
"error2"},
}, {
WithMessage(WithStack(io.EOF), "error"),
"%+v",
[]string{
"EOF",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:285",
"error"},
}, {
WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"),
"%+v",
[]string{
"EOF",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:293",
"inside-error",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:293",
"outside-error"},
}}
for i, tt := range tests {
testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
}
}
func TestFormatGeneric(t *testing.T) {
starts := []struct {
err error
want []string
}{
{New("new-error"), []string{
"new-error",
"github.com/pkg/errors.TestFormatGeneric\n" +
"\t.+/github.com/pkg/errors/format_test.go:315"},
}, {Errorf("errorf-error"), []string{
"errorf-error",
"github.com/pkg/errors.TestFormatGeneric\n" +
"\t.+/github.com/pkg/errors/format_test.go:319"},
}, {errors.New("errors-new-error"), []string{
"errors-new-error"},
},
}
wrappers := []wrapper{
{
func(err error) error { return WithMessage(err, "with-message") },
[]string{"with-message"},
}, {
func(err error) error { return WithStack(err) },
[]string{
"github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" +
".+/github.com/pkg/errors/format_test.go:333",
},
}, {
func(err error) error { return Wrap(err, "wrap-error") },
[]string{
"wrap-error",
"github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" +
".+/github.com/pkg/errors/format_test.go:339",
},
}, {
func(err error) error { return Wrapf(err, "wrapf-error%d", 1) },
[]string{
"wrapf-error1",
"github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" +
".+/github.com/pkg/errors/format_test.go:346",
},
},
}
for s := range starts {
err := starts[s].err
want := starts[s].want
testFormatCompleteCompare(t, s, err, "%+v", want, false)
testGenericRecursive(t, err, want, wrappers, 3)
}
}
func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) {
got := fmt.Sprintf(format, arg)
gotLines := strings.SplitN(got, "\n", -1)
wantLines := strings.SplitN(want, "\n", -1)
if len(wantLines) > len(gotLines) {
t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want)
return
}
for i, w := range wantLines {
match, err := regexp.MatchString(w, gotLines[i])
if err != nil {
t.Fatal(err)
}
if !match {
t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want)
}
}
}
var stackLineR = regexp.MustCompile(`\.`)
// parseBlocks parses input into a slice, where:
// - in case an entry contains a newline, it's a stack trace
// - in case an entry contains no newline, it's a solo line.
//
// Detecting stack boundaries only works when the WithStack calls are
// found on the same line, which is why it is optional.
//
// Example use:
//
// for _, e := range blocks {
// if strings.ContainsAny(e, "\n") {
// // Match as stack
// } else {
// // Match as line
// }
// }
//
func parseBlocks(input string, detectStackboundaries bool) ([]string, error) {
var blocks []string
stack := ""
wasStack := false
lines := map[string]bool{} // already found lines
for _, l := range strings.Split(input, "\n") {
isStackLine := stackLineR.MatchString(l)
switch {
case !isStackLine && wasStack:
blocks = append(blocks, stack, l)
stack = ""
lines = map[string]bool{}
case isStackLine:
if wasStack {
// Detected two consecutive stacks; this can happen because lines match in
// our tests due to WithStack(WithStack(io.EOF)) on the same line.
if detectStackboundaries {
if lines[l] {
if len(stack) == 0 {
return nil, errors.New("len of block must not be zero here")
}
blocks = append(blocks, stack)
stack = l
lines = map[string]bool{l: true}
continue
}
}
stack = stack + "\n" + l
} else {
stack = l
}
lines[l] = true
case !isStackLine && !wasStack:
blocks = append(blocks, l)
default:
return nil, errors.New("must not happen")
}
wasStack = isStackLine
}
// Use up stack
if stack != "" {
blocks = append(blocks, stack)
}
return blocks, nil
}
func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) {
gotStr := fmt.Sprintf(format, arg)
got, err := parseBlocks(gotStr, detectStackBoundaries)
if err != nil {
t.Fatal(err)
}
if len(got) != len(want) {
t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q",
n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr)
}
for i := range got {
if strings.ContainsAny(want[i], "\n") {
// Match as stack
match, err := regexp.MatchString(want[i], got[i])
if err != nil {
t.Fatal(err)
}
if !match {
t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n",
n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want))
}
} else {
// Match as message
if got[i] != want[i] {
t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i])
}
}
}
}
type wrapper struct {
wrap func(err error) error
want []string
}
func prettyBlocks(blocks []string, prefix ...string) string {
var out []string
for _, b := range blocks {
out = append(out, fmt.Sprintf("%v", b))
}
return " " + strings.Join(out, "\n ")
}
func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) {
if len(beforeWant) == 0 {
panic("beforeWant must not be empty")
}
for _, w := range list {
if len(w.want) == 0 {
panic("want must not be empty")
}
err := w.wrap(beforeErr)
// A copy is required because append(beforeWant, ..) modifies beforeWant subtly.
beforeCopy := make([]string, len(beforeWant))
copy(beforeCopy, beforeWant)
beforeWant := beforeCopy
last := len(beforeWant) - 1
var want []string
// Merge two stacks behind each other.
if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") {
want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...)
} else {
want = append(beforeWant, w.want...)
}
testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false)
if maxDepth > 0 {
testGenericRecursive(t, err, want, list, maxDepth-1)
}
}
}

178
vendor/github.com/pkg/errors/stack.go generated vendored Normal file
View File

@@ -0,0 +1,178 @@
package errors
import (
"fmt"
"io"
"path"
"runtime"
"strings"
)
// Frame represents a program counter inside a stack frame.
type Frame uintptr
// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
file, _ := fn.FileLine(f.pc())
return file
}
// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return 0
}
_, line := fn.FileLine(f.pc())
return line
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
// %d source line
// %n function name
// %v equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
}
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
f.Format(s, 'd')
}
}
// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
fmt.Fprintf(s, "\n%+v", f)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
fmt.Fprintf(s, "%v", []Frame(st))
}
case 's':
fmt.Fprintf(s, "%s", []Frame(st))
}
}
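// exampleFrameFormat is an illustrative sketch (not part of the upstream
// file) of the verbs documented above; it uses the stackTracer-style type
// assertion shown in the package's examples.
func exampleFrameFormat() {
	err := New("boom")
	if st, ok := err.(interface {
		StackTrace() StackTrace
	}); ok {
		// prints the function name, file and line of the New call
		fmt.Printf("%+v\n", st.StackTrace()[0])
	}
}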
// stack represents a stack of program counters.
type stack []uintptr
func (s *stack) Format(st fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case st.Flag('+'):
for _, pc := range *s {
f := Frame(pc)
fmt.Fprintf(st, "\n%+v", f)
}
}
}
}
func (s *stack) StackTrace() StackTrace {
f := make([]Frame, len(*s))
for i := 0; i < len(f); i++ {
f[i] = Frame((*s)[i])
}
return f
}
func callers() *stack {
const depth = 32
var pcs [depth]uintptr
n := runtime.Callers(3, pcs[:])
var st stack = pcs[0:n]
return &st
}
// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
i := strings.LastIndex(name, "/")
name = name[i+1:]
i = strings.Index(name, ".")
return name[i+1:]
}
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until we find two more than in the function name, and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}

292
vendor/github.com/pkg/errors/stack_test.go generated vendored Normal file
View File

@@ -0,0 +1,292 @@
package errors
import (
"fmt"
"runtime"
"testing"
)
var initpc, _, _, _ = runtime.Caller(0)
func TestFrameLine(t *testing.T) {
var tests = []struct {
Frame
want int
}{{
Frame(initpc),
9,
}, {
func() Frame {
var pc, _, _, _ = runtime.Caller(0)
return Frame(pc)
}(),
20,
}, {
func() Frame {
var pc, _, _, _ = runtime.Caller(1)
return Frame(pc)
}(),
28,
}, {
Frame(0), // invalid PC
0,
}}
for _, tt := range tests {
got := tt.Frame.line()
want := tt.want
if want != got {
t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got)
}
}
}
type X struct{}
func (x X) val() Frame {
var pc, _, _, _ = runtime.Caller(0)
return Frame(pc)
}
func (x *X) ptr() Frame {
var pc, _, _, _ = runtime.Caller(0)
return Frame(pc)
}
func TestFrameFormat(t *testing.T) {
var tests = []struct {
Frame
format string
want string
}{{
Frame(initpc),
"%s",
"stack_test.go",
}, {
Frame(initpc),
"%+s",
"github.com/pkg/errors.init\n" +
"\t.+/github.com/pkg/errors/stack_test.go",
}, {
Frame(0),
"%s",
"unknown",
}, {
Frame(0),
"%+s",
"unknown",
}, {
Frame(initpc),
"%d",
"9",
}, {
Frame(0),
"%d",
"0",
}, {
Frame(initpc),
"%n",
"init",
}, {
func() Frame {
var x X
return x.ptr()
}(),
"%n",
`\(\*X\).ptr`,
}, {
func() Frame {
var x X
return x.val()
}(),
"%n",
"X.val",
}, {
Frame(0),
"%n",
"",
}, {
Frame(initpc),
"%v",
"stack_test.go:9",
}, {
Frame(initpc),
"%+v",
"github.com/pkg/errors.init\n" +
"\t.+/github.com/pkg/errors/stack_test.go:9",
}, {
Frame(0),
"%v",
"unknown:0",
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.Frame, tt.format, tt.want)
}
}
func TestFuncname(t *testing.T) {
tests := []struct {
name, want string
}{
{"", ""},
{"runtime.main", "main"},
{"github.com/pkg/errors.funcname", "funcname"},
{"funcname", "funcname"},
{"io.copyBuffer", "copyBuffer"},
{"main.(*R).Write", "(*R).Write"},
}
for _, tt := range tests {
got := funcname(tt.name)
want := tt.want
if got != want {
t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got)
}
}
}
func TestTrimGOPATH(t *testing.T) {
var tests = []struct {
Frame
want string
}{{
Frame(initpc),
"github.com/pkg/errors/stack_test.go",
}}
for i, tt := range tests {
pc := tt.Frame.pc()
fn := runtime.FuncForPC(pc)
file, _ := fn.FileLine(pc)
got := trimGOPATH(fn.Name(), file)
testFormatRegexp(t, i, got, "%s", tt.want)
}
}
func TestStackTrace(t *testing.T) {
tests := []struct {
err error
want []string
}{{
New("ooh"), []string{
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:172",
},
}, {
Wrap(New("ooh"), "ahh"), []string{
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New
},
}, {
Cause(Wrap(New("ooh"), "ahh")), []string{
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New
},
}, {
func() error { return New("ooh") }(), []string{
`github.com/pkg/errors.(func·009|TestStackTrace.func1)` +
"\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller
},
}, {
Cause(func() error {
return func() error {
return Errorf("hello %s", fmt.Sprintf("world"))
}()
}()), []string{
`github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` +
"\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf
`github.com/pkg/errors.(func·011|TestStackTrace.func2)` +
"\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller
},
}}
for i, tt := range tests {
x, ok := tt.err.(interface {
StackTrace() StackTrace
})
if !ok {
t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err)
continue
}
st := x.StackTrace()
for j, want := range tt.want {
testFormatRegexp(t, i, st[j], "%+v", want)
}
}
}
func stackTrace() StackTrace {
const depth = 8
var pcs [depth]uintptr
n := runtime.Callers(1, pcs[:])
var st stack = pcs[0:n]
return st.StackTrace()
}
func TestStackTraceFormat(t *testing.T) {
tests := []struct {
StackTrace
format string
want string
}{{
nil,
"%s",
`\[\]`,
}, {
nil,
"%v",
`\[\]`,
}, {
nil,
"%+v",
"",
}, {
nil,
"%#v",
`\[\]errors.Frame\(nil\)`,
}, {
make(StackTrace, 0),
"%s",
`\[\]`,
}, {
make(StackTrace, 0),
"%v",
`\[\]`,
}, {
make(StackTrace, 0),
"%+v",
"",
}, {
make(StackTrace, 0),
"%#v",
`\[\]errors.Frame{}`,
}, {
stackTrace()[:2],
"%s",
`\[stack_test.go stack_test.go\]`,
}, {
stackTrace()[:2],
"%v",
`\[stack_test.go:225 stack_test.go:272\]`,
}, {
stackTrace()[:2],
"%+v",
"\n" +
"github.com/pkg/errors.stackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:225\n" +
"github.com/pkg/errors.TestStackTraceFormat\n" +
"\t.+/github.com/pkg/errors/stack_test.go:276",
}, {
stackTrace()[:2],
"%#v",
`\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`,
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want)
}
}

22
vendor/github.com/xtaci/kcp-go/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Daniel Fu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

263
vendor/github.com/xtaci/kcp-go/crypt.go generated vendored Normal file
View File

@@ -0,0 +1,263 @@
package kcp
import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/sha1"
"golang.org/x/crypto/blowfish"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/salsa20"
"golang.org/x/crypto/tea"
"golang.org/x/crypto/twofish"
"golang.org/x/crypto/xtea"
)
var (
initialVector = []byte{167, 115, 79, 156, 18, 172, 27, 1, 164, 21, 242, 193, 252, 120, 230, 107}
saltxor = `sH3CIVoF#rWLtJo6`
)
// BlockCrypt defines encryption/decryption methods for a given byte slice.
// Note for implementers: the data to be encrypted contains a built-in
// nonce in its first 16 bytes.
type BlockCrypt interface {
// Encrypt encrypts the whole block in src into dst.
// Dst and src may point at the same memory.
Encrypt(dst, src []byte)
// Decrypt decrypts the whole block in src into dst.
// Dst and src may point at the same memory.
Decrypt(dst, src []byte)
}
type salsa20BlockCrypt struct {
key [32]byte
}
// NewSalsa20BlockCrypt https://en.wikipedia.org/wiki/Salsa20
func NewSalsa20BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(salsa20BlockCrypt)
copy(c.key[:], key)
return c, nil
}
func (c *salsa20BlockCrypt) Encrypt(dst, src []byte) {
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
copy(dst[:8], src[:8])
}
func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
copy(dst[:8], src[:8])
}
type twofishBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewTwofishBlockCrypt https://en.wikipedia.org/wiki/Twofish
func NewTwofishBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(twofishBlockCrypt)
block, err := twofish.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, twofish.BlockSize)
c.decbuf = make([]byte, 2*twofish.BlockSize)
return c, nil
}
func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type tripleDESBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewTripleDESBlockCrypt https://en.wikipedia.org/wiki/Triple_DES
func NewTripleDESBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(tripleDESBlockCrypt)
block, err := des.NewTripleDESCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, des.BlockSize)
c.decbuf = make([]byte, 2*des.BlockSize)
return c, nil
}
func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type cast5BlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewCast5BlockCrypt https://en.wikipedia.org/wiki/CAST-128
func NewCast5BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(cast5BlockCrypt)
block, err := cast5.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, cast5.BlockSize)
c.decbuf = make([]byte, 2*cast5.BlockSize)
return c, nil
}
func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type blowfishBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewBlowfishBlockCrypt https://en.wikipedia.org/wiki/Blowfish_(cipher)
func NewBlowfishBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(blowfishBlockCrypt)
block, err := blowfish.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, blowfish.BlockSize)
c.decbuf = make([]byte, 2*blowfish.BlockSize)
return c, nil
}
func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type aesBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewAESBlockCrypt https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
func NewAESBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(aesBlockCrypt)
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, aes.BlockSize)
c.decbuf = make([]byte, 2*aes.BlockSize)
return c, nil
}
func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type teaBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewTEABlockCrypt https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
func NewTEABlockCrypt(key []byte) (BlockCrypt, error) {
c := new(teaBlockCrypt)
block, err := tea.NewCipherWithRounds(key, 16)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, tea.BlockSize)
c.decbuf = make([]byte, 2*tea.BlockSize)
return c, nil
}
func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type xteaBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewXTEABlockCrypt https://en.wikipedia.org/wiki/XTEA
func NewXTEABlockCrypt(key []byte) (BlockCrypt, error) {
c := new(xteaBlockCrypt)
block, err := xtea.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, xtea.BlockSize)
c.decbuf = make([]byte, 2*xtea.BlockSize)
return c, nil
}
func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type simpleXORBlockCrypt struct {
xortbl []byte
}
// NewSimpleXORBlockCrypt implements a simple XOR cipher with key expansion
func NewSimpleXORBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(simpleXORBlockCrypt)
c.xortbl = pbkdf2.Key(key, []byte(saltxor), 32, mtuLimit, sha1.New)
return c, nil
}
func (c *simpleXORBlockCrypt) Encrypt(dst, src []byte) { xorBytes(dst, src, c.xortbl) }
func (c *simpleXORBlockCrypt) Decrypt(dst, src []byte) { xorBytes(dst, src, c.xortbl) }
type noneBlockCrypt struct{}
// NewNoneBlockCrypt does nothing but copy the data
func NewNoneBlockCrypt(key []byte) (BlockCrypt, error) {
return new(noneBlockCrypt), nil
}
func (c *noneBlockCrypt) Encrypt(dst, src []byte) { copy(dst, src) }
func (c *noneBlockCrypt) Decrypt(dst, src []byte) { copy(dst, src) }
// packet encryption with local CFB mode
func encrypt(block cipher.Block, dst, src, buf []byte) {
blocksize := block.BlockSize()
tbl := buf[:blocksize]
block.Encrypt(tbl, initialVector)
n := len(src) / blocksize
base := 0
for i := 0; i < n; i++ {
xorWords(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
}
xorBytes(dst[base:], src[base:], tbl)
}
func decrypt(block cipher.Block, dst, src, buf []byte) {
blocksize := block.BlockSize()
tbl := buf[:blocksize]
next := buf[blocksize:]
block.Encrypt(tbl, initialVector)
n := len(src) / blocksize
base := 0
for i := 0; i < n; i++ {
block.Encrypt(next, src[base:])
xorWords(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
}
xorBytes(dst[base:], src[base:], tbl)
}
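// exampleAESBlockCrypt is an illustrative usage sketch added for review; it
// is not part of the upstream file. The passphrase, salt, iteration count and
// buffer size are made-up values, and error handling is reduced to a panic.
func exampleAESBlockCrypt() {
	// derive a 32-byte key the same way a caller might, then encrypt/decrypt in place
	key := pbkdf2.Key([]byte("demo pass"), []byte("demo salt"), 4096, 32, sha1.New)
	block, err := NewAESBlockCrypt(key)
	if err != nil {
		panic(err)
	}
	buf := make([]byte, 64) // per the BlockCrypt notes, the first 16 bytes would carry a random nonce
	block.Encrypt(buf, buf)
	block.Decrypt(buf, buf)
}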

242
vendor/github.com/xtaci/kcp-go/fec.go generated vendored Normal file
View File

@@ -0,0 +1,242 @@
package kcp
import (
"encoding/binary"
"sync/atomic"
"github.com/xtaci/reedsolomon"
)
const (
fecHeaderSize = 6
fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size
typeData = 0xf1
typeFEC = 0xf2
fecExpire = 30000 // 30s
)
type (
// FEC defines forward error correction for packets
FEC struct {
rx []fecPacket // ordered receive queue
rxlimit int // queue size limit
dataShards int
parityShards int
shardSize int
next uint32 // next seqid
enc reedsolomon.Encoder
shards [][]byte
shards2 [][]byte // for calcECC
shardsflag []bool
paws uint32 // Protect Against Wrapped Sequence numbers
lastCheck uint32
}
fecPacket struct {
seqid uint32
flag uint16
data []byte
ts uint32
}
)
func newFEC(rxlimit, dataShards, parityShards int) *FEC {
if dataShards <= 0 || parityShards <= 0 {
return nil
}
if rxlimit < dataShards+parityShards {
return nil
}
fec := new(FEC)
fec.rxlimit = rxlimit
fec.dataShards = dataShards
fec.parityShards = parityShards
fec.shardSize = dataShards + parityShards
fec.paws = (0xffffffff/uint32(fec.shardSize) - 1) * uint32(fec.shardSize)
enc, err := reedsolomon.New(dataShards, parityShards)
if err != nil {
return nil
}
fec.enc = enc
fec.shards = make([][]byte, fec.shardSize)
fec.shards2 = make([][]byte, fec.shardSize)
fec.shardsflag = make([]bool, fec.shardSize)
return fec
}
// decode a fec packet
func (fec *FEC) decode(data []byte) fecPacket {
var pkt fecPacket
pkt.seqid = binary.LittleEndian.Uint32(data)
pkt.flag = binary.LittleEndian.Uint16(data[4:])
pkt.ts = currentMs()
// allocate memory & copy
buf := xmitBuf.Get().([]byte)[:len(data)-6]
copy(buf, data[6:])
pkt.data = buf
return pkt
}
func (fec *FEC) markData(data []byte) {
binary.LittleEndian.PutUint32(data, fec.next)
binary.LittleEndian.PutUint16(data[4:], typeData)
fec.next++
}
func (fec *FEC) markFEC(data []byte) {
binary.LittleEndian.PutUint32(data, fec.next)
binary.LittleEndian.PutUint16(data[4:], typeFEC)
fec.next++
if fec.next >= fec.paws { // a paws wrap can only occur in markFEC
fec.next = 0
}
}
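// For reference (derived from markData/markFEC and decode above), the 6-byte
// FEC header prepended to every packet is little-endian:
//
//	offset 0-3: seqid (uint32, wraps at paws)
//	offset 4-5: flag  (uint16, typeData 0xf1 or typeFEC 0xf2)
//
// fecHeaderSizePlus2 additionally reserves 2 bytes, presumably for a payload
// size written by the caller elsewhere in this package.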
// input a fec packet
func (fec *FEC) input(pkt fecPacket) (recovered [][]byte) {
// expiration
now := currentMs()
if now-fec.lastCheck >= fecExpire {
var rx []fecPacket
for k := range fec.rx {
if now-fec.rx[k].ts < fecExpire {
rx = append(rx, fec.rx[k])
} else {
xmitBuf.Put(fec.rx[k].data)
}
}
fec.rx = rx
fec.lastCheck = now
}
// insertion
n := len(fec.rx) - 1
insertIdx := 0
for i := n; i >= 0; i-- {
if pkt.seqid == fec.rx[i].seqid { // de-duplicate
xmitBuf.Put(pkt.data)
return nil
} else if pkt.seqid > fec.rx[i].seqid { // insertion
insertIdx = i + 1
break
}
}
// insert into ordered rx queue
if insertIdx == n+1 {
fec.rx = append(fec.rx, pkt)
} else {
fec.rx = append(fec.rx, fecPacket{})
copy(fec.rx[insertIdx+1:], fec.rx[insertIdx:])
fec.rx[insertIdx] = pkt
}
// shard range for current packet
shardBegin := pkt.seqid - pkt.seqid%uint32(fec.shardSize)
shardEnd := shardBegin + uint32(fec.shardSize) - 1
// max search range in ordered queue for current shard
searchBegin := insertIdx - int(pkt.seqid%uint32(fec.shardSize))
if searchBegin < 0 {
searchBegin = 0
}
searchEnd := searchBegin + fec.shardSize - 1
if searchEnd >= len(fec.rx) {
searchEnd = len(fec.rx) - 1
}
if searchEnd > searchBegin && searchEnd-searchBegin+1 >= fec.dataShards {
numshard := 0
numDataShard := 0
first := -1
maxlen := 0
shards := fec.shards
shardsflag := fec.shardsflag
for k := range fec.shards {
shards[k] = nil
shardsflag[k] = false
}
for i := searchBegin; i <= searchEnd; i++ {
seqid := fec.rx[i].seqid
if seqid > shardEnd {
break
} else if seqid >= shardBegin {
shards[seqid%uint32(fec.shardSize)] = fec.rx[i].data
shardsflag[seqid%uint32(fec.shardSize)] = true
numshard++
if fec.rx[i].flag == typeData {
numDataShard++
}
if numshard == 1 {
first = i
}
if len(fec.rx[i].data) > maxlen {
maxlen = len(fec.rx[i].data)
}
}
}
if numDataShard == fec.dataShards { // no lost
for i := first; i < first+numshard; i++ { // free
xmitBuf.Put(fec.rx[i].data)
}
copy(fec.rx[first:], fec.rx[first+numshard:])
for i := 0; i < numshard; i++ { // dereference
fec.rx[len(fec.rx)-1-i] = fecPacket{}
}
fec.rx = fec.rx[:len(fec.rx)-numshard]
} else if numshard >= fec.dataShards { // recoverable
for k := range shards {
if shards[k] != nil {
dlen := len(shards[k])
shards[k] = shards[k][:maxlen]
xorBytes(shards[k][dlen:], shards[k][dlen:], shards[k][dlen:])
}
}
if err := fec.enc.Reconstruct(shards); err == nil {
for k := range shards[:fec.dataShards] {
if !shardsflag[k] {
recovered = append(recovered, shards[k])
}
}
}
for i := first; i < first+numshard; i++ { // free
xmitBuf.Put(fec.rx[i].data)
}
copy(fec.rx[first:], fec.rx[first+numshard:])
for i := 0; i < numshard; i++ { // dereference
fec.rx[len(fec.rx)-1-i] = fecPacket{}
}
fec.rx = fec.rx[:len(fec.rx)-numshard]
}
}
// keep rxlimit
if len(fec.rx) > fec.rxlimit {
if fec.rx[0].flag == typeData { // record unrecoverable data
atomic.AddUint64(&DefaultSnmp.FECShortShards, 1)
}
xmitBuf.Put(fec.rx[0].data) // free
fec.rx[0].data = nil
fec.rx = fec.rx[1:]
}
return
}
func (fec *FEC) calcECC(data [][]byte, offset, maxlen int) (ecc [][]byte) {
if len(data) != fec.shardSize {
return nil
}
shards := fec.shards2
for k := range shards {
shards[k] = data[k][offset:maxlen]
}
if err := fec.enc.Encode(shards); err != nil {
return nil
}
return data[fec.dataShards:]
}
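// exampleFECReceive is an illustrative sketch (not upstream code) of the
// receive path. The *FEC would come from newFEC(rxlimit, dataShards,
// parityShards); raw is assumed to be a datagram whose first fecHeaderSize
// bytes were written by markData/markFEC on the sending side.
func exampleFECReceive(fec *FEC, raw []byte) {
	pkt := fec.decode(raw)
	if recovered := fec.input(pkt); recovered != nil {
		for _, shard := range recovered {
			_ = shard // hand each recovered payload back to the KCP layer (e.g. kcp.Input)
		}
	}
}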

964
vendor/github.com/xtaci/kcp-go/kcp.go generated vendored Normal file
View File

@@ -0,0 +1,964 @@
// Package kcp - A Fast and Reliable ARQ Protocol
package kcp
import (
"encoding/binary"
"sync/atomic"
)
const (
IKCP_RTO_NDL = 30 // no delay min rto
IKCP_RTO_MIN = 100 // normal min rto
IKCP_RTO_DEF = 200
IKCP_RTO_MAX = 60000
IKCP_CMD_PUSH = 81 // cmd: push data
IKCP_CMD_ACK = 82 // cmd: ack
IKCP_CMD_WASK = 83 // cmd: window probe (ask)
IKCP_CMD_WINS = 84 // cmd: window size (tell)
IKCP_ASK_SEND = 1 // need to send IKCP_CMD_WASK
IKCP_ASK_TELL = 2 // need to send IKCP_CMD_WINS
IKCP_WND_SND = 32
IKCP_WND_RCV = 32
IKCP_MTU_DEF = 1400
IKCP_ACK_FAST = 3
IKCP_INTERVAL = 100
IKCP_OVERHEAD = 24
IKCP_DEADLINK = 20
IKCP_THRESH_INIT = 2
IKCP_THRESH_MIN = 2
IKCP_PROBE_INIT = 7000 // 7 secs to probe window size
IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)
// Output is a closure which captures conn and calls conn.Write
type Output func(buf []byte, size int)
/* encode 8 bits unsigned int */
func ikcp_encode8u(p []byte, c byte) []byte {
p[0] = c
return p[1:]
}
/* decode 8 bits unsigned int */
func ikcp_decode8u(p []byte, c *byte) []byte {
*c = p[0]
return p[1:]
}
/* encode 16 bits unsigned int (lsb) */
func ikcp_encode16u(p []byte, w uint16) []byte {
binary.LittleEndian.PutUint16(p, w)
return p[2:]
}
/* decode 16 bits unsigned int (lsb) */
func ikcp_decode16u(p []byte, w *uint16) []byte {
*w = binary.LittleEndian.Uint16(p)
return p[2:]
}
/* encode 32 bits unsigned int (lsb) */
func ikcp_encode32u(p []byte, l uint32) []byte {
binary.LittleEndian.PutUint32(p, l)
return p[4:]
}
/* decode 32 bits unsigned int (lsb) */
func ikcp_decode32u(p []byte, l *uint32) []byte {
*l = binary.LittleEndian.Uint32(p)
return p[4:]
}
func _imin_(a, b uint32) uint32 {
if a <= b {
return a
} else {
return b
}
}
func _imax_(a, b uint32) uint32 {
if a >= b {
return a
} else {
return b
}
}
func _ibound_(lower, middle, upper uint32) uint32 {
return _imin_(_imax_(lower, middle), upper)
}
func _itimediff(later, earlier uint32) int32 {
return (int32)(later - earlier)
}
// Segment defines a KCP segment
type Segment struct {
conv uint32
cmd uint32
frg uint32
wnd uint32
ts uint32
sn uint32
una uint32
resendts uint32
rto uint32
fastack uint32
xmit uint32
data []byte
}
// encode a segment into buffer
func (seg *Segment) encode(ptr []byte) []byte {
ptr = ikcp_encode32u(ptr, seg.conv)
ptr = ikcp_encode8u(ptr, uint8(seg.cmd))
ptr = ikcp_encode8u(ptr, uint8(seg.frg))
ptr = ikcp_encode16u(ptr, uint16(seg.wnd))
ptr = ikcp_encode32u(ptr, seg.ts)
ptr = ikcp_encode32u(ptr, seg.sn)
ptr = ikcp_encode32u(ptr, seg.una)
ptr = ikcp_encode32u(ptr, uint32(len(seg.data)))
return ptr
}
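// For reference, encode emits the fixed 24-byte header (IKCP_OVERHEAD) in
// little-endian order, immediately followed by the payload:
//
//	conv(4) | cmd(1) | frg(1) | wnd(2) | ts(4) | sn(4) | una(4) | len(4)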
// KCP defines a single KCP connection
type KCP struct {
conv, mtu, mss, state uint32
snd_una, snd_nxt, rcv_nxt uint32
ssthresh uint32
rx_rttval, rx_srtt, rx_rto, rx_minrto uint32
snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32
interval, ts_flush, xmit uint32
nodelay, updated uint32
ts_probe, probe_wait uint32
dead_link, incr uint32
fastresend int32
nocwnd, stream int32
snd_queue []Segment
rcv_queue []Segment
snd_buf []Segment
rcv_buf []Segment
acklist []ackItem
buffer []byte
output Output
}
type ackItem struct {
sn uint32
ts uint32
}
// NewKCP creates a new KCP control object. 'conv' must be equal at both
// endpoints of the same connection.
func NewKCP(conv uint32, output Output) *KCP {
kcp := new(KCP)
kcp.conv = conv
kcp.snd_wnd = IKCP_WND_SND
kcp.rcv_wnd = IKCP_WND_RCV
kcp.rmt_wnd = IKCP_WND_RCV
kcp.mtu = IKCP_MTU_DEF
kcp.mss = kcp.mtu - IKCP_OVERHEAD
kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
kcp.rx_rto = IKCP_RTO_DEF
kcp.rx_minrto = IKCP_RTO_MIN
kcp.interval = IKCP_INTERVAL
kcp.ts_flush = IKCP_INTERVAL
kcp.ssthresh = IKCP_THRESH_INIT
kcp.dead_link = IKCP_DEADLINK
kcp.output = output
return kcp
}
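// exampleKCPUsage is a minimal, hedged sketch (not upstream code) of driving
// a KCP instance directly; the UDPSession type in sess.go wraps this same
// pattern. The conversation id 12345 and the datagram argument are
// assumptions for the example.
func exampleKCPUsage(datagram []byte) {
	kcp := NewKCP(12345, func(buf []byte, size int) {
		// send buf[:size] to the remote peer here, e.g. via a net.PacketConn
	})
	kcp.WndSize(128, 128)
	kcp.NoDelay(1, 20, 2, 1) // the "fastest" profile from NoDelay's documentation below

	kcp.Input(datagram, true) // feed in every received datagram
	kcp.Update()              // call periodically, or as scheduled by Check
	if n := kcp.PeekSize(); n > 0 {
		msg := make([]byte, n)
		kcp.Recv(msg) // read one whole message out
	}
}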
// newSegment creates a KCP segment
func (kcp *KCP) newSegment(size int) *Segment {
seg := new(Segment)
seg.data = xmitBuf.Get().([]byte)[:size]
return seg
}
// delSegment recycles a KCP segment
func (kcp *KCP) delSegment(seg *Segment) {
xmitBuf.Put(seg.data)
}
// PeekSize checks the size of the next message in the recv queue
func (kcp *KCP) PeekSize() (length int) {
if len(kcp.rcv_queue) == 0 {
return -1
}
seg := &kcp.rcv_queue[0]
if seg.frg == 0 {
return len(seg.data)
}
if len(kcp.rcv_queue) < int(seg.frg+1) {
return -1
}
for k := range kcp.rcv_queue {
seg := &kcp.rcv_queue[k]
length += len(seg.data)
if seg.frg == 0 {
break
}
}
return
}
// Recv is the user/upper-level receive function: it returns the size read, or a value below zero for EAGAIN
func (kcp *KCP) Recv(buffer []byte) (n int) {
if len(kcp.rcv_queue) == 0 {
return -1
}
peeksize := kcp.PeekSize()
if peeksize < 0 {
return -2
}
if peeksize > len(buffer) {
return -3
}
var fast_recover bool
if len(kcp.rcv_queue) >= int(kcp.rcv_wnd) {
fast_recover = true
}
// merge fragment
count := 0
for k := range kcp.rcv_queue {
seg := &kcp.rcv_queue[k]
copy(buffer, seg.data)
buffer = buffer[len(seg.data):]
n += len(seg.data)
count++
kcp.delSegment(seg)
if seg.frg == 0 {
break
}
}
kcp.rcv_queue = kcp.rcv_queue[count:]
// move available data from rcv_buf -> rcv_queue
count = 0
for k := range kcp.rcv_buf {
seg := &kcp.rcv_buf[k]
if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
kcp.rcv_nxt++
count++
} else {
break
}
}
kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
kcp.rcv_buf = kcp.rcv_buf[count:]
// fast recover
if len(kcp.rcv_queue) < int(kcp.rcv_wnd) && fast_recover {
// ready to send back IKCP_CMD_WINS in ikcp_flush
// tell remote my window size
kcp.probe |= IKCP_ASK_TELL
}
return
}
// Send is the user/upper-level send function; it returns a value below zero on error
func (kcp *KCP) Send(buffer []byte) int {
var count int
if len(buffer) == 0 {
return -1
}
// append to previous segment in streaming mode (if possible)
if kcp.stream != 0 {
n := len(kcp.snd_queue)
if n > 0 {
old := &kcp.snd_queue[n-1]
if len(old.data) < int(kcp.mss) {
capacity := int(kcp.mss) - len(old.data)
extend := capacity
if len(buffer) < capacity {
extend = len(buffer)
}
seg := kcp.newSegment(len(old.data) + extend)
seg.frg = 0
copy(seg.data, old.data)
copy(seg.data[len(old.data):], buffer)
buffer = buffer[extend:]
kcp.delSegment(old)
kcp.snd_queue[n-1] = *seg
}
}
if len(buffer) == 0 {
return 0
}
}
if len(buffer) <= int(kcp.mss) {
count = 1
} else {
count = (len(buffer) + int(kcp.mss) - 1) / int(kcp.mss)
}
if count > 255 {
return -2
}
if count == 0 {
count = 1
}
for i := 0; i < count; i++ {
var size int
if len(buffer) > int(kcp.mss) {
size = int(kcp.mss)
} else {
size = len(buffer)
}
seg := kcp.newSegment(size)
copy(seg.data, buffer[:size])
if kcp.stream == 0 { // message mode
seg.frg = uint32(count - i - 1)
} else { // stream mode
seg.frg = 0
}
kcp.snd_queue = append(kcp.snd_queue, *seg)
buffer = buffer[size:]
}
return 0
}
func (kcp *KCP) update_ack(rtt int32) {
// https://tools.ietf.org/html/rfc6298
var rto uint32
if kcp.rx_srtt == 0 {
kcp.rx_srtt = uint32(rtt)
kcp.rx_rttval = uint32(rtt) / 2
} else {
delta := rtt - int32(kcp.rx_srtt)
if delta < 0 {
delta = -delta
}
kcp.rx_rttval = (3*kcp.rx_rttval + uint32(delta)) / 4
kcp.rx_srtt = (7*kcp.rx_srtt + uint32(rtt)) / 8
if kcp.rx_srtt < 1 {
kcp.rx_srtt = 1
}
}
rto = kcp.rx_srtt + _imax_(kcp.interval, 4*kcp.rx_rttval)
kcp.rx_rto = _ibound_(kcp.rx_minrto, rto, IKCP_RTO_MAX)
}
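// Worked example of the smoothing above (numbers are made up): with
// rx_srtt = 100ms, rx_rttval = 20ms and a new sample rtt = 140ms, delta = 40,
// rx_rttval becomes (3*20+40)/4 = 25, rx_srtt becomes (7*100+140)/8 = 105,
// and with interval = 100 the new rto = 105 + max(100, 4*25) = 205ms,
// clamped into [rx_minrto, IKCP_RTO_MAX].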
func (kcp *KCP) shrink_buf() {
if len(kcp.snd_buf) > 0 {
seg := &kcp.snd_buf[0]
kcp.snd_una = seg.sn
} else {
kcp.snd_una = kcp.snd_nxt
}
}
func (kcp *KCP) parse_ack(sn uint32) {
if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
return
}
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
if sn == seg.sn {
kcp.delSegment(seg)
copy(kcp.snd_buf[k:], kcp.snd_buf[k+1:])
kcp.snd_buf[len(kcp.snd_buf)-1] = Segment{}
kcp.snd_buf = kcp.snd_buf[:len(kcp.snd_buf)-1]
break
}
if _itimediff(sn, seg.sn) < 0 {
break
}
}
}
func (kcp *KCP) parse_fastack(sn uint32) {
if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
return
}
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
if _itimediff(sn, seg.sn) < 0 {
break
} else if sn != seg.sn { // && kcp.current >= seg.ts+kcp.rx_srtt {
seg.fastack++
}
}
}
func (kcp *KCP) parse_una(una uint32) {
count := 0
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
if _itimediff(una, seg.sn) > 0 {
kcp.delSegment(seg)
count++
} else {
break
}
}
kcp.snd_buf = kcp.snd_buf[count:]
}
// ack append
func (kcp *KCP) ack_push(sn, ts uint32) {
kcp.acklist = append(kcp.acklist, ackItem{sn, ts})
}
func (kcp *KCP) parse_data(newseg *Segment) {
sn := newseg.sn
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
_itimediff(sn, kcp.rcv_nxt) < 0 {
kcp.delSegment(newseg)
return
}
n := len(kcp.rcv_buf) - 1
insert_idx := 0
repeat := false
for i := n; i >= 0; i-- {
seg := &kcp.rcv_buf[i]
if seg.sn == sn {
repeat = true
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
break
}
if _itimediff(sn, seg.sn) > 0 {
insert_idx = i + 1
break
}
}
if !repeat {
if insert_idx == n+1 {
kcp.rcv_buf = append(kcp.rcv_buf, *newseg)
} else {
kcp.rcv_buf = append(kcp.rcv_buf, Segment{})
copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
kcp.rcv_buf[insert_idx] = *newseg
}
} else {
kcp.delSegment(newseg)
}
// move available data from rcv_buf -> rcv_queue
count := 0
for k := range kcp.rcv_buf {
seg := &kcp.rcv_buf[k]
if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
kcp.rcv_nxt++
count++
} else {
break
}
}
kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
kcp.rcv_buf = kcp.rcv_buf[count:]
}
// Input should be called when a low-level packet (e.g. a UDP packet) is received
func (kcp *KCP) Input(data []byte, update_ack bool) int {
una := kcp.snd_una
if len(data) < IKCP_OVERHEAD {
return -1
}
var maxack uint32
var recentack uint32
var flag int
for {
var ts, sn, length, una, conv uint32
var wnd uint16
var cmd, frg uint8
if len(data) < int(IKCP_OVERHEAD) {
break
}
data = ikcp_decode32u(data, &conv)
if conv != kcp.conv {
return -1
}
data = ikcp_decode8u(data, &cmd)
data = ikcp_decode8u(data, &frg)
data = ikcp_decode16u(data, &wnd)
data = ikcp_decode32u(data, &ts)
data = ikcp_decode32u(data, &sn)
data = ikcp_decode32u(data, &una)
data = ikcp_decode32u(data, &length)
if len(data) < int(length) {
return -2
}
if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS {
return -3
}
kcp.rmt_wnd = uint32(wnd)
kcp.parse_una(una)
kcp.shrink_buf()
if cmd == IKCP_CMD_ACK {
kcp.parse_ack(sn)
kcp.shrink_buf()
if flag == 0 {
flag = 1
maxack = sn
} else if _itimediff(sn, maxack) > 0 {
maxack = sn
}
recentack = ts
} else if cmd == IKCP_CMD_PUSH {
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
kcp.ack_push(sn, ts)
if _itimediff(sn, kcp.rcv_nxt) >= 0 {
seg := kcp.newSegment(int(length))
seg.conv = conv
seg.cmd = uint32(cmd)
seg.frg = uint32(frg)
seg.wnd = uint32(wnd)
seg.ts = ts
seg.sn = sn
seg.una = una
copy(seg.data, data[:length])
kcp.parse_data(seg)
} else {
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
}
} else {
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
}
} else if cmd == IKCP_CMD_WASK {
// ready to send back IKCP_CMD_WINS in Ikcp_flush
// tell remote my window size
kcp.probe |= IKCP_ASK_TELL
} else if cmd == IKCP_CMD_WINS {
// do nothing
} else {
return -3
}
data = data[length:]
}
current := currentMs()
if flag != 0 && update_ack {
kcp.parse_fastack(maxack)
if _itimediff(current, recentack) >= 0 {
kcp.update_ack(_itimediff(current, recentack))
}
}
if _itimediff(kcp.snd_una, una) > 0 {
if kcp.cwnd < kcp.rmt_wnd {
mss := kcp.mss
if kcp.cwnd < kcp.ssthresh {
kcp.cwnd++
kcp.incr += mss
} else {
if kcp.incr < mss {
kcp.incr = mss
}
kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
if (kcp.cwnd+1)*mss <= kcp.incr {
kcp.cwnd++
}
}
if kcp.cwnd > kcp.rmt_wnd {
kcp.cwnd = kcp.rmt_wnd
kcp.incr = kcp.rmt_wnd * mss
}
}
}
return 0
}
func (kcp *KCP) wnd_unused() int32 {
if len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
return int32(int(kcp.rcv_wnd) - len(kcp.rcv_queue))
}
return 0
}
// flush pending data
func (kcp *KCP) flush() {
buffer := kcp.buffer
change := 0
lost := false
var seg Segment
seg.conv = kcp.conv
seg.cmd = IKCP_CMD_ACK
seg.wnd = uint32(kcp.wnd_unused())
seg.una = kcp.rcv_nxt
// flush acknowledges
ptr := buffer
for i, ack := range kcp.acklist {
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
// filter jitters caused by bufferbloat
if ack.sn >= kcp.rcv_nxt || len(kcp.acklist)-1 == i {
seg.sn, seg.ts = ack.sn, ack.ts
ptr = seg.encode(ptr)
}
}
kcp.acklist = nil
current := currentMs()
// probe window size (if remote window size equals zero)
if kcp.rmt_wnd == 0 {
if kcp.probe_wait == 0 {
kcp.probe_wait = IKCP_PROBE_INIT
kcp.ts_probe = current + kcp.probe_wait
} else {
if _itimediff(current, kcp.ts_probe) >= 0 {
if kcp.probe_wait < IKCP_PROBE_INIT {
kcp.probe_wait = IKCP_PROBE_INIT
}
kcp.probe_wait += kcp.probe_wait / 2
if kcp.probe_wait > IKCP_PROBE_LIMIT {
kcp.probe_wait = IKCP_PROBE_LIMIT
}
kcp.ts_probe = current + kcp.probe_wait
kcp.probe |= IKCP_ASK_SEND
}
}
} else {
kcp.ts_probe = 0
kcp.probe_wait = 0
}
// flush window probing commands
if (kcp.probe & IKCP_ASK_SEND) != 0 {
seg.cmd = IKCP_CMD_WASK
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
ptr = seg.encode(ptr)
}
// flush window probing commands
if (kcp.probe & IKCP_ASK_TELL) != 0 {
seg.cmd = IKCP_CMD_WINS
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
ptr = seg.encode(ptr)
}
kcp.probe = 0
// calculate window size
cwnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
if kcp.nocwnd == 0 {
cwnd = _imin_(kcp.cwnd, cwnd)
}
// sliding window, controlled by snd_nxt && snd_una+cwnd
count := 0
for k := range kcp.snd_queue {
if _itimediff(kcp.snd_nxt, kcp.snd_una+cwnd) >= 0 {
break
}
newseg := kcp.snd_queue[k]
newseg.conv = kcp.conv
newseg.cmd = IKCP_CMD_PUSH
newseg.wnd = seg.wnd
newseg.ts = current
newseg.sn = kcp.snd_nxt
newseg.una = kcp.rcv_nxt
newseg.resendts = newseg.ts
newseg.rto = kcp.rx_rto
kcp.snd_buf = append(kcp.snd_buf, newseg)
kcp.snd_nxt++
count++
kcp.snd_queue[k].data = nil
}
kcp.snd_queue = kcp.snd_queue[count:]
// flag pending data
hasPending := false
if count > 0 {
hasPending = true
}
// calculate resent
resent := uint32(kcp.fastresend)
if kcp.fastresend <= 0 {
resent = 0xffffffff
}
// flush data segments
var lostSegs, fastRetransSegs, earlyRetransSegs uint64
for k := range kcp.snd_buf {
current := currentMs()
segment := &kcp.snd_buf[k]
needsend := false
if segment.xmit == 0 {
needsend = true
segment.xmit++
segment.rto = kcp.rx_rto
segment.resendts = current + segment.rto
} else if _itimediff(current, segment.resendts) >= 0 {
needsend = true
segment.xmit++
kcp.xmit++
if kcp.nodelay == 0 {
segment.rto += kcp.rx_rto
} else {
segment.rto += kcp.rx_rto / 2
}
segment.resendts = current + segment.rto
lost = true
lostSegs++
} else if segment.fastack >= resent { // fast retransmit
lastsend := segment.resendts - segment.rto
if _itimediff(current, lastsend) >= int32(kcp.rx_rto/4) {
needsend = true
segment.xmit++
segment.fastack = 0
segment.resendts = current + segment.rto
change++
fastRetransSegs++
}
} else if segment.fastack > 0 && !hasPending { // early retransmit
lastsend := segment.resendts - segment.rto
if _itimediff(current, lastsend) >= int32(kcp.rx_rto/4) {
needsend = true
segment.xmit++
segment.fastack = 0
segment.resendts = current + segment.rto
change++
earlyRetransSegs++
}
}
if needsend {
segment.ts = current
segment.wnd = seg.wnd
segment.una = kcp.rcv_nxt
size := len(buffer) - len(ptr)
need := IKCP_OVERHEAD + len(segment.data)
if size+need > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
ptr = segment.encode(ptr)
copy(ptr, segment.data)
ptr = ptr[len(segment.data):]
if segment.xmit >= kcp.dead_link {
kcp.state = 0xFFFFFFFF
}
}
}
atomic.AddUint64(&DefaultSnmp.RetransSegs, lostSegs+fastRetransSegs+earlyRetransSegs)
atomic.AddUint64(&DefaultSnmp.LostSegs, lostSegs)
atomic.AddUint64(&DefaultSnmp.EarlyRetransSegs, earlyRetransSegs)
atomic.AddUint64(&DefaultSnmp.FastRetransSegs, fastRetransSegs)
// flush remaining segments
size := len(buffer) - len(ptr)
if size > 0 {
kcp.output(buffer, size)
}
// update ssthresh
// rate halving, https://tools.ietf.org/html/rfc6937
if change != 0 {
inflight := kcp.snd_nxt - kcp.snd_una
kcp.ssthresh = inflight / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = kcp.ssthresh + resent
kcp.incr = kcp.cwnd * kcp.mss
}
// congestion control, https://tools.ietf.org/html/rfc5681
if lost {
kcp.ssthresh = cwnd / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = 1
kcp.incr = kcp.mss
}
if kcp.cwnd < 1 {
kcp.cwnd = 1
kcp.incr = kcp.mss
}
}
// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// Check when to call it again (in the absence of Input/Send calls).
func (kcp *KCP) Update() {
var slap int32
current := currentMs()
if kcp.updated == 0 {
kcp.updated = 1
kcp.ts_flush = current
}
slap = _itimediff(current, kcp.ts_flush)
if slap >= 10000 || slap < -10000 {
kcp.ts_flush = current
slap = 0
}
if slap >= 0 {
kcp.ts_flush += kcp.interval
if _itimediff(current, kcp.ts_flush) >= 0 {
kcp.ts_flush = current + kcp.interval
}
kcp.flush()
}
}
// Check determines when you should next invoke Update:
// it returns the time, in milliseconds, at which Update should be called,
// assuming there are no Input/Send calls in the meantime. You can call
// Update at that time instead of calling it repeatedly.
// This is important for avoiding unnecessary Update invocations; use it to
// schedule Update (e.g. when implementing an epoll-like mechanism, or to
// optimize Update handling for massive numbers of KCP connections).
func (kcp *KCP) Check() uint32 {
current := currentMs()
ts_flush := kcp.ts_flush
tm_flush := int32(0x7fffffff)
tm_packet := int32(0x7fffffff)
minimal := uint32(0)
if kcp.updated == 0 {
return current
}
if _itimediff(current, ts_flush) >= 10000 ||
_itimediff(current, ts_flush) < -10000 {
ts_flush = current
}
if _itimediff(current, ts_flush) >= 0 {
return current
}
tm_flush = _itimediff(ts_flush, current)
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
diff := _itimediff(seg.resendts, current)
if diff <= 0 {
return current
}
if diff < tm_packet {
tm_packet = diff
}
}
minimal = uint32(tm_packet)
if tm_packet >= tm_flush {
minimal = uint32(tm_flush)
}
if minimal >= kcp.interval {
minimal = kcp.interval
}
return current + minimal
}
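// Illustrative scheduling loop built on Check (not upstream code; it assumes
// "time" were imported, and a kcp value as in the sketch after NewKCP):
//
//	for {
//		kcp.Update()
//		if d := _itimediff(kcp.Check(), currentMs()); d > 0 {
//			time.Sleep(time.Duration(d) * time.Millisecond)
//		}
//	}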
// SetMtu changes MTU size, default is 1400
func (kcp *KCP) SetMtu(mtu int) int {
if mtu < 50 || mtu < IKCP_OVERHEAD {
return -1
}
buffer := make([]byte, (mtu+IKCP_OVERHEAD)*3)
if buffer == nil {
return -2
}
kcp.mtu = uint32(mtu)
kcp.mss = kcp.mtu - IKCP_OVERHEAD
kcp.buffer = buffer
return 0
}
// NoDelay options
// fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
// nodelay: 0:disable(default), 1:enable
// interval: internal update timer interval in millisec, default is 100ms
// resend: 0:disable fast resend(default), 1:enable fast resend
// nc: 0:normal congestion control(default), 1:disable congestion control
func (kcp *KCP) NoDelay(nodelay, interval, resend, nc int) int {
if nodelay >= 0 {
kcp.nodelay = uint32(nodelay)
if nodelay != 0 {
kcp.rx_minrto = IKCP_RTO_NDL
} else {
kcp.rx_minrto = IKCP_RTO_MIN
}
}
if interval >= 0 {
if interval > 5000 {
interval = 5000
} else if interval < 10 {
interval = 10
}
kcp.interval = uint32(interval)
}
if resend >= 0 {
kcp.fastresend = int32(resend)
}
if nc >= 0 {
kcp.nocwnd = int32(nc)
}
return 0
}
// WndSize sets maximum window size: sndwnd=32, rcvwnd=32 by default
func (kcp *KCP) WndSize(sndwnd, rcvwnd int) int {
if sndwnd > 0 {
kcp.snd_wnd = uint32(sndwnd)
}
if rcvwnd > 0 {
kcp.rcv_wnd = uint32(rcvwnd)
}
return 0
}
// WaitSnd reports how many packets are waiting to be sent
func (kcp *KCP) WaitSnd() int {
return len(kcp.snd_buf) + len(kcp.snd_queue)
}

937
vendor/github.com/xtaci/kcp-go/sess.go generated vendored Normal file
View File

@@ -0,0 +1,937 @@
package kcp
import (
"crypto/rand"
"encoding/binary"
"hash/crc32"
"io"
"net"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"golang.org/x/net/ipv4"
)
type errTimeout struct {
error
}
func (errTimeout) Timeout() bool { return true }
func (errTimeout) Temporary() bool { return true }
func (errTimeout) Error() string { return "i/o timeout" }
const (
defaultWndSize = 128 // default window size, in packet
nonceSize = 16 // magic number
crcSize = 4 // 4bytes packet checksum
cryptHeaderSize = nonceSize + crcSize
mtuLimit = 2048
rxQueueLimit = 8192
rxFECMulti = 3 // FEC keeps rxFECMulti* (dataShard+parityShard) ordered packets in memory
defaultKeepAliveInterval = 10
)
const (
errBrokenPipe = "broken pipe"
errInvalidOperation = "invalid operation"
)
var (
xmitBuf sync.Pool
)
func init() {
xmitBuf.New = func() interface{} {
return make([]byte, mtuLimit)
}
}
type (
// UDPSession defines a KCP session implemented by UDP
UDPSession struct {
kcp *KCP // the core ARQ
l *Listener // point to server listener if it's a server socket
fec *FEC // forward error correction
conn net.PacketConn // the underlying packet socket
block BlockCrypt
remote net.Addr
rd time.Time // read deadline
wd time.Time // write deadline
sockbuff []byte // KCP receiving is packet-based; this buffer turns it into a stream
die chan struct{}
chReadEvent chan struct{}
chWriteEvent chan struct{}
chUDPOutput chan []byte
headerSize int
ackNoDelay bool
isClosed bool
keepAliveInterval int32
mu sync.Mutex
updateInterval int32
}
setReadBuffer interface {
SetReadBuffer(bytes int) error
}
setWriteBuffer interface {
SetWriteBuffer(bytes int) error
}
)
// newUDPSession creates a new UDP session for a client or server
func newUDPSession(conv uint32, dataShards, parityShards int, l *Listener, conn net.PacketConn, remote net.Addr, block BlockCrypt) *UDPSession {
sess := new(UDPSession)
sess.chUDPOutput = make(chan []byte)
sess.die = make(chan struct{})
sess.chReadEvent = make(chan struct{}, 1)
sess.chWriteEvent = make(chan struct{}, 1)
sess.remote = remote
sess.conn = conn
sess.keepAliveInterval = defaultKeepAliveInterval
sess.l = l
sess.block = block
sess.fec = newFEC(rxFECMulti*(dataShards+parityShards), dataShards, parityShards)
// calculate header size
if sess.block != nil {
sess.headerSize += cryptHeaderSize
}
if sess.fec != nil {
sess.headerSize += fecHeaderSizePlus2
}
sess.kcp = NewKCP(conv, func(buf []byte, size int) {
if size >= IKCP_OVERHEAD {
ext := xmitBuf.Get().([]byte)[:sess.headerSize+size]
copy(ext[sess.headerSize:], buf)
select {
case sess.chUDPOutput <- ext:
case <-sess.die:
}
}
})
sess.kcp.WndSize(defaultWndSize, defaultWndSize)
sess.kcp.SetMtu(IKCP_MTU_DEF - sess.headerSize)
go sess.updateTask()
go sess.outputTask()
if sess.l == nil { // it's a client connection
go sess.readLoop()
atomic.AddUint64(&DefaultSnmp.ActiveOpens, 1)
} else {
atomic.AddUint64(&DefaultSnmp.PassiveOpens, 1)
}
currestab := atomic.AddUint64(&DefaultSnmp.CurrEstab, 1)
maxconn := atomic.LoadUint64(&DefaultSnmp.MaxConn)
if currestab > maxconn {
atomic.CompareAndSwapUint64(&DefaultSnmp.MaxConn, maxconn, currestab)
}
return sess
}
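// For reference (derived from the headerSize accumulation above): the
// per-packet prefix reserves room for the crypto header (16-byte nonce +
// 4-byte CRC32) when block is non-nil and for the FEC header (6 bytes + 2-byte
// size) when fec is non-nil, presumably in that order, with the 24-byte KCP
// header and payload following; the actual byte filling happens in the output
// path elsewhere in this file.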
// Read implements the Conn Read method.
func (s *UDPSession) Read(b []byte) (n int, err error) {
for {
s.mu.Lock()
if len(s.sockbuff) > 0 { // copy from buffer
n = copy(b, s.sockbuff)
s.sockbuff = s.sockbuff[n:]
s.mu.Unlock()
return n, nil
}
if s.isClosed {
s.mu.Unlock()
return 0, errors.New(errBrokenPipe)
}
if !s.rd.IsZero() {
if time.Now().After(s.rd) { // timeout
s.mu.Unlock()
return 0, errTimeout{}
}
}
if n := s.kcp.PeekSize(); n > 0 { // data arrived
if len(b) >= n {
s.kcp.Recv(b)
} else {
buf := make([]byte, n)
s.kcp.Recv(buf)
n = copy(b, buf)
s.sockbuff = buf[n:] // store remaining bytes into sockbuff for next read
}
s.mu.Unlock()
atomic.AddUint64(&DefaultSnmp.BytesReceived, uint64(n))
return n, nil
}
var timeout *time.Timer
var c <-chan time.Time
if !s.rd.IsZero() {
delay := s.rd.Sub(time.Now())
timeout = time.NewTimer(delay)
c = timeout.C
}
s.mu.Unlock()
// wait for read event or timeout
select {
case <-s.chReadEvent:
case <-c:
case <-s.die:
}
if timeout != nil {
timeout.Stop()
}
}
}
// Write implements the Conn Write method.
func (s *UDPSession) Write(b []byte) (n int, err error) {
for {
s.mu.Lock()
if s.isClosed {
s.mu.Unlock()
return 0, errors.New(errBrokenPipe)
}
if !s.wd.IsZero() {
if time.Now().After(s.wd) { // timeout
s.mu.Unlock()
return 0, errTimeout{}
}
}
if s.kcp.WaitSnd() < int(s.kcp.snd_wnd) {
n = len(b)
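// writes larger than mss<<8 bytes are split across multiple kcp.Send calls below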
max := s.kcp.mss << 8
for {
if len(b) <= int(max) { // in most cases
s.kcp.Send(b)
break
} else {
s.kcp.Send(b[:max])
b = b[max:]
}
}
s.kcp.flush()
s.mu.Unlock()
atomic.AddUint64(&DefaultSnmp.BytesSent, uint64(n))
return n, nil
}
var timeout *time.Timer
var c <-chan time.Time
if !s.wd.IsZero() {
delay := s.wd.Sub(time.Now())
timeout = time.NewTimer(delay)
c = timeout.C
}
s.mu.Unlock()
// wait for write event or timeout
select {
case <-s.chWriteEvent:
case <-c:
case <-s.die:
}
if timeout != nil {
timeout.Stop()
}
}
}
// Close closes the connection.
func (s *UDPSession) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
if s.isClosed {
return errors.New(errBrokenPipe)
}
close(s.die)
s.isClosed = true
atomic.AddUint64(&DefaultSnmp.CurrEstab, ^uint64(0))
if s.l == nil { // client socket close
return s.conn.Close()
}
return nil
}
// LocalAddr returns the local network address. The Addr returned is shared by all invocations of LocalAddr, so do not modify it.
func (s *UDPSession) LocalAddr() net.Addr { return s.conn.LocalAddr() }
// RemoteAddr returns the remote network address. The Addr returned is shared by all invocations of RemoteAddr, so do not modify it.
func (s *UDPSession) RemoteAddr() net.Addr { return s.remote }
// SetDeadline sets the read and write deadlines associated with the connection. A zero time value disables the deadlines.
func (s *UDPSession) SetDeadline(t time.Time) error {
s.mu.Lock()
defer s.mu.Unlock()
s.rd = t
s.wd = t
return nil
}
// SetReadDeadline implements the Conn SetReadDeadline method.
func (s *UDPSession) SetReadDeadline(t time.Time) error {
s.mu.Lock()
defer s.mu.Unlock()
s.rd = t
return nil
}
// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (s *UDPSession) SetWriteDeadline(t time.Time) error {
s.mu.Lock()
defer s.mu.Unlock()
s.wd = t
return nil
}
// SetWindowSize sets the maximum send and receive window sizes
func (s *UDPSession) SetWindowSize(sndwnd, rcvwnd int) {
s.mu.Lock()
defer s.mu.Unlock()
s.kcp.WndSize(sndwnd, rcvwnd)
}
// SetMtu sets the maximum transmission unit
func (s *UDPSession) SetMtu(mtu int) {
s.mu.Lock()
defer s.mu.Unlock()
s.kcp.SetMtu(mtu - s.headerSize)
}
// SetStreamMode toggles the stream mode on/off
func (s *UDPSession) SetStreamMode(enable bool) {
s.mu.Lock()
defer s.mu.Unlock()
if enable {
s.kcp.stream = 1
} else {
s.kcp.stream = 0
}
}
// SetACKNoDelay changes the ACK flush option; set true to flush ACKs immediately
func (s *UDPSession) SetACKNoDelay(nodelay bool) {
s.mu.Lock()
defer s.mu.Unlock()
s.ackNoDelay = nodelay
}
// SetNoDelay calls NoDelay() of the underlying kcp and updates the flush interval
func (s *UDPSession) SetNoDelay(nodelay, interval, resend, nc int) {
s.mu.Lock()
defer s.mu.Unlock()
s.kcp.NoDelay(nodelay, interval, resend, nc)
atomic.StoreInt32(&s.updateInterval, int32(interval))
}
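// Usage sketch (illustrative, not part of this package): common tuning presets
// seen with KCP. The exact values below are assumptions for illustration, not
// recommendations made by this library:
//
//	sess.SetNoDelay(0, 40, 0, 0)  // conservative: higher latency, less traffic
//	sess.SetNoDelay(1, 10, 2, 1)  // aggressive: lower latency, more retransmissions
//	sess.SetWindowSize(128, 1024) // enlarge send/receive windows for throughput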
// SetDSCP sets the 6-bit DSCP field of the IP header; it has no effect if the session was accepted from a Listener
func (s *UDPSession) SetDSCP(dscp int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.l == nil {
if nc, ok := s.conn.(*ConnectedUDPConn); ok {
return ipv4.NewConn(nc.Conn).SetTOS(dscp << 2)
} else if nc, ok := s.conn.(net.Conn); ok {
return ipv4.NewConn(nc).SetTOS(dscp << 2)
}
}
return errors.New(errInvalidOperation)
}
// SetReadBuffer sets the socket read buffer; it has no effect if the session was accepted from a Listener
func (s *UDPSession) SetReadBuffer(bytes int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.l == nil {
if nc, ok := s.conn.(setReadBuffer); ok {
return nc.SetReadBuffer(bytes)
}
}
return errors.New(errInvalidOperation)
}
// SetWriteBuffer sets the socket write buffer; it has no effect if the session was accepted from a Listener
func (s *UDPSession) SetWriteBuffer(bytes int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.l == nil {
if nc, ok := s.conn.(setWriteBuffer); ok {
return nc.SetWriteBuffer(bytes)
}
}
return errors.New(errInvalidOperation)
}
// SetKeepAlive changes the per-connection NAT keepalive interval in seconds; 0 disables it, the default is 10s
func (s *UDPSession) SetKeepAlive(interval int) {
atomic.StoreInt32(&s.keepAliveInterval, int32(interval))
}
func (s *UDPSession) outputTask() {
// pre-compute header offsets
fecOffset := 0
if s.block != nil {
fecOffset = cryptHeaderSize
}
szOffset := fecOffset + fecHeaderSize
// fec data group
var cacheLine []byte
var fecGroup [][]byte
var fecCnt int
var fecMaxSize int
if s.fec != nil {
cacheLine = make([]byte, s.fec.shardSize*mtuLimit)
fecGroup = make([][]byte, s.fec.shardSize)
for k := range fecGroup {
fecGroup[k] = cacheLine[k*mtuLimit : (k+1)*mtuLimit]
}
}
// keepalive
var lastPing time.Time
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
// receive from an unbuffered (synchronous) channel;
// a buffered channel is avoided here to prevent bufferbloat
case ext := <-s.chUDPOutput:
var ecc [][]byte
if s.fec != nil {
s.fec.markData(ext[fecOffset:])
// write the explicit size, including the 2-byte size field itself
binary.LittleEndian.PutUint16(ext[szOffset:], uint16(len(ext[szOffset:])))
// copy data to fec group
sz := len(ext)
fecGroup[fecCnt] = fecGroup[fecCnt][:sz]
copy(fecGroup[fecCnt], ext)
fecCnt++
if sz > fecMaxSize {
fecMaxSize = sz
}
// calculate Reed-Solomon Erasure Code
if fecCnt == s.fec.dataShards {
for i := 0; i < s.fec.dataShards; i++ {
shard := fecGroup[i]
slen := len(shard)
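// xor the padding region with itself to zero-fill the bytes between slen and fecMaxSize (x ^ x == 0)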
xorBytes(shard[slen:fecMaxSize], shard[slen:fecMaxSize], shard[slen:fecMaxSize])
}
ecc = s.fec.calcECC(fecGroup, szOffset, fecMaxSize)
for k := range ecc {
s.fec.markFEC(ecc[k][fecOffset:])
ecc[k] = ecc[k][:fecMaxSize]
}
fecCnt = 0
fecMaxSize = 0
}
}
if s.block != nil {
io.ReadFull(rand.Reader, ext[:nonceSize])
checksum := crc32.ChecksumIEEE(ext[cryptHeaderSize:])
binary.LittleEndian.PutUint32(ext[nonceSize:], checksum)
s.block.Encrypt(ext, ext)
if ecc != nil {
for k := range ecc {
io.ReadFull(rand.Reader, ecc[k][:nonceSize])
checksum := crc32.ChecksumIEEE(ecc[k][cryptHeaderSize:])
binary.LittleEndian.PutUint32(ecc[k][nonceSize:], checksum)
s.block.Encrypt(ecc[k], ecc[k])
}
}
}
nbytes := 0
nsegs := 0
if n, err := s.conn.WriteTo(ext, s.remote); err == nil {
nbytes += n
nsegs++
}
if ecc != nil {
for k := range ecc {
if n, err := s.conn.WriteTo(ecc[k], s.remote); err == nil {
nbytes += n
nsegs++
}
}
}
atomic.AddUint64(&DefaultSnmp.OutSegs, uint64(nsegs))
atomic.AddUint64(&DefaultSnmp.OutBytes, uint64(nbytes))
xmitBuf.Put(ext)
case <-ticker.C: // NAT keep-alive
interval := time.Duration(atomic.LoadInt32(&s.keepAliveInterval)) * time.Second
if interval > 0 && time.Now().After(lastPing.Add(interval)) {
var rnd uint16
binary.Read(rand.Reader, binary.LittleEndian, &rnd)
sz := int(rnd)%(IKCP_MTU_DEF-s.headerSize-IKCP_OVERHEAD) + s.headerSize + IKCP_OVERHEAD
ping := make([]byte, sz) // randomized ping packet
io.ReadFull(rand.Reader, ping)
s.conn.WriteTo(ping, s.remote)
lastPing = time.Now()
}
case <-s.die:
return
}
}
}
// updateTask periodically flushes kcp and notifies writers when the send window has room
func (s *UDPSession) updateTask() {
tc := time.After(time.Duration(atomic.LoadInt32(&s.updateInterval)) * time.Millisecond)
for {
select {
case <-tc:
s.mu.Lock()
s.kcp.flush()
if s.kcp.WaitSnd() < int(s.kcp.snd_wnd) {
s.notifyWriteEvent()
}
s.mu.Unlock()
tc = time.After(time.Duration(atomic.LoadInt32(&s.updateInterval)) * time.Millisecond)
case <-s.die:
if s.l != nil { // has listener
select {
case s.l.chDeadlinks <- s.remote:
case <-s.l.die:
}
}
return
}
}
}
// GetConv returns the conversation id of the session
func (s *UDPSession) GetConv() uint32 {
return s.kcp.conv
}
func (s *UDPSession) notifyReadEvent() {
select {
case s.chReadEvent <- struct{}{}:
default:
}
}
func (s *UDPSession) notifyWriteEvent() {
select {
case s.chWriteEvent <- struct{}{}:
default:
}
}
func (s *UDPSession) kcpInput(data []byte) {
var kcpInErrors, fecErrs, fecRecovered, fecSegs uint64
if s.fec != nil {
f := s.fec.decode(data)
s.mu.Lock()
if f.flag == typeData {
if ret := s.kcp.Input(data[fecHeaderSizePlus2:], true); ret != 0 {
kcpInErrors++
}
}
if f.flag == typeData || f.flag == typeFEC {
if f.flag == typeFEC {
fecSegs++
}
if recovers := s.fec.input(f); recovers != nil {
for _, r := range recovers {
if len(r) >= 2 { // must carry at least the 2-byte size field
sz := binary.LittleEndian.Uint16(r)
if int(sz) <= len(r) && sz >= 2 {
if ret := s.kcp.Input(r[2:sz], false); ret == 0 {
fecRecovered++
} else {
kcpInErrors++
}
} else {
fecErrs++
}
} else {
fecErrs++
}
}
}
}
// notify reader
if n := s.kcp.PeekSize(); n > 0 {
s.notifyReadEvent()
}
if s.ackNoDelay {
s.kcp.flush()
}
s.mu.Unlock()
} else {
s.mu.Lock()
if ret := s.kcp.Input(data, true); ret != 0 {
kcpInErrors++
}
// notify reader
if n := s.kcp.PeekSize(); n > 0 {
s.notifyReadEvent()
}
if s.ackNoDelay {
s.kcp.flush()
}
s.mu.Unlock()
}
atomic.AddUint64(&DefaultSnmp.InSegs, 1)
atomic.AddUint64(&DefaultSnmp.InBytes, uint64(len(data)))
if fecSegs > 0 {
atomic.AddUint64(&DefaultSnmp.FECSegs, fecSegs)
}
if kcpInErrors > 0 {
atomic.AddUint64(&DefaultSnmp.KCPInErrors, kcpInErrors)
}
if fecErrs > 0 {
atomic.AddUint64(&DefaultSnmp.FECErrs, fecErrs)
}
if fecRecovered > 0 {
atomic.AddUint64(&DefaultSnmp.FECRecovered, fecRecovered)
}
}
func (s *UDPSession) receiver(ch chan []byte) {
for {
data := xmitBuf.Get().([]byte)[:mtuLimit]
if n, _, err := s.conn.ReadFrom(data); err == nil && n >= s.headerSize+IKCP_OVERHEAD {
select {
case ch <- data[:n]:
case <-s.die:
}
} else if err != nil {
return
} else {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
}
}
}
// read loop for client session
func (s *UDPSession) readLoop() {
chPacket := make(chan []byte, rxQueueLimit)
go s.receiver(chPacket)
for {
select {
case data := <-chPacket:
raw := data
dataValid := false
if s.block != nil {
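// encrypted packets are laid out as [nonce][crc32][payload]:
// decrypt in place, skip the nonce, then verify the checksum before feeding kcp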
s.block.Decrypt(data, data)
data = data[nonceSize:]
checksum := crc32.ChecksumIEEE(data[crcSize:])
if checksum == binary.LittleEndian.Uint32(data) {
data = data[crcSize:]
dataValid = true
} else {
atomic.AddUint64(&DefaultSnmp.InCsumErrors, 1)
}
} else if s.block == nil {
dataValid = true
}
if dataValid {
s.kcpInput(data)
}
xmitBuf.Put(raw)
case <-s.die:
return
}
}
}
type (
// Listener defines a server listening for connections
Listener struct {
block BlockCrypt
dataShards, parityShards int
fec *FEC // for fec init test
conn net.PacketConn
sessions map[string]*UDPSession
chAccepts chan *UDPSession
chDeadlinks chan net.Addr
headerSize int
die chan struct{}
rxbuf sync.Pool
rd atomic.Value
wd atomic.Value
}
packet struct {
from net.Addr
data []byte
}
)
// monitor reads incoming packets and dispatches them to the matching server-side sessions
func (l *Listener) monitor() {
chPacket := make(chan packet, rxQueueLimit)
go l.receiver(chPacket)
for {
select {
case p := <-chPacket:
raw := p.data
data := p.data
from := p.from
dataValid := false
if l.block != nil {
l.block.Decrypt(data, data)
data = data[nonceSize:]
checksum := crc32.ChecksumIEEE(data[crcSize:])
if checksum == binary.LittleEndian.Uint32(data) {
data = data[crcSize:]
dataValid = true
} else {
atomic.AddUint64(&DefaultSnmp.InCsumErrors, 1)
}
} else if l.block == nil {
dataValid = true
}
if dataValid {
addr := from.String()
s, ok := l.sessions[addr]
if !ok { // new session
var conv uint32
convValid := false
if l.fec != nil {
isfec := binary.LittleEndian.Uint16(data[4:])
if isfec == typeData {
conv = binary.LittleEndian.Uint32(data[fecHeaderSizePlus2:])
convValid = true
}
} else {
conv = binary.LittleEndian.Uint32(data)
convValid = true
}
if convValid {
s := newUDPSession(conv, l.dataShards, l.parityShards, l, l.conn, from, l.block)
s.kcpInput(data)
l.sessions[addr] = s
l.chAccepts <- s
}
} else {
s.kcpInput(data)
}
}
l.rxbuf.Put(raw)
case deadlink := <-l.chDeadlinks:
delete(l.sessions, deadlink.String())
case <-l.die:
return
}
}
}
func (l *Listener) receiver(ch chan packet) {
for {
data := l.rxbuf.Get().([]byte)[:mtuLimit]
if n, from, err := l.conn.ReadFrom(data); err == nil && n >= l.headerSize+IKCP_OVERHEAD {
ch <- packet{from, data[:n]}
} else if err != nil {
return
} else {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
}
}
}
// SetReadBuffer sets the socket read buffer for the Listener
func (l *Listener) SetReadBuffer(bytes int) error {
if nc, ok := l.conn.(setReadBuffer); ok {
return nc.SetReadBuffer(bytes)
}
return errors.New(errInvalidOperation)
}
// SetWriteBuffer sets the socket write buffer for the Listener
func (l *Listener) SetWriteBuffer(bytes int) error {
if nc, ok := l.conn.(setWriteBuffer); ok {
return nc.SetWriteBuffer(bytes)
}
return errors.New(errInvalidOperation)
}
// SetDSCP sets the 6-bit DSCP field of the IP header
func (l *Listener) SetDSCP(dscp int) error {
if nc, ok := l.conn.(net.Conn); ok {
return ipv4.NewConn(nc).SetTOS(dscp << 2)
}
return errors.New(errInvalidOperation)
}
// Accept implements the Accept method in the net.Listener interface; it waits for the next connection and returns a generic net.Conn.
func (l *Listener) Accept() (net.Conn, error) {
return l.AcceptKCP()
}
// AcceptKCP accepts a KCP connection
func (l *Listener) AcceptKCP() (*UDPSession, error) {
var timeout <-chan time.Time
if tdeadline, ok := l.rd.Load().(time.Time); ok && !tdeadline.IsZero() {
timeout = time.After(tdeadline.Sub(time.Now()))
}
select {
case <-timeout:
return nil, &errTimeout{}
case c := <-l.chAccepts:
return c, nil
case <-l.die:
return nil, errors.New(errBrokenPipe)
}
}
// SetDeadline sets the deadline associated with the listener. A zero time value disables the deadline.
func (l *Listener) SetDeadline(t time.Time) error {
l.SetReadDeadline(t)
l.SetWriteDeadline(t)
return nil
}
// SetReadDeadline implements the Conn SetReadDeadline method.
func (l *Listener) SetReadDeadline(t time.Time) error {
l.rd.Store(t)
return nil
}
// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (l *Listener) SetWriteDeadline(t time.Time) error {
l.wd.Store(t)
return nil
}
// Close stops listening on the UDP address. Already accepted connections are not closed.
func (l *Listener) Close() error {
close(l.die)
return l.conn.Close()
}
// Addr returns the listener's network address. The Addr returned is shared by all invocations of Addr, so do not modify it.
func (l *Listener) Addr() net.Addr {
return l.conn.LocalAddr()
}
// Listen listens for incoming KCP packets addressed to the local address laddr on the network "udp".
func Listen(laddr string) (net.Listener, error) {
return ListenWithOptions(laddr, nil, 0, 0)
}
// ListenWithOptions listens for incoming KCP packets addressed to the local address laddr on the network "udp" with packet encryption;
// dataShards and parityShards define the Reed-Solomon erasure coding parameters
func ListenWithOptions(laddr string, block BlockCrypt, dataShards, parityShards int) (*Listener, error) {
udpaddr, err := net.ResolveUDPAddr("udp", laddr)
if err != nil {
return nil, errors.Wrap(err, "net.ResolveUDPAddr")
}
conn, err := net.ListenUDP("udp", udpaddr)
if err != nil {
return nil, errors.Wrap(err, "net.ListenUDP")
}
return ServeConn(block, dataShards, parityShards, conn)
}
// ServeConn serves KCP protocol for a single packet connection.
func ServeConn(block BlockCrypt, dataShards, parityShards int, conn net.PacketConn) (*Listener, error) {
l := new(Listener)
l.conn = conn
l.sessions = make(map[string]*UDPSession)
l.chAccepts = make(chan *UDPSession, 1024)
l.chDeadlinks = make(chan net.Addr, 1024)
l.die = make(chan struct{})
l.dataShards = dataShards
l.parityShards = parityShards
l.block = block
l.fec = newFEC(rxFECMulti*(dataShards+parityShards), dataShards, parityShards)
l.rxbuf.New = func() interface{} {
return make([]byte, mtuLimit)
}
// calculate header size
if l.block != nil {
l.headerSize += cryptHeaderSize
}
if l.fec != nil {
l.headerSize += fecHeaderSizePlus2
}
go l.monitor()
return l, nil
}
// Dial connects to the remote address "raddr" on the network "udp"
func Dial(raddr string) (net.Conn, error) {
return DialWithOptions(raddr, nil, 0, 0)
}
// DialWithOptions connects to the remote address "raddr" on the network "udp" with packet encryption
func DialWithOptions(raddr string, block BlockCrypt, dataShards, parityShards int) (*UDPSession, error) {
udpaddr, err := net.ResolveUDPAddr("udp", raddr)
if err != nil {
return nil, errors.Wrap(err, "net.ResolveUDPAddr")
}
udpconn, err := net.DialUDP("udp", nil, udpaddr)
if err != nil {
return nil, errors.Wrap(err, "net.DialUDP")
}
return NewConn(raddr, block, dataShards, parityShards, &ConnectedUDPConn{udpconn, udpconn})
}
// NewConn establishes a session and talks KCP protocol over a packet connection.
func NewConn(raddr string, block BlockCrypt, dataShards, parityShards int, conn net.PacketConn) (*UDPSession, error) {
udpaddr, err := net.ResolveUDPAddr("udp", raddr)
if err != nil {
return nil, errors.Wrap(err, "net.ResolveUDPAddr")
}
var convid uint32
binary.Read(rand.Reader, binary.LittleEndian, &convid)
return newUDPSession(convid, dataShards, parityShards, nil, conn, udpaddr, block), nil
}
func currentMs() uint32 {
return uint32(time.Now().UnixNano() / int64(time.Millisecond))
}
// ConnectedUDPConn is a wrapper for net.UDPConn which converts WriteTo syscalls
// to Write syscalls that are 4 times faster on some OSes. This should only be
// used for connections that were produced by a net.Dial* call.
type ConnectedUDPConn struct {
*net.UDPConn
Conn net.Conn // underlying connection if any
}
// WriteTo redirects all writes to the Write syscall, which is 4 times faster.
func (c *ConnectedUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) {
return c.Write(b)
}
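// Usage sketch (illustrative, not part of this package): a minimal echo round
// trip using the Listen/Dial helpers above. The address, the shard counts and
// the omitted error handling are assumptions for brevity:
//
//	package main
//
//	import kcp "github.com/xtaci/kcp-go"
//
//	func main() {
//		ln, _ := kcp.ListenWithOptions("127.0.0.1:12345", nil, 10, 3)
//		go func() {
//			s, _ := ln.AcceptKCP()
//			buf := make([]byte, 4096)
//			n, _ := s.Read(buf)
//			s.Write(buf[:n]) // echo back
//		}()
//		c, _ := kcp.DialWithOptions("127.0.0.1:12345", nil, 10, 3)
//		c.Write([]byte("ping"))
//		reply := make([]byte, 4096)
//		n, _ := c.Read(reply)
//		_ = reply[:n]
//	}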

152
vendor/github.com/xtaci/kcp-go/snmp.go generated vendored Normal file
View File

@@ -0,0 +1,152 @@
package kcp
import (
"fmt"
"sync/atomic"
)
// Snmp defines network statistics indicators
type Snmp struct {
BytesSent uint64 // raw bytes sent
BytesReceived uint64
MaxConn uint64
ActiveOpens uint64
PassiveOpens uint64
CurrEstab uint64 // count of connections for now
InErrs uint64 // udp read errors
InCsumErrors uint64 // checksum errors from CRC32
KCPInErrors uint64 // packet input errors reported by kcp
InSegs uint64
OutSegs uint64
InBytes uint64 // udp bytes received
OutBytes uint64 // udp bytes sent
RetransSegs uint64
FastRetransSegs uint64
EarlyRetransSegs uint64
LostSegs uint64 // number of segs inferred as lost
RepeatSegs uint64 // number of segs duplicated
FECRecovered uint64 // correct packets recovered from FEC
FECErrs uint64 // incorrect packets recovered from FEC
FECSegs uint64 // FEC segments received
FECShortShards uint64 // number of data shards that were not sufficient for recovery
}
func newSnmp() *Snmp {
return new(Snmp)
}
func (s *Snmp) Header() []string {
return []string{
"BytesSent",
"BytesReceived",
"MaxConn",
"ActiveOpens",
"PassiveOpens",
"CurrEstab",
"InErrs",
"InCsumErrors",
"KCPInErrors",
"InSegs",
"OutSegs",
"InBytes",
"OutBytes",
"RetransSegs",
"FastRetransSegs",
"EarlyRetransSegs",
"LostSegs",
"RepeatSegs",
"FECSegs",
"FECErrs",
"FECRecovered",
"FECShortShards",
}
}
func (s *Snmp) ToSlice() []string {
snmp := s.Copy()
return []string{
fmt.Sprint(snmp.BytesSent),
fmt.Sprint(snmp.BytesReceived),
fmt.Sprint(snmp.MaxConn),
fmt.Sprint(snmp.ActiveOpens),
fmt.Sprint(snmp.PassiveOpens),
fmt.Sprint(snmp.CurrEstab),
fmt.Sprint(snmp.InErrs),
fmt.Sprint(snmp.InCsumErrors),
fmt.Sprint(snmp.KCPInErrors),
fmt.Sprint(snmp.InSegs),
fmt.Sprint(snmp.OutSegs),
fmt.Sprint(snmp.InBytes),
fmt.Sprint(snmp.OutBytes),
fmt.Sprint(snmp.RetransSegs),
fmt.Sprint(snmp.FastRetransSegs),
fmt.Sprint(snmp.EarlyRetransSegs),
fmt.Sprint(snmp.LostSegs),
fmt.Sprint(snmp.RepeatSegs),
fmt.Sprint(snmp.FECSegs),
fmt.Sprint(snmp.FECErrs),
fmt.Sprint(snmp.FECRecovered),
fmt.Sprint(snmp.FECShortShards),
}
}
// Copy makes a copy of the current Snmp snapshot
func (s *Snmp) Copy() *Snmp {
d := newSnmp()
d.BytesSent = atomic.LoadUint64(&s.BytesSent)
d.BytesReceived = atomic.LoadUint64(&s.BytesReceived)
d.MaxConn = atomic.LoadUint64(&s.MaxConn)
d.ActiveOpens = atomic.LoadUint64(&s.ActiveOpens)
d.PassiveOpens = atomic.LoadUint64(&s.PassiveOpens)
d.CurrEstab = atomic.LoadUint64(&s.CurrEstab)
d.InErrs = atomic.LoadUint64(&s.InErrs)
d.InCsumErrors = atomic.LoadUint64(&s.InCsumErrors)
d.KCPInErrors = atomic.LoadUint64(&s.KCPInErrors)
d.InSegs = atomic.LoadUint64(&s.InSegs)
d.OutSegs = atomic.LoadUint64(&s.OutSegs)
d.InBytes = atomic.LoadUint64(&s.InBytes)
d.OutBytes = atomic.LoadUint64(&s.OutBytes)
d.RetransSegs = atomic.LoadUint64(&s.RetransSegs)
d.FastRetransSegs = atomic.LoadUint64(&s.FastRetransSegs)
d.EarlyRetransSegs = atomic.LoadUint64(&s.EarlyRetransSegs)
d.LostSegs = atomic.LoadUint64(&s.LostSegs)
d.RepeatSegs = atomic.LoadUint64(&s.RepeatSegs)
d.FECSegs = atomic.LoadUint64(&s.FECSegs)
d.FECErrs = atomic.LoadUint64(&s.FECErrs)
d.FECRecovered = atomic.LoadUint64(&s.FECRecovered)
d.FECShortShards = atomic.LoadUint64(&s.FECShortShards)
return d
}
// Reset sets all values to zero
func (s *Snmp) Reset() {
atomic.StoreUint64(&s.BytesSent, 0)
atomic.StoreUint64(&s.BytesReceived, 0)
atomic.StoreUint64(&s.MaxConn, 0)
atomic.StoreUint64(&s.ActiveOpens, 0)
atomic.StoreUint64(&s.PassiveOpens, 0)
atomic.StoreUint64(&s.CurrEstab, 0)
atomic.StoreUint64(&s.InErrs, 0)
atomic.StoreUint64(&s.InCsumErrors, 0)
atomic.StoreUint64(&s.KCPInErrors, 0)
atomic.StoreUint64(&s.InSegs, 0)
atomic.StoreUint64(&s.OutSegs, 0)
atomic.StoreUint64(&s.InBytes, 0)
atomic.StoreUint64(&s.OutBytes, 0)
atomic.StoreUint64(&s.RetransSegs, 0)
atomic.StoreUint64(&s.FastRetransSegs, 0)
atomic.StoreUint64(&s.EarlyRetransSegs, 0)
atomic.StoreUint64(&s.LostSegs, 0)
atomic.StoreUint64(&s.RepeatSegs, 0)
atomic.StoreUint64(&s.FECSegs, 0)
atomic.StoreUint64(&s.FECErrs, 0)
atomic.StoreUint64(&s.FECRecovered, 0)
atomic.StoreUint64(&s.FECShortShards, 0)
}
// DefaultSnmp is the global KCP connection statistics collector
var DefaultSnmp *Snmp
func init() {
DefaultSnmp = newSnmp()
}
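// Usage sketch (illustrative): the global DefaultSnmp collector can be dumped
// as name/value pairs via Header and ToSlice:
//
//	header := DefaultSnmp.Header()
//	values := DefaultSnmp.ToSlice()
//	for i := range header {
//		fmt.Printf("%s: %s\n", header[i], values[i])
//	}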

111
vendor/github.com/xtaci/kcp-go/xor.go generated vendored Normal file
View File

@@ -0,0 +1,111 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kcp
import (
"runtime"
"unsafe"
)
const wordSize = int(unsafe.Sizeof(uintptr(0)))
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
// fastXORBytes xors in bulk. It only works on architectures that
// support unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
wordBytes := w * wordSize
fastXORWords(dst[:wordBytes], a[:wordBytes], b[:wordBytes])
}
for i := (n - n%wordSize); i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
func safeXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
ex := n % 8
for i := 0; i < ex; i++ {
dst[i] = a[i] ^ b[i]
}
for i := ex; i < n; i += 8 {
_dst := dst[i : i+8]
_a := a[i : i+8]
_b := b[i : i+8]
_dst[0] = _a[0] ^ _b[0]
_dst[1] = _a[1] ^ _b[1]
_dst[2] = _a[2] ^ _b[2]
_dst[3] = _a[3] ^ _b[3]
_dst[4] = _a[4] ^ _b[4]
_dst[5] = _a[5] ^ _b[5]
_dst[6] = _a[6] ^ _b[6]
_dst[7] = _a[7] ^ _b[7]
}
return n
}
// xorBytes xors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes xor'd.
func xorBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastXORBytes(dst, a, b)
} else {
// TODO(hanwen): if (dst, a, b) have common alignment
// we could still try fastXORBytes. It is not clear
// how often this happens, and it's only worth it if
// the block encryption itself is hardware
// accelerated.
return safeXORBytes(dst, a, b)
}
}
// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture).
// The arguments are assumed to be of equal length.
func fastXORWords(dst, a, b []byte) {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
n := len(b) / wordSize
ex := n % 8
for i := 0; i < ex; i++ {
dw[i] = aw[i] ^ bw[i]
}
for i := ex; i < n; i += 8 {
_dw := dw[i : i+8]
_aw := aw[i : i+8]
_bw := bw[i : i+8]
_dw[0] = _aw[0] ^ _bw[0]
_dw[1] = _aw[1] ^ _bw[1]
_dw[2] = _aw[2] ^ _bw[2]
_dw[3] = _aw[3] ^ _bw[3]
_dw[4] = _aw[4] ^ _bw[4]
_dw[5] = _aw[5] ^ _bw[5]
_dw[6] = _aw[6] ^ _bw[6]
_dw[7] = _aw[7] ^ _bw[7]
}
}
func xorWords(dst, a, b []byte) {
if supportsUnaligned {
fastXORWords(dst, a, b)
} else {
safeXORBytes(dst, a, b)
}
}
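// Usage sketch (illustrative): xorBytes operates on the overlapping length of
// its inputs and returns the number of bytes processed; dst must be at least
// that long:
//
//	a := []byte{0x0f, 0xf0, 0xff}
//	b := []byte{0xff, 0xff}
//	dst := make([]byte, 2)
//	n := xorBytes(dst, a, b) // n == 2, dst == []byte{0xf0, 0x0f}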

Some files were not shown because too many files have changed in this diff