vendor: Mega update all dependencies
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4080
This commit is contained in:
209
vendor/github.com/klauspost/reedsolomon/examples_test.go
generated
vendored
209
vendor/github.com/klauspost/reedsolomon/examples_test.go
generated
vendored
@@ -1,209 +0,0 @@
|
||||
package reedsolomon_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
|
||||
"github.com/klauspost/reedsolomon"
|
||||
)
|
||||
|
||||
func fillRandom(p []byte) {
|
||||
for i := 0; i < len(p); i += 7 {
|
||||
val := rand.Int63()
|
||||
for j := 0; i+j < len(p) && j < 7; j++ {
|
||||
p[i+j] = byte(val)
|
||||
val >>= 8
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Simple example of how to use all functions of the Encoder.
|
||||
// Note that all error checks have been removed to keep it short.
|
||||
func ExampleEncoder() {
|
||||
// Create some sample data
|
||||
var data = make([]byte, 250000)
|
||||
fillRandom(data)
|
||||
|
||||
// Create an encoder with 17 data and 3 parity slices.
|
||||
enc, _ := reedsolomon.New(17, 3)
|
||||
|
||||
// Split the data into shards
|
||||
shards, _ := enc.Split(data)
|
||||
|
||||
// Encode the parity set
|
||||
_ = enc.Encode(shards)
|
||||
|
||||
// Verify the parity set
|
||||
ok, _ := enc.Verify(shards)
|
||||
if ok {
|
||||
fmt.Println("ok")
|
||||
}
|
||||
|
||||
// Delete two shards
|
||||
shards[10], shards[11] = nil, nil
|
||||
|
||||
// Reconstruct the shards
|
||||
_ = enc.Reconstruct(shards)
|
||||
|
||||
// Verify the data set
|
||||
ok, _ = enc.Verify(shards)
|
||||
if ok {
|
||||
fmt.Println("ok")
|
||||
}
|
||||
// Output: ok
|
||||
// ok
|
||||
}
|
||||
|
||||
// This demonstrates that shards can be arbitrary sliced and
|
||||
// merged and still remain valid.
|
||||
func ExampleEncoder_slicing() {
|
||||
// Create some sample data
|
||||
var data = make([]byte, 250000)
|
||||
fillRandom(data)
|
||||
|
||||
// Create 5 data slices of 50000 elements each
|
||||
enc, _ := reedsolomon.New(5, 3)
|
||||
shards, _ := enc.Split(data)
|
||||
err := enc.Encode(shards)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Check that it verifies
|
||||
ok, err := enc.Verify(shards)
|
||||
if ok && err == nil {
|
||||
fmt.Println("encode ok")
|
||||
}
|
||||
|
||||
// Split the data set of 50000 elements into two of 25000
|
||||
splitA := make([][]byte, 8)
|
||||
splitB := make([][]byte, 8)
|
||||
|
||||
// Merge into a 100000 element set
|
||||
merged := make([][]byte, 8)
|
||||
|
||||
// Split/merge the shards
|
||||
for i := range shards {
|
||||
splitA[i] = shards[i][:25000]
|
||||
splitB[i] = shards[i][25000:]
|
||||
|
||||
// Concencate it to itself
|
||||
merged[i] = append(make([]byte, 0, len(shards[i])*2), shards[i]...)
|
||||
merged[i] = append(merged[i], shards[i]...)
|
||||
}
|
||||
|
||||
// Each part should still verify as ok.
|
||||
ok, err = enc.Verify(shards)
|
||||
if ok && err == nil {
|
||||
fmt.Println("splitA ok")
|
||||
}
|
||||
|
||||
ok, err = enc.Verify(splitB)
|
||||
if ok && err == nil {
|
||||
fmt.Println("splitB ok")
|
||||
}
|
||||
|
||||
ok, err = enc.Verify(merged)
|
||||
if ok && err == nil {
|
||||
fmt.Println("merge ok")
|
||||
}
|
||||
// Output: encode ok
|
||||
// splitA ok
|
||||
// splitB ok
|
||||
// merge ok
|
||||
}
|
||||
|
||||
// This demonstrates that shards can xor'ed and
|
||||
// still remain a valid set.
|
||||
//
|
||||
// The xor value must be the same for element 'n' in each shard,
|
||||
// except if you xor with a similar sized encoded shard set.
|
||||
func ExampleEncoder_xor() {
|
||||
// Create some sample data
|
||||
var data = make([]byte, 25000)
|
||||
fillRandom(data)
|
||||
|
||||
// Create 5 data slices of 5000 elements each
|
||||
enc, _ := reedsolomon.New(5, 3)
|
||||
shards, _ := enc.Split(data)
|
||||
err := enc.Encode(shards)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Check that it verifies
|
||||
ok, err := enc.Verify(shards)
|
||||
if !ok || err != nil {
|
||||
fmt.Println("falied initial verify", err)
|
||||
}
|
||||
|
||||
// Create an xor'ed set
|
||||
xored := make([][]byte, 8)
|
||||
|
||||
// We xor by the index, so you can see that the xor can change,
|
||||
// It should however be constant vertically through your slices.
|
||||
for i := range shards {
|
||||
xored[i] = make([]byte, len(shards[i]))
|
||||
for j := range xored[i] {
|
||||
xored[i][j] = shards[i][j] ^ byte(j&0xff)
|
||||
}
|
||||
}
|
||||
|
||||
// Each part should still verify as ok.
|
||||
ok, err = enc.Verify(xored)
|
||||
if ok && err == nil {
|
||||
fmt.Println("verified ok after xor")
|
||||
}
|
||||
// Output: verified ok after xor
|
||||
}
|
||||
|
||||
// This will show a simple stream encoder where we encode from
|
||||
// a []io.Reader which contain a reader for each shard.
|
||||
//
|
||||
// Input and output can be exchanged with files, network streams
|
||||
// or what may suit your needs.
|
||||
func ExampleStreamEncoder() {
|
||||
dataShards := 5
|
||||
parityShards := 2
|
||||
|
||||
// Create a StreamEncoder with the number of data and
|
||||
// parity shards.
|
||||
rs, err := reedsolomon.NewStream(dataShards, parityShards)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
shardSize := 50000
|
||||
|
||||
// Create input data shards.
|
||||
input := make([][]byte, dataShards)
|
||||
for s := range input {
|
||||
input[s] = make([]byte, shardSize)
|
||||
fillRandom(input[s])
|
||||
}
|
||||
|
||||
// Convert our buffers to io.Readers
|
||||
readers := make([]io.Reader, dataShards)
|
||||
for i := range readers {
|
||||
readers[i] = io.Reader(bytes.NewBuffer(input[i]))
|
||||
}
|
||||
|
||||
// Create our output io.Writers
|
||||
out := make([]io.Writer, parityShards)
|
||||
for i := range out {
|
||||
out[i] = ioutil.Discard
|
||||
}
|
||||
|
||||
// Encode from input to output.
|
||||
err = rs.Encode(readers, out)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("ok")
|
||||
// OUTPUT: ok
|
||||
}
|
||||
155
vendor/github.com/klauspost/reedsolomon/galois_test.go
generated
vendored
155
vendor/github.com/klauspost/reedsolomon/galois_test.go
generated
vendored
@@ -1,155 +0,0 @@
|
||||
/**
|
||||
* Unit tests for Galois
|
||||
*
|
||||
* Copyright 2015, Klaus Post
|
||||
* Copyright 2015, Backblaze, Inc.
|
||||
*/
|
||||
|
||||
package reedsolomon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAssociativity(t *testing.T) {
|
||||
for i := 0; i < 256; i++ {
|
||||
a := byte(i)
|
||||
for j := 0; j < 256; j++ {
|
||||
b := byte(j)
|
||||
for k := 0; k < 256; k++ {
|
||||
c := byte(k)
|
||||
x := galAdd(a, galAdd(b, c))
|
||||
y := galAdd(galAdd(a, b), c)
|
||||
if x != y {
|
||||
t.Fatal("add does not match:", x, "!=", y)
|
||||
}
|
||||
x = galMultiply(a, galMultiply(b, c))
|
||||
y = galMultiply(galMultiply(a, b), c)
|
||||
if x != y {
|
||||
t.Fatal("multiply does not match:", x, "!=", y)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIdentity(t *testing.T) {
|
||||
for i := 0; i < 256; i++ {
|
||||
a := byte(i)
|
||||
b := galAdd(a, 0)
|
||||
if a != b {
|
||||
t.Fatal("Add zero should yield same result", a, "!=", b)
|
||||
}
|
||||
b = galMultiply(a, 1)
|
||||
if a != b {
|
||||
t.Fatal("Mul by one should yield same result", a, "!=", b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInverse(t *testing.T) {
|
||||
for i := 0; i < 256; i++ {
|
||||
a := byte(i)
|
||||
b := galSub(0, a)
|
||||
c := galAdd(a, b)
|
||||
if c != 0 {
|
||||
t.Fatal("inverse sub/add", c, "!=", 0)
|
||||
}
|
||||
if a != 0 {
|
||||
b = galDivide(1, a)
|
||||
c = galMultiply(a, b)
|
||||
if c != 1 {
|
||||
t.Fatal("inverse div/mul", c, "!=", 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommutativity(t *testing.T) {
|
||||
for i := 0; i < 256; i++ {
|
||||
a := byte(i)
|
||||
for j := 0; j < 256; j++ {
|
||||
b := byte(j)
|
||||
x := galAdd(a, b)
|
||||
y := galAdd(b, a)
|
||||
if x != y {
|
||||
t.Fatal(x, "!= ", y)
|
||||
}
|
||||
x = galMultiply(a, b)
|
||||
y = galMultiply(b, a)
|
||||
if x != y {
|
||||
t.Fatal(x, "!= ", y)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistributivity(t *testing.T) {
|
||||
for i := 0; i < 256; i++ {
|
||||
a := byte(i)
|
||||
for j := 0; j < 256; j++ {
|
||||
b := byte(j)
|
||||
for k := 0; k < 256; k++ {
|
||||
c := byte(k)
|
||||
x := galMultiply(a, galAdd(b, c))
|
||||
y := galAdd(galMultiply(a, b), galMultiply(a, c))
|
||||
if x != y {
|
||||
t.Fatal(x, "!= ", y)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExp(t *testing.T) {
|
||||
for i := 0; i < 256; i++ {
|
||||
a := byte(i)
|
||||
power := byte(1)
|
||||
for j := 0; j < 256; j++ {
|
||||
x := galExp(a, j)
|
||||
if x != power {
|
||||
t.Fatal(x, "!=", power)
|
||||
}
|
||||
power = galMultiply(power, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGalois(t *testing.T) {
|
||||
// These values were copied output of the Python code.
|
||||
if galMultiply(3, 4) != 12 {
|
||||
t.Fatal("galMultiply(3, 4) != 12")
|
||||
}
|
||||
if galMultiply(7, 7) != 21 {
|
||||
t.Fatal("galMultiply(7, 7) != 21")
|
||||
}
|
||||
if galMultiply(23, 45) != 41 {
|
||||
t.Fatal("galMultiply(23, 45) != 41")
|
||||
}
|
||||
|
||||
// Test slices (>16 entries to test assembler)
|
||||
in := []byte{0, 1, 2, 3, 4, 5, 6, 10, 50, 100, 150, 174, 201, 255, 99, 32, 67, 85}
|
||||
out := make([]byte, len(in))
|
||||
galMulSlice(25, in, out, false, false)
|
||||
expect := []byte{0x0, 0x19, 0x32, 0x2b, 0x64, 0x7d, 0x56, 0xfa, 0xb8, 0x6d, 0xc7, 0x85, 0xc3, 0x1f, 0x22, 0x7, 0x25, 0xfe}
|
||||
if 0 != bytes.Compare(out, expect) {
|
||||
t.Errorf("got %#v, expected %#v", out, expect)
|
||||
}
|
||||
|
||||
galMulSlice(177, in, out, false, false)
|
||||
expect = []byte{0x0, 0xb1, 0x7f, 0xce, 0xfe, 0x4f, 0x81, 0x9e, 0x3, 0x6, 0xe8, 0x75, 0xbd, 0x40, 0x36, 0xa3, 0x95, 0xcb}
|
||||
if 0 != bytes.Compare(out, expect) {
|
||||
t.Errorf("got %#v, expected %#v", out, expect)
|
||||
}
|
||||
|
||||
if galExp(2, 2) != 4 {
|
||||
t.Fatal("galExp(2, 2) != 4")
|
||||
}
|
||||
if galExp(5, 20) != 235 {
|
||||
t.Fatal("galExp(5, 20) != 235")
|
||||
}
|
||||
if galExp(13, 7) != 43 {
|
||||
t.Fatal("galExp(13, 7) != 43")
|
||||
}
|
||||
}
|
||||
125
vendor/github.com/klauspost/reedsolomon/inversion_tree_test.go
generated
vendored
125
vendor/github.com/klauspost/reedsolomon/inversion_tree_test.go
generated
vendored
@@ -1,125 +0,0 @@
|
||||
/**
|
||||
* Unit tests for inversion tree.
|
||||
*
|
||||
* Copyright 2016, Peter Collins
|
||||
*/
|
||||
|
||||
package reedsolomon
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewInversionTree(t *testing.T) {
|
||||
tree := newInversionTree(3, 2)
|
||||
|
||||
children := len(tree.root.children)
|
||||
if children != 5 {
|
||||
t.Fatal("Root node children list length", children, "!=", 5)
|
||||
}
|
||||
|
||||
str := tree.root.matrix.String()
|
||||
expect := "[[1, 0, 0], [0, 1, 0], [0, 0, 1]]"
|
||||
if str != expect {
|
||||
t.Fatal(str, "!=", expect)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInvertedMatrix(t *testing.T) {
|
||||
tree := newInversionTree(3, 2)
|
||||
|
||||
matrix := tree.GetInvertedMatrix([]int{})
|
||||
str := matrix.String()
|
||||
expect := "[[1, 0, 0], [0, 1, 0], [0, 0, 1]]"
|
||||
if str != expect {
|
||||
t.Fatal(str, "!=", expect)
|
||||
}
|
||||
|
||||
matrix = tree.GetInvertedMatrix([]int{1})
|
||||
if matrix != nil {
|
||||
t.Fatal(matrix, "!= nil")
|
||||
}
|
||||
|
||||
matrix = tree.GetInvertedMatrix([]int{1, 2})
|
||||
if matrix != nil {
|
||||
t.Fatal(matrix, "!= nil")
|
||||
}
|
||||
|
||||
matrix, err := newMatrix(3, 3)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed initializing new Matrix : %s", err)
|
||||
}
|
||||
err = tree.InsertInvertedMatrix([]int{1}, matrix, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed inserting new Matrix : %s", err)
|
||||
}
|
||||
|
||||
cachedMatrix := tree.GetInvertedMatrix([]int{1})
|
||||
if cachedMatrix == nil {
|
||||
t.Fatal(cachedMatrix, "== nil")
|
||||
}
|
||||
if matrix.String() != cachedMatrix.String() {
|
||||
t.Fatal(matrix.String(), "!=", cachedMatrix.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertInvertedMatrix(t *testing.T) {
|
||||
tree := newInversionTree(3, 2)
|
||||
|
||||
matrix, err := newMatrix(3, 3)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed initializing new Matrix : %s", err)
|
||||
}
|
||||
err = tree.InsertInvertedMatrix([]int{1}, matrix, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed inserting new Matrix : %s", err)
|
||||
}
|
||||
|
||||
err = tree.InsertInvertedMatrix([]int{}, matrix, 5)
|
||||
if err == nil {
|
||||
t.Fatal("Should have failed inserting the root node matrix", matrix)
|
||||
}
|
||||
|
||||
matrix, err = newMatrix(3, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed initializing new Matrix : %s", err)
|
||||
}
|
||||
err = tree.InsertInvertedMatrix([]int{2}, matrix, 5)
|
||||
if err == nil {
|
||||
t.Fatal("Should have failed inserting a non-square matrix", matrix)
|
||||
}
|
||||
|
||||
matrix, err = newMatrix(3, 3)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed initializing new Matrix : %s", err)
|
||||
}
|
||||
err = tree.InsertInvertedMatrix([]int{0, 1}, matrix, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed inserting new Matrix : %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDoubleInsertInvertedMatrix(t *testing.T) {
|
||||
tree := newInversionTree(3, 2)
|
||||
|
||||
matrix, err := newMatrix(3, 3)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed initializing new Matrix : %s", err)
|
||||
}
|
||||
err = tree.InsertInvertedMatrix([]int{1}, matrix, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed inserting new Matrix : %s", err)
|
||||
}
|
||||
err = tree.InsertInvertedMatrix([]int{1}, matrix, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed inserting new Matrix : %s", err)
|
||||
}
|
||||
|
||||
cachedMatrix := tree.GetInvertedMatrix([]int{1})
|
||||
if cachedMatrix == nil {
|
||||
t.Fatal(cachedMatrix, "== nil")
|
||||
}
|
||||
if matrix.String() != cachedMatrix.String() {
|
||||
t.Fatal(matrix.String(), "!=", cachedMatrix.String())
|
||||
}
|
||||
}
|
||||
217
vendor/github.com/klauspost/reedsolomon/matrix_test.go
generated
vendored
217
vendor/github.com/klauspost/reedsolomon/matrix_test.go
generated
vendored
@@ -1,217 +0,0 @@
|
||||
/**
|
||||
* Unit tests for Matrix
|
||||
*
|
||||
* Copyright 2015, Klaus Post
|
||||
* Copyright 2015, Backblaze, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
package reedsolomon
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNewMatrix - Tests validate the result for invalid input and the allocations made by newMatrix method.
|
||||
func TestNewMatrix(t *testing.T) {
|
||||
testCases := []struct {
|
||||
rows int
|
||||
columns int
|
||||
|
||||
// flag to indicate whether the test should pass.
|
||||
shouldPass bool
|
||||
expectedResult matrix
|
||||
expectedErr error
|
||||
}{
|
||||
// Test case - 1.
|
||||
// Test case with a negative row size.
|
||||
{-1, 10, false, nil, errInvalidRowSize},
|
||||
// Test case - 2.
|
||||
// Test case with a negative column size.
|
||||
{10, -1, false, nil, errInvalidColSize},
|
||||
// Test case - 3.
|
||||
// Test case with negative value for both row and column size.
|
||||
{-1, -1, false, nil, errInvalidRowSize},
|
||||
// Test case - 4.
|
||||
// Test case with 0 value for row size.
|
||||
{0, 10, false, nil, errInvalidRowSize},
|
||||
// Test case - 5.
|
||||
// Test case with 0 value for column size.
|
||||
{-1, 0, false, nil, errInvalidRowSize},
|
||||
// Test case - 6.
|
||||
// Test case with 0 value for both row and column size.
|
||||
{0, 0, false, nil, errInvalidRowSize},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
actualResult, actualErr := newMatrix(testCase.rows, testCase.columns)
|
||||
if actualErr != nil && testCase.shouldPass {
|
||||
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, actualErr.Error())
|
||||
}
|
||||
if actualErr == nil && !testCase.shouldPass {
|
||||
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, testCase.expectedErr)
|
||||
}
|
||||
// Failed as expected, but does it fail for the expected reason.
|
||||
if actualErr != nil && !testCase.shouldPass {
|
||||
if testCase.expectedErr != actualErr {
|
||||
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, testCase.expectedErr, actualErr)
|
||||
}
|
||||
}
|
||||
// Test passes as expected, but the output values
|
||||
// are verified for correctness here.
|
||||
if actualErr == nil && testCase.shouldPass {
|
||||
if testCase.rows != len(actualResult) {
|
||||
// End the tests here if the the size doesn't match number of rows.
|
||||
t.Fatalf("Test %d: Expected the size of the row of the new matrix to be `%d`, but instead found `%d`", i+1, testCase.rows, len(actualResult))
|
||||
}
|
||||
// Iterating over each row and validating the size of the column.
|
||||
for j, row := range actualResult {
|
||||
// If the row check passes, verify the size of each columns.
|
||||
if testCase.columns != len(row) {
|
||||
t.Errorf("Test %d: Row %d: Expected the size of the column of the new matrix to be `%d`, but instead found `%d`", i+1, j+1, testCase.columns, len(row))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMatrixIdentity - validates the method for returning identity matrix of given size.
|
||||
func TestMatrixIdentity(t *testing.T) {
|
||||
m, err := identityMatrix(3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
str := m.String()
|
||||
expect := "[[1, 0, 0], [0, 1, 0], [0, 0, 1]]"
|
||||
if str != expect {
|
||||
t.Fatal(str, "!=", expect)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests validate the output of matix multiplication method.
|
||||
func TestMatrixMultiply(t *testing.T) {
|
||||
m1, err := newMatrixData(
|
||||
[][]byte{
|
||||
[]byte{1, 2},
|
||||
[]byte{3, 4},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
m2, err := newMatrixData(
|
||||
[][]byte{
|
||||
[]byte{5, 6},
|
||||
[]byte{7, 8},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
actual, err := m1.Multiply(m2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
str := actual.String()
|
||||
expect := "[[11, 22], [19, 42]]"
|
||||
if str != expect {
|
||||
t.Fatal(str, "!=", expect)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests validate the output of the method with computes inverse of matrix.
|
||||
func TestMatrixInverse(t *testing.T) {
|
||||
testCases := []struct {
|
||||
matrixData [][]byte
|
||||
// expected inverse matrix.
|
||||
expectedResult string
|
||||
// flag indicating whether the test should pass.
|
||||
shouldPass bool
|
||||
expectedErr error
|
||||
}{
|
||||
// Test case - 1.
|
||||
// Test case validating inverse of the input Matrix.
|
||||
{
|
||||
// input data to construct the matrix.
|
||||
[][]byte{
|
||||
[]byte{56, 23, 98},
|
||||
[]byte{3, 100, 200},
|
||||
[]byte{45, 201, 123},
|
||||
},
|
||||
// expected Inverse matrix.
|
||||
"[[175, 133, 33], [130, 13, 245], [112, 35, 126]]",
|
||||
// test is expected to pass.
|
||||
true,
|
||||
nil,
|
||||
},
|
||||
// Test case - 2.
|
||||
// Test case validating inverse of the input Matrix.
|
||||
{
|
||||
// input data to contruct the matrix.
|
||||
[][]byte{
|
||||
[]byte{1, 0, 0, 0, 0},
|
||||
[]byte{0, 1, 0, 0, 0},
|
||||
[]byte{0, 0, 0, 1, 0},
|
||||
[]byte{0, 0, 0, 0, 1},
|
||||
[]byte{7, 7, 6, 6, 1},
|
||||
},
|
||||
// expectedInverse matrix.
|
||||
"[[1, 0, 0, 0, 0]," +
|
||||
" [0, 1, 0, 0, 0]," +
|
||||
" [123, 123, 1, 122, 122]," +
|
||||
" [0, 0, 1, 0, 0]," +
|
||||
" [0, 0, 0, 1, 0]]",
|
||||
// test is expected to pass.
|
||||
true,
|
||||
nil,
|
||||
},
|
||||
// Test case with a non-square matrix.
|
||||
// expected to fail with errNotSquare.
|
||||
{
|
||||
[][]byte{
|
||||
[]byte{56, 23},
|
||||
[]byte{3, 100},
|
||||
[]byte{45, 201},
|
||||
},
|
||||
"",
|
||||
false,
|
||||
errNotSquare,
|
||||
},
|
||||
// Test case with singular matrix.
|
||||
// expected to fail with error errSingular.
|
||||
{
|
||||
|
||||
[][]byte{
|
||||
[]byte{4, 2},
|
||||
[]byte{12, 6},
|
||||
},
|
||||
"",
|
||||
false,
|
||||
errSingular,
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
m, err := newMatrixData(testCase.matrixData)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Failed initializing new Matrix : %s", i+1, err)
|
||||
}
|
||||
actualResult, actualErr := m.Invert()
|
||||
if actualErr != nil && testCase.shouldPass {
|
||||
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, actualErr.Error())
|
||||
}
|
||||
if actualErr == nil && !testCase.shouldPass {
|
||||
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, testCase.expectedErr)
|
||||
}
|
||||
// Failed as expected, but does it fail for the expected reason.
|
||||
if actualErr != nil && !testCase.shouldPass {
|
||||
if testCase.expectedErr != actualErr {
|
||||
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, testCase.expectedErr, actualErr)
|
||||
}
|
||||
}
|
||||
// Test passes as expected, but the output values
|
||||
// are verified for correctness here.
|
||||
if actualErr == nil && testCase.shouldPass {
|
||||
if testCase.expectedResult != actualResult.String() {
|
||||
t.Errorf("Test %d: The inverse matrix doesnt't match the expected result", i+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
761
vendor/github.com/klauspost/reedsolomon/reedsolomon_test.go
generated
vendored
761
vendor/github.com/klauspost/reedsolomon/reedsolomon_test.go
generated
vendored
@@ -1,761 +0,0 @@
|
||||
/**
|
||||
* Unit tests for ReedSolomon
|
||||
*
|
||||
* Copyright 2015, Klaus Post
|
||||
* Copyright 2015, Backblaze, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
package reedsolomon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testOpts() [][]Option {
|
||||
if !testing.Short() {
|
||||
return [][]Option{}
|
||||
}
|
||||
opts := [][]Option{
|
||||
{WithMaxGoroutines(1), WithMinSplitSize(500), withSSE3(false), withAVX2(false)},
|
||||
{WithMaxGoroutines(5000), WithMinSplitSize(50), withSSE3(false), withAVX2(false)},
|
||||
{WithMaxGoroutines(5000), WithMinSplitSize(500000), withSSE3(false), withAVX2(false)},
|
||||
{WithMaxGoroutines(1), WithMinSplitSize(500000), withSSE3(false), withAVX2(false)},
|
||||
}
|
||||
for _, o := range opts[:] {
|
||||
if defaultOptions.useSSSE3 {
|
||||
n := make([]Option, len(o), len(o)+1)
|
||||
copy(n, o)
|
||||
n = append(n, withSSE3(true))
|
||||
opts = append(opts, n)
|
||||
}
|
||||
if defaultOptions.useAVX2 {
|
||||
n := make([]Option, len(o), len(o)+1)
|
||||
copy(n, o)
|
||||
n = append(n, withAVX2(true))
|
||||
opts = append(opts, n)
|
||||
}
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
func TestEncoding(t *testing.T) {
|
||||
testEncoding(t)
|
||||
for _, o := range testOpts() {
|
||||
testEncoding(t, o...)
|
||||
}
|
||||
}
|
||||
|
||||
func testEncoding(t *testing.T, o ...Option) {
|
||||
perShard := 50000
|
||||
r, err := New(10, 3, o...)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shards := make([][]byte, 13)
|
||||
for s := range shards {
|
||||
shards[s] = make([]byte, perShard)
|
||||
}
|
||||
|
||||
rand.Seed(0)
|
||||
for s := 0; s < 13; s++ {
|
||||
fillRandom(shards[s])
|
||||
}
|
||||
|
||||
err = r.Encode(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ok, err := r.Verify(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatal("Verification failed")
|
||||
}
|
||||
|
||||
err = r.Encode(make([][]byte, 1))
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
|
||||
badShards := make([][]byte, 13)
|
||||
badShards[0] = make([]byte, 1)
|
||||
err = r.Encode(badShards)
|
||||
if err != ErrShardSize {
|
||||
t.Errorf("expected %v, got %v", ErrShardSize, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconstruct(t *testing.T) {
|
||||
testReconstruct(t)
|
||||
for _, o := range testOpts() {
|
||||
testReconstruct(t, o...)
|
||||
}
|
||||
}
|
||||
|
||||
func testReconstruct(t *testing.T, o ...Option) {
|
||||
perShard := 50000
|
||||
r, err := New(10, 3, o...)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shards := make([][]byte, 13)
|
||||
for s := range shards {
|
||||
shards[s] = make([]byte, perShard)
|
||||
}
|
||||
|
||||
rand.Seed(0)
|
||||
for s := 0; s < 13; s++ {
|
||||
fillRandom(shards[s])
|
||||
}
|
||||
|
||||
err = r.Encode(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Reconstruct with all shards present
|
||||
err = r.Reconstruct(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Reconstruct with 10 shards present
|
||||
shards[0] = nil
|
||||
shards[7] = nil
|
||||
shards[11] = nil
|
||||
|
||||
err = r.Reconstruct(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ok, err := r.Verify(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatal("Verification failed")
|
||||
}
|
||||
|
||||
// Reconstruct with 9 shards present (should fail)
|
||||
shards[0] = nil
|
||||
shards[4] = nil
|
||||
shards[7] = nil
|
||||
shards[11] = nil
|
||||
|
||||
err = r.Reconstruct(shards)
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
|
||||
err = r.Reconstruct(make([][]byte, 1))
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
err = r.Reconstruct(make([][]byte, 13))
|
||||
if err != ErrShardNoData {
|
||||
t.Errorf("expected %v, got %v", ErrShardNoData, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerify(t *testing.T) {
|
||||
testVerify(t)
|
||||
for _, o := range testOpts() {
|
||||
testVerify(t, o...)
|
||||
}
|
||||
}
|
||||
|
||||
func testVerify(t *testing.T, o ...Option) {
|
||||
perShard := 33333
|
||||
r, err := New(10, 4, o...)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shards := make([][]byte, 14)
|
||||
for s := range shards {
|
||||
shards[s] = make([]byte, perShard)
|
||||
}
|
||||
|
||||
rand.Seed(0)
|
||||
for s := 0; s < 10; s++ {
|
||||
fillRandom(shards[s])
|
||||
}
|
||||
|
||||
err = r.Encode(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ok, err := r.Verify(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatal("Verification failed")
|
||||
}
|
||||
|
||||
// Put in random data. Verification should fail
|
||||
fillRandom(shards[10])
|
||||
ok, err = r.Verify(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if ok {
|
||||
t.Fatal("Verification did not fail")
|
||||
}
|
||||
// Re-encode
|
||||
err = r.Encode(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Fill a data segment with random data
|
||||
fillRandom(shards[0])
|
||||
ok, err = r.Verify(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if ok {
|
||||
t.Fatal("Verification did not fail")
|
||||
}
|
||||
|
||||
_, err = r.Verify(make([][]byte, 1))
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
|
||||
_, err = r.Verify(make([][]byte, 14))
|
||||
if err != ErrShardNoData {
|
||||
t.Errorf("expected %v, got %v", ErrShardNoData, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOneEncode(t *testing.T) {
|
||||
codec, err := New(5, 5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shards := [][]byte{
|
||||
{0, 1},
|
||||
{4, 5},
|
||||
{2, 3},
|
||||
{6, 7},
|
||||
{8, 9},
|
||||
{0, 0},
|
||||
{0, 0},
|
||||
{0, 0},
|
||||
{0, 0},
|
||||
{0, 0},
|
||||
}
|
||||
codec.Encode(shards)
|
||||
if shards[5][0] != 12 || shards[5][1] != 13 {
|
||||
t.Fatal("shard 5 mismatch")
|
||||
}
|
||||
if shards[6][0] != 10 || shards[6][1] != 11 {
|
||||
t.Fatal("shard 6 mismatch")
|
||||
}
|
||||
if shards[7][0] != 14 || shards[7][1] != 15 {
|
||||
t.Fatal("shard 7 mismatch")
|
||||
}
|
||||
if shards[8][0] != 90 || shards[8][1] != 91 {
|
||||
t.Fatal("shard 8 mismatch")
|
||||
}
|
||||
if shards[9][0] != 94 || shards[9][1] != 95 {
|
||||
t.Fatal("shard 9 mismatch")
|
||||
}
|
||||
|
||||
ok, err := codec.Verify(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatal("did not verify")
|
||||
}
|
||||
shards[8][0]++
|
||||
ok, err = codec.Verify(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if ok {
|
||||
t.Fatal("verify did not fail as expected")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func fillRandom(p []byte) {
|
||||
for i := 0; i < len(p); i += 7 {
|
||||
val := rand.Int63()
|
||||
for j := 0; i+j < len(p) && j < 7; j++ {
|
||||
p[i+j] = byte(val)
|
||||
val >>= 8
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkEncode(b *testing.B, dataShards, parityShards, shardSize int) {
|
||||
r, err := New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
shards := make([][]byte, dataShards+parityShards)
|
||||
for s := range shards {
|
||||
shards[s] = make([]byte, shardSize)
|
||||
}
|
||||
|
||||
rand.Seed(0)
|
||||
for s := 0; s < dataShards; s++ {
|
||||
fillRandom(shards[s])
|
||||
}
|
||||
|
||||
b.SetBytes(int64(shardSize * dataShards))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err = r.Encode(shards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncode10x2x10000(b *testing.B) {
|
||||
benchmarkEncode(b, 10, 2, 10000)
|
||||
}
|
||||
|
||||
func BenchmarkEncode100x20x10000(b *testing.B) {
|
||||
benchmarkEncode(b, 100, 20, 10000)
|
||||
}
|
||||
|
||||
func BenchmarkEncode17x3x1M(b *testing.B) {
|
||||
benchmarkEncode(b, 17, 3, 1024*1024)
|
||||
}
|
||||
|
||||
// Benchmark 10 data shards and 4 parity shards with 16MB each.
|
||||
func BenchmarkEncode10x4x16M(b *testing.B) {
|
||||
benchmarkEncode(b, 10, 4, 16*1024*1024)
|
||||
}
|
||||
|
||||
// Benchmark 5 data shards and 2 parity shards with 1MB each.
|
||||
func BenchmarkEncode5x2x1M(b *testing.B) {
|
||||
benchmarkEncode(b, 5, 2, 1024*1024)
|
||||
}
|
||||
|
||||
// Benchmark 1 data shards and 2 parity shards with 1MB each.
|
||||
func BenchmarkEncode10x2x1M(b *testing.B) {
|
||||
benchmarkEncode(b, 10, 2, 1024*1024)
|
||||
}
|
||||
|
||||
// Benchmark 10 data shards and 4 parity shards with 1MB each.
|
||||
func BenchmarkEncode10x4x1M(b *testing.B) {
|
||||
benchmarkEncode(b, 10, 4, 1024*1024)
|
||||
}
|
||||
|
||||
// Benchmark 50 data shards and 20 parity shards with 1MB each.
|
||||
func BenchmarkEncode50x20x1M(b *testing.B) {
|
||||
benchmarkEncode(b, 50, 20, 1024*1024)
|
||||
}
|
||||
|
||||
// Benchmark 17 data shards and 3 parity shards with 16MB each.
|
||||
func BenchmarkEncode17x3x16M(b *testing.B) {
|
||||
benchmarkEncode(b, 17, 3, 16*1024*1024)
|
||||
}
|
||||
|
||||
func benchmarkVerify(b *testing.B, dataShards, parityShards, shardSize int) {
|
||||
r, err := New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
shards := make([][]byte, parityShards+dataShards)
|
||||
for s := range shards {
|
||||
shards[s] = make([]byte, shardSize)
|
||||
}
|
||||
|
||||
rand.Seed(0)
|
||||
for s := 0; s < dataShards; s++ {
|
||||
fillRandom(shards[s])
|
||||
}
|
||||
err = r.Encode(shards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.SetBytes(int64(shardSize * dataShards))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err = r.Verify(shards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmark 10 data slices with 2 parity slices holding 10000 bytes each
func BenchmarkVerify10x2x10000(b *testing.B) {
	benchmarkVerify(b, 10, 2, 10000)
}

// Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
// NOTE(review): the function name says 50000 but the shard size is 100000.
func BenchmarkVerify50x5x50000(b *testing.B) {
	benchmarkVerify(b, 50, 5, 100000)
}

// Benchmark 10 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkVerify10x2x1M(b *testing.B) {
	benchmarkVerify(b, 10, 2, 1024*1024)
}

// Benchmark 5 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkVerify5x2x1M(b *testing.B) {
	benchmarkVerify(b, 5, 2, 1024*1024)
}

// Benchmark 10 data slices with 4 parity slices holding 1MB bytes each
func BenchmarkVerify10x4x1M(b *testing.B) {
	benchmarkVerify(b, 10, 4, 1024*1024)
}

// Benchmark 50 data slices with 20 parity slices holding 1MB bytes each.
// (The original comment wrongly said 5 data / 2 parity.)
func BenchmarkVerify50x20x1M(b *testing.B) {
	benchmarkVerify(b, 50, 20, 1024*1024)
}

// Benchmark 10 data slices with 4 parity slices holding 16MB bytes each
func BenchmarkVerify10x4x16M(b *testing.B) {
	benchmarkVerify(b, 10, 4, 16*1024*1024)
}
|
||||
|
||||
func corruptRandom(shards [][]byte, dataShards, parityShards int) {
|
||||
shardsToCorrupt := rand.Intn(parityShards)
|
||||
for i := 1; i <= shardsToCorrupt; i++ {
|
||||
shards[rand.Intn(dataShards+parityShards)] = nil
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkReconstruct(b *testing.B, dataShards, parityShards, shardSize int) {
|
||||
r, err := New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
shards := make([][]byte, parityShards+dataShards)
|
||||
for s := range shards {
|
||||
shards[s] = make([]byte, shardSize)
|
||||
}
|
||||
|
||||
rand.Seed(0)
|
||||
for s := 0; s < dataShards; s++ {
|
||||
fillRandom(shards[s])
|
||||
}
|
||||
err = r.Encode(shards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.SetBytes(int64(shardSize * dataShards))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
corruptRandom(shards, dataShards, parityShards)
|
||||
|
||||
err = r.Reconstruct(shards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
ok, err := r.Verify(shards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
b.Fatal("Verification failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmark 10 data slices with 2 parity slices holding 10000 bytes each
func BenchmarkReconstruct10x2x10000(b *testing.B) {
	benchmarkReconstruct(b, 10, 2, 10000)
}

// Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
// NOTE(review): the function name says 50000 but the shard size is 100000.
func BenchmarkReconstruct50x5x50000(b *testing.B) {
	benchmarkReconstruct(b, 50, 5, 100000)
}

// Benchmark 10 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkReconstruct10x2x1M(b *testing.B) {
	benchmarkReconstruct(b, 10, 2, 1024*1024)
}

// Benchmark 5 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkReconstruct5x2x1M(b *testing.B) {
	benchmarkReconstruct(b, 5, 2, 1024*1024)
}

// Benchmark 10 data slices with 4 parity slices holding 1MB bytes each
func BenchmarkReconstruct10x4x1M(b *testing.B) {
	benchmarkReconstruct(b, 10, 4, 1024*1024)
}

// Benchmark 50 data slices with 20 parity slices holding 1MB bytes each.
// (The original comment wrongly said 5 data / 2 parity.)
func BenchmarkReconstruct50x20x1M(b *testing.B) {
	benchmarkReconstruct(b, 50, 20, 1024*1024)
}

// Benchmark 10 data slices with 4 parity slices holding 16MB bytes each
func BenchmarkReconstruct10x4x16M(b *testing.B) {
	benchmarkReconstruct(b, 10, 4, 16*1024*1024)
}
|
||||
|
||||
func benchmarkReconstructP(b *testing.B, dataShards, parityShards, shardSize int) {
|
||||
r, err := New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.SetBytes(int64(shardSize * dataShards))
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
b.ResetTimer()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
shards := make([][]byte, parityShards+dataShards)
|
||||
for s := range shards {
|
||||
shards[s] = make([]byte, shardSize)
|
||||
}
|
||||
|
||||
rand.Seed(0)
|
||||
for s := 0; s < dataShards; s++ {
|
||||
fillRandom(shards[s])
|
||||
}
|
||||
err = r.Encode(shards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
for pb.Next() {
|
||||
corruptRandom(shards, dataShards, parityShards)
|
||||
|
||||
err = r.Reconstruct(shards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
ok, err := r.Verify(shards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
b.Fatal("Verification failed")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Benchmark 10 data slices with 2 parity slices holding 10000 bytes each
func BenchmarkReconstructP10x2x10000(b *testing.B) {
	benchmarkReconstructP(b, 10, 2, 10000)
}

// Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
// NOTE(review): the function name says 50000 but the shard size is 100000.
func BenchmarkReconstructP50x5x50000(b *testing.B) {
	benchmarkReconstructP(b, 50, 5, 100000)
}

// Benchmark 10 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkReconstructP10x2x1M(b *testing.B) {
	benchmarkReconstructP(b, 10, 2, 1024*1024)
}

// Benchmark 5 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkReconstructP5x2x1M(b *testing.B) {
	benchmarkReconstructP(b, 5, 2, 1024*1024)
}

// Benchmark 10 data slices with 4 parity slices holding 1MB bytes each
func BenchmarkReconstructP10x4x1M(b *testing.B) {
	benchmarkReconstructP(b, 10, 4, 1024*1024)
}

// Benchmark 50 data slices with 20 parity slices holding 1MB bytes each.
// (The original comment wrongly said 5 data / 2 parity.)
func BenchmarkReconstructP50x20x1M(b *testing.B) {
	benchmarkReconstructP(b, 50, 20, 1024*1024)
}

// Benchmark 10 data slices with 4 parity slices holding 16MB bytes each
func BenchmarkReconstructP10x4x16M(b *testing.B) {
	benchmarkReconstructP(b, 10, 4, 16*1024*1024)
}
|
||||
|
||||
func TestEncoderReconstruct(t *testing.T) {
|
||||
testEncoderReconstruct(t)
|
||||
for _, o := range testOpts() {
|
||||
testEncoderReconstruct(t, o...)
|
||||
}
|
||||
}
|
||||
|
||||
func testEncoderReconstruct(t *testing.T, o ...Option) {
|
||||
// Create some sample data
|
||||
var data = make([]byte, 250000)
|
||||
fillRandom(data)
|
||||
|
||||
// Create 5 data slices of 50000 elements each
|
||||
enc, err := New(5, 3, o...)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shards, err := enc.Split(data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = enc.Encode(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check that it verifies
|
||||
ok, err := enc.Verify(shards)
|
||||
if !ok || err != nil {
|
||||
t.Fatal("not ok:", ok, "err:", err)
|
||||
}
|
||||
|
||||
// Delete a shard
|
||||
shards[0] = nil
|
||||
|
||||
// Should reconstruct
|
||||
err = enc.Reconstruct(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check that it verifies
|
||||
ok, err = enc.Verify(shards)
|
||||
if !ok || err != nil {
|
||||
t.Fatal("not ok:", ok, "err:", err)
|
||||
}
|
||||
|
||||
// Recover original bytes
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.Join(buf, shards, len(data))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), data) {
|
||||
t.Fatal("recovered bytes do not match")
|
||||
}
|
||||
|
||||
// Corrupt a shard
|
||||
shards[0] = nil
|
||||
shards[1][0], shards[1][500] = 75, 75
|
||||
|
||||
// Should reconstruct (but with corrupted data)
|
||||
err = enc.Reconstruct(shards)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check that it verifies
|
||||
ok, err = enc.Verify(shards)
|
||||
if ok || err != nil {
|
||||
t.Fatal("error or ok:", ok, "err:", err)
|
||||
}
|
||||
|
||||
// Recovered data should not match original
|
||||
buf.Reset()
|
||||
err = enc.Join(buf, shards, len(data))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes.Equal(buf.Bytes(), data) {
|
||||
t.Fatal("corrupted data matches original")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitJoin(t *testing.T) {
|
||||
var data = make([]byte, 250000)
|
||||
rand.Seed(0)
|
||||
fillRandom(data)
|
||||
|
||||
enc, _ := New(5, 3)
|
||||
shards, err := enc.Split(data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = enc.Split([]byte{})
|
||||
if err != ErrShortData {
|
||||
t.Errorf("expected %v, got %v", ErrShortData, err)
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.Join(buf, shards, 50)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), data[:50]) {
|
||||
t.Fatal("recovered data does match original")
|
||||
}
|
||||
|
||||
err = enc.Join(buf, [][]byte{}, 0)
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
|
||||
err = enc.Join(buf, shards, len(data)+1)
|
||||
if err != ErrShortData {
|
||||
t.Errorf("expected %v, got %v", ErrShortData, err)
|
||||
}
|
||||
|
||||
shards[0] = nil
|
||||
err = enc.Join(buf, shards, len(data))
|
||||
if err != ErrReconstructRequired {
|
||||
t.Errorf("expected %v, got %v", ErrReconstructRequired, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCodeSomeShards runs the internal codeSomeShards routine both with
// GOMAXPROCS(1) and with all CPUs available, to exercise the serial and
// the concurrent code paths. It only checks that neither path panics;
// output correctness is covered by the Encode/Verify tests.
func TestCodeSomeShards(t *testing.T) {
	var data = make([]byte, 250000)
	fillRandom(data)
	enc, _ := New(5, 3)
	r := enc.(*reedSolomon) // need to access private methods
	shards, _ := enc.Split(data)

	// Force the single-goroutine path.
	old := runtime.GOMAXPROCS(1)
	r.codeSomeShards(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))

	// hopefully more than 1 CPU
	runtime.GOMAXPROCS(runtime.NumCPU())
	r.codeSomeShards(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))

	// reset MAXPROCS, otherwise testing complains
	runtime.GOMAXPROCS(old)
}
|
||||
|
||||
func TestAllMatrices(t *testing.T) {
|
||||
t.Skip("Skipping slow matrix check")
|
||||
for i := 1; i < 257; i++ {
|
||||
_, err := New(i, i)
|
||||
if err != nil {
|
||||
t.Fatal("creating matrix size", i, i, ":", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
tests := []struct {
|
||||
data, parity int
|
||||
err error
|
||||
}{
|
||||
{127, 127, nil},
|
||||
{256, 256, ErrMaxShardNum},
|
||||
|
||||
{0, 1, ErrInvShardNum},
|
||||
{1, 0, ErrInvShardNum},
|
||||
{257, 1, ErrMaxShardNum},
|
||||
|
||||
// overflow causes r.Shards to be negative
|
||||
{256, int(^uint(0) >> 1), errInvalidRowSize},
|
||||
}
|
||||
for _, test := range tests {
|
||||
_, err := New(test.data, test.parity)
|
||||
if err != test.err {
|
||||
t.Errorf("New(%v, %v): expected %v, got %v", test.data, test.parity, test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
604
vendor/github.com/klauspost/reedsolomon/streaming_test.go
generated
vendored
604
vendor/github.com/klauspost/reedsolomon/streaming_test.go
generated
vendored
@@ -1,604 +0,0 @@
|
||||
/**
|
||||
* Unit tests for ReedSolomon Streaming API
|
||||
*
|
||||
* Copyright 2015, Klaus Post
|
||||
*/
|
||||
|
||||
package reedsolomon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStreamEncoding(t *testing.T) {
|
||||
perShard := 10 << 20
|
||||
if testing.Short() {
|
||||
perShard = 50000
|
||||
}
|
||||
r, err := NewStream(10, 3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rand.Seed(0)
|
||||
input := randomBytes(10, perShard)
|
||||
data := toBuffers(input)
|
||||
par := emptyBuffers(3)
|
||||
|
||||
err = r.Encode(toReaders(data), toWriters(par))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Reset Data
|
||||
data = toBuffers(input)
|
||||
|
||||
all := append(toReaders(data), toReaders(par)...)
|
||||
ok, err := r.Verify(all)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatal("Verification failed")
|
||||
}
|
||||
|
||||
err = r.Encode(toReaders(emptyBuffers(1)), toWriters(emptyBuffers(1)))
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
err = r.Encode(toReaders(emptyBuffers(10)), toWriters(emptyBuffers(1)))
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
err = r.Encode(toReaders(emptyBuffers(10)), toWriters(emptyBuffers(3)))
|
||||
if err != ErrShardNoData {
|
||||
t.Errorf("expected %v, got %v", ErrShardNoData, err)
|
||||
}
|
||||
|
||||
badShards := emptyBuffers(10)
|
||||
badShards[0] = randomBuffer(123)
|
||||
err = r.Encode(toReaders(badShards), toWriters(emptyBuffers(3)))
|
||||
if err != ErrShardSize {
|
||||
t.Errorf("expected %v, got %v", ErrShardSize, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamEncodingConcurrent(t *testing.T) {
|
||||
perShard := 10 << 20
|
||||
if testing.Short() {
|
||||
perShard = 50000
|
||||
}
|
||||
r, err := NewStreamC(10, 3, true, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rand.Seed(0)
|
||||
input := randomBytes(10, perShard)
|
||||
data := toBuffers(input)
|
||||
par := emptyBuffers(3)
|
||||
|
||||
err = r.Encode(toReaders(data), toWriters(par))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Reset Data
|
||||
data = toBuffers(input)
|
||||
|
||||
all := append(toReaders(data), toReaders(par)...)
|
||||
ok, err := r.Verify(all)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatal("Verification failed")
|
||||
}
|
||||
|
||||
err = r.Encode(toReaders(emptyBuffers(1)), toWriters(emptyBuffers(1)))
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
err = r.Encode(toReaders(emptyBuffers(10)), toWriters(emptyBuffers(1)))
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
err = r.Encode(toReaders(emptyBuffers(10)), toWriters(emptyBuffers(3)))
|
||||
if err != ErrShardNoData {
|
||||
t.Errorf("expected %v, got %v", ErrShardNoData, err)
|
||||
}
|
||||
|
||||
badShards := emptyBuffers(10)
|
||||
badShards[0] = randomBuffer(123)
|
||||
badShards[1] = randomBuffer(123)
|
||||
err = r.Encode(toReaders(badShards), toWriters(emptyBuffers(3)))
|
||||
if err != ErrShardSize {
|
||||
t.Errorf("expected %v, got %v", ErrShardSize, err)
|
||||
}
|
||||
}
|
||||
|
||||
func randomBuffer(length int) *bytes.Buffer {
|
||||
b := make([]byte, length)
|
||||
fillRandom(b)
|
||||
return bytes.NewBuffer(b)
|
||||
}
|
||||
|
||||
func randomBytes(n, length int) [][]byte {
|
||||
bufs := make([][]byte, n)
|
||||
for j := range bufs {
|
||||
bufs[j] = make([]byte, length)
|
||||
fillRandom(bufs[j])
|
||||
}
|
||||
return bufs
|
||||
}
|
||||
|
||||
// toBuffers wraps each byte slice in a bytes.Buffer.
func toBuffers(in [][]byte) []*bytes.Buffer {
	out := make([]*bytes.Buffer, len(in))
	for i, b := range in {
		out[i] = bytes.NewBuffer(b)
	}
	return out
}

// toReaders up-casts a buffer slice to io.Reader.
func toReaders(in []*bytes.Buffer) []io.Reader {
	out := make([]io.Reader, len(in))
	for i, b := range in {
		out[i] = b
	}
	return out
}

// toWriters up-casts a buffer slice to io.Writer.
func toWriters(in []*bytes.Buffer) []io.Writer {
	out := make([]io.Writer, len(in))
	for i, b := range in {
		out[i] = b
	}
	return out
}

// nilWriters returns n nil io.Writers, for exercising error paths.
func nilWriters(n int) []io.Writer {
	// make already zero-initializes every element to nil.
	return make([]io.Writer, n)
}

// emptyBuffers returns n fresh, empty buffers.
func emptyBuffers(n int) []*bytes.Buffer {
	out := make([]*bytes.Buffer, n)
	for i := range out {
		out[i] = &bytes.Buffer{}
	}
	return out
}

// toBytes extracts the underlying bytes of each buffer.
func toBytes(in []*bytes.Buffer) [][]byte {
	out := make([][]byte, len(in))
	for i, b := range in {
		out[i] = b.Bytes()
	}
	return out
}
|
||||
|
||||
// TestStreamReconstruct checks streaming reconstruction: drop up to 3
// of 13 shards (10 data + 3 parity), rebuild them into the writers in
// `fill`, and confirm the repaired set verifies. It then checks the
// error paths for too many missing shards and malformed arguments.
func TestStreamReconstruct(t *testing.T) {
	perShard := 10 << 20
	if testing.Short() {
		perShard = 50000
	}
	r, err := NewStream(10, 3)
	if err != nil {
		t.Fatal(err)
	}
	rand.Seed(0)
	shards := randomBytes(10, perShard)
	parb := emptyBuffers(3)

	err = r.Encode(toReaders(toBuffers(shards)), toWriters(parb))
	if err != nil {
		t.Fatal(err)
	}

	parity := toBytes(parb)

	all := append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
	fill := make([]io.Writer, 13)

	// Reconstruct with all shards present, all fill nil
	err = r.Reconstruct(all, fill)
	if err != nil {
		t.Fatal(err)
	}

	all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)

	// Reconstruct with 10 shards present
	// (indexes 0 and 7 are data shards; index 11 is parity shard 1).
	all[0] = nil
	fill[0] = emptyBuffers(1)[0]
	all[7] = nil
	fill[7] = emptyBuffers(1)[0]
	all[11] = nil
	fill[11] = emptyBuffers(1)[0]

	err = r.Reconstruct(all, fill)
	if err != nil {
		t.Fatal(err)
	}

	// Copy the rebuilt shards back into place before re-verifying.
	shards[0] = fill[0].(*bytes.Buffer).Bytes()
	shards[7] = fill[7].(*bytes.Buffer).Bytes()
	parity[1] = fill[11].(*bytes.Buffer).Bytes()

	all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)

	ok, err := r.Verify(all)
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Fatal("Verification failed")
	}

	all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)

	// Reconstruct with 9 shards present (should fail)
	// — four losses exceed the 3 parity shards.
	all[0] = nil
	fill[0] = emptyBuffers(1)[0]
	all[4] = nil
	fill[4] = emptyBuffers(1)[0]
	all[7] = nil
	fill[7] = emptyBuffers(1)[0]
	all[11] = nil
	fill[11] = emptyBuffers(1)[0]

	err = r.Reconstruct(all, fill)
	if err != ErrTooFewShards {
		t.Errorf("expected %v, got %v", ErrTooFewShards, err)
	}

	// Too few streams in either argument.
	err = r.Reconstruct(toReaders(emptyBuffers(3)), toWriters(emptyBuffers(3)))
	if err != ErrTooFewShards {
		t.Errorf("expected %v, got %v", ErrTooFewShards, err)
	}
	err = r.Reconstruct(toReaders(emptyBuffers(13)), toWriters(emptyBuffers(3)))
	if err != ErrTooFewShards {
		t.Errorf("expected %v, got %v", ErrTooFewShards, err)
	}
	// Every slot has both a valid reader and a valid writer —
	// presumably an ambiguous request; verify against the library docs.
	err = r.Reconstruct(toReaders(emptyBuffers(13)), toWriters(emptyBuffers(13)))
	if err != ErrReconstructMismatch {
		t.Errorf("expected %v, got %v", ErrReconstructMismatch, err)
	}
	// All writers nil and all readers empty: nothing to read.
	err = r.Reconstruct(toReaders(emptyBuffers(13)), nilWriters(13))
	if err != ErrShardNoData {
		t.Errorf("expected %v, got %v", ErrShardNoData, err)
	}
}
|
||||
|
||||
// TestStreamVerify checks that streaming Verify accepts freshly encoded
// parity, rejects a byte flipped in a parity shard and in a data shard,
// and validates its argument count.
func TestStreamVerify(t *testing.T) {
	perShard := 10 << 20
	if testing.Short() {
		perShard = 50000
	}
	r, err := NewStream(10, 4)
	if err != nil {
		t.Fatal(err)
	}
	shards := randomBytes(10, perShard)
	parb := emptyBuffers(4)

	err = r.Encode(toReaders(toBuffers(shards)), toWriters(parb))
	if err != nil {
		t.Fatal(err)
	}
	parity := toBytes(parb)
	all := append(toReaders(toBuffers(shards)), toReaders(parb)...)
	ok, err := r.Verify(all)
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Fatal("Verification failed")
	}

	// Invert all bits of one byte near the end of parity shard 0
	// (the offset is fixed, not random, despite the original comment).
	parity[0][len(parity[0])-20000] = parity[0][len(parity[0])-20000] ^ 0xff

	all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
	ok, err = r.Verify(all)
	if err != nil {
		t.Fatal(err)
	}
	if ok {
		t.Fatal("Verification did not fail")
	}
	// Re-encode — the parb buffers were drained by the first Verify,
	// so this writes a fresh parity set into them.
	err = r.Encode(toReaders(toBuffers(shards)), toWriters(parb))
	if err != nil {
		t.Fatal(err)
	}
	// Invert all bits of one byte in data shard 0 (the original comment
	// said "fill a data segment with random data", which is not what
	// this does).
	shards[0][len(shards[0])-30000] = shards[0][len(shards[0])-30000] ^ 0xff
	all = append(toReaders(toBuffers(shards)), toReaders(parb)...)
	ok, err = r.Verify(all)
	if err != nil {
		t.Fatal(err)
	}
	if ok {
		t.Fatal("Verification did not fail")
	}

	// Only 10 of the required 14 streams supplied.
	_, err = r.Verify(toReaders(emptyBuffers(10)))
	if err != ErrTooFewShards {
		t.Errorf("expected %v, got %v", ErrTooFewShards, err)
	}

	// Right stream count, but nothing to read.
	_, err = r.Verify(toReaders(emptyBuffers(14)))
	if err != ErrShardNoData {
		t.Errorf("expected %v, got %v", ErrShardNoData, err)
	}
}
|
||||
|
||||
// TestStreamOneEncode feeds the streaming encoder one tiny 5+5 shard
// set and checks the produced parity bytes against fixed, precomputed
// values, then confirms Verify notices a single-byte change.
func TestStreamOneEncode(t *testing.T) {
	codec, err := NewStream(5, 5)
	if err != nil {
		t.Fatal(err)
	}
	// Five data shards of two bytes each.
	shards := [][]byte{
		{0, 1},
		{4, 5},
		{2, 3},
		{6, 7},
		{8, 9},
	}
	parb := emptyBuffers(5)
	// NOTE(review): the Encode error is discarded here; the known-answer
	// checks below would fail anyway if encoding did not run.
	codec.Encode(toReaders(toBuffers(shards)), toWriters(parb))
	parity := toBytes(parb)
	// Known-answer checks for parity shards 5..9.
	if parity[0][0] != 12 || parity[0][1] != 13 {
		t.Fatal("shard 5 mismatch")
	}
	if parity[1][0] != 10 || parity[1][1] != 11 {
		t.Fatal("shard 6 mismatch")
	}
	if parity[2][0] != 14 || parity[2][1] != 15 {
		t.Fatal("shard 7 mismatch")
	}
	if parity[3][0] != 90 || parity[3][1] != 91 {
		t.Fatal("shard 8 mismatch")
	}
	if parity[4][0] != 94 || parity[4][1] != 95 {
		t.Fatal("shard 9 mismatch")
	}

	// The full set (data + parity) must verify.
	all := append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
	ok, err := codec.Verify(all)
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Fatal("did not verify")
	}
	// Changing one data byte must break verification.
	shards[3][0]++
	all = append(toReaders(toBuffers(shards)), toReaders(toBuffers(parity))...)
	ok, err = codec.Verify(all)
	if err != nil {
		t.Fatal(err)
	}
	if ok {
		t.Fatal("verify did not fail as expected")
	}

}
|
||||
|
||||
func benchmarkStreamEncode(b *testing.B, dataShards, parityShards, shardSize int) {
|
||||
r, err := NewStream(dataShards, parityShards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
shards := make([][]byte, dataShards)
|
||||
for s := range shards {
|
||||
shards[s] = make([]byte, shardSize)
|
||||
}
|
||||
|
||||
rand.Seed(0)
|
||||
for s := 0; s < dataShards; s++ {
|
||||
fillRandom(shards[s])
|
||||
}
|
||||
|
||||
b.SetBytes(int64(shardSize * dataShards))
|
||||
b.ResetTimer()
|
||||
out := make([]io.Writer, parityShards)
|
||||
for i := range out {
|
||||
out[i] = ioutil.Discard
|
||||
}
|
||||
for i := 0; i < b.N; i++ {
|
||||
err = r.Encode(toReaders(toBuffers(shards)), out)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmark 10 data shards and 2 parity shards with 10000 bytes each.
func BenchmarkStreamEncode10x2x10000(b *testing.B) {
	benchmarkStreamEncode(b, 10, 2, 10000)
}

// Benchmark 100 data shards and 20 parity shards with 10000 bytes each.
func BenchmarkStreamEncode100x20x10000(b *testing.B) {
	benchmarkStreamEncode(b, 100, 20, 10000)
}

// Benchmark 17 data shards and 3 parity shards with 1MB each.
func BenchmarkStreamEncode17x3x1M(b *testing.B) {
	benchmarkStreamEncode(b, 17, 3, 1024*1024)
}

// Benchmark 10 data shards and 4 parity shards with 16MB each.
func BenchmarkStreamEncode10x4x16M(b *testing.B) {
	benchmarkStreamEncode(b, 10, 4, 16*1024*1024)
}

// Benchmark 5 data shards and 2 parity shards with 1MB each.
func BenchmarkStreamEncode5x2x1M(b *testing.B) {
	benchmarkStreamEncode(b, 5, 2, 1024*1024)
}

// Benchmark 10 data shards and 2 parity shards with 1MB each.
// (The original comment said "1 data shards"; the call uses 10.)
func BenchmarkStreamEncode10x2x1M(b *testing.B) {
	benchmarkStreamEncode(b, 10, 2, 1024*1024)
}

// Benchmark 10 data shards and 4 parity shards with 1MB each.
func BenchmarkStreamEncode10x4x1M(b *testing.B) {
	benchmarkStreamEncode(b, 10, 4, 1024*1024)
}

// Benchmark 50 data shards and 20 parity shards with 1MB each.
func BenchmarkStreamEncode50x20x1M(b *testing.B) {
	benchmarkStreamEncode(b, 50, 20, 1024*1024)
}

// Benchmark 17 data shards and 3 parity shards with 16MB each.
func BenchmarkStreamEncode17x3x16M(b *testing.B) {
	benchmarkStreamEncode(b, 17, 3, 16*1024*1024)
}
|
||||
|
||||
func benchmarkStreamVerify(b *testing.B, dataShards, parityShards, shardSize int) {
|
||||
r, err := NewStream(dataShards, parityShards)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
shards := make([][]byte, parityShards+dataShards)
|
||||
for s := range shards {
|
||||
shards[s] = make([]byte, shardSize)
|
||||
}
|
||||
|
||||
rand.Seed(0)
|
||||
for s := 0; s < dataShards; s++ {
|
||||
fillRandom(shards[s])
|
||||
}
|
||||
err = r.Encode(toReaders(toBuffers(shards[:dataShards])), toWriters(toBuffers(shards[dataShards:])))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.SetBytes(int64(shardSize * dataShards))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err = r.Verify(toReaders(toBuffers(shards)))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmark 10 data slices with 2 parity slices holding 10000 bytes each
func BenchmarkStreamVerify10x2x10000(b *testing.B) {
	benchmarkStreamVerify(b, 10, 2, 10000)
}

// Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
// NOTE(review): the function name says 50000 but the shard size is 100000.
func BenchmarkStreamVerify50x5x50000(b *testing.B) {
	benchmarkStreamVerify(b, 50, 5, 100000)
}

// Benchmark 10 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkStreamVerify10x2x1M(b *testing.B) {
	benchmarkStreamVerify(b, 10, 2, 1024*1024)
}

// Benchmark 5 data slices with 2 parity slices holding 1MB bytes each
func BenchmarkStreamVerify5x2x1M(b *testing.B) {
	benchmarkStreamVerify(b, 5, 2, 1024*1024)
}

// Benchmark 10 data slices with 4 parity slices holding 1MB bytes each
func BenchmarkStreamVerify10x4x1M(b *testing.B) {
	benchmarkStreamVerify(b, 10, 4, 1024*1024)
}

// Benchmark 50 data slices with 20 parity slices holding 1MB bytes each.
// (The original comment wrongly said 5 data / 2 parity.)
func BenchmarkStreamVerify50x20x1M(b *testing.B) {
	benchmarkStreamVerify(b, 50, 20, 1024*1024)
}

// Benchmark 10 data slices with 4 parity slices holding 16MB bytes each
func BenchmarkStreamVerify10x4x16M(b *testing.B) {
	benchmarkStreamVerify(b, 10, 4, 16*1024*1024)
}
|
||||
|
||||
func TestStreamSplitJoin(t *testing.T) {
|
||||
var data = make([]byte, 250000)
|
||||
rand.Seed(0)
|
||||
fillRandom(data)
|
||||
|
||||
enc, _ := NewStream(5, 3)
|
||||
split := emptyBuffers(5)
|
||||
err := enc.Split(bytes.NewBuffer(data), toWriters(split), int64(len(data)))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
splits := toBytes(split)
|
||||
expect := len(data) / 5
|
||||
// Beware, if changing data size
|
||||
if split[0].Len() != expect {
|
||||
t.Errorf("unexpected size. expected %d, got %d", expect, split[0].Len())
|
||||
}
|
||||
|
||||
err = enc.Split(bytes.NewBuffer([]byte{}), toWriters(emptyBuffers(3)), 0)
|
||||
if err != ErrShortData {
|
||||
t.Errorf("expected %v, got %v", ErrShortData, err)
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.Join(buf, toReaders(toBuffers(splits)), int64(len(data)))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
joined := buf.Bytes()
|
||||
if !bytes.Equal(joined, data) {
|
||||
t.Fatal("recovered data does match original", joined[:8], data[:8], "... lengths:", len(joined), len(data))
|
||||
}
|
||||
|
||||
err = enc.Join(buf, toReaders(emptyBuffers(2)), 0)
|
||||
if err != ErrTooFewShards {
|
||||
t.Errorf("expected %v, got %v", ErrTooFewShards, err)
|
||||
}
|
||||
bufs := toReaders(emptyBuffers(5))
|
||||
bufs[2] = nil
|
||||
err = enc.Join(buf, bufs, 0)
|
||||
if se, ok := err.(StreamReadError); ok {
|
||||
if se.Err != ErrShardNoData {
|
||||
t.Errorf("expected %v, got %v", ErrShardNoData, se.Err)
|
||||
}
|
||||
if se.Stream != 2 {
|
||||
t.Errorf("Expected error on stream 2, got %d", se.Stream)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("expected error type %T, got %T", StreamReadError{}, err)
|
||||
}
|
||||
|
||||
err = enc.Join(buf, toReaders(toBuffers(splits)), int64(len(data)+1))
|
||||
if err != ErrShortData {
|
||||
t.Errorf("expected %v, got %v", ErrShortData, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStream(t *testing.T) {
|
||||
tests := []struct {
|
||||
data, parity int
|
||||
err error
|
||||
}{
|
||||
{127, 127, nil},
|
||||
{256, 256, ErrMaxShardNum},
|
||||
|
||||
{0, 1, ErrInvShardNum},
|
||||
{1, 0, ErrInvShardNum},
|
||||
{257, 1, ErrMaxShardNum},
|
||||
|
||||
// overflow causes r.Shards to be negative
|
||||
{256, int(^uint(0) >> 1), errInvalidRowSize},
|
||||
}
|
||||
for _, test := range tests {
|
||||
_, err := NewStream(test.data, test.parity)
|
||||
if err != test.err {
|
||||
t.Errorf("New(%v, %v): expected %v, got %v", test.data, test.parity, test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user