go vendor; track uptime, health

Ethan Buchman
2016-02-02 13:10:43 -05:00
parent bfbfb646b9
commit 2b3fc11697
680 changed files with 103113 additions and 26253 deletions

101
Godeps/Godeps.json generated
View File

@@ -1,69 +1,64 @@
{ {
"ImportPath": "github.com/tendermint/netmon", "ImportPath": "github.com/tendermint/netmon",
"GoVersion": "go1.5.2", "GoVersion": "go1.5",
"Packages": [ "Packages": [
"./..." "./..."
], ],
"Deps": [ "Deps": [
{
"ImportPath": "code.google.com/p/go.crypto/ripemd160",
"Comment": "null-236",
"Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
},
{
"ImportPath": "github.com/agl/ed25519/edwards25519",
"Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c"
},
{ {
"ImportPath": "github.com/codegangsta/cli", "ImportPath": "github.com/codegangsta/cli",
"Comment": "1.2.0-187-gc31a797", "Comment": "1.2.0-193-gf9cc300",
"Rev": "c31a7975863e7810c92e2e288a9ab074f9a88f29" "Rev": "f9cc3001e04f9783cb4ad08ca6791aa07134787c"
},
{
"ImportPath": "github.com/golang/snappy",
"Rev": "1963d058044b19e16595f80d5050fa54e2070438"
}, },
{ {
"ImportPath": "github.com/gorilla/websocket", "ImportPath": "github.com/gorilla/websocket",
"Rev": "a3ec486e6a7a41858210b0fc5d7b5df593b3c4a3" "Rev": "234959944d9cf05229b02e8b386e5cffe1e4e04a"
}, },
{ {
"ImportPath": "github.com/inconshreveable/log15/stack", "ImportPath": "github.com/inconshreveable/log15/stack",
"Comment": "v2.3-42-gee210fc", "Comment": "v2.3-48-g210d6fd",
"Rev": "ee210fc98cc7756aa0cf55d8d554148828e8e658" "Rev": "210d6fdc4d979ef6579778f1b6ed84571454abb4"
}, },
{ {
"ImportPath": "github.com/inconshreveable/log15/term", "ImportPath": "github.com/inconshreveable/log15/term",
"Comment": "v2.3-42-gee210fc", "Comment": "v2.3-48-g210d6fd",
"Rev": "ee210fc98cc7756aa0cf55d8d554148828e8e658" "Rev": "210d6fdc4d979ef6579778f1b6ed84571454abb4"
}, },
{ {
"ImportPath": "github.com/mattn/go-colorable", "ImportPath": "github.com/mattn/go-colorable",
"Rev": "d67e0b7d1797975196499f79bcc322c08b9f218b" "Rev": "4af63d73f5bea08b682ad2cd198b1e607afd6be0"
},
{
"ImportPath": "github.com/mattn/go-isatty",
"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
}, },
{ {
"ImportPath": "github.com/naoina/go-stringutil", "ImportPath": "github.com/naoina/go-stringutil",
"Rev": "360db0db4b01d34e12a2ec042c09e7d37fece761" "Rev": "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b"
}, },
{ {
"ImportPath": "github.com/naoina/toml", "ImportPath": "github.com/naoina/toml",
"Rev": "5667c316ee9576e9d5bca793ce4ec813a88ce7d3" "Rev": "751171607256bb66e64c9f0220c00662420c38e9"
}, },
{ {
"ImportPath": "github.com/rcrowley/go-metrics", "ImportPath": "github.com/rcrowley/go-metrics",
"Rev": "a5cfc242a56ba7fa70b785f678d6214837bf93b9" "Rev": "51425a2415d21afadfd55cd93432c0bc69e9598d"
}, },
{ {
"ImportPath": "github.com/sfreiberg/gotwilio", "ImportPath": "github.com/sfreiberg/gotwilio",
"Rev": "b7230c284bd0c1614c94d00b9998c49f9a2737d8" "Rev": "f024bbefe80fdb7bcc8c43b6add05dae97744e0e"
}, },
{ {
"ImportPath": "github.com/syndtr/goleveldb/leveldb", "ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "4875955338b0a434238a31165cb87255ab6e9e4a" "Rev": "b7c1cafa822344831a63ad9c8fafb1556e66d33d"
},
{
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
}, },
{ {
"ImportPath": "github.com/tendermint/ed25519", "ImportPath": "github.com/tendermint/ed25519",
"Rev": "533fb6548e2071076888eda3c38749d707ba49bc" "Rev": "fdac6641497281ed1cc368687ec6377e96e02b24"
}, },
{ {
"ImportPath": "github.com/tendermint/flowcontrol", "ImportPath": "github.com/tendermint/flowcontrol",
@@ -75,7 +70,7 @@
}, },
{ {
"ImportPath": "github.com/tendermint/go-common", "ImportPath": "github.com/tendermint/go-common",
"Rev": "8dacd4172ef28a6601612f25385120bbc8d36f45" "Rev": "19f5a93cffe6df6734c3e7cf3896ba3cea546348"
}, },
{ {
"ImportPath": "github.com/tendermint/go-config", "ImportPath": "github.com/tendermint/go-config",
@@ -87,23 +82,23 @@
}, },
{ {
"ImportPath": "github.com/tendermint/go-db", "ImportPath": "github.com/tendermint/go-db",
"Rev": "28d39f8726c76b163e881c3d05dad227c93200ae" "Rev": "a7878f1d0d8eaebf15f87bc2df15f7a1088cce7f"
}, },
{ {
"ImportPath": "github.com/tendermint/go-event-meter", "ImportPath": "github.com/tendermint/go-event-meter",
"Rev": "bf8aff7f677e03c3141cbb85895984847a1be02a" "Rev": "1d455b3fc1930f0d12609342b668aed385655ed8"
}, },
{ {
"ImportPath": "github.com/tendermint/go-events", "ImportPath": "github.com/tendermint/go-events",
"Rev": "e8ffe6bb4c43447fbc0cda2b4cf0967e4ba4796d" "Rev": "7b75ca7bb55aa25e9ef765eb8c0b69486b227357"
}, },
{ {
"ImportPath": "github.com/tendermint/go-logger", "ImportPath": "github.com/tendermint/go-logger",
"Rev": "acdbd88e88a5d75b97348273706ec4b10e673b90" "Rev": "980f02a5001b46f02ab3fbb036531d4ea789d2bf"
}, },
{ {
"ImportPath": "github.com/tendermint/go-merkle", "ImportPath": "github.com/tendermint/go-merkle",
"Rev": "d7361b77dcb84f907a1db0e99ca79274b8f8930d" "Rev": "67b535ce9633be7df575dc3a7833fa2301020c25"
}, },
{ {
"ImportPath": "github.com/tendermint/go-p2p", "ImportPath": "github.com/tendermint/go-p2p",
@@ -111,7 +106,7 @@
}, },
{ {
"ImportPath": "github.com/tendermint/go-process", "ImportPath": "github.com/tendermint/go-process",
"Rev": "70b9a368caee165dd0c0754448f9bcb664014bbf" "Rev": "ba01cfbb58d446673beff17e72883cb49c835fb9"
}, },
{ {
"ImportPath": "github.com/tendermint/go-rpc/client", "ImportPath": "github.com/tendermint/go-rpc/client",
@@ -127,52 +122,56 @@
}, },
{ {
"ImportPath": "github.com/tendermint/go-wire", "ImportPath": "github.com/tendermint/go-wire",
"Comment": "1.0rc-24-g20dd2e3", "Comment": "1.0rc-26-gc7ef03a",
"Rev": "20dd2e3a6739fb5af32bfe1dd7121b31e5e5890d" "Rev": "c7ef03a4e85532dd21bfa644bd8dd4a89afd44e0"
}, },
{ {
"ImportPath": "github.com/tendermint/log15", "ImportPath": "github.com/tendermint/log15",
"Comment": "v2.3-36-gc65281b", "Comment": "v2.3-36-g6e46075",
"Rev": "c65281bb703b7612f60558e75b07c434c06e2636" "Rev": "6e460758f10ef42a4724b8e4a82fee59aaa0e41d"
}, },
{ {
"ImportPath": "github.com/tendermint/tendermint/config/tendermint", "ImportPath": "github.com/tendermint/tendermint/config/tendermint",
"Comment": "0.2-96-g1b1d265", "Comment": "0.2-114-gc0024cc",
"Rev": "1b1d265cd480ad56fe516be72512114891fc78f1" "Rev": "c0024cc7b209e022630e47782e787bec3640af30"
}, },
{ {
"ImportPath": "github.com/tendermint/tendermint/rpc/core/types", "ImportPath": "github.com/tendermint/tendermint/rpc/core/types",
"Comment": "0.2-96-g1b1d265", "Comment": "0.2-114-gc0024cc",
"Rev": "1b1d265cd480ad56fe516be72512114891fc78f1" "Rev": "c0024cc7b209e022630e47782e787bec3640af30"
}, },
{ {
"ImportPath": "github.com/tendermint/tendermint/types", "ImportPath": "github.com/tendermint/tendermint/types",
"Comment": "0.2-96-g1b1d265", "Comment": "0.2-114-gc0024cc",
"Rev": "1b1d265cd480ad56fe516be72512114891fc78f1" "Rev": "c0024cc7b209e022630e47782e787bec3640af30"
}, },
{ {
"ImportPath": "golang.org/x/crypto/curve25519", "ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300" "Rev": "1f22c0103821b9390939b6776727195525381532"
}, },
{ {
"ImportPath": "golang.org/x/crypto/nacl/box", "ImportPath": "golang.org/x/crypto/nacl/box",
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300" "Rev": "1f22c0103821b9390939b6776727195525381532"
}, },
{ {
"ImportPath": "golang.org/x/crypto/nacl/secretbox", "ImportPath": "golang.org/x/crypto/nacl/secretbox",
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300" "Rev": "1f22c0103821b9390939b6776727195525381532"
}, },
{ {
"ImportPath": "golang.org/x/crypto/poly1305", "ImportPath": "golang.org/x/crypto/poly1305",
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300" "Rev": "1f22c0103821b9390939b6776727195525381532"
}, },
{ {
"ImportPath": "golang.org/x/crypto/ripemd160", "ImportPath": "golang.org/x/crypto/ripemd160",
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300" "Rev": "1f22c0103821b9390939b6776727195525381532"
}, },
{ {
"ImportPath": "golang.org/x/crypto/salsa20/salsa", "ImportPath": "golang.org/x/crypto/salsa20/salsa",
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300" "Rev": "1f22c0103821b9390939b6776727195525381532"
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "eb2c74142fd19a79b3f237334c7384d5167b1b46"
} }
] ]
} }
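
Beyond the revision bumps recorded above, the "go vendor" in the commit title refers to moving these dependencies out of Godeps/_workspace and into the repository's vendor/ directory; the file deletions that follow are the old workspace copies. A rough sketch of what that change means for import resolution, assuming Go 1.5's opt-in vendor support (GO15VENDOREXPERIMENT=1, consistent with the "GoVersion": "go1.5" pin above); the imported package is taken from the dependency list, and the program itself is only illustrative:

// Minimal sketch, not netmon code: with vendoring enabled, this plain
// import path resolves to vendor/github.com/codegangsta/cli inside the
// repository rather than to Godeps/_workspace/src/..., so no GOPATH
// rewriting or godep-prefixed import paths are required.
package main

import (
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "example" // illustrative name, not the actual binary
	app.Action = func(c *cli.Context) {
		// real monitoring logic would go here
	}
	app.Run(os.Args)
}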

2
Godeps/_workspace/.gitignore generated vendored
View File

@@ -1,2 +0,0 @@
/pkg
/bin

View File

@@ -1,64 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ripemd160
// Test vectors are from:
// http://homes.esat.kuleuven.be/~bosselae/ripemd160.html
import (
"fmt"
"io"
"testing"
)
type mdTest struct {
out string
in string
}
var vectors = [...]mdTest{
{"9c1185a5c5e9fc54612808977ee8f548b2258d31", ""},
{"0bdc9d2d256b3ee9daae347be6f4dc835a467ffe", "a"},
{"8eb208f7e05d987a9b044a8e98c6b087f15a0bfc", "abc"},
{"5d0689ef49d2fae572b881b123a85ffa21595f36", "message digest"},
{"f71c27109c692c1b56bbdceb5b9d2865b3708dbc", "abcdefghijklmnopqrstuvwxyz"},
{"12a053384a9c0c88e405a06c27dcf49ada62eb2b", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
{"b0e20b6e3116640286ed3a87a5713079b21f5189", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"},
{"9b752e45573d4b39f4dbd3323cab82bf63326bfb", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
}
func TestVectors(t *testing.T) {
for i := 0; i < len(vectors); i++ {
tv := vectors[i]
md := New()
for j := 0; j < 3; j++ {
if j < 2 {
io.WriteString(md, tv.in)
} else {
io.WriteString(md, tv.in[0:len(tv.in)/2])
md.Sum(nil)
io.WriteString(md, tv.in[len(tv.in)/2:])
}
s := fmt.Sprintf("%x", md.Sum(nil))
if s != tv.out {
t.Fatalf("RIPEMD-160[%d](%s) = %s, expected %s", j, tv.in, s, tv.out)
}
md.Reset()
}
}
}
func TestMillionA(t *testing.T) {
md := New()
for i := 0; i < 100000; i++ {
io.WriteString(md, "aaaaaaaaaa")
}
out := "52783243c1697bdbe16d37f97f68f08325dc1528"
s := fmt.Sprintf("%x", md.Sum(nil))
if s != out {
t.Fatalf("RIPEMD-160 (1 million 'a') = %s, expected %s", s, out)
}
md.Reset()
}

View File

@@ -1,962 +0,0 @@
package cli
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"testing"
)
func ExampleApp_Run() {
// set args for example's sake
os.Args = []string{"greet", "--name", "Jeremy"}
app := NewApp()
app.Name = "greet"
app.Flags = []Flag{
StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
}
app.Action = func(c *Context) {
fmt.Printf("Hello %v\n", c.String("name"))
}
app.Author = "Harrison"
app.Email = "harrison@lolwut.com"
app.Authors = []Author{Author{Name: "Oliver Allen", Email: "oliver@toyshop.com"}}
app.Run(os.Args)
// Output:
// Hello Jeremy
}
func ExampleApp_Run_subcommand() {
// set args for example's sake
os.Args = []string{"say", "hi", "english", "--name", "Jeremy"}
app := NewApp()
app.Name = "say"
app.Commands = []Command{
{
Name: "hello",
Aliases: []string{"hi"},
Usage: "use it to see a description",
Description: "This is how we describe hello the function",
Subcommands: []Command{
{
Name: "english",
Aliases: []string{"en"},
Usage: "sends a greeting in english",
Description: "greets someone in english",
Flags: []Flag{
StringFlag{
Name: "name",
Value: "Bob",
Usage: "Name of the person to greet",
},
},
Action: func(c *Context) {
fmt.Println("Hello,", c.String("name"))
},
},
},
},
}
app.Run(os.Args)
// Output:
// Hello, Jeremy
}
func ExampleApp_Run_help() {
// set args for example's sake
os.Args = []string{"greet", "h", "describeit"}
app := NewApp()
app.Name = "greet"
app.Flags = []Flag{
StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
}
app.Commands = []Command{
{
Name: "describeit",
Aliases: []string{"d"},
Usage: "use it to see a description",
Description: "This is how we describe describeit the function",
Action: func(c *Context) {
fmt.Printf("i like to describe things")
},
},
}
app.Run(os.Args)
// Output:
// NAME:
// greet describeit - use it to see a description
//
// USAGE:
// greet describeit [arguments...]
//
// DESCRIPTION:
// This is how we describe describeit the function
}
func ExampleApp_Run_bashComplete() {
// set args for example's sake
os.Args = []string{"greet", "--generate-bash-completion"}
app := NewApp()
app.Name = "greet"
app.EnableBashCompletion = true
app.Commands = []Command{
{
Name: "describeit",
Aliases: []string{"d"},
Usage: "use it to see a description",
Description: "This is how we describe describeit the function",
Action: func(c *Context) {
fmt.Printf("i like to describe things")
},
}, {
Name: "next",
Usage: "next example",
Description: "more stuff to see when generating bash completion",
Action: func(c *Context) {
fmt.Printf("the next example")
},
},
}
app.Run(os.Args)
// Output:
// describeit
// d
// next
// help
// h
}
func TestApp_Run(t *testing.T) {
s := ""
app := NewApp()
app.Action = func(c *Context) {
s = s + c.Args().First()
}
err := app.Run([]string{"command", "foo"})
expect(t, err, nil)
err = app.Run([]string{"command", "bar"})
expect(t, err, nil)
expect(t, s, "foobar")
}
var commandAppTests = []struct {
name string
expected bool
}{
{"foobar", true},
{"batbaz", true},
{"b", true},
{"f", true},
{"bat", false},
{"nothing", false},
}
func TestApp_Command(t *testing.T) {
app := NewApp()
fooCommand := Command{Name: "foobar", Aliases: []string{"f"}}
batCommand := Command{Name: "batbaz", Aliases: []string{"b"}}
app.Commands = []Command{
fooCommand,
batCommand,
}
for _, test := range commandAppTests {
expect(t, app.Command(test.name) != nil, test.expected)
}
}
func TestApp_CommandWithArgBeforeFlags(t *testing.T) {
var parsedOption, firstArg string
app := NewApp()
command := Command{
Name: "cmd",
Flags: []Flag{
StringFlag{Name: "option", Value: "", Usage: "some option"},
},
Action: func(c *Context) {
parsedOption = c.String("option")
firstArg = c.Args().First()
},
}
app.Commands = []Command{command}
app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"})
expect(t, parsedOption, "my-option")
expect(t, firstArg, "my-arg")
}
func TestApp_RunAsSubcommandParseFlags(t *testing.T) {
var context *Context
a := NewApp()
a.Commands = []Command{
{
Name: "foo",
Action: func(c *Context) {
context = c
},
Flags: []Flag{
StringFlag{
Name: "lang",
Value: "english",
Usage: "language for the greeting",
},
},
Before: func(_ *Context) error { return nil },
},
}
a.Run([]string{"", "foo", "--lang", "spanish", "abcd"})
expect(t, context.Args().Get(0), "abcd")
expect(t, context.String("lang"), "spanish")
}
func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) {
var parsedOption string
var args []string
app := NewApp()
command := Command{
Name: "cmd",
Flags: []Flag{
StringFlag{Name: "option", Value: "", Usage: "some option"},
},
Action: func(c *Context) {
parsedOption = c.String("option")
args = c.Args()
},
}
app.Commands = []Command{command}
app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"})
expect(t, parsedOption, "my-option")
expect(t, args[0], "my-arg")
expect(t, args[1], "--")
expect(t, args[2], "--notARealFlag")
}
func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) {
var args []string
app := NewApp()
command := Command{
Name: "cmd",
Action: func(c *Context) {
args = c.Args()
},
}
app.Commands = []Command{command}
app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"})
expect(t, args[0], "my-arg")
expect(t, args[1], "--")
expect(t, args[2], "notAFlagAtAll")
}
func TestApp_Float64Flag(t *testing.T) {
var meters float64
app := NewApp()
app.Flags = []Flag{
Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"},
}
app.Action = func(c *Context) {
meters = c.Float64("height")
}
app.Run([]string{"", "--height", "1.93"})
expect(t, meters, 1.93)
}
func TestApp_ParseSliceFlags(t *testing.T) {
var parsedOption, firstArg string
var parsedIntSlice []int
var parsedStringSlice []string
app := NewApp()
command := Command{
Name: "cmd",
Flags: []Flag{
IntSliceFlag{Name: "p", Value: &IntSlice{}, Usage: "set one or more ip addr"},
StringSliceFlag{Name: "ip", Value: &StringSlice{}, Usage: "set one or more ports to open"},
},
Action: func(c *Context) {
parsedIntSlice = c.IntSlice("p")
parsedStringSlice = c.StringSlice("ip")
parsedOption = c.String("option")
firstArg = c.Args().First()
},
}
app.Commands = []Command{command}
app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"})
IntsEquals := func(a, b []int) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
StrsEquals := func(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
var expectedIntSlice = []int{22, 80}
var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"}
if !IntsEquals(parsedIntSlice, expectedIntSlice) {
t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice)
}
if !StrsEquals(parsedStringSlice, expectedStringSlice) {
t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice)
}
}
func TestApp_ParseSliceFlagsWithMissingValue(t *testing.T) {
var parsedIntSlice []int
var parsedStringSlice []string
app := NewApp()
command := Command{
Name: "cmd",
Flags: []Flag{
IntSliceFlag{Name: "a", Usage: "set numbers"},
StringSliceFlag{Name: "str", Usage: "set strings"},
},
Action: func(c *Context) {
parsedIntSlice = c.IntSlice("a")
parsedStringSlice = c.StringSlice("str")
},
}
app.Commands = []Command{command}
app.Run([]string{"", "cmd", "my-arg", "-a", "2", "-str", "A"})
var expectedIntSlice = []int{2}
var expectedStringSlice = []string{"A"}
if parsedIntSlice[0] != expectedIntSlice[0] {
t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0])
}
if parsedStringSlice[0] != expectedStringSlice[0] {
t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0])
}
}
func TestApp_DefaultStdout(t *testing.T) {
app := NewApp()
if app.Writer != os.Stdout {
t.Error("Default output writer not set.")
}
}
type mockWriter struct {
written []byte
}
func (fw *mockWriter) Write(p []byte) (n int, err error) {
if fw.written == nil {
fw.written = p
} else {
fw.written = append(fw.written, p...)
}
return len(p), nil
}
func (fw *mockWriter) GetWritten() (b []byte) {
return fw.written
}
func TestApp_SetStdout(t *testing.T) {
w := &mockWriter{}
app := NewApp()
app.Name = "test"
app.Writer = w
err := app.Run([]string{"help"})
if err != nil {
t.Fatalf("Run error: %s", err)
}
if len(w.written) == 0 {
t.Error("App did not write output to desired writer.")
}
}
func TestApp_BeforeFunc(t *testing.T) {
beforeRun, subcommandRun := false, false
beforeError := fmt.Errorf("fail")
var err error
app := NewApp()
app.Before = func(c *Context) error {
beforeRun = true
s := c.String("opt")
if s == "fail" {
return beforeError
}
return nil
}
app.Commands = []Command{
Command{
Name: "sub",
Action: func(c *Context) {
subcommandRun = true
},
},
}
app.Flags = []Flag{
StringFlag{Name: "opt"},
}
// run with the Before() func succeeding
err = app.Run([]string{"command", "--opt", "succeed", "sub"})
if err != nil {
t.Fatalf("Run error: %s", err)
}
if beforeRun == false {
t.Errorf("Before() not executed when expected")
}
if subcommandRun == false {
t.Errorf("Subcommand not executed when expected")
}
// reset
beforeRun, subcommandRun = false, false
// run with the Before() func failing
err = app.Run([]string{"command", "--opt", "fail", "sub"})
// should be the same error produced by the Before func
if err != beforeError {
t.Errorf("Run error expected, but not received")
}
if beforeRun == false {
t.Errorf("Before() not executed when expected")
}
if subcommandRun == true {
t.Errorf("Subcommand executed when NOT expected")
}
}
func TestApp_AfterFunc(t *testing.T) {
afterRun, subcommandRun := false, false
afterError := fmt.Errorf("fail")
var err error
app := NewApp()
app.After = func(c *Context) error {
afterRun = true
s := c.String("opt")
if s == "fail" {
return afterError
}
return nil
}
app.Commands = []Command{
Command{
Name: "sub",
Action: func(c *Context) {
subcommandRun = true
},
},
}
app.Flags = []Flag{
StringFlag{Name: "opt"},
}
// run with the After() func succeeding
err = app.Run([]string{"command", "--opt", "succeed", "sub"})
if err != nil {
t.Fatalf("Run error: %s", err)
}
if afterRun == false {
t.Errorf("After() not executed when expected")
}
if subcommandRun == false {
t.Errorf("Subcommand not executed when expected")
}
// reset
afterRun, subcommandRun = false, false
// run with the Before() func failing
err = app.Run([]string{"command", "--opt", "fail", "sub"})
// should be the same error produced by the Before func
if err != afterError {
t.Errorf("Run error expected, but not received")
}
if afterRun == false {
t.Errorf("After() not executed when expected")
}
if subcommandRun == false {
t.Errorf("Subcommand not executed when expected")
}
}
func TestAppNoHelpFlag(t *testing.T) {
oldFlag := HelpFlag
defer func() {
HelpFlag = oldFlag
}()
HelpFlag = BoolFlag{}
app := NewApp()
app.Writer = ioutil.Discard
err := app.Run([]string{"test", "-h"})
if err != flag.ErrHelp {
t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err)
}
}
func TestAppHelpPrinter(t *testing.T) {
oldPrinter := HelpPrinter
defer func() {
HelpPrinter = oldPrinter
}()
var wasCalled = false
HelpPrinter = func(w io.Writer, template string, data interface{}) {
wasCalled = true
}
app := NewApp()
app.Run([]string{"-h"})
if wasCalled == false {
t.Errorf("Help printer expected to be called, but was not")
}
}
func TestAppVersionPrinter(t *testing.T) {
oldPrinter := VersionPrinter
defer func() {
VersionPrinter = oldPrinter
}()
var wasCalled = false
VersionPrinter = func(c *Context) {
wasCalled = true
}
app := NewApp()
ctx := NewContext(app, nil, nil)
ShowVersion(ctx)
if wasCalled == false {
t.Errorf("Version printer expected to be called, but was not")
}
}
func TestAppCommandNotFound(t *testing.T) {
beforeRun, subcommandRun := false, false
app := NewApp()
app.CommandNotFound = func(c *Context, command string) {
beforeRun = true
}
app.Commands = []Command{
Command{
Name: "bar",
Action: func(c *Context) {
subcommandRun = true
},
},
}
app.Run([]string{"command", "foo"})
expect(t, beforeRun, true)
expect(t, subcommandRun, false)
}
func TestGlobalFlag(t *testing.T) {
var globalFlag string
var globalFlagSet bool
app := NewApp()
app.Flags = []Flag{
StringFlag{Name: "global, g", Usage: "global"},
}
app.Action = func(c *Context) {
globalFlag = c.GlobalString("global")
globalFlagSet = c.GlobalIsSet("global")
}
app.Run([]string{"command", "-g", "foo"})
expect(t, globalFlag, "foo")
expect(t, globalFlagSet, true)
}
func TestGlobalFlagsInSubcommands(t *testing.T) {
subcommandRun := false
parentFlag := false
app := NewApp()
app.Flags = []Flag{
BoolFlag{Name: "debug, d", Usage: "Enable debugging"},
}
app.Commands = []Command{
Command{
Name: "foo",
Flags: []Flag{
BoolFlag{Name: "parent, p", Usage: "Parent flag"},
},
Subcommands: []Command{
{
Name: "bar",
Action: func(c *Context) {
if c.GlobalBool("debug") {
subcommandRun = true
}
if c.GlobalBool("parent") {
parentFlag = true
}
},
},
},
},
}
app.Run([]string{"command", "-d", "foo", "-p", "bar"})
expect(t, subcommandRun, true)
expect(t, parentFlag, true)
}
func TestApp_Run_CommandWithSubcommandHasHelpTopic(t *testing.T) {
var subcommandHelpTopics = [][]string{
{"command", "foo", "--help"},
{"command", "foo", "-h"},
{"command", "foo", "help"},
}
for _, flagSet := range subcommandHelpTopics {
t.Logf("==> checking with flags %v", flagSet)
app := NewApp()
buf := new(bytes.Buffer)
app.Writer = buf
subCmdBar := Command{
Name: "bar",
Usage: "does bar things",
}
subCmdBaz := Command{
Name: "baz",
Usage: "does baz things",
}
cmd := Command{
Name: "foo",
Description: "descriptive wall of text about how it does foo things",
Subcommands: []Command{subCmdBar, subCmdBaz},
}
app.Commands = []Command{cmd}
err := app.Run(flagSet)
if err != nil {
t.Error(err)
}
output := buf.String()
t.Logf("output: %q\n", buf.Bytes())
if strings.Contains(output, "No help topic for") {
t.Errorf("expect a help topic, got none: \n%q", output)
}
for _, shouldContain := range []string{
cmd.Name, cmd.Description,
subCmdBar.Name, subCmdBar.Usage,
subCmdBaz.Name, subCmdBaz.Usage,
} {
if !strings.Contains(output, shouldContain) {
t.Errorf("want help to contain %q, did not: \n%q", shouldContain, output)
}
}
}
}
func TestApp_Run_SubcommandFullPath(t *testing.T) {
app := NewApp()
buf := new(bytes.Buffer)
app.Writer = buf
app.Name = "command"
subCmd := Command{
Name: "bar",
Usage: "does bar things",
}
cmd := Command{
Name: "foo",
Description: "foo commands",
Subcommands: []Command{subCmd},
}
app.Commands = []Command{cmd}
err := app.Run([]string{"command", "foo", "bar", "--help"})
if err != nil {
t.Error(err)
}
output := buf.String()
if !strings.Contains(output, "command foo bar - does bar things") {
t.Errorf("expected full path to subcommand: %s", output)
}
if !strings.Contains(output, "command foo bar [arguments...]") {
t.Errorf("expected full path to subcommand: %s", output)
}
}
func TestApp_Run_SubcommandHelpName(t *testing.T) {
app := NewApp()
buf := new(bytes.Buffer)
app.Writer = buf
app.Name = "command"
subCmd := Command{
Name: "bar",
HelpName: "custom",
Usage: "does bar things",
}
cmd := Command{
Name: "foo",
Description: "foo commands",
Subcommands: []Command{subCmd},
}
app.Commands = []Command{cmd}
err := app.Run([]string{"command", "foo", "bar", "--help"})
if err != nil {
t.Error(err)
}
output := buf.String()
if !strings.Contains(output, "custom - does bar things") {
t.Errorf("expected HelpName for subcommand: %s", output)
}
if !strings.Contains(output, "custom [arguments...]") {
t.Errorf("expected HelpName to subcommand: %s", output)
}
}
func TestApp_Run_CommandHelpName(t *testing.T) {
app := NewApp()
buf := new(bytes.Buffer)
app.Writer = buf
app.Name = "command"
subCmd := Command{
Name: "bar",
Usage: "does bar things",
}
cmd := Command{
Name: "foo",
HelpName: "custom",
Description: "foo commands",
Subcommands: []Command{subCmd},
}
app.Commands = []Command{cmd}
err := app.Run([]string{"command", "foo", "bar", "--help"})
if err != nil {
t.Error(err)
}
output := buf.String()
if !strings.Contains(output, "command foo bar - does bar things") {
t.Errorf("expected full path to subcommand: %s", output)
}
if !strings.Contains(output, "command foo bar [arguments...]") {
t.Errorf("expected full path to subcommand: %s", output)
}
}
func TestApp_Run_CommandSubcommandHelpName(t *testing.T) {
app := NewApp()
buf := new(bytes.Buffer)
app.Writer = buf
app.Name = "base"
subCmd := Command{
Name: "bar",
HelpName: "custom",
Usage: "does bar things",
}
cmd := Command{
Name: "foo",
Description: "foo commands",
Subcommands: []Command{subCmd},
}
app.Commands = []Command{cmd}
err := app.Run([]string{"command", "foo", "--help"})
if err != nil {
t.Error(err)
}
output := buf.String()
if !strings.Contains(output, "base foo - foo commands") {
t.Errorf("expected full path to subcommand: %s", output)
}
if !strings.Contains(output, "base foo command [command options] [arguments...]") {
t.Errorf("expected full path to subcommand: %s", output)
}
}
func TestApp_Run_Help(t *testing.T) {
var helpArguments = [][]string{{"boom", "--help"}, {"boom", "-h"}, {"boom", "help"}}
for _, args := range helpArguments {
buf := new(bytes.Buffer)
t.Logf("==> checking with arguments %v", args)
app := NewApp()
app.Name = "boom"
app.Usage = "make an explosive entrance"
app.Writer = buf
app.Action = func(c *Context) {
buf.WriteString("boom I say!")
}
err := app.Run(args)
if err != nil {
t.Error(err)
}
output := buf.String()
t.Logf("output: %q\n", buf.Bytes())
if !strings.Contains(output, "boom - make an explosive entrance") {
t.Errorf("want help to contain %q, did not: \n%q", "boom - make an explosive entrance", output)
}
}
}
func TestApp_Run_Version(t *testing.T) {
var versionArguments = [][]string{{"boom", "--version"}, {"boom", "-v"}}
for _, args := range versionArguments {
buf := new(bytes.Buffer)
t.Logf("==> checking with arguments %v", args)
app := NewApp()
app.Name = "boom"
app.Usage = "make an explosive entrance"
app.Version = "0.1.0"
app.Writer = buf
app.Action = func(c *Context) {
buf.WriteString("boom I say!")
}
err := app.Run(args)
if err != nil {
t.Error(err)
}
output := buf.String()
t.Logf("output: %q\n", buf.Bytes())
if !strings.Contains(output, "0.1.0") {
t.Errorf("want version to contain %q, did not: \n%q", "0.1.0", output)
}
}
}
func TestApp_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) {
app := NewApp()
app.Action = func(c *Context) {}
app.Before = func(c *Context) error { return fmt.Errorf("before error") }
app.After = func(c *Context) error { return fmt.Errorf("after error") }
err := app.Run([]string{"foo"})
if err == nil {
t.Fatalf("expected to recieve error from Run, got none")
}
if !strings.Contains(err.Error(), "before error") {
t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
}
if !strings.Contains(err.Error(), "after error") {
t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
}
}
func TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore(t *testing.T) {
app := NewApp()
app.Commands = []Command{
Command{
Name: "bar",
Before: func(c *Context) error { return fmt.Errorf("before error") },
After: func(c *Context) error { return fmt.Errorf("after error") },
},
}
err := app.Run([]string{"foo", "bar"})
if err == nil {
t.Fatalf("expected to recieve error from Run, got none")
}
if !strings.Contains(err.Error(), "before error") {
t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
}
if !strings.Contains(err.Error(), "after error") {
t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
}
}

View File

@@ -1,45 +0,0 @@
package cli
import (
"errors"
"flag"
"io/ioutil"
"testing"
)
func TestCommandFlagParsing(t *testing.T) {
cases := []struct {
testArgs []string
skipFlagParsing bool
expectedErr error
}{
{[]string{"blah", "blah", "-break"}, false, errors.New("flag provided but not defined: -break")}, // Test normal "not ignoring flags" flow
{[]string{"blah", "blah"}, true, nil}, // Test SkipFlagParsing without any args that look like flags
{[]string{"blah", "-break"}, true, nil}, // Test SkipFlagParsing with random flag arg
{[]string{"blah", "-help"}, true, nil}, // Test SkipFlagParsing with "special" help flag arg
}
for _, c := range cases {
app := NewApp()
app.Writer = ioutil.Discard
set := flag.NewFlagSet("test", 0)
set.Parse(c.testArgs)
context := NewContext(app, set, nil)
command := Command{
Name: "test-cmd",
Aliases: []string{"tc"},
Usage: "this is for testing",
Description: "testing",
Action: func(_ *Context) {},
}
command.SkipFlagParsing = c.skipFlagParsing
err := command.Run(context)
expect(t, err, c.expectedErr)
expect(t, []string(context.Args()), c.testArgs)
}
}

View File

@@ -1,113 +0,0 @@
package cli
import (
"flag"
"testing"
"time"
)
func TestNewContext(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.Int("myflag", 12, "doc")
globalSet := flag.NewFlagSet("test", 0)
globalSet.Int("myflag", 42, "doc")
globalCtx := NewContext(nil, globalSet, nil)
command := Command{Name: "mycommand"}
c := NewContext(nil, set, globalCtx)
c.Command = command
expect(t, c.Int("myflag"), 12)
expect(t, c.GlobalInt("myflag"), 42)
expect(t, c.Command.Name, "mycommand")
}
func TestContext_Int(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.Int("myflag", 12, "doc")
c := NewContext(nil, set, nil)
expect(t, c.Int("myflag"), 12)
}
func TestContext_Duration(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.Duration("myflag", time.Duration(12*time.Second), "doc")
c := NewContext(nil, set, nil)
expect(t, c.Duration("myflag"), time.Duration(12*time.Second))
}
func TestContext_String(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.String("myflag", "hello world", "doc")
c := NewContext(nil, set, nil)
expect(t, c.String("myflag"), "hello world")
}
func TestContext_Bool(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.Bool("myflag", false, "doc")
c := NewContext(nil, set, nil)
expect(t, c.Bool("myflag"), false)
}
func TestContext_BoolT(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.Bool("myflag", true, "doc")
c := NewContext(nil, set, nil)
expect(t, c.BoolT("myflag"), true)
}
func TestContext_Args(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.Bool("myflag", false, "doc")
c := NewContext(nil, set, nil)
set.Parse([]string{"--myflag", "bat", "baz"})
expect(t, len(c.Args()), 2)
expect(t, c.Bool("myflag"), true)
}
func TestContext_IsSet(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.Bool("myflag", false, "doc")
set.String("otherflag", "hello world", "doc")
globalSet := flag.NewFlagSet("test", 0)
globalSet.Bool("myflagGlobal", true, "doc")
globalCtx := NewContext(nil, globalSet, nil)
c := NewContext(nil, set, globalCtx)
set.Parse([]string{"--myflag", "bat", "baz"})
globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"})
expect(t, c.IsSet("myflag"), true)
expect(t, c.IsSet("otherflag"), false)
expect(t, c.IsSet("bogusflag"), false)
expect(t, c.IsSet("myflagGlobal"), false)
}
func TestContext_GlobalIsSet(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.Bool("myflag", false, "doc")
set.String("otherflag", "hello world", "doc")
globalSet := flag.NewFlagSet("test", 0)
globalSet.Bool("myflagGlobal", true, "doc")
globalSet.Bool("myflagGlobalUnset", true, "doc")
globalCtx := NewContext(nil, globalSet, nil)
c := NewContext(nil, set, globalCtx)
set.Parse([]string{"--myflag", "bat", "baz"})
globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"})
expect(t, c.GlobalIsSet("myflag"), false)
expect(t, c.GlobalIsSet("otherflag"), false)
expect(t, c.GlobalIsSet("bogusflag"), false)
expect(t, c.GlobalIsSet("myflagGlobal"), true)
expect(t, c.GlobalIsSet("myflagGlobalUnset"), false)
expect(t, c.GlobalIsSet("bogusGlobal"), false)
}
func TestContext_NumFlags(t *testing.T) {
set := flag.NewFlagSet("test", 0)
set.Bool("myflag", false, "doc")
set.String("otherflag", "hello world", "doc")
globalSet := flag.NewFlagSet("test", 0)
globalSet.Bool("myflagGlobal", true, "doc")
globalCtx := NewContext(nil, globalSet, nil)
c := NewContext(nil, set, globalCtx)
set.Parse([]string{"--myflag", "--otherflag=foo"})
globalSet.Parse([]string{"--myflagGlobal"})
expect(t, c.NumFlags(), 2)
}

View File

@@ -1,830 +0,0 @@
package cli
import (
"fmt"
"os"
"reflect"
"strings"
"testing"
)
var boolFlagTests = []struct {
name string
expected string
}{
{"help", "--help\t"},
{"h", "-h\t"},
}
func TestBoolFlagHelpOutput(t *testing.T) {
for _, test := range boolFlagTests {
flag := BoolFlag{Name: test.name}
output := flag.String()
if output != test.expected {
t.Errorf("%s does not match %s", output, test.expected)
}
}
}
var stringFlagTests = []struct {
name string
value string
expected string
}{
{"help", "", "--help \t"},
{"h", "", "-h \t"},
{"h", "", "-h \t"},
{"test", "Something", "--test \"Something\"\t"},
}
func TestStringFlagHelpOutput(t *testing.T) {
for _, test := range stringFlagTests {
flag := StringFlag{Name: test.name, Value: test.value}
output := flag.String()
if output != test.expected {
t.Errorf("%s does not match %s", output, test.expected)
}
}
}
func TestStringFlagWithEnvVarHelpOutput(t *testing.T) {
os.Clearenv()
os.Setenv("APP_FOO", "derp")
for _, test := range stringFlagTests {
flag := StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"}
output := flag.String()
if !strings.HasSuffix(output, " [$APP_FOO]") {
t.Errorf("%s does not end with [$APP_FOO]", output)
}
}
}
var stringSliceFlagTests = []struct {
name string
value *StringSlice
expected string
}{
{"help", func() *StringSlice {
s := &StringSlice{}
s.Set("")
return s
}(), "--help [--help option --help option]\t"},
{"h", func() *StringSlice {
s := &StringSlice{}
s.Set("")
return s
}(), "-h [-h option -h option]\t"},
{"h", func() *StringSlice {
s := &StringSlice{}
s.Set("")
return s
}(), "-h [-h option -h option]\t"},
{"test", func() *StringSlice {
s := &StringSlice{}
s.Set("Something")
return s
}(), "--test [--test option --test option]\t"},
}
func TestStringSliceFlagHelpOutput(t *testing.T) {
for _, test := range stringSliceFlagTests {
flag := StringSliceFlag{Name: test.name, Value: test.value}
output := flag.String()
if output != test.expected {
t.Errorf("%q does not match %q", output, test.expected)
}
}
}
func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) {
os.Clearenv()
os.Setenv("APP_QWWX", "11,4")
for _, test := range stringSliceFlagTests {
flag := StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"}
output := flag.String()
if !strings.HasSuffix(output, " [$APP_QWWX]") {
t.Errorf("%q does not end with [$APP_QWWX]", output)
}
}
}
var intFlagTests = []struct {
name string
expected string
}{
{"help", "--help \"0\"\t"},
{"h", "-h \"0\"\t"},
}
func TestIntFlagHelpOutput(t *testing.T) {
for _, test := range intFlagTests {
flag := IntFlag{Name: test.name}
output := flag.String()
if output != test.expected {
t.Errorf("%s does not match %s", output, test.expected)
}
}
}
func TestIntFlagWithEnvVarHelpOutput(t *testing.T) {
os.Clearenv()
os.Setenv("APP_BAR", "2")
for _, test := range intFlagTests {
flag := IntFlag{Name: test.name, EnvVar: "APP_BAR"}
output := flag.String()
if !strings.HasSuffix(output, " [$APP_BAR]") {
t.Errorf("%s does not end with [$APP_BAR]", output)
}
}
}
var durationFlagTests = []struct {
name string
expected string
}{
{"help", "--help \"0\"\t"},
{"h", "-h \"0\"\t"},
}
func TestDurationFlagHelpOutput(t *testing.T) {
for _, test := range durationFlagTests {
flag := DurationFlag{Name: test.name}
output := flag.String()
if output != test.expected {
t.Errorf("%s does not match %s", output, test.expected)
}
}
}
func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) {
os.Clearenv()
os.Setenv("APP_BAR", "2h3m6s")
for _, test := range durationFlagTests {
flag := DurationFlag{Name: test.name, EnvVar: "APP_BAR"}
output := flag.String()
if !strings.HasSuffix(output, " [$APP_BAR]") {
t.Errorf("%s does not end with [$APP_BAR]", output)
}
}
}
var intSliceFlagTests = []struct {
name string
value *IntSlice
expected string
}{
{"help", &IntSlice{}, "--help [--help option --help option]\t"},
{"h", &IntSlice{}, "-h [-h option -h option]\t"},
{"h", &IntSlice{}, "-h [-h option -h option]\t"},
{"test", func() *IntSlice {
i := &IntSlice{}
i.Set("9")
return i
}(), "--test [--test option --test option]\t"},
}
func TestIntSliceFlagHelpOutput(t *testing.T) {
for _, test := range intSliceFlagTests {
flag := IntSliceFlag{Name: test.name, Value: test.value}
output := flag.String()
if output != test.expected {
t.Errorf("%q does not match %q", output, test.expected)
}
}
}
func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) {
os.Clearenv()
os.Setenv("APP_SMURF", "42,3")
for _, test := range intSliceFlagTests {
flag := IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"}
output := flag.String()
if !strings.HasSuffix(output, " [$APP_SMURF]") {
t.Errorf("%q does not end with [$APP_SMURF]", output)
}
}
}
var float64FlagTests = []struct {
name string
expected string
}{
{"help", "--help \"0\"\t"},
{"h", "-h \"0\"\t"},
}
func TestFloat64FlagHelpOutput(t *testing.T) {
for _, test := range float64FlagTests {
flag := Float64Flag{Name: test.name}
output := flag.String()
if output != test.expected {
t.Errorf("%s does not match %s", output, test.expected)
}
}
}
func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) {
os.Clearenv()
os.Setenv("APP_BAZ", "99.4")
for _, test := range float64FlagTests {
flag := Float64Flag{Name: test.name, EnvVar: "APP_BAZ"}
output := flag.String()
if !strings.HasSuffix(output, " [$APP_BAZ]") {
t.Errorf("%s does not end with [$APP_BAZ]", output)
}
}
}
var genericFlagTests = []struct {
name string
value Generic
expected string
}{
{"test", &Parser{"abc", "def"}, "--test \"abc,def\"\ttest flag"},
{"t", &Parser{"abc", "def"}, "-t \"abc,def\"\ttest flag"},
}
func TestGenericFlagHelpOutput(t *testing.T) {
for _, test := range genericFlagTests {
flag := GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"}
output := flag.String()
if output != test.expected {
t.Errorf("%q does not match %q", output, test.expected)
}
}
}
func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) {
os.Clearenv()
os.Setenv("APP_ZAP", "3")
for _, test := range genericFlagTests {
flag := GenericFlag{Name: test.name, EnvVar: "APP_ZAP"}
output := flag.String()
if !strings.HasSuffix(output, " [$APP_ZAP]") {
t.Errorf("%s does not end with [$APP_ZAP]", output)
}
}
}
func TestParseMultiString(t *testing.T) {
(&App{
Flags: []Flag{
StringFlag{Name: "serve, s"},
},
Action: func(ctx *Context) {
if ctx.String("serve") != "10" {
t.Errorf("main name not set")
}
if ctx.String("s") != "10" {
t.Errorf("short name not set")
}
},
}).Run([]string{"run", "-s", "10"})
}
func TestParseDestinationString(t *testing.T) {
var dest string
a := App{
Flags: []Flag{
StringFlag{
Name: "dest",
Destination: &dest,
},
},
Action: func(ctx *Context) {
if dest != "10" {
t.Errorf("expected destination String 10")
}
},
}
a.Run([]string{"run", "--dest", "10"})
}
func TestParseMultiStringFromEnv(t *testing.T) {
os.Clearenv()
os.Setenv("APP_COUNT", "20")
(&App{
Flags: []Flag{
StringFlag{Name: "count, c", EnvVar: "APP_COUNT"},
},
Action: func(ctx *Context) {
if ctx.String("count") != "20" {
t.Errorf("main name not set")
}
if ctx.String("c") != "20" {
t.Errorf("short name not set")
}
},
}).Run([]string{"run"})
}
func TestParseMultiStringFromEnvCascade(t *testing.T) {
os.Clearenv()
os.Setenv("APP_COUNT", "20")
(&App{
Flags: []Flag{
StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"},
},
Action: func(ctx *Context) {
if ctx.String("count") != "20" {
t.Errorf("main name not set")
}
if ctx.String("c") != "20" {
t.Errorf("short name not set")
}
},
}).Run([]string{"run"})
}
func TestParseMultiStringSlice(t *testing.T) {
(&App{
Flags: []Flag{
StringSliceFlag{Name: "serve, s", Value: &StringSlice{}},
},
Action: func(ctx *Context) {
if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) {
t.Errorf("main name not set")
}
if !reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) {
t.Errorf("short name not set")
}
},
}).Run([]string{"run", "-s", "10", "-s", "20"})
}
func TestParseMultiStringSliceFromEnv(t *testing.T) {
os.Clearenv()
os.Setenv("APP_INTERVALS", "20,30,40")
(&App{
Flags: []Flag{
StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "APP_INTERVALS"},
},
Action: func(ctx *Context) {
if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) {
t.Errorf("main name not set from env")
}
if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) {
t.Errorf("short name not set from env")
}
},
}).Run([]string{"run"})
}
func TestParseMultiStringSliceFromEnvCascade(t *testing.T) {
os.Clearenv()
os.Setenv("APP_INTERVALS", "20,30,40")
(&App{
Flags: []Flag{
StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"},
},
Action: func(ctx *Context) {
if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) {
t.Errorf("main name not set from env")
}
if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) {
t.Errorf("short name not set from env")
}
},
}).Run([]string{"run"})
}
func TestParseMultiInt(t *testing.T) {
a := App{
Flags: []Flag{
IntFlag{Name: "serve, s"},
},
Action: func(ctx *Context) {
if ctx.Int("serve") != 10 {
t.Errorf("main name not set")
}
if ctx.Int("s") != 10 {
t.Errorf("short name not set")
}
},
}
a.Run([]string{"run", "-s", "10"})
}
func TestParseDestinationInt(t *testing.T) {
var dest int
a := App{
Flags: []Flag{
IntFlag{
Name: "dest",
Destination: &dest,
},
},
Action: func(ctx *Context) {
if dest != 10 {
t.Errorf("expected destination Int 10")
}
},
}
a.Run([]string{"run", "--dest", "10"})
}
func TestParseMultiIntFromEnv(t *testing.T) {
os.Clearenv()
os.Setenv("APP_TIMEOUT_SECONDS", "10")
a := App{
Flags: []Flag{
IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"},
},
Action: func(ctx *Context) {
if ctx.Int("timeout") != 10 {
t.Errorf("main name not set")
}
if ctx.Int("t") != 10 {
t.Errorf("short name not set")
}
},
}
a.Run([]string{"run"})
}
func TestParseMultiIntFromEnvCascade(t *testing.T) {
os.Clearenv()
os.Setenv("APP_TIMEOUT_SECONDS", "10")
a := App{
Flags: []Flag{
IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"},
},
Action: func(ctx *Context) {
if ctx.Int("timeout") != 10 {
t.Errorf("main name not set")
}
if ctx.Int("t") != 10 {
t.Errorf("short name not set")
}
},
}
a.Run([]string{"run"})
}
func TestParseMultiIntSlice(t *testing.T) {
(&App{
Flags: []Flag{
IntSliceFlag{Name: "serve, s", Value: &IntSlice{}},
},
Action: func(ctx *Context) {
if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) {
t.Errorf("main name not set")
}
if !reflect.DeepEqual(ctx.IntSlice("s"), []int{10, 20}) {
t.Errorf("short name not set")
}
},
}).Run([]string{"run", "-s", "10", "-s", "20"})
}
func TestParseMultiIntSliceFromEnv(t *testing.T) {
os.Clearenv()
os.Setenv("APP_INTERVALS", "20,30,40")
(&App{
Flags: []Flag{
IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "APP_INTERVALS"},
},
Action: func(ctx *Context) {
if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) {
t.Errorf("main name not set from env")
}
if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) {
t.Errorf("short name not set from env")
}
},
}).Run([]string{"run"})
}
func TestParseMultiIntSliceFromEnvCascade(t *testing.T) {
os.Clearenv()
os.Setenv("APP_INTERVALS", "20,30,40")
(&App{
Flags: []Flag{
IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"},
},
Action: func(ctx *Context) {
if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) {
t.Errorf("main name not set from env")
}
if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) {
t.Errorf("short name not set from env")
}
},
}).Run([]string{"run"})
}
func TestParseMultiFloat64(t *testing.T) {
a := App{
Flags: []Flag{
Float64Flag{Name: "serve, s"},
},
Action: func(ctx *Context) {
if ctx.Float64("serve") != 10.2 {
t.Errorf("main name not set")
}
if ctx.Float64("s") != 10.2 {
t.Errorf("short name not set")
}
},
}
a.Run([]string{"run", "-s", "10.2"})
}
func TestParseDestinationFloat64(t *testing.T) {
var dest float64
a := App{
Flags: []Flag{
Float64Flag{
Name: "dest",
Destination: &dest,
},
},
Action: func(ctx *Context) {
if dest != 10.2 {
t.Errorf("expected destination Float64 10.2")
}
},
}
a.Run([]string{"run", "--dest", "10.2"})
}
func TestParseMultiFloat64FromEnv(t *testing.T) {
os.Clearenv()
os.Setenv("APP_TIMEOUT_SECONDS", "15.5")
a := App{
Flags: []Flag{
Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"},
},
Action: func(ctx *Context) {
if ctx.Float64("timeout") != 15.5 {
t.Errorf("main name not set")
}
if ctx.Float64("t") != 15.5 {
t.Errorf("short name not set")
}
},
}
a.Run([]string{"run"})
}
func TestParseMultiFloat64FromEnvCascade(t *testing.T) {
os.Clearenv()
os.Setenv("APP_TIMEOUT_SECONDS", "15.5")
a := App{
Flags: []Flag{
Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"},
},
Action: func(ctx *Context) {
if ctx.Float64("timeout") != 15.5 {
t.Errorf("main name not set")
}
if ctx.Float64("t") != 15.5 {
t.Errorf("short name not set")
}
},
}
a.Run([]string{"run"})
}
func TestParseMultiBool(t *testing.T) {
a := App{
Flags: []Flag{
BoolFlag{Name: "serve, s"},
},
Action: func(ctx *Context) {
if ctx.Bool("serve") != true {
t.Errorf("main name not set")
}
if ctx.Bool("s") != true {
t.Errorf("short name not set")
}
},
}
a.Run([]string{"run", "--serve"})
}
func TestParseDestinationBool(t *testing.T) {
var dest bool
a := App{
Flags: []Flag{
BoolFlag{
Name: "dest",
Destination: &dest,
},
},
Action: func(ctx *Context) {
if dest != true {
t.Errorf("expected destination Bool true")
}
},
}
a.Run([]string{"run", "--dest"})
}
func TestParseMultiBoolFromEnv(t *testing.T) {
os.Clearenv()
os.Setenv("APP_DEBUG", "1")
a := App{
Flags: []Flag{
BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"},
},
Action: func(ctx *Context) {
if ctx.Bool("debug") != true {
t.Errorf("main name not set from env")
}
if ctx.Bool("d") != true {
t.Errorf("short name not set from env")
}
},
}
a.Run([]string{"run"})
}
func TestParseMultiBoolFromEnvCascade(t *testing.T) {
os.Clearenv()
os.Setenv("APP_DEBUG", "1")
a := App{
Flags: []Flag{
BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"},
},
Action: func(ctx *Context) {
if ctx.Bool("debug") != true {
t.Errorf("main name not set from env")
}
if ctx.Bool("d") != true {
t.Errorf("short name not set from env")
}
},
}
a.Run([]string{"run"})
}
func TestParseMultiBoolT(t *testing.T) {
a := App{
Flags: []Flag{
BoolTFlag{Name: "serve, s"},
},
Action: func(ctx *Context) {
if ctx.BoolT("serve") != true {
t.Errorf("main name not set")
}
if ctx.BoolT("s") != true {
t.Errorf("short name not set")
}
},
}
a.Run([]string{"run", "--serve"})
}
func TestParseDestinationBoolT(t *testing.T) {
var dest bool
a := App{
Flags: []Flag{
BoolTFlag{
Name: "dest",
Destination: &dest,
},
},
Action: func(ctx *Context) {
if dest != true {
t.Errorf("expected destination BoolT true")
}
},
}
a.Run([]string{"run", "--dest"})
}
func TestParseMultiBoolTFromEnv(t *testing.T) {
os.Clearenv()
os.Setenv("APP_DEBUG", "0")
a := App{
Flags: []Flag{
BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"},
},
Action: func(ctx *Context) {
if ctx.BoolT("debug") != false {
t.Errorf("main name not set from env")
}
if ctx.BoolT("d") != false {
t.Errorf("short name not set from env")
}
},
}
a.Run([]string{"run"})
}
func TestParseMultiBoolTFromEnvCascade(t *testing.T) {
os.Clearenv()
os.Setenv("APP_DEBUG", "0")
a := App{
Flags: []Flag{
BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"},
},
Action: func(ctx *Context) {
if ctx.BoolT("debug") != false {
t.Errorf("main name not set from env")
}
if ctx.BoolT("d") != false {
t.Errorf("short name not set from env")
}
},
}
a.Run([]string{"run"})
}
type Parser [2]string
func (p *Parser) Set(value string) error {
parts := strings.Split(value, ",")
if len(parts) != 2 {
return fmt.Errorf("invalid format")
}
(*p)[0] = parts[0]
(*p)[1] = parts[1]
return nil
}
func (p *Parser) String() string {
return fmt.Sprintf("%s,%s", p[0], p[1])
}
func TestParseGeneric(t *testing.T) {
a := App{
Flags: []Flag{
GenericFlag{Name: "serve, s", Value: &Parser{}},
},
Action: func(ctx *Context) {
if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) {
t.Errorf("main name not set")
}
if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) {
t.Errorf("short name not set")
}
},
}
a.Run([]string{"run", "-s", "10,20"})
}
func TestParseGenericFromEnv(t *testing.T) {
os.Clearenv()
os.Setenv("APP_SERVE", "20,30")
a := App{
Flags: []Flag{
GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"},
},
Action: func(ctx *Context) {
if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) {
t.Errorf("main name not set from env")
}
if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"20", "30"}) {
t.Errorf("short name not set from env")
}
},
}
a.Run([]string{"run"})
}
func TestParseGenericFromEnvCascade(t *testing.T) {
os.Clearenv()
os.Setenv("APP_FOO", "99,2000")
a := App{
Flags: []Flag{
GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"},
},
Action: func(ctx *Context) {
if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) {
t.Errorf("value not set from env")
}
},
}
a.Run([]string{"run"})
}

View File

@@ -1,94 +0,0 @@
package cli
import (
"bytes"
"testing"
)
func Test_ShowAppHelp_NoAuthor(t *testing.T) {
output := new(bytes.Buffer)
app := NewApp()
app.Writer = output
c := NewContext(app, nil, nil)
ShowAppHelp(c)
if bytes.Index(output.Bytes(), []byte("AUTHOR(S):")) != -1 {
t.Errorf("expected\n%snot to include %s", output.String(), "AUTHOR(S):")
}
}
func Test_ShowAppHelp_NoVersion(t *testing.T) {
output := new(bytes.Buffer)
app := NewApp()
app.Writer = output
app.Version = ""
c := NewContext(app, nil, nil)
ShowAppHelp(c)
if bytes.Index(output.Bytes(), []byte("VERSION:")) != -1 {
t.Errorf("expected\n%snot to include %s", output.String(), "VERSION:")
}
}
func Test_Help_Custom_Flags(t *testing.T) {
oldFlag := HelpFlag
defer func() {
HelpFlag = oldFlag
}()
HelpFlag = BoolFlag{
Name: "help, x",
Usage: "show help",
}
app := App{
Flags: []Flag{
BoolFlag{Name: "foo, h"},
},
Action: func(ctx *Context) {
if ctx.Bool("h") != true {
t.Errorf("custom help flag not set")
}
},
}
output := new(bytes.Buffer)
app.Writer = output
app.Run([]string{"test", "-h"})
if output.Len() > 0 {
t.Errorf("unexpected output: %s", output.String())
}
}
func Test_Version_Custom_Flags(t *testing.T) {
oldFlag := VersionFlag
defer func() {
VersionFlag = oldFlag
}()
VersionFlag = BoolFlag{
Name: "version, V",
Usage: "show version",
}
app := App{
Flags: []Flag{
BoolFlag{Name: "foo, v"},
},
Action: func(ctx *Context) {
if ctx.Bool("v") != true {
t.Errorf("custom version flag not set")
}
},
}
output := new(bytes.Buffer)
app.Writer = output
app.Run([]string{"test", "-v"})
if output.Len() > 0 {
t.Errorf("unexpected output: %s", output.String())
}
}

View File

@@ -1,19 +0,0 @@
package cli
import (
"reflect"
"testing"
)
/* Test Helpers */
func expect(t *testing.T, a interface{}, b interface{}) {
if !reflect.DeepEqual(a, b) {
t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
}
}
func refute(t *testing.T, a interface{}, b interface{}) {
if reflect.DeepEqual(a, b) {
t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
}
}

View File

@@ -1,19 +0,0 @@
// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"testing"
)
func BenchmarkMaskBytes(b *testing.B) {
var key [4]byte
data := make([]byte, 1024)
pos := 0
for i := 0; i < b.N; i++ {
pos = maskBytes(key, pos, data)
}
b.SetBytes(int64(len(data)))
}

View File

@@ -1,323 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"crypto/tls"
"crypto/x509"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"strings"
"testing"
"time"
)
var cstUpgrader = Upgrader{
Subprotocols: []string{"p0", "p1"},
ReadBufferSize: 1024,
WriteBufferSize: 1024,
Error: func(w http.ResponseWriter, r *http.Request, status int, reason error) {
http.Error(w, reason.Error(), status)
},
}
var cstDialer = Dialer{
Subprotocols: []string{"p1", "p2"},
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
type cstHandler struct{ *testing.T }
type cstServer struct {
*httptest.Server
URL string
}
func newServer(t *testing.T) *cstServer {
var s cstServer
s.Server = httptest.NewServer(cstHandler{t})
s.URL = makeWsProto(s.Server.URL)
return &s
}
func newTLSServer(t *testing.T) *cstServer {
var s cstServer
s.Server = httptest.NewTLSServer(cstHandler{t})
s.URL = makeWsProto(s.Server.URL)
return &s
}
func (t cstHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
t.Logf("method %s not allowed", r.Method)
http.Error(w, "method not allowed", 405)
return
}
subprotos := Subprotocols(r)
if !reflect.DeepEqual(subprotos, cstDialer.Subprotocols) {
t.Logf("subprotols=%v, want %v", subprotos, cstDialer.Subprotocols)
http.Error(w, "bad protocol", 400)
return
}
ws, err := cstUpgrader.Upgrade(w, r, http.Header{"Set-Cookie": {"sessionID=1234"}})
if err != nil {
t.Logf("Upgrade: %v", err)
return
}
defer ws.Close()
if ws.Subprotocol() != "p1" {
t.Logf("Subprotocol() = %s, want p1", ws.Subprotocol())
ws.Close()
return
}
op, rd, err := ws.NextReader()
if err != nil {
t.Logf("NextReader: %v", err)
return
}
wr, err := ws.NextWriter(op)
if err != nil {
t.Logf("NextWriter: %v", err)
return
}
if _, err = io.Copy(wr, rd); err != nil {
t.Logf("NextWriter: %v", err)
return
}
if err := wr.Close(); err != nil {
t.Logf("Close: %v", err)
return
}
}
func makeWsProto(s string) string {
return "ws" + strings.TrimPrefix(s, "http")
}
func sendRecv(t *testing.T, ws *Conn) {
const message = "Hello World!"
if err := ws.SetWriteDeadline(time.Now().Add(time.Second)); err != nil {
t.Fatalf("SetWriteDeadline: %v", err)
}
if err := ws.WriteMessage(TextMessage, []byte(message)); err != nil {
t.Fatalf("WriteMessage: %v", err)
}
if err := ws.SetReadDeadline(time.Now().Add(time.Second)); err != nil {
t.Fatalf("SetReadDeadline: %v", err)
}
_, p, err := ws.ReadMessage()
if err != nil {
t.Fatalf("ReadMessage: %v", err)
}
if string(p) != message {
t.Fatalf("message=%s, want %s", p, message)
}
}
func TestDial(t *testing.T) {
s := newServer(t)
defer s.Close()
ws, _, err := cstDialer.Dial(s.URL, nil)
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer ws.Close()
sendRecv(t, ws)
}
func TestDialTLS(t *testing.T) {
s := newTLSServer(t)
defer s.Close()
certs := x509.NewCertPool()
for _, c := range s.TLS.Certificates {
roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1])
if err != nil {
t.Fatalf("error parsing server's root cert: %v", err)
}
for _, root := range roots {
certs.AddCert(root)
}
}
u, _ := url.Parse(s.URL)
d := cstDialer
d.NetDial = func(network, addr string) (net.Conn, error) { return net.Dial(network, u.Host) }
d.TLSClientConfig = &tls.Config{RootCAs: certs}
ws, _, err := d.Dial("wss://example.com/", nil)
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer ws.Close()
sendRecv(t, ws)
}
func xTestDialTLSBadCert(t *testing.T) {
// This test is deactivated because of noisy logging from the net/http package.
s := newTLSServer(t)
defer s.Close()
ws, _, err := cstDialer.Dial(s.URL, nil)
if err == nil {
ws.Close()
t.Fatalf("Dial: nil")
}
}
func xTestDialTLSNoVerify(t *testing.T) {
s := newTLSServer(t)
defer s.Close()
d := cstDialer
d.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
ws, _, err := d.Dial(s.URL, nil)
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer ws.Close()
sendRecv(t, ws)
}
func TestDialTimeout(t *testing.T) {
s := newServer(t)
defer s.Close()
d := cstDialer
d.HandshakeTimeout = -1
ws, _, err := d.Dial(s.URL, nil)
if err == nil {
ws.Close()
t.Fatalf("Dial: nil")
}
}
func TestDialBadScheme(t *testing.T) {
s := newServer(t)
defer s.Close()
ws, _, err := cstDialer.Dial(s.Server.URL, nil)
if err == nil {
ws.Close()
t.Fatalf("Dial: nil")
}
}
func TestDialBadOrigin(t *testing.T) {
s := newServer(t)
defer s.Close()
ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {"bad"}})
if err == nil {
ws.Close()
t.Fatalf("Dial: nil")
}
if resp == nil {
t.Fatalf("resp=nil, err=%v", err)
}
if resp.StatusCode != http.StatusForbidden {
t.Fatalf("status=%d, want %d", resp.StatusCode, http.StatusForbidden)
}
}
func TestHandshake(t *testing.T) {
s := newServer(t)
defer s.Close()
ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {s.URL}})
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer ws.Close()
var sessionID string
for _, c := range resp.Cookies() {
if c.Name == "sessionID" {
sessionID = c.Value
}
}
if sessionID != "1234" {
t.Error("Set-Cookie not received from the server.")
}
if ws.Subprotocol() != "p1" {
t.Errorf("ws.Subprotocol() = %s, want p1", ws.Subprotocol())
}
sendRecv(t, ws)
}
func TestRespOnBadHandshake(t *testing.T) {
const expectedStatus = http.StatusGone
const expectedBody = "This is the response body."
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(expectedStatus)
io.WriteString(w, expectedBody)
}))
defer s.Close()
ws, resp, err := cstDialer.Dial(makeWsProto(s.URL), nil)
if err == nil {
ws.Close()
t.Fatalf("Dial: nil")
}
if resp == nil {
t.Fatalf("resp=nil, err=%v", err)
}
if resp.StatusCode != expectedStatus {
t.Errorf("resp.StatusCode=%d, want %d", resp.StatusCode, expectedStatus)
}
p, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("ReadFull(resp.Body) returned error %v", err)
}
if string(p) != expectedBody {
t.Errorf("resp.Body=%s, want %s", p, expectedBody)
}
}
// If the Host header is specified in `Dial()`, the server must receive it as
// the `Host:` header.
func TestHostHeader(t *testing.T) {
s := newServer(t)
defer s.Close()
specifiedHost := make(chan string, 1)
origHandler := s.Server.Config.Handler
// Capture the request Host header.
s.Server.Config.Handler = http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
specifiedHost <- r.Host
origHandler.ServeHTTP(w, r)
})
ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Host": {"testhost"}})
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer ws.Close()
if resp.StatusCode != http.StatusSwitchingProtocols {
t.Fatalf("resp.StatusCode = %v, want http.StatusSwitchingProtocols", resp.StatusCode)
}
if gotHost := <-specifiedHost; gotHost != "testhost" {
t.Fatalf("gotHost = %q, want \"testhost\"", gotHost)
}
sendRecv(t, ws)
}

View File

@ -1,64 +0,0 @@
// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"net/url"
"reflect"
"testing"
)
var parseURLTests = []struct {
s string
u *url.URL
}{
{"ws://example.com/", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}},
{"ws://example.com", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}},
{"ws://example.com:7777/", &url.URL{Scheme: "ws", Host: "example.com:7777", Opaque: "/"}},
{"wss://example.com/", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/"}},
{"wss://example.com/a/b", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/a/b"}},
{"ss://example.com/a/b", nil},
{"ws://webmaster@example.com/", nil},
}
func TestParseURL(t *testing.T) {
for _, tt := range parseURLTests {
u, err := parseURL(tt.s)
if tt.u != nil && err != nil {
t.Errorf("parseURL(%q) returned error %v", tt.s, err)
continue
}
if tt.u == nil && err == nil {
t.Errorf("parseURL(%q) did not return error", tt.s)
continue
}
if !reflect.DeepEqual(u, tt.u) {
t.Errorf("parseURL(%q) returned %v, want %v", tt.s, u, tt.u)
continue
}
}
}
var hostPortNoPortTests = []struct {
u *url.URL
hostPort, hostNoPort string
}{
{&url.URL{Scheme: "ws", Host: "example.com"}, "example.com:80", "example.com"},
{&url.URL{Scheme: "wss", Host: "example.com"}, "example.com:443", "example.com"},
{&url.URL{Scheme: "ws", Host: "example.com:7777"}, "example.com:7777", "example.com"},
{&url.URL{Scheme: "wss", Host: "example.com:7777"}, "example.com:7777", "example.com"},
}
func TestHostPortNoPort(t *testing.T) {
for _, tt := range hostPortNoPortTests {
hostPort, hostNoPort := hostPortNoPort(tt.u)
if hostPort != tt.hostPort {
t.Errorf("hostPortNoPort(%v) returned hostPort %q, want %q", tt.u, hostPort, tt.hostPort)
}
if hostNoPort != tt.hostNoPort {
t.Errorf("hostPortNoPort(%v) returned hostNoPort %q, want %q", tt.u, hostNoPort, tt.hostNoPort)
}
}
}

View File

@ -1,238 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"testing"
"testing/iotest"
"time"
)
var _ net.Error = errWriteTimeout
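// fakeNetConn implements net.Conn on top of an arbitrary Reader/Writer pair, with
// no-op Close, addresses, and deadlines, so connections can be driven from
// in-memory buffers in these tests.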
type fakeNetConn struct {
io.Reader
io.Writer
}
func (c fakeNetConn) Close() error { return nil }
func (c fakeNetConn) LocalAddr() net.Addr { return nil }
func (c fakeNetConn) RemoteAddr() net.Addr { return nil }
func (c fakeNetConn) SetDeadline(t time.Time) error { return nil }
func (c fakeNetConn) SetReadDeadline(t time.Time) error { return nil }
func (c fakeNetConn) SetWriteDeadline(t time.Time) error { return nil }
func TestFraming(t *testing.T) {
frameSizes := []int{0, 1, 2, 124, 125, 126, 127, 128, 129, 65534, 65535, 65536, 65537}
var readChunkers = []struct {
name string
f func(io.Reader) io.Reader
}{
{"half", iotest.HalfReader},
{"one", iotest.OneByteReader},
{"asis", func(r io.Reader) io.Reader { return r }},
}
writeBuf := make([]byte, 65537)
for i := range writeBuf {
writeBuf[i] = byte(i)
}
for _, isServer := range []bool{true, false} {
for _, chunker := range readChunkers {
var connBuf bytes.Buffer
wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024)
rc := newConn(fakeNetConn{Reader: chunker.f(&connBuf), Writer: nil}, !isServer, 1024, 1024)
for _, n := range frameSizes {
for _, iocopy := range []bool{true, false} {
name := fmt.Sprintf("s:%v, r:%s, n:%d c:%v", isServer, chunker.name, n, iocopy)
w, err := wc.NextWriter(TextMessage)
if err != nil {
t.Errorf("%s: wc.NextWriter() returned %v", name, err)
continue
}
var nn int
if iocopy {
var n64 int64
n64, err = io.Copy(w, bytes.NewReader(writeBuf[:n]))
nn = int(n64)
} else {
nn, err = w.Write(writeBuf[:n])
}
if err != nil || nn != n {
t.Errorf("%s: w.Write(writeBuf[:n]) returned %d, %v", name, nn, err)
continue
}
err = w.Close()
if err != nil {
t.Errorf("%s: w.Close() returned %v", name, err)
continue
}
opCode, r, err := rc.NextReader()
if err != nil || opCode != TextMessage {
t.Errorf("%s: NextReader() returned %d, r, %v", name, opCode, err)
continue
}
rbuf, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("%s: ReadFull() returned rbuf, %v", name, err)
continue
}
if len(rbuf) != n {
t.Errorf("%s: len(rbuf) is %d, want %d", name, len(rbuf), n)
continue
}
for i, b := range rbuf {
if byte(i) != b {
t.Errorf("%s: bad byte at offset %d", name, i)
break
}
}
}
}
}
}
}
func TestControl(t *testing.T) {
const message = "this is a ping/pong messsage"
for _, isServer := range []bool{true, false} {
for _, isWriteControl := range []bool{true, false} {
name := fmt.Sprintf("s:%v, wc:%v", isServer, isWriteControl)
var connBuf bytes.Buffer
wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024)
rc := newConn(fakeNetConn{Reader: &connBuf, Writer: nil}, !isServer, 1024, 1024)
if isWriteControl {
wc.WriteControl(PongMessage, []byte(message), time.Now().Add(time.Second))
} else {
w, err := wc.NextWriter(PongMessage)
if err != nil {
t.Errorf("%s: wc.NextWriter() returned %v", name, err)
continue
}
if _, err := w.Write([]byte(message)); err != nil {
t.Errorf("%s: w.Write() returned %v", name, err)
continue
}
if err := w.Close(); err != nil {
t.Errorf("%s: w.Close() returned %v", name, err)
continue
}
var actualMessage string
rc.SetPongHandler(func(s string) error { actualMessage = s; return nil })
rc.NextReader()
if actualMessage != message {
t.Errorf("%s: pong=%q, want %q", name, actualMessage, message)
continue
}
}
}
}
}
func TestCloseBeforeFinalFrame(t *testing.T) {
const bufSize = 512
var b1, b2 bytes.Buffer
wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize)
rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024)
w, _ := wc.NextWriter(BinaryMessage)
w.Write(make([]byte, bufSize+bufSize/2))
wc.WriteControl(CloseMessage, FormatCloseMessage(CloseNormalClosure, ""), time.Now().Add(10*time.Second))
w.Close()
op, r, err := rc.NextReader()
if op != BinaryMessage || err != nil {
t.Fatalf("NextReader() returned %d, %v", op, err)
}
_, err = io.Copy(ioutil.Discard, r)
if err != errUnexpectedEOF {
t.Fatalf("io.Copy() returned %v, want %v", err, errUnexpectedEOF)
}
_, _, err = rc.NextReader()
if err != io.EOF {
t.Fatalf("NextReader() returned %v, want %v", err, io.EOF)
}
}
func TestEOFBeforeFinalFrame(t *testing.T) {
const bufSize = 512
var b1, b2 bytes.Buffer
wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize)
rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024)
w, _ := wc.NextWriter(BinaryMessage)
w.Write(make([]byte, bufSize+bufSize/2))
op, r, err := rc.NextReader()
if op != BinaryMessage || err != nil {
t.Fatalf("NextReader() returned %d, %v", op, err)
}
_, err = io.Copy(ioutil.Discard, r)
if err != errUnexpectedEOF {
t.Fatalf("io.Copy() returned %v, want %v", err, errUnexpectedEOF)
}
_, _, err = rc.NextReader()
if err != errUnexpectedEOF {
t.Fatalf("NextReader() returned %v, want %v", err, errUnexpectedEOF)
}
}
func TestReadLimit(t *testing.T) {
const readLimit = 512
message := make([]byte, readLimit+1)
var b1, b2 bytes.Buffer
wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, readLimit-2)
rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024)
rc.SetReadLimit(readLimit)
// Send message at the limit with interleaved pong.
w, _ := wc.NextWriter(BinaryMessage)
w.Write(message[:readLimit-1])
wc.WriteControl(PongMessage, []byte("this is a pong"), time.Now().Add(10*time.Second))
w.Write(message[:1])
w.Close()
// Send message larger than the limit.
wc.WriteMessage(BinaryMessage, message[:readLimit+1])
op, _, err := rc.NextReader()
if op != BinaryMessage || err != nil {
t.Fatalf("1: NextReader() returned %d, %v", op, err)
}
op, r, err := rc.NextReader()
if op != BinaryMessage || err != nil {
t.Fatalf("2: NextReader() returned %d, %v", op, err)
}
_, err = io.Copy(ioutil.Discard, r)
if err != ErrReadLimit {
t.Fatalf("io.Copy() returned %v", err)
}
}
func TestUnderlyingConn(t *testing.T) {
var b1, b2 bytes.Buffer
fc := fakeNetConn{Reader: &b1, Writer: &b2}
c := newConn(fc, true, 1024, 1024)
ul := c.UnderlyingConn()
if ul != fc {
t.Fatalf("Underlying conn is not what it should be.")
}
}

View File

@ -1,119 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bytes"
"encoding/json"
"io"
"reflect"
"testing"
)
func TestJSON(t *testing.T) {
var buf bytes.Buffer
c := fakeNetConn{&buf, &buf}
wc := newConn(c, true, 1024, 1024)
rc := newConn(c, false, 1024, 1024)
var actual, expect struct {
A int
B string
}
expect.A = 1
expect.B = "hello"
if err := wc.WriteJSON(&expect); err != nil {
t.Fatal("write", err)
}
if err := rc.ReadJSON(&actual); err != nil {
t.Fatal("read", err)
}
if !reflect.DeepEqual(&actual, &expect) {
t.Fatal("equal", actual, expect)
}
}
func TestPartialJsonRead(t *testing.T) {
var buf bytes.Buffer
c := fakeNetConn{&buf, &buf}
wc := newConn(c, true, 1024, 1024)
rc := newConn(c, false, 1024, 1024)
var v struct {
A int
B string
}
v.A = 1
v.B = "hello"
messageCount := 0
// Partial JSON values.
data, err := json.Marshal(v)
if err != nil {
t.Fatal(err)
}
for i := len(data) - 1; i >= 0; i-- {
if err := wc.WriteMessage(TextMessage, data[:i]); err != nil {
t.Fatal(err)
}
messageCount++
}
// Whitespace.
if err := wc.WriteMessage(TextMessage, []byte(" ")); err != nil {
t.Fatal(err)
}
messageCount++
// Close.
if err := wc.WriteMessage(CloseMessage, FormatCloseMessage(CloseNormalClosure, "")); err != nil {
t.Fatal(err)
}
for i := 0; i < messageCount; i++ {
err := rc.ReadJSON(&v)
if err != io.ErrUnexpectedEOF {
t.Error("read", i, err)
}
}
err = rc.ReadJSON(&v)
if err != io.EOF {
t.Error("final", err)
}
}
func TestDeprecatedJSON(t *testing.T) {
var buf bytes.Buffer
c := fakeNetConn{&buf, &buf}
wc := newConn(c, true, 1024, 1024)
rc := newConn(c, false, 1024, 1024)
var actual, expect struct {
A int
B string
}
expect.A = 1
expect.B = "hello"
if err := WriteJSON(wc, &expect); err != nil {
t.Fatal("write", err)
}
if err := ReadJSON(rc, &actual); err != nil {
t.Fatal("read", err)
}
if !reflect.DeepEqual(&actual, &expect) {
t.Fatal("equal", actual, expect)
}
}

View File

@ -1,33 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"net/http"
"reflect"
"testing"
)
var subprotocolTests = []struct {
h string
protocols []string
}{
{"", nil},
{"foo", []string{"foo"}},
{"foo,bar", []string{"foo", "bar"}},
{"foo, bar", []string{"foo", "bar"}},
{" foo, bar", []string{"foo", "bar"}},
{" foo, bar ", []string{"foo", "bar"}},
}
func TestSubprotocols(t *testing.T) {
for _, st := range subprotocolTests {
r := http.Request{Header: http.Header{"Sec-Websocket-Protocol": {st.h}}}
protocols := Subprotocols(&r)
if !reflect.DeepEqual(st.protocols, protocols) {
t.Errorf("SubProtocols(%q) returned %#v, want %#v", st.h, protocols, st.protocols)
}
}
}

View File

@ -1,34 +0,0 @@
// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"net/http"
"testing"
)
var tokenListContainsValueTests = []struct {
value string
ok bool
}{
{"WebSocket", true},
{"WEBSOCKET", true},
{"websocket", true},
{"websockets", false},
{"x websocket", false},
{"websocket x", false},
{"other,websocket,more", true},
{"other, websocket, more", true},
}
func TestTokenListContainsValue(t *testing.T) {
for _, tt := range tokenListContainsValueTests {
h := http.Header{"Upgrade": {tt.value}}
ok := tokenListContainsValue(h, "Upgrade", "websocket")
if ok != tt.ok {
t.Errorf("tokenListContainsValue(h, n, %q) = %v, want %v", tt.value, ok, tt.ok)
}
}
}

View File

@ -1,231 +0,0 @@
package stack_test
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/inconshreveable/log15/stack"
)
type testType struct{}
func (tt testType) testMethod() (pc uintptr, file string, line int, ok bool) {
return runtime.Caller(0)
}
func TestCallFormat(t *testing.T) {
t.Parallel()
pc, file, line, ok := runtime.Caller(0)
if !ok {
t.Fatal("runtime.Caller(0) failed")
}
gopathSrc := filepath.Join(os.Getenv("GOPATH"), "src")
relFile, err := filepath.Rel(gopathSrc, file)
if err != nil {
t.Fatalf("failed to determine path relative to GOPATH: %v", err)
}
relFile = filepath.ToSlash(relFile)
pc2, file2, line2, ok2 := testType{}.testMethod()
if !ok2 {
t.Fatal("runtime.Caller(0) failed")
}
relFile2, err := filepath.Rel(gopathSrc, file2)
if err != nil {
t.Fatalf("failed to determine path relative to GOPATH: %v", err)
}
relFile2 = filepath.ToSlash(relFile2)
data := []struct {
pc uintptr
desc string
fmt string
out string
}{
{0, "error", "%s", "%!s(NOFUNC)"},
{pc, "func", "%s", path.Base(file)},
{pc, "func", "%+s", relFile},
{pc, "func", "%#s", file},
{pc, "func", "%d", fmt.Sprint(line)},
{pc, "func", "%n", "TestCallFormat"},
{pc, "func", "%+n", runtime.FuncForPC(pc).Name()},
{pc, "func", "%v", fmt.Sprint(path.Base(file), ":", line)},
{pc, "func", "%+v", fmt.Sprint(relFile, ":", line)},
{pc, "func", "%#v", fmt.Sprint(file, ":", line)},
{pc, "func", "%v|%[1]n()", fmt.Sprint(path.Base(file), ":", line, "|", "TestCallFormat()")},
{pc2, "meth", "%s", path.Base(file2)},
{pc2, "meth", "%+s", relFile2},
{pc2, "meth", "%#s", file2},
{pc2, "meth", "%d", fmt.Sprint(line2)},
{pc2, "meth", "%n", "testType.testMethod"},
{pc2, "meth", "%+n", runtime.FuncForPC(pc2).Name()},
{pc2, "meth", "%v", fmt.Sprint(path.Base(file2), ":", line2)},
{pc2, "meth", "%+v", fmt.Sprint(relFile2, ":", line2)},
{pc2, "meth", "%#v", fmt.Sprint(file2, ":", line2)},
{pc2, "meth", "%v|%[1]n()", fmt.Sprint(path.Base(file2), ":", line2, "|", "testType.testMethod()")},
}
for _, d := range data {
got := fmt.Sprintf(d.fmt, stack.Call(d.pc))
if got != d.out {
t.Errorf("fmt.Sprintf(%q, Call(%s)) = %s, want %s", d.fmt, d.desc, got, d.out)
}
}
}
func BenchmarkCallVFmt(b *testing.B) {
pc, _, _, ok := runtime.Caller(0)
if !ok {
b.Fatal("runtime.Caller(0) failed")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fmt.Fprint(ioutil.Discard, stack.Call(pc))
}
}
func BenchmarkCallPlusVFmt(b *testing.B) {
pc, _, _, ok := runtime.Caller(0)
if !ok {
b.Fatal("runtime.Caller(0) failed")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fmt.Fprintf(ioutil.Discard, "%+v", stack.Call(pc))
}
}
func BenchmarkCallSharpVFmt(b *testing.B) {
pc, _, _, ok := runtime.Caller(0)
if !ok {
b.Fatal("runtime.Caller(0) failed")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fmt.Fprintf(ioutil.Discard, "%#v", stack.Call(pc))
}
}
func BenchmarkCallSFmt(b *testing.B) {
pc, _, _, ok := runtime.Caller(0)
if !ok {
b.Fatal("runtime.Caller(0) failed")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fmt.Fprintf(ioutil.Discard, "%s", stack.Call(pc))
}
}
func BenchmarkCallPlusSFmt(b *testing.B) {
pc, _, _, ok := runtime.Caller(0)
if !ok {
b.Fatal("runtime.Caller(0) failed")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fmt.Fprintf(ioutil.Discard, "%+s", stack.Call(pc))
}
}
func BenchmarkCallSharpSFmt(b *testing.B) {
pc, _, _, ok := runtime.Caller(0)
if !ok {
b.Fatal("runtime.Caller(0) failed")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fmt.Fprintf(ioutil.Discard, "%#s", stack.Call(pc))
}
}
func BenchmarkCallDFmt(b *testing.B) {
pc, _, _, ok := runtime.Caller(0)
if !ok {
b.Fatal("runtime.Caller(0) failed")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fmt.Fprintf(ioutil.Discard, "%d", stack.Call(pc))
}
}
func BenchmarkCallNFmt(b *testing.B) {
pc, _, _, ok := runtime.Caller(0)
if !ok {
b.Fatal("runtime.Caller(0) failed")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fmt.Fprintf(ioutil.Discard, "%n", stack.Call(pc))
}
}
func BenchmarkCallPlusNFmt(b *testing.B) {
pc, _, _, ok := runtime.Caller(0)
if !ok {
b.Fatal("runtime.Caller(0) failed")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fmt.Fprintf(ioutil.Discard, "%+n", stack.Call(pc))
}
}
func BenchmarkCallers(b *testing.B) {
for i := 0; i < b.N; i++ {
stack.Callers()
}
}
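// deepStack recurses to the given depth before capturing the stack, so the
// benchmarks below measure stack.Callers() on deeper call stacks; the timer
// only runs around the capture itself.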
func deepStack(depth int, b *testing.B) stack.Trace {
if depth > 0 {
return deepStack(depth-1, b)
}
b.StartTimer()
s := stack.Callers()
b.StopTimer()
return s
}
func BenchmarkCallers10(b *testing.B) {
b.StopTimer()
for i := 0; i < b.N; i++ {
deepStack(10, b)
}
}
func BenchmarkCallers50(b *testing.B) {
b.StopTimer()
for i := 0; i < b.N; i++ {
deepStack(50, b)
}
}
func BenchmarkCallers100(b *testing.B) {
b.StopTimer()
for i := 0; i < b.N; i++ {
deepStack(100, b)
}
}

View File

@ -1,120 +0,0 @@
package stringutil
import (
"bytes"
"unicode"
)
// ToUpperCamelCase returns a copy of the string s converted to upper camel case.
// The first letter and each letter that follows a '_' are converted to upper case, and the '_' characters are removed.
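// For example, ToUpperCamelCase("the_quick_brown_fox") returns "TheQuickBrownFox".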
func ToUpperCamelCase(s string) string {
if s == "" {
return ""
}
upper := true
var result bytes.Buffer
for _, c := range s {
if c == '_' {
upper = true
continue
}
if upper {
result.WriteRune(unicode.ToUpper(c))
upper = false
continue
}
result.WriteRune(c)
}
return result.String()
}
// ToUpperCamelCaseASCII is similar to ToUpperCamelCase, but optimized for
// ASCII-only strings.
// ToUpperCamelCaseASCII is faster than ToUpperCamelCase, but doesn't work
// correctly if s contains non-ASCII characters.
func ToUpperCamelCaseASCII(s string) string {
if s == "" {
return ""
}
upper := true
result := make([]byte, 0, len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if c == '_' {
upper = true
continue
}
if upper {
result = append(result, toUpperASCII(c))
upper = false
continue
}
result = append(result, c)
}
return string(result)
}
// ToSnakeCase returns a copy of the string s converted to snake case.
// A '_' is inserted before each upper-case letter, and all letters are
// converted to lower case.
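// For example, ToSnakeCase("TheQuickBrownFox") returns "the_quick_brown_fox".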
func ToSnakeCase(s string) string {
if s == "" {
return ""
}
var result bytes.Buffer
for _, c := range s {
if unicode.IsUpper(c) {
result.WriteByte('_')
}
result.WriteRune(unicode.ToLower(c))
}
s = result.String()
if s[0] == '_' {
return s[1:]
}
return s
}
// ToSnakeCaseASCII is similar to ToSnakeCase, but optimized for only the ASCII
// characters.
// ToSnakeCaseASCII is faster than ToSnakeCase, but doesn't work correctly if
// s contains non-ASCII characters.
func ToSnakeCaseASCII(s string) string {
if s == "" {
return ""
}
result := make([]byte, 0, len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if isUpperASCII(c) {
result = append(result, '_')
}
result = append(result, toLowerASCII(c))
}
if result[0] == '_' {
return string(result[1:])
}
return string(result)
}
func isUpperASCII(c byte) bool {
return 'A' <= c && c <= 'Z'
}
func isLowerASCII(c byte) bool {
return 'a' <= c && c <= 'z'
}
func toUpperASCII(c byte) byte {
if isLowerASCII(c) {
return c - ('a' - 'A')
}
return c
}
func toLowerASCII(c byte) byte {
if isUpperASCII(c) {
return c + 'a' - 'A'
}
return c
}

View File

@ -1,35 +0,0 @@
package stringutil_test
import (
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/naoina/go-stringutil"
)
var benchcaseForCamelCase = "the_quick_brown_fox_jumps_over_the_lazy_dog"
func BenchmarkToUpperCamelCase(b *testing.B) {
for i := 0; i < b.N; i++ {
stringutil.ToUpperCamelCase(benchcaseForCamelCase)
}
}
func BenchmarkToUpperCamelCaseASCII(b *testing.B) {
for i := 0; i < b.N; i++ {
stringutil.ToUpperCamelCaseASCII(benchcaseForCamelCase)
}
}
var benchcaseForSnakeCase = "TheQuickBrownFoxJumpsOverTheLazyDog"
func BenchmarkToSnakeCase(b *testing.B) {
for i := 0; i < b.N; i++ {
stringutil.ToSnakeCase(benchcaseForSnakeCase)
}
}
func BenchmarkToSnakeCaseASCII(b *testing.B) {
for i := 0; i < b.N; i++ {
stringutil.ToSnakeCaseASCII(benchcaseForSnakeCase)
}
}

View File

@ -1,88 +0,0 @@
package stringutil_test
import (
"reflect"
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/naoina/go-stringutil"
)
func TestToUpperCamelCase(t *testing.T) {
for _, v := range []struct {
input, expect string
}{
{"", ""},
{"thequickbrownfoxoverthelazydog", "Thequickbrownfoxoverthelazydog"},
{"thequickbrownfoxoverthelazydoG", "ThequickbrownfoxoverthelazydoG"},
{"thequickbrownfoxoverthelazydo_g", "ThequickbrownfoxoverthelazydoG"},
{"TheQuickBrownFoxJumpsOverTheLazyDog", "TheQuickBrownFoxJumpsOverTheLazyDog"},
{"the_quick_brown_fox_jumps_over_the_lazy_dog", "TheQuickBrownFoxJumpsOverTheLazyDog"},
{"the_Quick_Brown_Fox_Jumps_Over_The_Lazy_Dog", "TheQuickBrownFoxJumpsOverTheLazyDog"},
{"_______", ""},
} {
actual := stringutil.ToUpperCamelCase(v.input)
expect := v.expect
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`stringutil.ToUpperCamelCase(%#v) => %#v; want %#v`, v.input, actual, expect)
}
}
}
func TestToUpperCamelCaseASCII(t *testing.T) {
for _, v := range []struct {
input, expect string
}{
{"", ""},
{"thequickbrownfoxoverthelazydog", "Thequickbrownfoxoverthelazydog"},
{"thequickbrownfoxoverthelazydoG", "ThequickbrownfoxoverthelazydoG"},
{"thequickbrownfoxoverthelazydo_g", "ThequickbrownfoxoverthelazydoG"},
{"TheQuickBrownFoxJumpsOverTheLazyDog", "TheQuickBrownFoxJumpsOverTheLazyDog"},
{"the_quick_brown_fox_jumps_over_the_lazy_dog", "TheQuickBrownFoxJumpsOverTheLazyDog"},
{"the_Quick_Brown_Fox_Jumps_Over_The_Lazy_Dog", "TheQuickBrownFoxJumpsOverTheLazyDog"},
} {
actual := stringutil.ToUpperCamelCaseASCII(v.input)
expect := v.expect
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`stringutil.ToUpperCamelCaseASCII(%#v) => %#v; want %#v`, v.input, actual, expect)
}
}
}
func TestToSnakeCase(t *testing.T) {
for _, v := range []struct {
input, expect string
}{
{"", ""},
{"thequickbrownfoxjumpsoverthelazydog", "thequickbrownfoxjumpsoverthelazydog"},
{"Thequickbrownfoxjumpsoverthelazydog", "thequickbrownfoxjumpsoverthelazydog"},
{"ThequickbrownfoxjumpsoverthelazydoG", "thequickbrownfoxjumpsoverthelazydo_g"},
{"TheQuickBrownFoxJumpsOverTheLazyDog", "the_quick_brown_fox_jumps_over_the_lazy_dog"},
{"the_quick_brown_fox_jumps_over_the_lazy_dog", "the_quick_brown_fox_jumps_over_the_lazy_dog"},
{"", "_______"},
} {
actual := stringutil.ToSnakeCase(v.input)
expect := v.expect
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`stringutil.ToSnakeCase(%#v) => %#v; want %#v`, v.input, actual, expect)
}
}
}
func TestToSnakeCaseASCII(t *testing.T) {
for _, v := range []struct {
input, expect string
}{
{"", ""},
{"thequickbrownfoxjumpsoverthelazydog", "thequickbrownfoxjumpsoverthelazydog"},
{"Thequickbrownfoxjumpsoverthelazydog", "thequickbrownfoxjumpsoverthelazydog"},
{"ThequickbrownfoxjumpsoverthelazydoG", "thequickbrownfoxjumpsoverthelazydo_g"},
{"TheQuickBrownFoxJumpsOverTheLazyDog", "the_quick_brown_fox_jumps_over_the_lazy_dog"},
{"the_quick_brown_fox_jumps_over_the_lazy_dog", "the_quick_brown_fox_jumps_over_the_lazy_dog"},
} {
actual := stringutil.ToSnakeCaseASCII(v.input)
expect := v.expect
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`stringutil.ToSnakeCaseASCII(%#v) => %#v; want %#v`, v.input, actual, expect)
}
}
}

View File

@ -1,49 +0,0 @@
package toml_test
import (
"testing"
"time"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/naoina/toml"
)
func BenchmarkUnmarshal(b *testing.B) {
var v struct {
Title string
Owner struct {
Name string
Organization string
Bio string
Dob time.Time
}
Database struct {
Server string
Ports []int
ConnectionMax int
Enabled bool
}
Servers struct {
Alpha struct {
IP string
DC string
}
Beta struct {
IP string
DC string
}
}
Clients struct {
Data []interface{}
Hosts []string
}
}
data, err := loadTestData()
if err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
if err := toml.Unmarshal(data, &v); err != nil {
b.Fatal(err)
}
}
}

File diff suppressed because it is too large

View File

@ -1,298 +0,0 @@
package toml_test
import (
"reflect"
"testing"
"time"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/naoina/toml"
)
func TestMarshal(t *testing.T) {
for _, v := range []struct {
v interface{}
expect string
}{
{struct{ Name string }{"alice"}, "name=\"alice\"\n"},
{struct{ Age int }{7}, "age=7\n"},
{struct {
Name string
Age int
}{"alice", 7}, "name=\"alice\"\nage=7\n"},
{struct {
Name string `toml:"-"`
Age int
}{"alice", 7}, "age=7\n"},
{struct {
Name string `toml:"my_name"`
}{"bob"}, "my_name=\"bob\"\n"},
{struct {
Name string `toml:"my_name,omitempty"`
}{"bob"}, "my_name=\"bob\"\n"},
{struct {
Name string `toml:",omitempty"`
}{"bob"}, "name=\"bob\"\n"},
{struct {
Name string `toml:",omitempty"`
}{""}, ""},
} {
b, err := toml.Marshal(v.v)
var actual interface{} = err
var expect interface{} = nil
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`Marshal(%#v) => %#v; want %#v`, v.v, actual, expect)
}
actual = string(b)
expect = v.expect
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`Marshal(%#v); v => %#v; want %#v`, v, actual, expect)
}
}
}
func TestMarshalWhole(t *testing.T) {
for _, v := range []struct {
v interface{}
expect string
}{
{
testStruct{
Table: Table{
Key: "value",
Subtable: Subtable{
Key: "another value",
},
Inline: Inline{
Name: Name{
First: "Tom",
Last: "Preston-Werner",
},
Point: Point{
X: 1,
Y: 2,
},
},
},
X: X{},
String: String{
Basic: Basic{
Basic: "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF.",
},
Multiline: Multiline{
Key1: "One\nTwo",
Continued: Continued{
Key1: "The quick brown fox jumps over the lazy dog.",
},
},
Literal: Literal{
Winpath: `C:\Users\nodejs\templates`,
Winpath2: `\\ServerX\admin$\system32\`,
Quoted: `Tom "Dubs" Preston-Werner`,
Regex: `<\i\c*\s*>`,
Multiline: LiteralMultiline{
Regex2: `I [dw]on't need \d{2} apples`,
Lines: "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n",
},
},
},
Integer: Integer{
Key1: 99,
Key2: 42,
Key3: 0,
Key4: -17,
Underscores: IntegerUnderscores{
Key1: 1000,
Key2: 5349221,
Key3: 12345,
},
},
Float: Float{
Fractional: Fractional{
Key1: 1.0,
Key2: 3.1415,
Key3: -0.01,
},
Exponent: Exponent{
Key1: 5e22,
Key2: 1e6,
Key3: -2e-2,
},
Both: Both{
Key: 6.626e-34,
},
Underscores: FloatUnderscores{
Key1: 9224617.445991228313,
Key2: 1e100,
},
},
Boolean: Boolean{
True: true,
False: false,
},
Datetime: Datetime{
Key1: mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T07:32:00Z")),
Key2: mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T00:32:00-07:00")),
Key3: mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T00:32:00.999999-07:00")),
},
Array: Array{
Key1: []int{1, 2, 3},
Key2: []string{"red", "yellow", "green"},
Key3: [][]int{{1, 2}, {3, 4, 5}},
Key4: [][]interface{}{{int64(1), int64(2)}, {"a", "b", "c"}},
Key5: []int{1, 2, 3},
Key6: []int{1, 2},
},
Products: []Product{
{Name: "Hammer", Sku: 738594937},
{},
{Name: "Nail", Sku: 284758393, Color: "gray"},
},
Fruit: []Fruit{
{
Name: "apple",
Physical: Physical{
Color: "red",
Shape: "round",
},
Variety: []Variety{
{Name: "red delicious"},
{Name: "granny smith"},
},
},
{
Name: "banana",
Variety: []Variety{
{Name: "plantain"},
},
},
},
},
`[table]
key="value"
[table.subtable]
key="another value"
[table.inline]
[table.inline.name]
first="Tom"
last="Preston-Werner"
[table.inline.point]
x=1
y=2
[x]
[x.y]
[x.y.z]
[x.y.z.w]
[string]
[string.basic]
basic="I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
[string.multiline]
key1="One\nTwo"
key2=""
key3=""
[string.multiline.continued]
key1="The quick brown fox jumps over the lazy dog."
key2=""
key3=""
[string.literal]
winpath="C:\\Users\\nodejs\\templates"
winpath2="\\\\ServerX\\admin$\\system32\\"
quoted="Tom \"Dubs\" Preston-Werner"
regex="<\\i\\c*\\s*>"
[string.literal.multiline]
regex2="I [dw]on't need \\d{2} apples"
lines="The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n"
[integer]
key1=99
key2=42
key3=0
key4=-17
[integer.underscores]
key1=1000
key2=5349221
key3=12345
[float]
[float.fractional]
key1=1e+00
key2=3.1415e+00
key3=-1e-02
[float.exponent]
key1=5e+22
key2=1e+06
key3=-2e-02
[float.both]
key=6.626e-34
[float.underscores]
key1=9.224617445991227e+06
key2=1e+100
[boolean]
true=true
false=false
[datetime]
key1=1979-05-27T07:32:00Z
key2=1979-05-27T00:32:00-07:00
key3=1979-05-27T00:32:00.999999-07:00
[array]
key1=[1,2,3]
key2=["red","yellow","green"]
key3=[[1,2],[3,4,5]]
key4=[[1,2],["a","b","c"]]
key5=[1,2,3]
key6=[1,2]
[[products]]
name="Hammer"
sku=738594937
color=""
[[products]]
name=""
sku=0
color=""
[[products]]
name="Nail"
sku=284758393
color="gray"
[[fruit]]
name="apple"
[fruit.physical]
color="red"
shape="round"
[[fruit.variety]]
name="red delicious"
[[fruit.variety]]
name="granny smith"
[[fruit]]
name="banana"
[fruit.physical]
color=""
shape=""
[[fruit.variety]]
name="plantain"
`,
},
} {
b, err := toml.Marshal(v.v)
var actual interface{} = err
var expect interface{} = nil
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`Marshal(%#v) => %#v; want %#v`, v.v, actual, expect)
}
actual = string(b)
expect = v.expect
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`Marshal(%#v); v => %#v; want %#v`, v.v, actual, expect)
}
// test for reversible.
dest := testStruct{}
actual = toml.Unmarshal(b, &dest)
expect = nil
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`Unmarshal after Marshal => %#v; want %#v`, actual, expect)
}
actual = dest
expect = v.v
if !reflect.DeepEqual(actual, expect) {
t.Errorf(`Unmarshal after Marshal => %#v; want %#v`, v, actual, expect)
}
}
}

View File

@ -1,244 +0,0 @@
################################################################################
## Comment
# Speak your mind with the hash symbol. They go from the symbol to the end of
# the line.
################################################################################
## Table
# Tables (also known as hash tables or dictionaries) are collections of
# key/value pairs. They appear in square brackets on a line by themselves.
[table]
key = "value" # Yeah, you can do this.
# Nested tables are denoted by table names with dots in them. Name your tables
# whatever crap you please, just don't use #, ., [ or ].
[table.subtable]
key = "another value"
# You don't need to specify all the super-tables if you don't want to. TOML
# knows how to do it for you.
# [x] you
# [x.y] don't
# [x.y.z] need these
[x.y.z.w] # for this to work
################################################################################
## Inline Table
# Inline tables provide a more compact syntax for expressing tables. They are
# especially useful for grouped data that can otherwise quickly become verbose.
# Inline tables are enclosed in curly braces `{` and `}`. No newlines are
# allowed between the curly braces unless they are valid within a value.
[table.inline]
name = { first = "Tom", last = "Preston-Werner" }
point = { x = 1, y = 2 }
################################################################################
## String
# There are four ways to express strings: basic, multi-line basic, literal, and
# multi-line literal. All strings must contain only valid UTF-8 characters.
[string.basic]
basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
[string.multiline]
# The following strings are byte-for-byte equivalent:
key1 = "One\nTwo"
key2 = """One\nTwo"""
key3 = """
One
Two"""
[string.multiline.continued]
# The following strings are byte-for-byte equivalent:
key1 = "The quick brown fox jumps over the lazy dog."
key2 = """
The quick brown \
fox jumps over \
the lazy dog."""
key3 = """\
The quick brown \
fox jumps over \
the lazy dog.\
"""
[string.literal]
# What you see is what you get.
winpath = 'C:\Users\nodejs\templates'
winpath2 = '\\ServerX\admin$\system32\'
quoted = 'Tom "Dubs" Preston-Werner'
regex = '<\i\c*\s*>'
[string.literal.multiline]
regex2 = '''I [dw]on't need \d{2} apples'''
lines = '''
The first newline is
trimmed in raw strings.
All other whitespace
is preserved.
'''
################################################################################
## Integer
# Integers are whole numbers. Positive numbers may be prefixed with a plus sign.
# Negative numbers are prefixed with a minus sign.
[integer]
key1 = +99
key2 = 42
key3 = 0
key4 = -17
[integer.underscores]
# For large numbers, you may use underscores to enhance readability. Each
# underscore must be surrounded by at least one digit.
key1 = 1_000
key2 = 5_349_221
key3 = 1_2_3_4_5 # valid but inadvisable
################################################################################
## Float
# A float consists of an integer part (which may be prefixed with a plus or
# minus sign) followed by a fractional part and/or an exponent part.
[float.fractional]
key1 = +1.0
key2 = 3.1415
key3 = -0.01
[float.exponent]
key1 = 5e+22
key2 = 1e6
key3 = -2E-2
[float.both]
key = 6.626e-34
[float.underscores]
key1 = 9_224_617.445_991_228_313
key2 = 1e1_00
################################################################################
## Boolean
# Booleans are just the tokens you're used to. Always lowercase.
[boolean]
True = true
False = false
################################################################################
## Datetime
# Datetimes are RFC 3339 dates.
[datetime]
key1 = 1979-05-27T07:32:00Z
key2 = 1979-05-27T00:32:00-07:00
key3 = 1979-05-27T00:32:00.999999-07:00
################################################################################
## Array
# Arrays are square brackets with other primitives inside. Whitespace is
# ignored. Elements are separated by commas. Data types may not be mixed.
[array]
key1 = [ 1, 2, 3 ]
key2 = [ "red", "yellow", "green" ]
key3 = [ [ 1, 2 ], [3, 4, 5] ]
key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok
# Arrays can also be multiline. So in addition to ignoring whitespace, arrays
# also ignore newlines between the brackets. Terminating commas are ok before
# the closing bracket.
key5 = [
1, 2, 3
]
key6 = [
1,
2, # this is ok
]
################################################################################
## Array of Tables
# These can be expressed by using a table name in double brackets. Each table
# with the same double bracketed name will be an element in the array. The
# tables are inserted in the order encountered.
[[products]]
name = "Hammer"
sku = 738594937
[[products]]
[[products]]
name = "Nail"
sku = 284758393
color = "gray"
# You can create nested arrays of tables as well.
[[fruit]]
name = "apple"
[fruit.physical]
color = "red"
shape = "round"
[[fruit.variety]]
name = "red delicious"
[[fruit.variety]]
name = "granny smith"
[[fruit]]
name = "banana"
[[fruit.variety]]
name = "plantain"

View File

@ -1,104 +0,0 @@
go-metrics
==========
Go port of Coda Hale's Metrics library: <https://github.com/codahale/metrics>.
Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
Usage
-----
Create and update metrics:
```go
c := metrics.NewCounter()
metrics.Register("foo", c)
c.Inc(47)
g := metrics.NewGauge()
metrics.Register("bar", g)
g.Update(47)
s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
h := metrics.NewHistogram(s)
metrics.Register("baz", h)
h.Update(47)
m := metrics.NewMeter()
metrics.Register("quux", m)
m.Mark(47)
t := metrics.NewTimer()
metrics.Register("bang", t)
t.Time(func() {})
t.Update(47)
```
Periodically log every metric in human-readable form to standard error:
```go
go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
```
Periodically log every metric in slightly-more-parseable form to syslog:
```go
w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
```
Periodically emit every metric to Graphite:
```go
addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
go metrics.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
```
Periodically emit every metric into InfluxDB:
```go
import "github.com/rcrowley/go-metrics/influxdb"
go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
Host: "127.0.0.1:8086",
Database: "metrics",
Username: "test",
Password: "test",
})
```
Periodically upload every metric to Librato:
```go
import "github.com/rcrowley/go-metrics/librato"
go librato.Librato(metrics.DefaultRegistry,
10e9, // interval
"example@example.com", // account owner email address
"token", // Librato API token
"hostname", // source
[]float64{0.95}, // percentiles to send
time.Millisecond, // time unit
)
```
Periodically emit every metric to StatHat:
```go
import "github.com/rcrowley/go-metrics/stathat"
go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
```
Installation
------------
```sh
go get github.com/rcrowley/go-metrics
```
StatHat support additionally requires their Go client:
```sh
go get github.com/stathat/go
```

View File

@ -1,77 +0,0 @@
package metrics
import "testing"
func BenchmarkCounter(b *testing.B) {
c := NewCounter()
b.ResetTimer()
for i := 0; i < b.N; i++ {
c.Inc(1)
}
}
func TestCounterClear(t *testing.T) {
c := NewCounter()
c.Inc(1)
c.Clear()
if count := c.Count(); 0 != count {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
func TestCounterDec1(t *testing.T) {
c := NewCounter()
c.Dec(1)
if count := c.Count(); -1 != count {
t.Errorf("c.Count(): -1 != %v\n", count)
}
}
func TestCounterDec2(t *testing.T) {
c := NewCounter()
c.Dec(2)
if count := c.Count(); -2 != count {
t.Errorf("c.Count(): -2 != %v\n", count)
}
}
func TestCounterInc1(t *testing.T) {
c := NewCounter()
c.Inc(1)
if count := c.Count(); 1 != count {
t.Errorf("c.Count(): 1 != %v\n", count)
}
}
func TestCounterInc2(t *testing.T) {
c := NewCounter()
c.Inc(2)
if count := c.Count(); 2 != count {
t.Errorf("c.Count(): 2 != %v\n", count)
}
}
func TestCounterSnapshot(t *testing.T) {
c := NewCounter()
c.Inc(1)
snapshot := c.Snapshot()
c.Inc(1)
if count := snapshot.Count(); 1 != count {
t.Errorf("c.Count(): 1 != %v\n", count)
}
}
func TestCounterZero(t *testing.T) {
c := NewCounter()
if count := c.Count(); 0 != count {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
func TestGetOrRegisterCounter(t *testing.T) {
r := NewRegistry()
NewRegisteredCounter("foo", r).Inc(47)
if c := GetOrRegisterCounter("foo", r); 47 != c.Count() {
t.Fatal(c)
}
}

View File

@ -1,48 +0,0 @@
package metrics
import (
"runtime"
"runtime/debug"
"testing"
"time"
)
func BenchmarkDebugGCStats(b *testing.B) {
r := NewRegistry()
RegisterDebugGCStats(r)
b.ResetTimer()
for i := 0; i < b.N; i++ {
CaptureDebugGCStatsOnce(r)
}
}
func TestDebugGCStatsBlocking(t *testing.T) {
if g := runtime.GOMAXPROCS(0); g < 2 {
t.Skipf("skipping TestDebugGCMemStatsBlocking with GOMAXPROCS=%d\n", g)
return
}
ch := make(chan int)
go testDebugGCStatsBlocking(ch)
var gcStats debug.GCStats
t0 := time.Now()
debug.ReadGCStats(&gcStats)
t1 := time.Now()
t.Log("i++ during debug.ReadGCStats:", <-ch)
go testDebugGCStatsBlocking(ch)
d := t1.Sub(t0)
t.Log(d)
time.Sleep(d)
t.Log("i++ during time.Sleep:", <-ch)
}
func testDebugGCStatsBlocking(ch chan int) {
i := 0
for {
select {
case ch <- i:
return
default:
i++
}
}
}

View File

@ -1,225 +0,0 @@
package metrics
import "testing"
func BenchmarkEWMA(b *testing.B) {
a := NewEWMA1()
b.ResetTimer()
for i := 0; i < b.N; i++ {
a.Update(1)
a.Tick()
}
}
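// The expected rates in the tests below follow exponential decay: Update(3)
// followed by a Tick gives an initial rate of 0.6 events/sec (3 events over a
// 5-second tick), and each elapsed minute multiplies the rate by e^(-1/N) for
// an N-minute EWMA, so after t minutes the rate is 0.6 * e^(-t/N).
// (Some of the unusually long literals simply carry extra digits; a float64
// constant only keeps about 17 significant digits.)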
func TestEWMA1(t *testing.T) {
a := NewEWMA1()
a.Update(3)
a.Tick()
if rate := a.Rate(); 0.6 != rate {
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.22072766470286553 != rate {
t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.08120116994196772 != rate {
t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.029872241020718428 != rate {
t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.01098938333324054 != rate {
t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.004042768199451294 != rate {
t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.0014872513059998212 != rate {
t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.0005471291793327122 != rate {
t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.00020127757674150815 != rate {
t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 7.404588245200814e-05 != rate {
t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 2.7239957857491083e-05 != rate {
t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 1.0021020474147462e-05 != rate {
t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 3.6865274119969525e-06 != rate {
t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 1.3561976441886433e-06 != rate {
t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 4.989172314621449e-07 != rate {
t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 1.8354139230109722e-07 != rate {
t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate)
}
}
func TestEWMA5(t *testing.T) {
a := NewEWMA5()
a.Update(3)
a.Tick()
if rate := a.Rate(); 0.6 != rate {
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.49123845184678905 != rate {
t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.4021920276213837 != rate {
t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.32928698165641596 != rate {
t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.269597378470333 != rate {
t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.2207276647028654 != rate {
t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.18071652714732128 != rate {
t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.14795817836496392 != rate {
t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.12113791079679326 != rate {
t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.09917933293295193 != rate {
t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.08120116994196763 != rate {
t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.06648189501740036 != rate {
t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.05443077197364752 != rate {
t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.04456414692860035 != rate {
t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.03648603757513079 != rate {
t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.0298722410207183831020718428 != rate {
t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate)
}
}
func TestEWMA15(t *testing.T) {
a := NewEWMA15()
a.Update(3)
a.Tick()
if rate := a.Rate(); 0.6 != rate {
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.5613041910189706 != rate {
t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.5251039914257684 != rate {
t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.4912384518467888184678905 != rate {
t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.459557003018789 != rate {
t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.4299187863442732 != rate {
t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.4021920276213831 != rate {
t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.37625345116383313 != rate {
t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.3519877317060185 != rate {
t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.3292869816564153165641596 != rate {
t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.3080502714195546 != rate {
t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.2881831806538789 != rate {
t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.26959737847033216 != rate {
t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.2522102307052083 != rate {
t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.23594443252115815 != rate {
t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate)
}
elapseMinute(a)
if rate := a.Rate(); 0.2207276647028646247028654470286553 != rate {
t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate)
}
}
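// elapseMinute advances the average by what the EWMAs treat as one minute of
// wall time: 12 ticks, each representing a 5-second interval.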
func elapseMinute(a EWMA) {
for i := 0; i < 12; i++ {
a.Tick()
}
}

View File

@ -1,38 +0,0 @@
package metrics
import "testing"
func BenchmarkGuageFloat64(b *testing.B) {
g := NewGaugeFloat64()
b.ResetTimer()
for i := 0; i < b.N; i++ {
g.Update(float64(i))
}
}
func TestGaugeFloat64(t *testing.T) {
g := NewGaugeFloat64()
g.Update(float64(47.0))
if v := g.Value(); float64(47.0) != v {
t.Errorf("g.Value(): 47.0 != %v\n", v)
}
}
func TestGaugeFloat64Snapshot(t *testing.T) {
g := NewGaugeFloat64()
g.Update(float64(47.0))
snapshot := g.Snapshot()
g.Update(float64(0))
if v := snapshot.Value(); float64(47.0) != v {
t.Errorf("g.Value(): 47.0 != %v\n", v)
}
}
func TestGetOrRegisterGaugeFloat64(t *testing.T) {
r := NewRegistry()
NewRegisteredGaugeFloat64("foo", r).Update(float64(47.0))
t.Logf("registry: %v", r)
if g := GetOrRegisterGaugeFloat64("foo", r); float64(47.0) != g.Value() {
t.Fatal(g)
}
}

View File

@ -1,37 +0,0 @@
package metrics
import "testing"
func BenchmarkGuage(b *testing.B) {
g := NewGauge()
b.ResetTimer()
for i := 0; i < b.N; i++ {
g.Update(int64(i))
}
}
func TestGauge(t *testing.T) {
g := NewGauge()
g.Update(int64(47))
if v := g.Value(); 47 != v {
t.Errorf("g.Value(): 47 != %v\n", v)
}
}
func TestGaugeSnapshot(t *testing.T) {
g := NewGauge()
g.Update(int64(47))
snapshot := g.Snapshot()
g.Update(int64(0))
if v := snapshot.Value(); 47 != v {
t.Errorf("g.Value(): 47 != %v\n", v)
}
}
func TestGetOrRegisterGauge(t *testing.T) {
r := NewRegistry()
NewRegisteredGauge("foo", r).Update(47)
if g := GetOrRegisterGauge("foo", r); 47 != g.Value() {
t.Fatal(g)
}
}

View File

@ -1,22 +0,0 @@
package metrics
import (
"net"
"time"
)
func ExampleGraphite() {
addr, _ := net.ResolveTCPAddr("net", ":2003")
go Graphite(DefaultRegistry, 1*time.Second, "some.prefix", addr)
}
func ExampleGraphiteWithConfig() {
addr, _ := net.ResolveTCPAddr("net", ":2003")
go GraphiteWithConfig(GraphiteConfig{
Addr: addr,
Registry: DefaultRegistry,
FlushInterval: 1 * time.Second,
DurationUnit: time.Millisecond,
Percentiles: []float64{ 0.5, 0.75, 0.99, 0.999 },
})
}

View File

@ -1,95 +0,0 @@
package metrics
import "testing"
func BenchmarkHistogram(b *testing.B) {
h := NewHistogram(NewUniformSample(100))
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Update(int64(i))
}
}
func TestGetOrRegisterHistogram(t *testing.T) {
r := NewRegistry()
s := NewUniformSample(100)
NewRegisteredHistogram("foo", r, s).Update(47)
if h := GetOrRegisterHistogram("foo", r, s); 1 != h.Count() {
t.Fatal(h)
}
}
func TestHistogram10000(t *testing.T) {
h := NewHistogram(NewUniformSample(100000))
for i := 1; i <= 10000; i++ {
h.Update(int64(i))
}
testHistogram10000(t, h)
}
func TestHistogramEmpty(t *testing.T) {
h := NewHistogram(NewUniformSample(100))
if count := h.Count(); 0 != count {
t.Errorf("h.Count(): 0 != %v\n", count)
}
if min := h.Min(); 0 != min {
t.Errorf("h.Min(): 0 != %v\n", min)
}
if max := h.Max(); 0 != max {
t.Errorf("h.Max(): 0 != %v\n", max)
}
if mean := h.Mean(); 0.0 != mean {
t.Errorf("h.Mean(): 0.0 != %v\n", mean)
}
if stdDev := h.StdDev(); 0.0 != stdDev {
t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
}
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
if 0.0 != ps[0] {
t.Errorf("median: 0.0 != %v\n", ps[0])
}
if 0.0 != ps[1] {
t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
}
if 0.0 != ps[2] {
t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
}
}
func TestHistogramSnapshot(t *testing.T) {
h := NewHistogram(NewUniformSample(100000))
for i := 1; i <= 10000; i++ {
h.Update(int64(i))
}
snapshot := h.Snapshot()
h.Update(0)
testHistogram10000(t, snapshot)
}
func testHistogram10000(t *testing.T, h Histogram) {
if count := h.Count(); 10000 != count {
t.Errorf("h.Count(): 10000 != %v\n", count)
}
if min := h.Min(); 1 != min {
t.Errorf("h.Min(): 1 != %v\n", min)
}
if max := h.Max(); 10000 != max {
t.Errorf("h.Max(): 10000 != %v\n", max)
}
if mean := h.Mean(); 5000.5 != mean {
t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
}
if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
}
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
if 5000.5 != ps[0] {
t.Errorf("median: 5000.5 != %v\n", ps[0])
}
if 7500.75 != ps[1] {
t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
}
if 9900.99 != ps[2] {
t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
}
}

View File

@ -1,114 +0,0 @@
package influxdb
import (
"fmt"
influxClient "github.com/influxdb/influxdb/client"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/rcrowley/go-metrics"
"log"
"time"
)
type Config struct {
Host string
Database string
Username string
Password string
}
func Influxdb(r metrics.Registry, d time.Duration, config *Config) {
client, err := influxClient.NewClient(&influxClient.ClientConfig{
Host: config.Host,
Database: config.Database,
Username: config.Username,
Password: config.Password,
})
if err != nil {
log.Println(err)
return
}
for _ = range time.Tick(d) {
if err := send(r, client); err != nil {
log.Println(err)
}
}
}
func send(r metrics.Registry, client *influxClient.Client) error {
series := []*influxClient.Series{}
r.Each(func(name string, i interface{}) {
now := getCurrentTime()
switch metric := i.(type) {
case metrics.Counter:
series = append(series, &influxClient.Series{
Name: fmt.Sprintf("%s.count", name),
Columns: []string{"time", "count"},
Points: [][]interface{}{
{now, metric.Count()},
},
})
case metrics.Gauge:
series = append(series, &influxClient.Series{
Name: fmt.Sprintf("%s.value", name),
Columns: []string{"time", "value"},
Points: [][]interface{}{
{now, metric.Value()},
},
})
case metrics.GaugeFloat64:
series = append(series, &influxClient.Series{
Name: fmt.Sprintf("%s.value", name),
Columns: []string{"time", "value"},
Points: [][]interface{}{
{now, metric.Value()},
},
})
case metrics.Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
series = append(series, &influxClient.Series{
Name: fmt.Sprintf("%s.histogram", name),
Columns: []string{"time", "count", "min", "max", "mean", "std-dev",
"50-percentile", "75-percentile", "95-percentile",
"99-percentile", "999-percentile"},
Points: [][]interface{}{
{now, h.Count(), h.Min(), h.Max(), h.Mean(), h.StdDev(),
ps[0], ps[1], ps[2], ps[3], ps[4]},
},
})
case metrics.Meter:
m := metric.Snapshot()
series = append(series, &influxClient.Series{
Name: fmt.Sprintf("%s.meter", name),
Columns: []string{"count", "one-minute",
"five-minute", "fifteen-minute", "mean"},
Points: [][]interface{}{
{m.Count(), m.Rate1(), m.Rate5(), m.Rate15(), m.RateMean()},
},
})
case metrics.Timer:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
series = append(series, &influxClient.Series{
Name: fmt.Sprintf("%s.timer", name),
Columns: []string{"count", "min", "max", "mean", "std-dev",
"50-percentile", "75-percentile", "95-percentile",
"99-percentile", "999-percentile", "one-minute", "five-minute", "fifteen-minute", "mean-rate"},
Points: [][]interface{}{
{h.Count(), h.Min(), h.Max(), h.Mean(), h.StdDev(),
ps[0], ps[1], ps[2], ps[3], ps[4],
h.Rate1(), h.Rate5(), h.Rate15(), h.RateMean()},
},
})
}
})
if err := client.WriteSeries(series); err != nil {
log.Println(err)
}
return nil
}
func getCurrentTime() int64 {
return time.Now().UnixNano() / 1000000
}
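Nothing in this diff shows the reporter being wired up; a hypothetical startup fragment using only the Influxdb function and Config fields defined above (host, database name and credentials are placeholders):

// The reporter loops over time.Tick, so it is started in its own goroutine.
go influxdb.Influxdb(metrics.DefaultRegistry, 10*time.Second, &influxdb.Config{
    Host:     "localhost:8086",
    Database: "netmon",
    Username: "user",
    Password: "pass",
})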

View File

@ -1,28 +0,0 @@
package metrics
import (
"bytes"
"encoding/json"
"testing"
)
func TestRegistryMarshallJSON(t *testing.T) {
b := &bytes.Buffer{}
enc := json.NewEncoder(b)
r := NewRegistry()
r.Register("counter", NewCounter())
enc.Encode(r)
if s := b.String(); "{\"counter\":{\"count\":0}}\n" != s {
t.Fatalf("%s", s)
}
}
func TestRegistryWriteJSONOnce(t *testing.T) {
r := NewRegistry()
r.Register("counter", NewCounter())
b := &bytes.Buffer{}
WriteJSONOnce(r, b)
if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
t.Fail()
}
}

View File

@ -1,60 +0,0 @@
package metrics
import (
"testing"
"time"
)
func BenchmarkMeter(b *testing.B) {
m := NewMeter()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Mark(1)
}
}
func TestGetOrRegisterMeter(t *testing.T) {
r := NewRegistry()
NewRegisteredMeter("foo", r).Mark(47)
if m := GetOrRegisterMeter("foo", r); 47 != m.Count() {
t.Fatal(m)
}
}
func TestMeterDecay(t *testing.T) {
ma := meterArbiter{
ticker: time.NewTicker(1),
}
m := newStandardMeter()
ma.meters = append(ma.meters, m)
go ma.tick()
m.Mark(1)
rateMean := m.RateMean()
time.Sleep(1)
if m.RateMean() >= rateMean {
t.Error("m.RateMean() didn't decrease")
}
}
func TestMeterNonzero(t *testing.T) {
m := NewMeter()
m.Mark(3)
if count := m.Count(); 3 != count {
t.Errorf("m.Count(): 3 != %v\n", count)
}
}
func TestMeterSnapshot(t *testing.T) {
m := NewMeter()
m.Mark(1)
if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
t.Fatal(snapshot)
}
}
func TestMeterZero(t *testing.T) {
m := NewMeter()
if count := m.Count(); 0 != count {
t.Errorf("m.Count(): 0 != %v\n", count)
}
}
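A short fragment restating the Meter contract these tests rely on, an event counter plus 1/5/15-minute and mean rates (assuming the package is imported as metrics and the metric name is illustrative):

m := metrics.NewRegisteredMeter("requests", metrics.DefaultRegistry)
m.Mark(1) // record one event
fmt.Println(m.Count(), m.Rate1(), m.Rate5(), m.Rate15(), m.RateMean())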

View File

@ -1,107 +0,0 @@
package metrics
import (
"io/ioutil"
"log"
"sync"
"testing"
)
const FANOUT = 128
// Stop the compiler from complaining during debugging.
var (
_ = ioutil.Discard
_ = log.LstdFlags
)
func BenchmarkMetrics(b *testing.B) {
r := NewRegistry()
c := NewRegisteredCounter("counter", r)
g := NewRegisteredGauge("gauge", r)
gf := NewRegisteredGaugeFloat64("gaugefloat64", r)
h := NewRegisteredHistogram("histogram", r, NewUniformSample(100))
m := NewRegisteredMeter("meter", r)
t := NewRegisteredTimer("timer", r)
RegisterDebugGCStats(r)
RegisterRuntimeMemStats(r)
b.ResetTimer()
ch := make(chan bool)
wgD := &sync.WaitGroup{}
/*
wgD.Add(1)
go func() {
defer wgD.Done()
//log.Println("go CaptureDebugGCStats")
for {
select {
case <-ch:
//log.Println("done CaptureDebugGCStats")
return
default:
CaptureDebugGCStatsOnce(r)
}
}
}()
//*/
wgR := &sync.WaitGroup{}
//*
wgR.Add(1)
go func() {
defer wgR.Done()
//log.Println("go CaptureRuntimeMemStats")
for {
select {
case <-ch:
//log.Println("done CaptureRuntimeMemStats")
return
default:
CaptureRuntimeMemStatsOnce(r)
}
}
}()
//*/
wgW := &sync.WaitGroup{}
/*
wgW.Add(1)
go func() {
defer wgW.Done()
//log.Println("go Write")
for {
select {
case <-ch:
//log.Println("done Write")
return
default:
WriteOnce(r, ioutil.Discard)
}
}
}()
//*/
wg := &sync.WaitGroup{}
wg.Add(FANOUT)
for i := 0; i < FANOUT; i++ {
go func(i int) {
defer wg.Done()
//log.Println("go", i)
for i := 0; i < b.N; i++ {
c.Inc(1)
g.Update(int64(i))
gf.Update(float64(i))
h.Update(int64(i))
m.Mark(1)
t.Update(1)
}
//log.Println("done", i)
}(i)
}
wg.Wait()
close(ch)
wgD.Wait()
wgR.Wait()
wgW.Wait()
}

View File

@ -1,22 +0,0 @@
package metrics
import (
"net"
"time"
)
func ExampleOpenTSDB() {
addr, _ := net.ResolveTCPAddr("net", ":2003")
go OpenTSDB(DefaultRegistry, 1*time.Second, "some.prefix", addr)
}
func ExampleOpenTSDBWithConfig() {
addr, _ := net.ResolveTCPAddr("net", ":2003")
go OpenTSDBWithConfig(OpenTSDBConfig{
Addr: addr,
Registry: DefaultRegistry,
FlushInterval: 1 * time.Second,
DurationUnit: time.Millisecond,
})
}

View File

@ -1,118 +0,0 @@
package metrics
import "testing"
func BenchmarkRegistry(b *testing.B) {
r := NewRegistry()
r.Register("foo", NewCounter())
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Each(func(string, interface{}) {})
}
}
func TestRegistry(t *testing.T) {
r := NewRegistry()
r.Register("foo", NewCounter())
i := 0
r.Each(func(name string, iface interface{}) {
i++
if "foo" != name {
t.Fatal(name)
}
if _, ok := iface.(Counter); !ok {
t.Fatal(iface)
}
})
if 1 != i {
t.Fatal(i)
}
r.Unregister("foo")
i = 0
r.Each(func(string, interface{}) { i++ })
if 0 != i {
t.Fatal(i)
}
}
func TestRegistryDuplicate(t *testing.T) {
r := NewRegistry()
if err := r.Register("foo", NewCounter()); nil != err {
t.Fatal(err)
}
if err := r.Register("foo", NewGauge()); nil == err {
t.Fatal(err)
}
i := 0
r.Each(func(name string, iface interface{}) {
i++
if _, ok := iface.(Counter); !ok {
t.Fatal(iface)
}
})
if 1 != i {
t.Fatal(i)
}
}
func TestRegistryGet(t *testing.T) {
r := NewRegistry()
r.Register("foo", NewCounter())
if count := r.Get("foo").(Counter).Count(); 0 != count {
t.Fatal(count)
}
r.Get("foo").(Counter).Inc(1)
if count := r.Get("foo").(Counter).Count(); 1 != count {
t.Fatal(count)
}
}
func TestRegistryGetOrRegister(t *testing.T) {
r := NewRegistry()
// First metric wins with GetOrRegister
_ = r.GetOrRegister("foo", NewCounter())
m := r.GetOrRegister("foo", NewGauge())
if _, ok := m.(Counter); !ok {
t.Fatal(m)
}
i := 0
r.Each(func(name string, iface interface{}) {
i++
if name != "foo" {
t.Fatal(name)
}
if _, ok := iface.(Counter); !ok {
t.Fatal(iface)
}
})
if i != 1 {
t.Fatal(i)
}
}
func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) {
r := NewRegistry()
// First metric wins with GetOrRegister
_ = r.GetOrRegister("foo", NewCounter)
m := r.GetOrRegister("foo", NewGauge)
if _, ok := m.(Counter); !ok {
t.Fatal(m)
}
i := 0
r.Each(func(name string, iface interface{}) {
i++
if name != "foo" {
t.Fatal(name)
}
if _, ok := iface.(Counter); !ok {
t.Fatal(iface)
}
})
if i != 1 {
t.Fatal(i)
}
}
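The two tests above fix the "first registration wins" contract of GetOrRegister, with either a ready-made metric or an uninvoked constructor. A brief fragment restating it (metric names are illustrative):

r := metrics.NewRegistry()
c := r.GetOrRegister("requests", metrics.NewCounter()).(metrics.Counter)
c.Inc(1)
// Registering a Gauge under the same name is a no-op; the existing Counter is returned.
_ = r.GetOrRegister("requests", metrics.NewGauge())
// Constructors may be passed uninvoked, so the metric is only built if the name is new.
tm := r.GetOrRegister("latency", metrics.NewTimer).(metrics.Timer)
tm.Update(42 * time.Millisecond)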

View File

@ -1,78 +0,0 @@
package metrics
import (
"runtime"
"testing"
"time"
)
func BenchmarkRuntimeMemStats(b *testing.B) {
r := NewRegistry()
RegisterRuntimeMemStats(r)
b.ResetTimer()
for i := 0; i < b.N; i++ {
CaptureRuntimeMemStatsOnce(r)
}
}
func TestRuntimeMemStats(t *testing.T) {
r := NewRegistry()
RegisterRuntimeMemStats(r)
CaptureRuntimeMemStatsOnce(r)
zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests.
runtime.GC()
CaptureRuntimeMemStatsOnce(r)
if count := runtimeMetrics.MemStats.PauseNs.Count(); 1 != count-zero {
t.Fatal(count - zero)
}
runtime.GC()
runtime.GC()
CaptureRuntimeMemStatsOnce(r)
if count := runtimeMetrics.MemStats.PauseNs.Count(); 3 != count-zero {
t.Fatal(count - zero)
}
for i := 0; i < 256; i++ {
runtime.GC()
}
CaptureRuntimeMemStatsOnce(r)
if count := runtimeMetrics.MemStats.PauseNs.Count(); 259 != count-zero {
t.Fatal(count - zero)
}
for i := 0; i < 257; i++ {
runtime.GC()
}
CaptureRuntimeMemStatsOnce(r)
if count := runtimeMetrics.MemStats.PauseNs.Count(); 515 != count-zero { // We lost one because there were too many GCs between captures.
t.Fatal(count - zero)
}
}
func TestRuntimeMemStatsBlocking(t *testing.T) {
if g := runtime.GOMAXPROCS(0); g < 2 {
t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g)
}
ch := make(chan int)
go testRuntimeMemStatsBlocking(ch)
var memStats runtime.MemStats
t0 := time.Now()
runtime.ReadMemStats(&memStats)
t1 := time.Now()
t.Log("i++ during runtime.ReadMemStats:", <-ch)
go testRuntimeMemStatsBlocking(ch)
d := t1.Sub(t0)
t.Log(d)
time.Sleep(d)
t.Log("i++ during time.Sleep:", <-ch)
}
func testRuntimeMemStatsBlocking(ch chan int) {
i := 0
for {
select {
case ch <- i:
return
default:
i++
}
}
}

View File

@ -1,363 +0,0 @@
package metrics
import (
"math/rand"
"runtime"
"testing"
"time"
)
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
s := make([]int64, 1000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
SampleVariance(s)
}
}
func BenchmarkCompute1000000(b *testing.B) {
s := make([]int64, 1000000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
SampleVariance(s)
}
}
func BenchmarkCopy1000(b *testing.B) {
s := make([]int64, 1000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
sCopy := make([]int64, len(s))
copy(sCopy, s)
}
}
func BenchmarkCopy1000000(b *testing.B) {
s := make([]int64, 1000000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
sCopy := make([]int64, len(s))
copy(sCopy, s)
}
}
func BenchmarkExpDecaySample257(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(257, 0.015))
}
func BenchmarkExpDecaySample514(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(514, 0.015))
}
func BenchmarkExpDecaySample1028(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}
func BenchmarkUniformSample257(b *testing.B) {
benchmarkSample(b, NewUniformSample(257))
}
func BenchmarkUniformSample514(b *testing.B) {
benchmarkSample(b, NewUniformSample(514))
}
func BenchmarkUniformSample1028(b *testing.B) {
benchmarkSample(b, NewUniformSample(1028))
}
func TestExpDecaySample10(t *testing.T) {
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 10; i++ {
s.Update(int64(i))
}
if size := s.Count(); 10 != size {
t.Errorf("s.Count(): 10 != %v\n", size)
}
if size := s.Size(); 10 != size {
t.Errorf("s.Size(): 10 != %v\n", size)
}
if l := len(s.Values()); 10 != l {
t.Errorf("len(s.Values()): 10 != %v\n", l)
}
for _, v := range s.Values() {
if v > 10 || v < 0 {
t.Errorf("out of range [0, 10): %v\n", v)
}
}
}
func TestExpDecaySample100(t *testing.T) {
rand.Seed(1)
s := NewExpDecaySample(1000, 0.01)
for i := 0; i < 100; i++ {
s.Update(int64(i))
}
if size := s.Count(); 100 != size {
t.Errorf("s.Count(): 100 != %v\n", size)
}
if size := s.Size(); 100 != size {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); 100 != l {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 100 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
}
}
}
func TestExpDecaySample1000(t *testing.T) {
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 1000; i++ {
s.Update(int64(i))
}
if size := s.Count(); 1000 != size {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
if size := s.Size(); 100 != size {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); 100 != l {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 1000): %v\n", v)
}
}
}
// This test makes sure that the sample's priority is not amplified by using
// nanosecond duration since start rather than second duration since start.
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 100; i++ {
s.Update(10)
}
time.Sleep(1 * time.Millisecond)
for i := 0; i < 100; i++ {
s.Update(20)
}
v := s.Values()
avg := float64(0)
for i := 0; i < len(v); i++ {
avg += float64(v[i])
}
avg /= float64(len(v))
if avg > 16 || avg < 14 {
t.Errorf("out of range [14, 16]: %v\n", avg)
}
}
func TestExpDecaySampleRescale(t *testing.T) {
s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
s.update(time.Now(), 1)
s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
for _, v := range s.values.Values() {
if v.k == 0.0 {
t.Fatal("v.k == 0.0")
}
}
}
func TestExpDecaySampleSnapshot(t *testing.T) {
now := time.Now()
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
snapshot := s.Snapshot()
s.Update(1)
testExpDecaySampleStatistics(t, snapshot)
}
func TestExpDecaySampleStatistics(t *testing.T) {
now := time.Now()
rand.Seed(1)
s := NewExpDecaySample(100, 0.99)
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
testExpDecaySampleStatistics(t, s)
}
func TestUniformSample(t *testing.T) {
rand.Seed(1)
s := NewUniformSample(100)
for i := 0; i < 1000; i++ {
s.Update(int64(i))
}
if size := s.Count(); 1000 != size {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
if size := s.Size(); 100 != size {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); 100 != l {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
}
}
}
func TestUniformSampleIncludesTail(t *testing.T) {
rand.Seed(1)
s := NewUniformSample(100)
max := 100
for i := 0; i < max; i++ {
s.Update(int64(i))
}
v := s.Values()
sum := 0
exp := (max - 1) * max / 2
for i := 0; i < len(v); i++ {
sum += int(v[i])
}
if exp != sum {
t.Errorf("sum: %v != %v\n", exp, sum)
}
}
func TestUniformSampleSnapshot(t *testing.T) {
s := NewUniformSample(100)
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
snapshot := s.Snapshot()
s.Update(1)
testUniformSampleStatistics(t, snapshot)
}
func TestUniformSampleStatistics(t *testing.T) {
rand.Seed(1)
s := NewUniformSample(100)
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
testUniformSampleStatistics(t, s)
}
func benchmarkSample(b *testing.B, s Sample) {
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
pauseTotalNs := memStats.PauseTotalNs
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.Update(1)
}
b.StopTimer()
runtime.GC()
runtime.ReadMemStats(&memStats)
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
func testExpDecaySampleStatistics(t *testing.T, s Sample) {
if count := s.Count(); 10000 != count {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
if min := s.Min(); 107 != min {
t.Errorf("s.Min(): 107 != %v\n", min)
}
if max := s.Max(); 10000 != max {
t.Errorf("s.Max(): 10000 != %v\n", max)
}
if mean := s.Mean(); 4965.98 != mean {
t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
}
if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
}
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
if 4615 != ps[0] {
t.Errorf("median: 4615 != %v\n", ps[0])
}
if 7672 != ps[1] {
t.Errorf("75th percentile: 7672 != %v\n", ps[1])
}
if 9998.99 != ps[2] {
t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
}
}
func testUniformSampleStatistics(t *testing.T, s Sample) {
if count := s.Count(); 10000 != count {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
if min := s.Min(); 37 != min {
t.Errorf("s.Min(): 37 != %v\n", min)
}
if max := s.Max(); 9989 != max {
t.Errorf("s.Max(): 9989 != %v\n", max)
}
if mean := s.Mean(); 4748.14 != mean {
t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
}
if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
}
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
if 4599 != ps[0] {
t.Errorf("median: 4599 != %v\n", ps[0])
}
if 7380.5 != ps[1] {
t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
}
if 9986.429999999998 != ps[2] {
t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
}
}
// TestUniformSampleConcurrentUpdateCount would expose data race problems with
// concurrent Update and Count calls on Sample when the test is run with the
// -race flag.
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
s := NewUniformSample(100)
for i := 0; i < 100; i++ {
s.Update(int64(i))
}
quit := make(chan struct{})
go func() {
t := time.NewTicker(10 * time.Millisecond)
for {
select {
case <-t.C:
s.Update(rand.Int63())
case <-quit:
t.Stop()
return
}
}
}()
for i := 0; i < 1000; i++ {
s.Count()
time.Sleep(5 * time.Millisecond)
}
quit <- struct{}{}
}
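The sample tests above cover the two reservoir types a histogram can be built on; a two-line fragment showing the choice, with the same constants the benchmarks use (an exponentially-decaying sample favours recent updates, a uniform sample weights the whole stream equally):

hRecent := metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
hUniform := metrics.NewHistogram(metrics.NewUniformSample(1028))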

View File

@ -1,81 +0,0 @@
package metrics
import (
"math"
"testing"
"time"
)
func BenchmarkTimer(b *testing.B) {
tm := NewTimer()
b.ResetTimer()
for i := 0; i < b.N; i++ {
tm.Update(1)
}
}
func TestGetOrRegisterTimer(t *testing.T) {
r := NewRegistry()
NewRegisteredTimer("foo", r).Update(47)
if tm := GetOrRegisterTimer("foo", r); 1 != tm.Count() {
t.Fatal(tm)
}
}
func TestTimerExtremes(t *testing.T) {
tm := NewTimer()
tm.Update(math.MaxInt64)
tm.Update(0)
if stdDev := tm.StdDev(); 4.611686018427388e+18 != stdDev {
t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev)
}
}
func TestTimerFunc(t *testing.T) {
tm := NewTimer()
tm.Time(func() { time.Sleep(50e6) })
if max := tm.Max(); 45e6 > max || max > 55e6 {
t.Errorf("tm.Max(): 45e6 > %v || %v > 55e6\n", max, max)
}
}
func TestTimerZero(t *testing.T) {
tm := NewTimer()
if count := tm.Count(); 0 != count {
t.Errorf("tm.Count(): 0 != %v\n", count)
}
if min := tm.Min(); 0 != min {
t.Errorf("tm.Min(): 0 != %v\n", min)
}
if max := tm.Max(); 0 != max {
t.Errorf("tm.Max(): 0 != %v\n", max)
}
if mean := tm.Mean(); 0.0 != mean {
t.Errorf("tm.Mean(): 0.0 != %v\n", mean)
}
if stdDev := tm.StdDev(); 0.0 != stdDev {
t.Errorf("tm.StdDev(): 0.0 != %v\n", stdDev)
}
ps := tm.Percentiles([]float64{0.5, 0.75, 0.99})
if 0.0 != ps[0] {
t.Errorf("median: 0.0 != %v\n", ps[0])
}
if 0.0 != ps[1] {
t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
}
if 0.0 != ps[2] {
t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
}
if rate1 := tm.Rate1(); 0.0 != rate1 {
t.Errorf("tm.Rate1(): 0.0 != %v\n", rate1)
}
if rate5 := tm.Rate5(); 0.0 != rate5 {
t.Errorf("tm.Rate5(): 0.0 != %v\n", rate5)
}
if rate15 := tm.Rate15(); 0.0 != rate15 {
t.Errorf("tm.Rate15(): 0.0 != %v\n", rate15)
}
if rateMean := tm.RateMean(); 0.0 != rateMean {
t.Errorf("tm.RateMean(): 0.0 != %v\n", rateMean)
}
}
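A fragment showing the two usual ways of feeding the Timer whose zero-value behaviour is pinned above; doWork is a placeholder for whatever is being measured:

tm := metrics.NewRegisteredTimer("db.query", metrics.DefaultRegistry)
tm.Time(func() { doWork() }) // time a closure directly
start := time.Now()
doWork()
tm.UpdateSince(start) // or record against an explicit start time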

View File

@ -1,22 +0,0 @@
package metrics
import (
"sort"
"testing"
)
func TestMetricsSorting(t *testing.T) {
var namedMetrics = namedMetricSlice{
{name: "zzz"},
{name: "bbb"},
{name: "fff"},
{name: "ggg"},
}
sort.Sort(namedMetrics)
for i, name := range []string{"bbb", "fff", "ggg", "zzz"} {
if namedMetrics[i].name != name {
t.Fail()
}
}
}

View File

@ -1,54 +0,0 @@
package gotwilio
import (
"testing"
)
var params map[string]string
func init() {
params = make(map[string]string)
params["SID"] = "AC0f30491286ab4abb4a108abefbd05d8a"
params["TOKEN"] = "1dcf52d7a1f3853ed78f0ee20d056dd0"
params["FROM"] = "+15005550006"
params["TO"] = "+19135551234"
}
func TestSMS(t *testing.T) {
msg := "Welcome to gotwilio"
twilio := NewTwilioClient(params["SID"], params["TOKEN"])
_, exc, err := twilio.SendSMS(params["FROM"], params["TO"], msg, "", "")
if err != nil {
t.Fatal(err)
}
if exc != nil {
t.Fatal(exc)
}
}
func TestMMS(t *testing.T) {
msg := "Welcome to gotwilio"
twilio := NewTwilioClient(params["SID"], params["TOKEN"])
_, exc, err := twilio.SendMMS(params["FROM"], params["TO"], msg, "http://www.google.com/images/logo.png", "", "")
if err != nil {
t.Fatal(err)
}
if exc != nil {
t.Fatal(exc)
}
}
func TestVoice(t *testing.T) {
callback := NewCallbackParameters("http://example.com")
twilio := NewTwilioClient(params["SID"], params["TOKEN"])
_, exc, err := twilio.CallWithUrlCallbacks(params["FROM"], params["TO"], callback)
if err != nil {
t.Fatal(err)
}
if exc != nil {
t.Fatal(exc)
}
}
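A hedged sketch of sending an alert through this client outside the test fixture; the account SID, auth token and phone numbers below are placeholders:

twilio := gotwilio.NewTwilioClient("ACxxxxxxxxxxxxxxxx", "auth-token")
_, exc, err := twilio.SendSMS("+15005550006", "+19135551234", "netmon: validator offline", "", "")
if err != nil {
    log.Fatal(err) // transport-level failure
}
if exc != nil {
    log.Fatalf("twilio rejected the request: %+v", exc) // API-level error
}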

View File

@ -1,70 +0,0 @@
package gotwilio
import (
"bytes"
"io/ioutil"
"net/http"
"net/url"
"testing"
)
const (
// Magic strings from https://github.com/twilio/twilio-python
testServerURL = "http://www.postbin.org"
testAuthToken = "1c892n40nd03kdnc0112slzkl3091j20"
testValidSignature = "fF+xx6dTinOaCdZ0aIeNkHr/ZAA="
)
func TestCheckSignature(t *testing.T) {
twilio := Twilio{
AuthToken: testAuthToken,
}
// Magic strings from https://github.com/twilio/twilio-python
u, err := url.Parse("/1ed898x")
if err != nil {
t.Fatal(err)
}
h := http.Header{
"Content-Type": []string{"application/x-www-form-urlencoded"},
"X-Twilio-Signature": []string{testValidSignature},
}
b := bytes.NewBufferString(`FromZip=89449&From=%2B15306666666&` +
`FromCity=SOUTH+LAKE+TAHOE&ApiVersion=2010-04-01&To=%2B15306384866&` +
`CallStatus=ringing&CalledState=CA&FromState=CA&Direction=inbound&` +
`ToCity=OAKLAND&ToZip=94612&CallerCity=SOUTH+LAKE+TAHOE&FromCountry=US&` +
`CallerName=CA+Wireless+Call&CalledCity=OAKLAND&CalledCountry=US&` +
`Caller=%2B15306666666&CallerZip=89449&AccountSid=AC9a9f9392lad99kla0sklakjs90j092j3&` +
`Called=%2B15306384866&CallerCountry=US&CalledZip=94612&CallSid=CAd800bb12c0426a7ea4230e492fef2a4f&` +
`CallerState=CA&ToCountry=US&ToState=CA`)
r := http.Request{
Method: "POST",
URL: u,
Header: h,
Body: ioutil.NopCloser(b),
}
valid, err := twilio.CheckRequestSignature(&r, testServerURL)
if err != nil {
t.Fatal(err)
}
if !valid {
t.Fatal("Expected signature to be valid")
}
h["X-Twilio-Signature"] = []string{"foo"}
valid, err = twilio.CheckRequestSignature(&r, testServerURL)
if err != nil {
t.Fatal(err)
}
if valid {
t.Fatal("Expected signature to be invalid")
}
delete(h, "X-Twilio-Signature")
valid, err = twilio.CheckRequestSignature(&r, testServerURL)
if err == nil {
t.Fatal("Expected an error verifying a request without a signature header")
}
}
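CheckRequestSignature, exercised above, is what a webhook endpoint calls before trusting a Twilio callback. A minimal hypothetical handler; the callback URL must match the one registered with Twilio and is a placeholder here:

func twilioWebhook(tw *gotwilio.Twilio) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        ok, err := tw.CheckRequestSignature(r, "https://example.com/twilio/callback")
        if err != nil || !ok {
            http.Error(w, "invalid Twilio signature", http.StatusForbidden)
            return
        }
        // ...handle the verified callback...
        w.WriteHeader(http.StatusOK)
    }
}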

View File

@ -1,120 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
)
type tbRec struct {
kt kType
key, value []byte
}
type testBatch struct {
rec []*tbRec
}
func (p *testBatch) Put(key, value []byte) {
p.rec = append(p.rec, &tbRec{ktVal, key, value})
}
func (p *testBatch) Delete(key []byte) {
p.rec = append(p.rec, &tbRec{ktDel, key, nil})
}
func compareBatch(t *testing.T, b1, b2 *Batch) {
if b1.seq != b2.seq {
t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
}
if b1.Len() != b2.Len() {
t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len())
}
p1, p2 := new(testBatch), new(testBatch)
err := b1.Replay(p1)
if err != nil {
t.Fatal("error when replaying batch 1: ", err)
}
err = b2.Replay(p2)
if err != nil {
t.Fatal("error when replaying batch 2: ", err)
}
for i := range p1.rec {
r1, r2 := p1.rec[i], p2.rec[i]
if r1.kt != r2.kt {
t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt)
}
if !bytes.Equal(r1.key, r2.key) {
t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
}
if r1.kt == ktVal {
if !bytes.Equal(r1.value, r2.value) {
t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
}
}
}
}
func TestBatch_EncodeDecode(t *testing.T) {
b1 := new(Batch)
b1.seq = 10009
b1.Put([]byte("key1"), []byte("value1"))
b1.Put([]byte("key2"), []byte("value2"))
b1.Delete([]byte("key1"))
b1.Put([]byte("k"), []byte(""))
b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz"))
b1.Delete([]byte("key10000"))
b1.Delete([]byte("k"))
buf := b1.encode()
b2 := new(Batch)
err := b2.decode(0, buf)
if err != nil {
t.Error("error when decoding batch: ", err)
}
compareBatch(t, b1, b2)
}
func TestBatch_Append(t *testing.T) {
b1 := new(Batch)
b1.seq = 10009
b1.Put([]byte("key1"), []byte("value1"))
b1.Put([]byte("key2"), []byte("value2"))
b1.Delete([]byte("key1"))
b1.Put([]byte("foo"), []byte("foovalue"))
b1.Put([]byte("bar"), []byte("barvalue"))
b2a := new(Batch)
b2a.seq = 10009
b2a.Put([]byte("key1"), []byte("value1"))
b2a.Put([]byte("key2"), []byte("value2"))
b2a.Delete([]byte("key1"))
b2b := new(Batch)
b2b.Put([]byte("foo"), []byte("foovalue"))
b2b.Put([]byte("bar"), []byte("barvalue"))
b2a.append(b2b)
compareBatch(t, b1, b2a)
}
func TestBatch_Size(t *testing.T) {
b := new(Batch)
for i := 0; i < 2; i++ {
b.Put([]byte("key1"), []byte("value1"))
b.Put([]byte("key2"), []byte("value2"))
b.Delete([]byte("key1"))
b.Put([]byte("foo"), []byte("foovalue"))
b.Put([]byte("bar"), []byte("barvalue"))
mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0)
b.memReplay(mem)
if b.size() != mem.Size() {
t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size())
}
b.Reset()
}
}
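Outside the test harness a Batch is the way to make a group of writes atomic; a brief fragment, where db is an already-opened *leveldb.DB:

batch := new(leveldb.Batch)
batch.Put([]byte("key1"), []byte("value1"))
batch.Put([]byte("key2"), []byte("value2"))
batch.Delete([]byte("key1"))
if err := db.Write(batch, nil); err != nil {
    log.Fatal(err) // the batch is applied atomically or not at all
}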

View File

@ -1,58 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build !go1.2
package leveldb
import (
"sync/atomic"
"testing"
)
func BenchmarkDBReadConcurrent(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
for pb.Next() && iter.Next() {
}
})
}
func BenchmarkDBReadConcurrent2(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
var dir uint32
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
if atomic.AddUint32(&dir, 1)%2 == 0 {
for pb.Next() && iter.Next() {
}
} else {
if pb.Next() && iter.Last() {
for pb.Next() && iter.Prev() {
}
}
}
})
}

View File

@ -1,464 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"fmt"
"math/rand"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
)
func randomString(r *rand.Rand, n int) []byte {
b := new(bytes.Buffer)
for i := 0; i < n; i++ {
b.WriteByte(' ' + byte(r.Intn(95)))
}
return b.Bytes()
}
func compressibleStr(r *rand.Rand, frac float32, n int) []byte {
nn := int(float32(n) * frac)
rb := randomString(r, nn)
b := make([]byte, 0, n+nn)
for len(b) < n {
b = append(b, rb...)
}
return b[:n]
}
type valueGen struct {
src []byte
pos int
}
func newValueGen(frac float32) *valueGen {
v := new(valueGen)
r := rand.New(rand.NewSource(301))
v.src = make([]byte, 0, 1048576+100)
for len(v.src) < 1048576 {
v.src = append(v.src, compressibleStr(r, frac, 100)...)
}
return v
}
func (v *valueGen) get(n int) []byte {
if v.pos+n > len(v.src) {
v.pos = 0
}
v.pos += n
return v.src[v.pos-n : v.pos]
}
var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid()))
type dbBench struct {
b *testing.B
stor storage.Storage
db *DB
o *opt.Options
ro *opt.ReadOptions
wo *opt.WriteOptions
keys, values [][]byte
}
func openDBBench(b *testing.B, noCompress bool) *dbBench {
_, err := os.Stat(benchDB)
if err == nil {
err = os.RemoveAll(benchDB)
if err != nil {
b.Fatal("cannot remove old db: ", err)
}
}
p := &dbBench{
b: b,
o: &opt.Options{},
ro: &opt.ReadOptions{},
wo: &opt.WriteOptions{},
}
p.stor, err = storage.OpenFile(benchDB)
if err != nil {
b.Fatal("cannot open stor: ", err)
}
if noCompress {
p.o.Compression = opt.NoCompression
}
p.db, err = Open(p.stor, p.o)
if err != nil {
b.Fatal("cannot open db: ", err)
}
runtime.GOMAXPROCS(runtime.NumCPU())
return p
}
func (p *dbBench) reopen() {
p.db.Close()
var err error
p.db, err = Open(p.stor, p.o)
if err != nil {
p.b.Fatal("Reopen: got error: ", err)
}
}
func (p *dbBench) populate(n int) {
p.keys, p.values = make([][]byte, n), make([][]byte, n)
v := newValueGen(0.5)
for i := range p.keys {
p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100)
}
}
func (p *dbBench) randomize() {
m := len(p.keys)
times := m * 2
r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface))
for n := 0; n < times; n++ {
i, j := r1.Int()%m, r2.Int()%m
if i == j {
continue
}
p.keys[i], p.keys[j] = p.keys[j], p.keys[i]
p.values[i], p.values[j] = p.values[j], p.values[i]
}
}
func (p *dbBench) writes(perBatch int) {
b := p.b
db := p.db
n := len(p.keys)
m := n / perBatch
if n%perBatch > 0 {
m++
}
batches := make([]Batch, m)
j := 0
for i := range batches {
first := true
for ; j < n && ((j+1)%perBatch != 0 || first); j++ {
first = false
batches[i].Put(p.keys[j], p.values[j])
}
}
runtime.GC()
b.ResetTimer()
b.StartTimer()
for i := range batches {
err := db.Write(&(batches[i]), p.wo)
if err != nil {
b.Fatal("write failed: ", err)
}
}
b.StopTimer()
b.SetBytes(116)
}
func (p *dbBench) gc() {
p.keys, p.values = nil, nil
runtime.GC()
}
func (p *dbBench) puts() {
b := p.b
db := p.db
b.ResetTimer()
b.StartTimer()
for i := range p.keys {
err := db.Put(p.keys[i], p.values[i], p.wo)
if err != nil {
b.Fatal("put failed: ", err)
}
}
b.StopTimer()
b.SetBytes(116)
}
func (p *dbBench) fill() {
b := p.b
db := p.db
perBatch := 10000
batch := new(Batch)
for i, n := 0, len(p.keys); i < n; {
first := true
for ; i < n && ((i+1)%perBatch != 0 || first); i++ {
first = false
batch.Put(p.keys[i], p.values[i])
}
err := db.Write(batch, p.wo)
if err != nil {
b.Fatal("write failed: ", err)
}
batch.Reset()
}
}
func (p *dbBench) gets() {
b := p.b
db := p.db
b.ResetTimer()
for i := range p.keys {
_, err := db.Get(p.keys[i], p.ro)
if err != nil {
b.Error("got error: ", err)
}
}
b.StopTimer()
}
func (p *dbBench) seeks() {
b := p.b
iter := p.newIter()
defer iter.Release()
b.ResetTimer()
for i := range p.keys {
if !iter.Seek(p.keys[i]) {
b.Error("value not found for: ", string(p.keys[i]))
}
}
b.StopTimer()
}
func (p *dbBench) newIter() iterator.Iterator {
iter := p.db.NewIterator(nil, p.ro)
err := iter.Error()
if err != nil {
p.b.Fatal("cannot create iterator: ", err)
}
return iter
}
func (p *dbBench) close() {
if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
p.b.Log("Block pool stats: ", bp)
}
p.db.Close()
p.stor.Close()
os.RemoveAll(benchDB)
p.db = nil
p.keys = nil
p.values = nil
runtime.GC()
runtime.GOMAXPROCS(1)
}
func BenchmarkDBWrite(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.writes(1)
p.close()
}
func BenchmarkDBWriteBatch(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.writes(1000)
p.close()
}
func BenchmarkDBWriteUncompressed(b *testing.B) {
p := openDBBench(b, true)
p.populate(b.N)
p.writes(1)
p.close()
}
func BenchmarkDBWriteBatchUncompressed(b *testing.B) {
p := openDBBench(b, true)
p.populate(b.N)
p.writes(1000)
p.close()
}
func BenchmarkDBWriteRandom(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.randomize()
p.writes(1)
p.close()
}
func BenchmarkDBWriteRandomSync(b *testing.B) {
p := openDBBench(b, false)
p.wo.Sync = true
p.populate(b.N)
p.writes(1)
p.close()
}
func BenchmarkDBOverwrite(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.writes(1)
p.writes(1)
p.close()
}
func BenchmarkDBOverwriteRandom(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.writes(1)
p.randomize()
p.writes(1)
p.close()
}
func BenchmarkDBPut(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.puts()
p.close()
}
func BenchmarkDBRead(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
iter := p.newIter()
b.ResetTimer()
for iter.Next() {
}
iter.Release()
b.StopTimer()
b.SetBytes(116)
p.close()
}
func BenchmarkDBReadGC(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
iter := p.newIter()
b.ResetTimer()
for iter.Next() {
}
iter.Release()
b.StopTimer()
b.SetBytes(116)
p.close()
}
func BenchmarkDBReadUncompressed(b *testing.B) {
p := openDBBench(b, true)
p.populate(b.N)
p.fill()
p.gc()
iter := p.newIter()
b.ResetTimer()
for iter.Next() {
}
iter.Release()
b.StopTimer()
b.SetBytes(116)
p.close()
}
func BenchmarkDBReadTable(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.reopen()
p.gc()
iter := p.newIter()
b.ResetTimer()
for iter.Next() {
}
iter.Release()
b.StopTimer()
b.SetBytes(116)
p.close()
}
func BenchmarkDBReadReverse(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
iter := p.newIter()
b.ResetTimer()
iter.Last()
for iter.Prev() {
}
iter.Release()
b.StopTimer()
b.SetBytes(116)
p.close()
}
func BenchmarkDBReadReverseTable(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.reopen()
p.gc()
iter := p.newIter()
b.ResetTimer()
iter.Last()
for iter.Prev() {
}
iter.Release()
b.StopTimer()
b.SetBytes(116)
p.close()
}
func BenchmarkDBSeek(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.seeks()
p.close()
}
func BenchmarkDBSeekRandom(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.randomize()
p.seeks()
p.close()
}
func BenchmarkDBGet(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gets()
p.close()
}
func BenchmarkDBGetRandom(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.randomize()
p.gets()
p.close()
}
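The harness above opens its store through the storage package; the more common path-based entry point is sketched below (the path and options are illustrative, and NoCompression mirrors the *Uncompressed benchmarks):

o := &opt.Options{Compression: opt.NoCompression}
db, err := leveldb.OpenFile("/tmp/netmon-bench.db", o)
if err != nil {
    log.Fatal(err)
}
defer db.Close()
if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
    log.Fatal(err)
}
v, err := db.Get([]byte("k"), nil) // v == []byte("v") when err == nil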

View File

@ -1,30 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build !go1.2
package cache
import (
"math/rand"
"testing"
"time"
)
func BenchmarkLRUCache(b *testing.B) {
c := NewCache(NewLRU(10000))
b.SetParallelism(10)
b.RunParallel(func(pb *testing.PB) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for pb.Next() {
key := uint64(r.Intn(1000000))
c.Get(0, key, func() (int, Value) {
return 1, key
}).Release()
}
})
}

View File

@ -1,554 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cache
import (
"math/rand"
"runtime"
"sync"
"sync/atomic"
"testing"
"time"
"unsafe"
)
type int32o int32
func (o *int32o) acquire() {
if atomic.AddInt32((*int32)(o), 1) != 1 {
panic("BUG: invalid ref")
}
}
func (o *int32o) Release() {
if atomic.AddInt32((*int32)(o), -1) != 0 {
panic("BUG: invalid ref")
}
}
type releaserFunc struct {
fn func()
value Value
}
func (r releaserFunc) Release() {
if r.fn != nil {
r.fn()
}
}
func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
return c.Get(ns, key, func() (int, Value) {
if relf != nil {
return charge, releaserFunc{relf, value}
} else {
return charge, value
}
})
}
func TestCacheMap(t *testing.T) {
runtime.GOMAXPROCS(runtime.NumCPU())
nsx := []struct {
nobjects, nhandles, concurrent, repeat int
}{
{10000, 400, 50, 3},
{100000, 1000, 100, 10},
}
var (
objects [][]int32o
handles [][]unsafe.Pointer
)
for _, x := range nsx {
objects = append(objects, make([]int32o, x.nobjects))
handles = append(handles, make([]unsafe.Pointer, x.nhandles))
}
c := NewCache(nil)
wg := new(sync.WaitGroup)
var done int32
for ns, x := range nsx {
for i := 0; i < x.concurrent; i++ {
wg.Add(1)
go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
defer wg.Done()
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for j := len(objects) * repeat; j >= 0; j-- {
key := uint64(r.Intn(len(objects)))
h := c.Get(uint64(ns), key, func() (int, Value) {
o := &objects[key]
o.acquire()
return 1, o
})
if v := h.Value().(*int32o); v != &objects[key] {
t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
}
if objects[key] != 1 {
t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
}
if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
h.Release()
}
}
}(ns, i, x.repeat, objects[ns], handles[ns])
}
go func(handles []unsafe.Pointer) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for atomic.LoadInt32(&done) == 0 {
i := r.Intn(len(handles))
h := (*Handle)(atomic.LoadPointer(&handles[i]))
if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
h.Release()
}
time.Sleep(time.Millisecond)
}
}(handles[ns])
}
go func() {
handles := make([]*Handle, 100000)
for atomic.LoadInt32(&done) == 0 {
for i := range handles {
handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
return 1, 1
})
}
for _, h := range handles {
h.Release()
}
}
}()
wg.Wait()
atomic.StoreInt32(&done, 1)
for _, handles0 := range handles {
for i := range handles0 {
h := (*Handle)(atomic.LoadPointer(&handles0[i]))
if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
h.Release()
}
}
}
for ns, objects0 := range objects {
for i, o := range objects0 {
if o != 0 {
t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
}
}
}
}
func TestCacheMap_NodesAndSize(t *testing.T) {
c := NewCache(nil)
if c.Nodes() != 0 {
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
}
if c.Size() != 0 {
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
}
set(c, 0, 1, 1, 1, nil)
set(c, 0, 2, 2, 2, nil)
set(c, 1, 1, 3, 3, nil)
set(c, 2, 1, 4, 1, nil)
if c.Nodes() != 4 {
t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
}
if c.Size() != 7 {
t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
}
}
func TestLRUCache_Capacity(t *testing.T) {
c := NewCache(NewLRU(10))
if c.Capacity() != 10 {
t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
}
set(c, 0, 1, 1, 1, nil).Release()
set(c, 0, 2, 2, 2, nil).Release()
set(c, 1, 1, 3, 3, nil).Release()
set(c, 2, 1, 4, 1, nil).Release()
set(c, 2, 2, 5, 1, nil).Release()
set(c, 2, 3, 6, 1, nil).Release()
set(c, 2, 4, 7, 1, nil).Release()
set(c, 2, 5, 8, 1, nil).Release()
if c.Nodes() != 7 {
t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
}
if c.Size() != 10 {
t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
}
c.SetCapacity(9)
if c.Capacity() != 9 {
t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
}
if c.Nodes() != 6 {
t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
}
if c.Size() != 8 {
t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
}
}
func TestCacheMap_NilValue(t *testing.T) {
c := NewCache(NewLRU(10))
h := c.Get(0, 0, func() (size int, value Value) {
return 1, nil
})
if h != nil {
t.Error("cache handle is non-nil")
}
if c.Nodes() != 0 {
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
}
if c.Size() != 0 {
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
}
}
func TestLRUCache_GetLatency(t *testing.T) {
runtime.GOMAXPROCS(runtime.NumCPU())
const (
concurrentSet = 30
concurrentGet = 3
duration = 3 * time.Second
delay = 3 * time.Millisecond
maxkey = 100000
)
var (
set, getHit, getAll int32
getMaxLatency, getDuration int64
)
c := NewCache(NewLRU(5000))
wg := &sync.WaitGroup{}
until := time.Now().Add(duration)
for i := 0; i < concurrentSet; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for time.Now().Before(until) {
c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
time.Sleep(delay)
atomic.AddInt32(&set, 1)
return 1, 1
}).Release()
}
}(i)
}
for i := 0; i < concurrentGet; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for {
mark := time.Now()
if mark.Before(until) {
h := c.Get(0, uint64(r.Intn(maxkey)), nil)
latency := int64(time.Now().Sub(mark))
m := atomic.LoadInt64(&getMaxLatency)
if latency > m {
atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
}
atomic.AddInt64(&getDuration, latency)
if h != nil {
atomic.AddInt32(&getHit, 1)
h.Release()
}
atomic.AddInt32(&getAll, 1)
} else {
break
}
}
}(i)
}
wg.Wait()
getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
if getAvglatency > delay/3 {
t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
}
}
func TestLRUCache_HitMiss(t *testing.T) {
cases := []struct {
key uint64
value string
}{
{1, "vvvvvvvvv"},
{100, "v1"},
{0, "v2"},
{12346, "v3"},
{777, "v4"},
{999, "v5"},
{7654, "v6"},
{2, "v7"},
{3, "v8"},
{9, "v9"},
}
setfin := 0
c := NewCache(NewLRU(1000))
for i, x := range cases {
set(c, 0, x.key, x.value, len(x.value), func() {
setfin++
}).Release()
for j, y := range cases {
h := c.Get(0, y.key, nil)
if j <= i {
// should hit
if h == nil {
t.Errorf("case '%d' iteration '%d' is miss", i, j)
} else {
if x := h.Value().(releaserFunc).value.(string); x != y.value {
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
}
}
} else {
// should miss
if h != nil {
t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
}
}
if h != nil {
h.Release()
}
}
}
for i, x := range cases {
finalizerOk := false
c.Delete(0, x.key, func() {
finalizerOk = true
})
if !finalizerOk {
t.Errorf("case %d delete finalizer not executed", i)
}
for j, y := range cases {
h := c.Get(0, y.key, nil)
if j > i {
// should hit
if h == nil {
t.Errorf("case '%d' iteration '%d' is miss", i, j)
} else {
if x := h.Value().(releaserFunc).value.(string); x != y.value {
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
}
}
} else {
// should miss
if h != nil {
t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
}
}
if h != nil {
h.Release()
}
}
}
if setfin != len(cases) {
t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
}
}
func TestLRUCache_Eviction(t *testing.T) {
c := NewCache(NewLRU(12))
o1 := set(c, 0, 1, 1, 1, nil)
set(c, 0, 2, 2, 1, nil).Release()
set(c, 0, 3, 3, 1, nil).Release()
set(c, 0, 4, 4, 1, nil).Release()
set(c, 0, 5, 5, 1, nil).Release()
if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
h.Release()
}
set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
for _, key := range []uint64{9, 2, 5, 1} {
h := c.Get(0, key, nil)
if h == nil {
t.Errorf("miss for key '%d'", key)
} else {
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
h.Release()
}
}
o1.Release()
for _, key := range []uint64{1, 2, 5} {
h := c.Get(0, key, nil)
if h == nil {
t.Errorf("miss for key '%d'", key)
} else {
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
h.Release()
}
}
for _, key := range []uint64{3, 4, 9} {
h := c.Get(0, key, nil)
if h != nil {
t.Errorf("hit for key '%d'", key)
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
h.Release()
}
}
}
func TestLRUCache_Evict(t *testing.T) {
c := NewCache(NewLRU(6))
set(c, 0, 1, 1, 1, nil).Release()
set(c, 0, 2, 2, 1, nil).Release()
set(c, 1, 1, 4, 1, nil).Release()
set(c, 1, 2, 5, 1, nil).Release()
set(c, 2, 1, 6, 1, nil).Release()
set(c, 2, 2, 7, 1, nil).Release()
for ns := 0; ns < 3; ns++ {
for key := 1; key < 3; key++ {
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
h.Release()
} else {
t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
}
}
}
if ok := c.Evict(0, 1); !ok {
t.Error("first Cache.Evict on #0.1 return false")
}
if ok := c.Evict(0, 1); ok {
t.Error("second Cache.Evict on #0.1 return true")
}
if h := c.Get(0, 1, nil); h != nil {
t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
}
c.EvictNS(1)
if h := c.Get(1, 1, nil); h != nil {
t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
}
if h := c.Get(1, 2, nil); h != nil {
t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
}
c.EvictAll()
for ns := 0; ns < 3; ns++ {
for key := 1; key < 3; key++ {
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
}
}
}
}
func TestLRUCache_Delete(t *testing.T) {
delFuncCalled := 0
delFunc := func() {
delFuncCalled++
}
c := NewCache(NewLRU(2))
set(c, 0, 1, 1, 1, nil).Release()
set(c, 0, 2, 2, 1, nil).Release()
if ok := c.Delete(0, 1, delFunc); !ok {
t.Error("Cache.Delete on #1 return false")
}
if h := c.Get(0, 1, nil); h != nil {
t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
}
if ok := c.Delete(0, 1, delFunc); ok {
t.Error("Cache.Delete on #1 return true")
}
h2 := c.Get(0, 2, nil)
if h2 == nil {
t.Error("Cache.Get on #2 return nil")
}
if ok := c.Delete(0, 2, delFunc); !ok {
t.Error("(1) Cache.Delete on #2 return false")
}
if ok := c.Delete(0, 2, delFunc); !ok {
t.Error("(2) Cache.Delete on #2 return false")
}
set(c, 0, 3, 3, 1, nil).Release()
set(c, 0, 4, 4, 1, nil).Release()
c.Get(0, 2, nil).Release()
for key := 2; key <= 4; key++ {
if h := c.Get(0, uint64(key), nil); h != nil {
h.Release()
} else {
t.Errorf("Cache.Get on #%d return nil", key)
}
}
h2.Release()
if h := c.Get(0, 2, nil); h != nil {
t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
}
if delFuncCalled != 4 {
t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
}
}
func TestLRUCache_Close(t *testing.T) {
relFuncCalled := 0
relFunc := func() {
relFuncCalled++
}
delFuncCalled := 0
delFunc := func() {
delFuncCalled++
}
c := NewCache(NewLRU(2))
set(c, 0, 1, 1, 1, relFunc).Release()
set(c, 0, 2, 2, 1, relFunc).Release()
h3 := set(c, 0, 3, 3, 1, relFunc)
if h3 == nil {
t.Error("Cache.Get on #3 return nil")
}
if ok := c.Delete(0, 3, delFunc); !ok {
t.Error("Cache.Delete on #3 return false")
}
c.Close()
if relFuncCalled != 3 {
t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
}
if delFuncCalled != 1 {
t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
}
}

View File

@ -1,500 +0,0 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"fmt"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
"io"
"math/rand"
"testing"
)
const ctValSize = 1000
type dbCorruptHarness struct {
dbHarness
}
func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
h := new(dbCorruptHarness)
h.init(t, o)
return h
}
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
return newDbCorruptHarnessWopt(t, &opt.Options{
BlockCacheCapacity: 100,
Strict: opt.StrictJournalChecksum,
})
}
func (h *dbCorruptHarness) recover() {
p := &h.dbHarness
t := p.t
var err error
p.db, err = Recover(h.stor, h.o)
if err != nil {
t.Fatal("Repair: got error: ", err)
}
}
func (h *dbCorruptHarness) build(n int) {
p := &h.dbHarness
t := p.t
db := p.db
batch := new(Batch)
for i := 0; i < n; i++ {
batch.Reset()
batch.Put(tkey(i), tval(i, ctValSize))
err := db.Write(batch, p.wo)
if err != nil {
t.Fatal("write error: ", err)
}
}
}
func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) {
p := &h.dbHarness
t := p.t
db := p.db
batch := new(Batch)
for i := range rnd.Perm(n) {
batch.Reset()
batch.Put(tkey(i), tval(i, ctValSize))
err := db.Write(batch, p.wo)
if err != nil {
t.Fatal("write error: ", err)
}
}
}
func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) {
p := &h.dbHarness
t := p.t
db := p.db
batch := new(Batch)
for i := 0; i < n; i++ {
batch.Reset()
batch.Delete(tkey(rnd.Intn(max)))
err := db.Write(batch, p.wo)
if err != nil {
t.Fatal("write error: ", err)
}
}
}
func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
p := &h.dbHarness
t := p.t
ff, _ := p.stor.GetFiles(ft)
sff := files(ff)
sff.sort()
if fi < 0 {
fi = len(sff) - 1
}
if fi >= len(sff) {
t.Fatalf("no such file with type %q with index %d", ft, fi)
}
file := sff[fi]
r, err := file.Open()
if err != nil {
t.Fatal("cannot open file: ", err)
}
x, err := r.Seek(0, 2)
if err != nil {
t.Fatal("cannot query file size: ", err)
}
m := int(x)
if _, err := r.Seek(0, 0); err != nil {
t.Fatal(err)
}
if offset < 0 {
if -offset > m {
offset = 0
} else {
offset = m + offset
}
}
if offset > m {
offset = m
}
if offset+n > m {
n = m - offset
}
buf := make([]byte, m)
_, err = io.ReadFull(r, buf)
if err != nil {
t.Fatal("cannot read file: ", err)
}
r.Close()
for i := 0; i < n; i++ {
buf[offset+i] ^= 0x80
}
err = file.Remove()
if err != nil {
t.Fatal("cannot remove old file: ", err)
}
w, err := file.Create()
if err != nil {
t.Fatal("cannot create new file: ", err)
}
_, err = w.Write(buf)
if err != nil {
t.Fatal("cannot write new file: ", err)
}
w.Close()
}
func (h *dbCorruptHarness) removeAll(ft storage.FileType) {
ff, err := h.stor.GetFiles(ft)
if err != nil {
h.t.Fatal("get files: ", err)
}
for _, f := range ff {
if err := f.Remove(); err != nil {
h.t.Error("remove file: ", err)
}
}
}
func (h *dbCorruptHarness) removeOne(ft storage.FileType) {
ff, err := h.stor.GetFiles(ft)
if err != nil {
h.t.Fatal("get files: ", err)
}
f := ff[rand.Intn(len(ff))]
h.t.Logf("removing file @%d", f.Num())
if err := f.Remove(); err != nil {
h.t.Error("remove file: ", err)
}
}
func (h *dbCorruptHarness) check(min, max int) {
p := &h.dbHarness
t := p.t
db := p.db
var n, badk, badv, missed, good int
iter := db.NewIterator(nil, p.ro)
for iter.Next() {
k := 0
fmt.Sscanf(string(iter.Key()), "%d", &k)
if k < n {
badk++
continue
}
missed += k - n
n = k + 1
if !bytes.Equal(iter.Value(), tval(k, ctValSize)) {
badv++
} else {
good++
}
}
err := iter.Error()
iter.Release()
t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v",
min, max, good, badk, badv, missed, err)
if good < min || good > max {
t.Errorf("good entries number not in range")
}
}
func TestCorruptDB_Journal(t *testing.T) {
h := newDbCorruptHarness(t)
h.build(100)
h.check(100, 100)
h.closeDB()
h.corrupt(storage.TypeJournal, -1, 19, 1)
h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1)
h.openDB()
h.check(36, 36)
h.close()
}
func TestCorruptDB_Table(t *testing.T) {
h := newDbCorruptHarness(t)
h.build(100)
h.compactMem()
h.compactRangeAt(0, "", "")
h.compactRangeAt(1, "", "")
h.closeDB()
h.corrupt(storage.TypeTable, -1, 100, 1)
h.openDB()
h.check(99, 99)
h.close()
}
func TestCorruptDB_TableIndex(t *testing.T) {
h := newDbCorruptHarness(t)
h.build(10000)
h.compactMem()
h.closeDB()
h.corrupt(storage.TypeTable, -1, -2000, 500)
h.openDB()
h.check(5000, 9999)
h.close()
}
func TestCorruptDB_MissingManifest(t *testing.T) {
rnd := rand.New(rand.NewSource(0x0badda7a))
h := newDbCorruptHarnessWopt(t, &opt.Options{
BlockCacheCapacity: 100,
Strict: opt.StrictJournalChecksum,
WriteBuffer: 1000 * 60,
})
h.build(1000)
h.compactMem()
h.buildShuffled(1000, rnd)
h.compactMem()
h.deleteRand(500, 1000, rnd)
h.compactMem()
h.buildShuffled(1000, rnd)
h.compactMem()
h.deleteRand(500, 1000, rnd)
h.compactMem()
h.buildShuffled(1000, rnd)
h.compactMem()
h.closeDB()
h.stor.SetIgnoreOpenErr(storage.TypeManifest)
h.removeAll(storage.TypeManifest)
h.openAssert(false)
h.stor.SetIgnoreOpenErr(0)
h.recover()
h.check(1000, 1000)
h.build(1000)
h.compactMem()
h.compactRange("", "")
h.closeDB()
h.recover()
h.check(1000, 1000)
h.close()
}
func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
h := newDbCorruptHarness(t)
h.put("foo", "v1")
h.put("foo", "v2")
h.put("foo", "v3")
h.put("foo", "v4")
h.put("foo", "v5")
h.closeDB()
h.recover()
h.getVal("foo", "v5")
h.put("foo", "v6")
h.getVal("foo", "v6")
h.reopenDB()
h.getVal("foo", "v6")
h.close()
}
func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
h := newDbCorruptHarness(t)
h.put("foo", "v1")
h.put("foo", "v2")
h.put("foo", "v3")
h.compactMem()
h.put("foo", "v4")
h.put("foo", "v5")
h.compactMem()
h.closeDB()
h.recover()
h.getVal("foo", "v5")
h.put("foo", "v6")
h.getVal("foo", "v6")
h.reopenDB()
h.getVal("foo", "v6")
h.close()
}
func TestCorruptDB_CorruptedManifest(t *testing.T) {
h := newDbCorruptHarness(t)
h.put("foo", "hello")
h.compactMem()
h.compactRange("", "")
h.closeDB()
h.corrupt(storage.TypeManifest, -1, 0, 1000)
h.openAssert(false)
h.recover()
h.getVal("foo", "hello")
h.close()
}
func TestCorruptDB_CompactionInputError(t *testing.T) {
h := newDbCorruptHarness(t)
h.build(10)
h.compactMem()
h.closeDB()
h.corrupt(storage.TypeTable, -1, 100, 1)
h.openDB()
h.check(9, 9)
h.build(10000)
h.check(10000, 10000)
h.close()
}
func TestCorruptDB_UnrelatedKeys(t *testing.T) {
h := newDbCorruptHarness(t)
h.build(10)
h.compactMem()
h.closeDB()
h.corrupt(storage.TypeTable, -1, 100, 1)
h.openDB()
h.put(string(tkey(1000)), string(tval(1000, ctValSize)))
h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
h.compactMem()
h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
h.close()
}
func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
h := newDbCorruptHarness(t)
h.put("a", "v1")
h.put("b", "v1")
h.compactMem()
h.put("a", "v2")
h.put("b", "v2")
h.compactMem()
h.put("a", "v3")
h.put("b", "v3")
h.compactMem()
h.put("c", "v0")
h.put("d", "v0")
h.compactMem()
h.compactRangeAt(1, "", "")
h.closeDB()
h.recover()
h.getVal("a", "v3")
h.getVal("b", "v3")
h.getVal("c", "v0")
h.getVal("d", "v0")
h.close()
}
func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
h := newDbCorruptHarness(t)
h.put("a", "v1")
h.put("b", "v1")
h.compactMem()
h.put("a", "v2")
h.put("b", "v2")
h.compactMem()
h.put("a", "v3")
h.put("b", "v3")
h.compactMem()
h.put("c", "v0")
h.put("d", "v0")
h.compactMem()
h.compactRangeAt(0, "", "")
h.closeDB()
h.recover()
h.getVal("a", "v3")
h.getVal("b", "v3")
h.getVal("c", "v0")
h.getVal("d", "v0")
h.close()
}
func TestCorruptDB_MissingTableFiles(t *testing.T) {
h := newDbCorruptHarness(t)
h.put("a", "v1")
h.put("b", "v1")
h.compactMem()
h.put("c", "v2")
h.put("d", "v2")
h.compactMem()
h.put("e", "v3")
h.put("f", "v3")
h.closeDB()
h.removeOne(storage.TypeTable)
h.openAssert(false)
h.close()
}
func TestCorruptDB_RecoverTable(t *testing.T) {
h := newDbCorruptHarnessWopt(t, &opt.Options{
WriteBuffer: 112 * opt.KiB,
CompactionTableSize: 90 * opt.KiB,
Filter: filter.NewBloomFilter(10),
})
h.build(1000)
h.compactMem()
h.compactRangeAt(0, "", "")
h.compactRangeAt(1, "", "")
seq := h.db.seq
h.closeDB()
h.corrupt(storage.TypeTable, 0, 1000, 1)
h.corrupt(storage.TypeTable, 3, 10000, 1)
// Corrupted filter shouldn't affect recovery.
h.corrupt(storage.TypeTable, 3, 113888, 10)
h.corrupt(storage.TypeTable, -1, 20000, 1)
h.recover()
if h.db.seq != seq {
t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
}
h.check(985, 985)
h.close()
}
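The corruption tests drive recovery through the harness's Recover(stor, o); for a path-backed database the equivalent entry point is RecoverFile, which discards the manifest and rebuilds it from the table files it can still read (the path is a placeholder):

db, err := leveldb.RecoverFile("/var/lib/netmon/state.db", nil)
if err != nil {
    log.Fatal("recovery failed: ", err)
}
defer db.Close()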

File diff suppressed because it is too large

View File

@ -1,58 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
)
var _ = testutil.Defer(func() {
Describe("Leveldb external", func() {
o := &opt.Options{
DisableBlockCache: true,
BlockRestartInterval: 5,
BlockSize: 80,
Compression: opt.NoCompression,
OpenFilesCacheCapacity: -1,
Strict: opt.StrictAll,
WriteBuffer: 1000,
CompactionTableSize: 2000,
}
Describe("write test", func() {
It("should do write correctly", func(done Done) {
db := newTestingDB(o, nil, nil)
t := testutil.DBTesting{
DB: db,
Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(),
}
testutil.DoDBTesting(&t)
db.TestClose()
done <- true
}, 20.0)
})
Describe("read test", func() {
testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB {
// Building the DB.
db := newTestingDB(o, nil, nil)
kv.IterateShuffled(nil, func(i int, key, value []byte) {
err := db.TestPut(key, value)
Expect(err).NotTo(HaveOccurred())
})
return db
}, func(db testutil.DB) {
db.(*testingDB).TestClose()
})
})
})
})

View File

@ -1,142 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package filter
import (
"encoding/binary"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
"testing"
)
type harness struct {
t *testing.T
bloom Filter
generator FilterGenerator
filter []byte
}
func newHarness(t *testing.T) *harness {
bloom := NewBloomFilter(10)
return &harness{
t: t,
bloom: bloom,
generator: bloom.NewGenerator(),
}
}
func (h *harness) add(key []byte) {
h.generator.Add(key)
}
func (h *harness) addNum(key uint32) {
var b [4]byte
binary.LittleEndian.PutUint32(b[:], key)
h.add(b[:])
}
func (h *harness) build() {
b := &util.Buffer{}
h.generator.Generate(b)
h.filter = b.Bytes()
}
func (h *harness) reset() {
h.filter = nil
}
func (h *harness) filterLen() int {
return len(h.filter)
}
func (h *harness) assert(key []byte, want, silent bool) bool {
got := h.bloom.Contains(h.filter, key)
if !silent && got != want {
h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want)
}
return got
}
func (h *harness) assertNum(key uint32, want, silent bool) bool {
var b [4]byte
binary.LittleEndian.PutUint32(b[:], key)
return h.assert(b[:], want, silent)
}
func TestBloomFilter_Empty(t *testing.T) {
h := newHarness(t)
h.build()
h.assert([]byte("hello"), false, false)
h.assert([]byte("world"), false, false)
}
func TestBloomFilter_Small(t *testing.T) {
h := newHarness(t)
h.add([]byte("hello"))
h.add([]byte("world"))
h.build()
h.assert([]byte("hello"), true, false)
h.assert([]byte("world"), true, false)
h.assert([]byte("x"), false, false)
h.assert([]byte("foo"), false, false)
}
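// nextN steps n by 1 below 10, by 10 below 100, by 100 below 1000 and by 1000
// afterwards, so the varying-length test samples a spread of key counts.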
func nextN(n int) int {
switch {
case n < 10:
n += 1
case n < 100:
n += 10
case n < 1000:
n += 100
default:
n += 1000
}
return n
}
func TestBloomFilter_VaryingLengths(t *testing.T) {
h := newHarness(t)
var mediocre, good int
for n := 1; n < 10000; n = nextN(n) {
h.reset()
for i := 0; i < n; i++ {
h.addNum(uint32(i))
}
h.build()
got := h.filterLen()
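// NewBloomFilter(10) stores ~10 bits (1.25 bytes) per key; the +40 leaves room for fixed overhead.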
want := (n * 10 / 8) + 40
if got > want {
t.Errorf("filter len test failed, '%d' > '%d'", got, want)
}
for i := 0; i < n; i++ {
h.assertNum(uint32(i), true, false)
}
var rate float32
for i := 0; i < 10000; i++ {
if h.assertNum(uint32(i+1000000000), true, true) {
rate++
}
}
rate /= 10000
if rate > 0.02 {
t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n)
}
if rate > 0.0125 {
mediocre++
} else {
good++
}
}
t.Logf("false positive rate: %d good, %d mediocre", good, mediocre)
if mediocre > good/5 {
t.Error("mediocre false positive rate is more than expected")
}
}

View File

@ -1,30 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iterator_test
import (
. "github.com/onsi/ginkgo"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
)
var _ = testutil.Defer(func() {
Describe("Array iterator", func() {
It("Should iterates and seeks correctly", func() {
// Build key/value.
kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3)
// Test the iterator.
t := testutil.IteratorTesting{
KeyValue: kv.Clone(),
Iter: NewArrayIterator(kv),
}
testutil.DoIteratorTesting(&t)
})
})
})

View File

@ -1,83 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iterator_test
import (
"sort"
. "github.com/onsi/ginkgo"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
)
type keyValue struct {
key []byte
testutil.KeyValue
}
type keyValueIndex []keyValue
func (x keyValueIndex) Search(key []byte) int {
return sort.Search(x.Len(), func(i int) bool {
return comparer.DefaultComparer.Compare(x[i].key, key) >= 0
})
}
func (x keyValueIndex) Len() int { return len(x) }
func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil }
func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) }
var _ = testutil.Defer(func() {
Describe("Indexed iterator", func() {
Test := func(n ...int) func() {
if len(n) == 0 {
rnd := testutil.NewRand()
n = make([]int, rnd.Intn(17)+3)
for i := range n {
n[i] = rnd.Intn(19) + 1
}
}
return func() {
It("Should iterates and seeks correctly", func(done Done) {
// Build key/value.
index := make(keyValueIndex, len(n))
sum := 0
for _, x := range n {
sum += x
}
kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4)
for i, j := 0, 0; i < len(n); i++ {
for x := n[i]; x > 0; x-- {
key, value := kv.Index(j)
index[i].key = key
index[i].Put(key, value)
j++
}
}
// Test the iterator.
t := testutil.IteratorTesting{
KeyValue: kv.Clone(),
Iter: NewIndexedIterator(NewArrayIndexer(index), true),
}
testutil.DoIteratorTesting(&t)
done <- true
}, 1.5)
}
}
Describe("with 100 keys", Test(100))
Describe("with 50-50 keys", Test(50, 50))
Describe("with 50-1 keys", Test(50, 1))
Describe("with 50-1-50 keys", Test(50, 1, 50))
Describe("with 1-50 keys", Test(1, 50))
Describe("with random N-keys", Test())
})
})

View File

@ -1,11 +0,0 @@
package iterator_test
import (
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
)
func TestIterator(t *testing.T) {
testutil.RunSuite(t, "Iterator Suite")
}

View File

@ -1,60 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package iterator_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
)
var _ = testutil.Defer(func() {
Describe("Merged iterator", func() {
Test := func(filled int, empty int) func() {
return func() {
It("Should iterates and seeks correctly", func(done Done) {
rnd := testutil.NewRand()
// Build key/value.
filledKV := make([]testutil.KeyValue, filled)
kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4)
kv.Iterate(func(i int, key, value []byte) {
filledKV[rnd.Intn(filled)].Put(key, value)
})
// Create iterators.
iters := make([]Iterator, filled+empty)
for i := range iters {
if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) {
filled--
Expect(filledKV[filled].Len()).ShouldNot(BeZero())
iters[i] = NewArrayIterator(filledKV[filled])
} else {
empty--
iters[i] = NewEmptyIterator(nil)
}
}
// Test the iterator.
t := testutil.IteratorTesting{
KeyValue: kv.Clone(),
Iter: NewMergedIterator(iters, comparer.DefaultComparer, true),
}
testutil.DoIteratorTesting(&t)
done <- true
}, 1.5)
}
}
Describe("with three, all filled iterators", Test(3, 0))
Describe("with one filled, one empty iterators", Test(1, 1))
Describe("with one filled, two empty iterators", Test(1, 2))
})
})

View File

@ -1,818 +0,0 @@
// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135
// License, authors and contributors information can be found at the below URLs respectively:
// https://code.google.com/p/leveldb-go/source/browse/LICENSE
// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
package journal
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math/rand"
"strings"
"testing"
)
type dropper struct {
t *testing.T
}
func (d dropper) Drop(err error) {
d.t.Log(err)
}
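// short truncates long payloads so failure messages stay readable.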
func short(s string) string {
if len(s) < 64 {
return s
}
return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:])
}
// big returns a string of length n, composed of repetitions of partial.
func big(partial string, n int) string {
return strings.Repeat(partial, n/len(partial)+1)[:n]
}
func TestEmpty(t *testing.T) {
buf := new(bytes.Buffer)
r := NewReader(buf, dropper{t}, true, true)
if _, err := r.Next(); err != io.EOF {
t.Fatalf("got %v, want %v", err, io.EOF)
}
}
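// testGenerator writes every string produced by gen as a journal record, then
// reads the journal back and checks each record round-trips unchanged.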
func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) {
buf := new(bytes.Buffer)
reset()
w := NewWriter(buf)
for {
s, ok := gen()
if !ok {
break
}
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write([]byte(s)); err != nil {
t.Fatal(err)
}
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
reset()
r := NewReader(buf, dropper{t}, true, true)
for {
s, ok := gen()
if !ok {
break
}
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
x, err := ioutil.ReadAll(rr)
if err != nil {
t.Fatal(err)
}
if string(x) != s {
t.Fatalf("got %q, want %q", short(string(x)), short(s))
}
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("got %v, want %v", err, io.EOF)
}
}
func testLiterals(t *testing.T, s []string) {
var i int
reset := func() {
i = 0
}
gen := func() (string, bool) {
if i == len(s) {
return "", false
}
i++
return s[i-1], true
}
testGenerator(t, reset, gen)
}
func TestMany(t *testing.T) {
const n = 1e5
var i int
reset := func() {
i = 0
}
gen := func() (string, bool) {
if i == n {
return "", false
}
i++
return fmt.Sprintf("%d.", i-1), true
}
testGenerator(t, reset, gen)
}
func TestRandom(t *testing.T) {
const n = 1e2
var (
i int
r *rand.Rand
)
reset := func() {
i, r = 0, rand.New(rand.NewSource(0))
}
gen := func() (string, bool) {
if i == n {
return "", false
}
i++
return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true
}
testGenerator(t, reset, gen)
}
func TestBasic(t *testing.T) {
testLiterals(t, []string{
strings.Repeat("a", 1000),
strings.Repeat("b", 97270),
strings.Repeat("c", 8000),
})
}
func TestBoundary(t *testing.T) {
for i := blockSize - 16; i < blockSize+16; i++ {
s0 := big("abcd", i)
for j := blockSize - 16; j < blockSize+16; j++ {
s1 := big("ABCDE", j)
testLiterals(t, []string{s0, s1})
testLiterals(t, []string{s0, "", s1})
testLiterals(t, []string{s0, "x", s1})
}
}
}
func TestFlush(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// Write a couple of records. Everything should still be held
// in the record.Writer buffer, so that buf.Len should be 0.
w0, _ := w.Next()
w0.Write([]byte("0"))
w1, _ := w.Next()
w1.Write([]byte("11"))
if got, want := buf.Len(), 0; got != want {
t.Fatalf("buffer length #0: got %d want %d", got, want)
}
// Flush the record.Writer buffer, which should yield 17 bytes.
// 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes.
if err := w.Flush(); err != nil {
t.Fatal(err)
}
if got, want := buf.Len(), 17; got != want {
t.Fatalf("buffer length #1: got %d want %d", got, want)
}
// Do another write, one that isn't large enough to complete the block.
// The write should not have flowed through to buf.
w2, _ := w.Next()
w2.Write(bytes.Repeat([]byte("2"), 10000))
if got, want := buf.Len(), 17; got != want {
t.Fatalf("buffer length #2: got %d want %d", got, want)
}
// Flushing should get us up to 10024 bytes written.
// 10024 = 17 + 7 + 10000.
if err := w.Flush(); err != nil {
t.Fatal(err)
}
if got, want := buf.Len(), 10024; got != want {
t.Fatalf("buffer length #3: got %d want %d", got, want)
}
// Do a bigger write, one that completes the current block.
// We should now have 32768 bytes (a complete block), without
// an explicit flush.
w3, _ := w.Next()
w3.Write(bytes.Repeat([]byte("3"), 40000))
if got, want := buf.Len(), 32768; got != want {
t.Fatalf("buffer length #4: got %d want %d", got, want)
}
// Flushing should get us up to 50038 bytes written.
// 50038 = 10024 + 2*7 + 40000. There are two headers because
// the one record was split into two chunks.
if err := w.Flush(); err != nil {
t.Fatal(err)
}
if got, want := buf.Len(), 50038; got != want {
t.Fatalf("buffer length #5: got %d want %d", got, want)
}
// Check that reading those records give the right lengths.
r := NewReader(buf, dropper{t}, true, true)
wants := []int64{1, 2, 10000, 40000}
for i, want := range wants {
rr, _ := r.Next()
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #%d: %v", i, err)
}
if n != want {
t.Fatalf("read #%d: got %d bytes want %d", i, n, want)
}
}
}
func TestNonExhaustiveRead(t *testing.T) {
const n = 100
buf := new(bytes.Buffer)
p := make([]byte, 10)
rnd := rand.New(rand.NewSource(1))
w := NewWriter(buf)
for i := 0; i < n; i++ {
length := len(p) + rnd.Intn(3*blockSize)
s := string(uint8(i)) + "123456789abcdefgh"
ww, _ := w.Next()
ww.Write([]byte(big(s, length)))
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
r := NewReader(buf, dropper{t}, true, true)
for i := 0; i < n; i++ {
rr, _ := r.Next()
_, err := io.ReadFull(rr, p)
if err != nil {
t.Fatal(err)
}
want := string(uint8(i)) + "123456789"
if got := string(p); got != want {
t.Fatalf("read #%d: got %q want %q", i, got, want)
}
}
}
func TestStaleReader(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
w0, err := w.Next()
if err != nil {
t.Fatal(err)
}
w0.Write([]byte("0"))
w1, err := w.Next()
if err != nil {
t.Fatal(err)
}
w1.Write([]byte("11"))
if err := w.Close(); err != nil {
t.Fatal(err)
}
r := NewReader(buf, dropper{t}, true, true)
r0, err := r.Next()
if err != nil {
t.Fatal(err)
}
r1, err := r.Next()
if err != nil {
t.Fatal(err)
}
p := make([]byte, 1)
if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") {
t.Fatalf("stale read #0: unexpected error: %v", err)
}
if _, err := r1.Read(p); err != nil {
t.Fatalf("fresh read #1: got %v want nil error", err)
}
if p[0] != '1' {
t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0])
}
}
func TestStaleWriter(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
w0, err := w.Next()
if err != nil {
t.Fatal(err)
}
w1, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") {
t.Fatalf("stale write #0: unexpected error: %v", err)
}
if _, err := w1.Write([]byte("11")); err != nil {
t.Fatalf("fresh write #1: got %v want nil error", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush: %v", err)
}
if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") {
t.Fatalf("stale write #1: unexpected error: %v", err)
}
}
func TestCorrupt_MissingLastBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
// Cut the last block.
b := buf.Bytes()[:blockSize]
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read.
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if n != blockSize-1024 {
t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
}
// Second read.
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #0.
for i := 0; i < 1024; i++ {
b[i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (third record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize-headerSize) + 1; n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (fourth record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #1: %v", err)
}
if want := int64(blockSize-headerSize) + 2; n != want {
t.Fatalf("read #1: got %d bytes want %d", n, want)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #1.
for i := 0; i < 1024; i++ {
b[blockSize+i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (second record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
// Third read (fourth record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #2: %v", err)
}
if want := int64(blockSize-headerSize) + 2; n != want {
t.Fatalf("read #2: got %d bytes want %d", n, want)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedLastBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #3.
for i := len(b) - 1; i > len(b)-1024; i-- {
b[i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (second record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #1: %v", err)
}
if want := int64(blockSize - headerSize); n != want {
t.Fatalf("read #1: got %d bytes want %d", n, want)
}
// Third read (third record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #2: %v", err)
}
if want := int64(blockSize-headerSize) + 1; n != want {
t.Fatalf("read #2: got %d bytes want %d", n, want)
}
// Fourth read (fourth record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #3: unexpected error: %v", err)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting record #1.
x := blockSize
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (second record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting record #1.
x := blockSize/2 + headerSize
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (third record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #1: %v", err)
}
if want := int64(blockSize-headerSize) + 1; n != want {
t.Fatalf("read #1: got %d bytes want %d", n, want)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}

View File

@ -1,133 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
)
var defaultIComparer = &iComparer{comparer.DefaultComparer}
func ikey(key string, seq uint64, kt kType) iKey {
return newIkey([]byte(key), uint64(seq), kt)
}
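// shortSep returns the shortest internal key that separates a from b, falling back to a.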
func shortSep(a, b []byte) []byte {
dst := make([]byte, len(a))
dst = defaultIComparer.Separator(dst[:0], a, b)
if dst == nil {
return a
}
return dst
}
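// shortSuccessor returns the shortest internal key ordered at or after b, falling back to b.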
func shortSuccessor(b []byte) []byte {
dst := make([]byte, len(b))
dst = defaultIComparer.Successor(dst[:0], b)
if dst == nil {
return b
}
return dst
}
func testSingleKey(t *testing.T, key string, seq uint64, kt kType) {
ik := ikey(key, seq, kt)
if !bytes.Equal(ik.ukey(), []byte(key)) {
t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
}
rseq, rt := ik.parseNum()
if rseq != seq {
t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
}
if rt != kt {
t.Errorf("type does not equal, got %v, want %v", rt, kt)
}
if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil {
if !bytes.Equal(rukey, []byte(key)) {
t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
}
if rseq != seq {
t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
}
if rt != kt {
t.Errorf("type does not equal, got %v, want %v", rt, kt)
}
} else {
t.Errorf("key error: %v", kerr)
}
}
func TestIkey_EncodeDecode(t *testing.T) {
keys := []string{"", "k", "hello", "longggggggggggggggggggggg"}
seqs := []uint64{
1, 2, 3,
(1 << 8) - 1, 1 << 8, (1 << 8) + 1,
(1 << 16) - 1, 1 << 16, (1 << 16) + 1,
(1 << 32) - 1, 1 << 32, (1 << 32) + 1,
}
for _, key := range keys {
for _, seq := range seqs {
testSingleKey(t, key, seq, ktVal)
testSingleKey(t, "hello", 1, ktDel)
}
}
}
func assertBytes(t *testing.T, want, got []byte) {
if !bytes.Equal(got, want) {
t.Errorf("assert failed, got %v, want %v", got, want)
}
}
func TestIkeyShortSeparator(t *testing.T) {
// When user keys are the same
assertBytes(t, ikey("foo", 100, ktVal),
shortSep(ikey("foo", 100, ktVal),
ikey("foo", 99, ktVal)))
assertBytes(t, ikey("foo", 100, ktVal),
shortSep(ikey("foo", 100, ktVal),
ikey("foo", 101, ktVal)))
assertBytes(t, ikey("foo", 100, ktVal),
shortSep(ikey("foo", 100, ktVal),
ikey("foo", 100, ktVal)))
assertBytes(t, ikey("foo", 100, ktVal),
shortSep(ikey("foo", 100, ktVal),
ikey("foo", 100, ktDel)))
// When user keys are misordered
assertBytes(t, ikey("foo", 100, ktVal),
shortSep(ikey("foo", 100, ktVal),
ikey("bar", 99, ktVal)))
// When user keys are different, but correctly ordered
assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
shortSep(ikey("foo", 100, ktVal),
ikey("hello", 200, ktVal)))
// When start user key is prefix of limit user key
assertBytes(t, ikey("foo", 100, ktVal),
shortSep(ikey("foo", 100, ktVal),
ikey("foobar", 200, ktVal)))
// When limit user key is prefix of start user key
assertBytes(t, ikey("foobar", 100, ktVal),
shortSep(ikey("foobar", 100, ktVal),
ikey("foo", 200, ktVal)))
}
func TestIkeyShortestSuccessor(t *testing.T) {
assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
shortSuccessor(ikey("foo", 100, ktVal)))
assertBytes(t, ikey("\xff\xff", 100, ktVal),
shortSuccessor(ikey("\xff\xff", 100, ktVal)))
}

View File

@ -1,11 +0,0 @@
package leveldb
import (
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
)
func TestLevelDB(t *testing.T) {
testutil.RunSuite(t, "LevelDB Suite")
}

View File

@ -1,75 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package memdb
import (
"encoding/binary"
"math/rand"
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
)
func BenchmarkPut(b *testing.B) {
buf := make([][4]byte, b.N)
for i := range buf {
binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
}
b.ResetTimer()
p := New(comparer.DefaultComparer, 0)
for i := range buf {
p.Put(buf[i][:], nil)
}
}
func BenchmarkPutRandom(b *testing.B) {
buf := make([][4]byte, b.N)
for i := range buf {
binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int()))
}
b.ResetTimer()
p := New(comparer.DefaultComparer, 0)
for i := range buf {
p.Put(buf[i][:], nil)
}
}
func BenchmarkGet(b *testing.B) {
buf := make([][4]byte, b.N)
for i := range buf {
binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
}
p := New(comparer.DefaultComparer, 0)
for i := range buf {
p.Put(buf[i][:], nil)
}
b.ResetTimer()
for i := range buf {
p.Get(buf[i][:])
}
}
func BenchmarkGetRandom(b *testing.B) {
buf := make([][4]byte, b.N)
for i := range buf {
binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
}
p := New(comparer.DefaultComparer, 0)
for i := range buf {
p.Put(buf[i][:], nil)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
p.Get(buf[rand.Int()%b.N][:])
}
}

View File

@ -1,11 +0,0 @@
package memdb
import (
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
)
func TestMemDB(t *testing.T) {
testutil.RunSuite(t, "MemDB Suite")
}

View File

@ -1,135 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package memdb
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)
func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) {
p.mu.RLock()
if node := p.findLT(key); node != 0 {
n := p.nodeData[node]
m := n + p.nodeData[node+nKey]
rkey = p.kvData[n:m]
value = p.kvData[m : m+p.nodeData[node+nVal]]
} else {
err = ErrNotFound
}
p.mu.RUnlock()
return
}
func (p *DB) TestFindLast() (rkey, value []byte, err error) {
p.mu.RLock()
if node := p.findLast(); node != 0 {
n := p.nodeData[node]
m := n + p.nodeData[node+nKey]
rkey = p.kvData[n:m]
value = p.kvData[m : m+p.nodeData[node+nVal]]
} else {
err = ErrNotFound
}
p.mu.RUnlock()
return
}
func (p *DB) TestPut(key []byte, value []byte) error {
p.Put(key, value)
return nil
}
func (p *DB) TestDelete(key []byte) error {
p.Delete(key)
return nil
}
func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) {
return p.Find(key)
}
func (p *DB) TestGet(key []byte) (value []byte, err error) {
return p.Get(key)
}
func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator {
return p.NewIterator(slice)
}
var _ = testutil.Defer(func() {
Describe("Memdb", func() {
Describe("write test", func() {
It("should do write correctly", func() {
db := New(comparer.DefaultComparer, 0)
t := testutil.DBTesting{
DB: db,
Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(),
PostFn: func(t *testutil.DBTesting) {
Expect(db.Len()).Should(Equal(t.Present.Len()))
Expect(db.Size()).Should(Equal(t.Present.Size()))
switch t.Act {
case testutil.DBPut, testutil.DBOverwrite:
Expect(db.Contains(t.ActKey)).Should(BeTrue())
default:
Expect(db.Contains(t.ActKey)).Should(BeFalse())
}
},
}
testutil.DoDBTesting(&t)
})
})
Describe("read test", func() {
testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB {
// Building the DB.
db := New(comparer.DefaultComparer, 0)
kv.IterateShuffled(nil, func(i int, key, value []byte) {
db.Put(key, value)
})
if kv.Len() > 1 {
It("Should find correct keys with findLT", func() {
testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) {
key_, key, _ := kv.IndexInexact(i + 1)
expectedKey, expectedValue := kv.Index(i)
// Using key that exist.
rkey, rvalue, err := db.TestFindLT(key)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey)
Expect(rkey).Should(Equal(expectedKey), "Key")
Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey)
// Using key that doesn't exist.
rkey, rvalue, err = db.TestFindLT(key_)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey)
Expect(rkey).Should(Equal(expectedKey))
Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey)
})
})
}
if kv.Len() > 0 {
It("Should find last key with findLast", func() {
key, value := kv.Index(kv.Len() - 1)
rkey, rvalue, err := db.TestFindLast()
Expect(err).ShouldNot(HaveOccurred())
Expect(rkey).Should(Equal(key))
Expect(rvalue).Should(Equal(value))
})
}
return db
}, nil, nil)
})
})
})

View File

@ -1,455 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"fmt"
"io"
"os"
"sync"
"sync/atomic"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)
type ErrManifestCorrupted struct {
Field string
Reason string
}
func (e *ErrManifestCorrupted) Error() string {
return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
}
func newErrManifestCorrupted(f storage.File, field, reason string) error {
return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason})
}
// session represents a persistent database session.
type session struct {
// Need 64-bit alignment.
stNextFileNum uint64 // current unused file number
stJournalNum uint64 // current journal file number; need external synchronization
stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb
stSeqNum uint64 // last mem compacted seq; need external synchronization
stTempFileNum uint64
stor storage.Storage
storLock util.Releaser
o *cachedOptions
icmp *iComparer
tops *tOps
manifest *journal.Writer
manifestWriter storage.Writer
manifestFile storage.File
stCompPtrs []iKey // compaction pointers; need external synchronization
stVersion *version // current version
vmu sync.Mutex
}
// Creates new initialized session instance.
func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
if stor == nil {
return nil, os.ErrInvalid
}
storLock, err := stor.Lock()
if err != nil {
return
}
s = &session{
stor: stor,
storLock: storLock,
stCompPtrs: make([]iKey, o.GetNumLevel()),
}
s.setOptions(o)
s.tops = newTableOps(s)
s.setVersion(newVersion(s))
s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
return
}
// Close session.
func (s *session) close() {
s.tops.close()
if s.manifest != nil {
s.manifest.Close()
}
if s.manifestWriter != nil {
s.manifestWriter.Close()
}
s.manifest = nil
s.manifestWriter = nil
s.manifestFile = nil
s.stVersion = nil
}
// Release session lock.
func (s *session) release() {
s.storLock.Release()
}
// Create a new database session; need external synchronization.
func (s *session) create() error {
// create manifest
return s.newManifest(nil, nil)
}
// Recover a database session; need external synchronization.
func (s *session) recover() (err error) {
defer func() {
if os.IsNotExist(err) {
// Don't return os.ErrNotExist if the underlying storage contains
// other files that belong to LevelDB. So the DB won't get trashed.
if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 {
err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
}
}
}()
m, err := s.stor.GetManifest()
if err != nil {
return
}
reader, err := m.Open()
if err != nil {
return
}
defer reader.Close()
strict := s.o.GetStrict(opt.StrictManifest)
jr := journal.NewReader(reader, dropper{s, m}, strict, true)
staging := s.stVersion.newStaging()
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
for {
var r io.Reader
r, err = jr.Next()
if err != nil {
if err == io.EOF {
err = nil
break
}
return errors.SetFile(err, m)
}
err = rec.decode(r)
if err == nil {
// save compact pointers
for _, r := range rec.compPtrs {
s.stCompPtrs[r.level] = iKey(r.ikey)
}
// commit record to version staging
staging.commit(rec)
} else {
err = errors.SetFile(err, m)
if strict || !errors.IsCorrupted(err) {
return
} else {
s.logf("manifest error: %v (skipped)", errors.SetFile(err, m))
}
}
rec.resetCompPtrs()
rec.resetAddedTables()
rec.resetDeletedTables()
}
switch {
case !rec.has(recComparer):
return newErrManifestCorrupted(m, "comparer", "missing")
case rec.comparer != s.icmp.uName():
return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
case !rec.has(recNextFileNum):
return newErrManifestCorrupted(m, "next-file-num", "missing")
case !rec.has(recJournalNum):
return newErrManifestCorrupted(m, "journal-file-num", "missing")
case !rec.has(recSeqNum):
return newErrManifestCorrupted(m, "seq-num", "missing")
}
s.manifestFile = m
s.setVersion(staging.finish())
s.setNextFileNum(rec.nextFileNum)
s.recordCommited(rec)
return nil
}
// Commit session; need external synchronization.
func (s *session) commit(r *sessionRecord) (err error) {
v := s.version()
defer v.release()
// spawn new version based on current version
nv := v.spawn(r)
if s.manifest == nil {
// manifest journal writer not yet created, create one
err = s.newManifest(r, nv)
} else {
err = s.flushManifest(r)
}
// finally, apply new version if no error arises
if err == nil {
s.setVersion(nv)
}
return
}
// Pick a compaction based on current state; need external synchronization.
func (s *session) pickCompaction() *compaction {
v := s.version()
var level int
var t0 tFiles
if v.cScore >= 1 {
level = v.cLevel
cptr := s.stCompPtrs[level]
tables := v.tables[level]
for _, t := range tables {
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
t0 = append(t0, t)
break
}
}
if len(t0) == 0 {
t0 = append(t0, tables[0])
}
} else {
if p := atomic.LoadPointer(&v.cSeek); p != nil {
ts := (*tSet)(p)
level = ts.level
t0 = append(t0, ts.table)
} else {
v.release()
return nil
}
}
return newCompaction(s, v, level, t0)
}
// Create compaction from given level and range; need external synchronization.
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
v := s.version()
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
if len(t0) == 0 {
v.release()
return nil
}
// Avoid compacting too much in one shot in case the range is large.
// But we cannot do this for level-0 since level-0 files can overlap
// and we must not pick one file and drop another older file if the
// two files overlap.
if level > 0 {
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
total := uint64(0)
for i, t := range t0 {
total += t.size
if total >= limit {
s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
t0 = t0[:i+1]
break
}
}
}
return newCompaction(s, v, level, t0)
}
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
c := &compaction{
s: s,
v: v,
level: level,
tables: [2]tFiles{t0, nil},
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
tPtrs: make([]int, s.o.GetNumLevel()),
}
c.expand()
c.save()
return c
}
// compaction represents a compaction state.
type compaction struct {
s *session
v *version
level int
tables [2]tFiles
maxGPOverlaps uint64
gp tFiles
gpi int
seenKey bool
gpOverlappedBytes uint64
imin, imax iKey
tPtrs []int
released bool
snapGPI int
snapSeenKey bool
snapGPOverlappedBytes uint64
snapTPtrs []int
}
func (c *compaction) save() {
c.snapGPI = c.gpi
c.snapSeenKey = c.seenKey
c.snapGPOverlappedBytes = c.gpOverlappedBytes
c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
}
func (c *compaction) restore() {
c.gpi = c.snapGPI
c.seenKey = c.snapSeenKey
c.gpOverlappedBytes = c.snapGPOverlappedBytes
c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
}
func (c *compaction) release() {
if !c.released {
c.released = true
c.v.release()
}
}
// Expand compacted tables; need external synchronization.
func (c *compaction) expand() {
limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
t0, t1 := c.tables[0], c.tables[1]
imin, imax := t0.getRange(c.s.icmp)
// We expand t0 here just in case ukey hops across tables.
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
if len(t0) != len(c.tables[0]) {
imin, imax = t0.getRange(c.s.icmp)
}
t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
// Get entire range covered by compaction.
amin, amax := append(t0, t1...).getRange(c.s.icmp)
// See if we can grow the number of inputs in "level" without
// changing the number of "level+1" files we pick up.
if len(t1) > 0 {
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
xmin, xmax := exp0.getRange(c.s.icmp)
exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
if len(exp1) == len(t1) {
c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
imin, imax = xmin, xmax
t0, t1 = exp0, exp1
amin, amax = append(t0, t1...).getRange(c.s.icmp)
}
}
}
// Compute the set of grandparent files that overlap this compaction
// (parent == level+1; grandparent == level+2)
if c.level+2 < c.s.o.GetNumLevel() {
c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
}
c.tables[0], c.tables[1] = t0, t1
c.imin, c.imax = imin, imax
}
// Check whether compaction is trivial.
func (c *compaction) trivial() bool {
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
}
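// baseLevelForKey reports whether ukey falls outside the key range of every table
// in the levels below the compaction output (level+2 and deeper).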
func (c *compaction) baseLevelForKey(ukey []byte) bool {
for level, tables := range c.v.tables[c.level+2:] {
for c.tPtrs[level] < len(tables) {
t := tables[c.tPtrs[level]]
if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
// We've advanced far enough.
if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
// Key falls in this file's range, so definitely not base level.
return false
}
break
}
c.tPtrs[level]++
}
}
return true
}
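// shouldStopBefore reports whether the current output table should be finished
// before adding ikey, once its overlap with the grandparent level exceeds maxGPOverlaps.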
func (c *compaction) shouldStopBefore(ikey iKey) bool {
for ; c.gpi < len(c.gp); c.gpi++ {
gp := c.gp[c.gpi]
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
break
}
if c.seenKey {
c.gpOverlappedBytes += gp.size
}
}
c.seenKey = true
if c.gpOverlappedBytes > c.maxGPOverlaps {
// Too much overlap for current output; start new output.
c.gpOverlappedBytes = 0
return true
}
return false
}
// Creates an iterator.
func (c *compaction) newIterator() iterator.Iterator {
// Creates iterator slice.
icap := len(c.tables)
if c.level == 0 {
// Special case for level-0
icap = len(c.tables[0]) + 1
}
its := make([]iterator.Iterator, 0, icap)
// Options.
ro := &opt.ReadOptions{
DontFillCache: true,
Strict: opt.StrictOverride,
}
strict := c.s.o.GetStrict(opt.StrictCompaction)
if strict {
ro.Strict |= opt.StrictReader
}
for i, tables := range c.tables {
if len(tables) == 0 {
continue
}
// Level-0 tables are not sorted and may overlap each other.
if c.level+i == 0 {
for _, t := range tables {
its = append(its, c.s.tops.newIterator(t, nil, ro))
}
} else {
it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
its = append(its, it)
}
}
return iterator.NewMergedIterator(its, c.s.icmp, strict)
}

View File

@ -1,64 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"bytes"
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
)
func decodeEncode(v *sessionRecord) (res bool, err error) {
b := new(bytes.Buffer)
err = v.encode(b)
if err != nil {
return
}
v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
err = v.decode(b)
if err != nil {
return
}
b2 := new(bytes.Buffer)
err = v2.encode(b2)
if err != nil {
return
}
return bytes.Equal(b.Bytes(), b2.Bytes()), nil
}
func TestSessionRecord_EncodeDecode(t *testing.T) {
big := uint64(1) << 50
v := &sessionRecord{numLevel: opt.DefaultNumLevel}
i := uint64(0)
test := func() {
res, err := decodeEncode(v)
if err != nil {
t.Fatalf("error when testing encode/decode sessionRecord: %v", err)
}
if !res {
t.Error("encode/decode test failed at iteration:", i)
}
}
for ; i < 4; i++ {
test()
v.addTable(3, big+300+i, big+400+i,
newIkey([]byte("foo"), big+500+1, ktVal),
newIkey([]byte("zoo"), big+600+1, ktDel))
v.delTable(4, big+700+i)
v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal))
}
v.setComparer("foo")
v.setJournalNum(big + 100)
v.setPrevJournalNum(big + 99)
v.setNextFileNum(big + 200)
v.setSeqNum(big + 1000)
test()
}

View File

@ -1,142 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package storage
import (
"fmt"
"os"
"path/filepath"
"testing"
)
var cases = []struct {
oldName []string
name string
ftype FileType
num uint64
}{
{nil, "000100.log", TypeJournal, 100},
{nil, "000000.log", TypeJournal, 0},
{[]string{"000000.sst"}, "000000.ldb", TypeTable, 0},
{nil, "MANIFEST-000002", TypeManifest, 2},
{nil, "MANIFEST-000007", TypeManifest, 7},
{nil, "18446744073709551615.log", TypeJournal, 18446744073709551615},
{nil, "000100.tmp", TypeTemp, 100},
}
var invalidCases = []string{
"",
"foo",
"foo-dx-100.log",
".log",
"",
"manifest",
"CURREN",
"CURRENTX",
"MANIFES",
"MANIFEST",
"MANIFEST-",
"XMANIFEST-3",
"MANIFEST-3x",
"LOC",
"LOCKx",
"LO",
"LOGx",
"18446744073709551616.log",
"184467440737095516150.log",
"100",
"100.",
"100.lop",
}
func TestFileStorage_CreateFileName(t *testing.T) {
for _, c := range cases {
f := &file{num: c.num, t: c.ftype}
if f.name() != c.name {
t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name)
}
}
}
func TestFileStorage_ParseFileName(t *testing.T) {
for _, c := range cases {
for _, name := range append([]string{c.name}, c.oldName...) {
f := new(file)
if !f.parse(name) {
t.Errorf("cannot parse filename '%s'", name)
continue
}
if f.Type() != c.ftype {
t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype)
}
if f.Num() != c.num {
t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num)
}
}
}
}
func TestFileStorage_InvalidFileName(t *testing.T) {
for _, name := range invalidCases {
f := new(file)
if f.parse(name) {
t.Errorf("filename '%s' should be invalid", name)
}
}
}
func TestFileStorage_Locking(t *testing.T) {
path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid()))
_, err := os.Stat(path)
if err == nil {
err = os.RemoveAll(path)
if err != nil {
t.Fatal("RemoveAll: got error: ", err)
}
}
p1, err := OpenFile(path)
if err != nil {
t.Fatal("OpenFile(1): got error: ", err)
}
defer os.RemoveAll(path)
p2, err := OpenFile(path)
if err != nil {
t.Logf("OpenFile(2): got error: %s (expected)", err)
} else {
p2.Close()
p1.Close()
t.Fatal("OpenFile(2): expect error")
}
p1.Close()
p3, err := OpenFile(path)
if err != nil {
t.Fatal("OpenFile(3): got error: ", err)
}
defer p3.Close()
l, err := p3.Lock()
if err != nil {
t.Fatal("storage lock failed(1): ", err)
}
_, err = p3.Lock()
if err == nil {
t.Fatal("expect error for second storage lock attempt")
} else {
t.Logf("storage lock got error: %s (expected)", err)
}
l.Release()
_, err = p3.Lock()
if err != nil {
t.Fatal("storage lock failed(2): ", err)
}
}

View File

@ -1,66 +0,0 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package storage
import (
"bytes"
"testing"
)
func TestMemStorage(t *testing.T) {
m := NewMemStorage()
l, err := m.Lock()
if err != nil {
t.Fatal("storage lock failed(1): ", err)
}
_, err = m.Lock()
if err == nil {
t.Fatal("expect error for second storage lock attempt")
} else {
t.Logf("storage lock got error: %s (expected)", err)
}
l.Release()
_, err = m.Lock()
if err != nil {
t.Fatal("storage lock failed(2): ", err)
}
f := m.GetFile(1, TypeTable)
if f.Num() != 1 || f.Type() != TypeTable {
t.Fatal("invalid file number and type")
}
w, _ := f.Create()
w.Write([]byte("abc"))
w.Close()
if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 {
t.Fatal("invalid GetFiles len")
}
buf := new(bytes.Buffer)
r, err := f.Open()
if err != nil {
t.Fatal("Open: got error: ", err)
}
buf.ReadFrom(r)
r.Close()
if got := buf.String(); got != "abc" {
t.Fatalf("Read: invalid value, want=abc got=%s", got)
}
if _, err := f.Open(); err != nil {
t.Fatal("Open: got error: ", err)
}
if _, err := m.GetFile(1, TypeTable).Open(); err == nil {
t.Fatal("expecting error")
}
f.Remove()
if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 {
t.Fatal("invalid GetFiles len", len(ff))
}
if _, err := f.Open(); err == nil {
t.Fatal("expecting error")
}
}

View File

@ -1,539 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sync"
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)
const typeShift = 4
var (
tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument")
tsErrFileOpen = errors.New("leveldb.testStorage: file still open")
)
var (
tsFSEnv = os.Getenv("GOLEVELDB_USEFS")
tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR")
tsKeepFS = tsFSEnv == "2"
tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
tsMU = &sync.Mutex{}
tsNum = 0
)
type tsOp uint
const (
tsOpOpen tsOp = iota
tsOpCreate
tsOpRead
tsOpReadAt
tsOpWrite
tsOpSync
tsOpNum
)
type tsLock struct {
ts *testStorage
r util.Releaser
}
func (l tsLock) Release() {
l.r.Release()
l.ts.t.Log("I: storage lock released")
}
type tsReader struct {
tf tsFile
storage.Reader
}
func (tr tsReader) Read(b []byte) (n int, err error) {
ts := tr.tf.ts
ts.countRead(tr.tf.Type())
if tr.tf.shouldErrLocked(tsOpRead) {
return 0, errors.New("leveldb.testStorage: emulated read error")
}
n, err = tr.Reader.Read(b)
if err != nil && err != io.EOF {
ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err)
}
return
}
func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) {
ts := tr.tf.ts
ts.countRead(tr.tf.Type())
if tr.tf.shouldErrLocked(tsOpReadAt) {
return 0, errors.New("leveldb.testStorage: emulated readAt error")
}
n, err = tr.Reader.ReadAt(b, off)
if err != nil && err != io.EOF {
ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err)
}
return
}
func (tr tsReader) Close() (err error) {
err = tr.Reader.Close()
tr.tf.close("reader", err)
return
}
type tsWriter struct {
tf tsFile
storage.Writer
}
func (tw tsWriter) Write(b []byte) (n int, err error) {
if tw.tf.shouldErrLocked(tsOpWrite) {
return 0, errors.New("leveldb.testStorage: emulated write error")
}
n, err = tw.Writer.Write(b)
if err != nil {
tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
}
return
}
func (tw tsWriter) Sync() (err error) {
ts := tw.tf.ts
ts.mu.Lock()
for ts.emuDelaySync&tw.tf.Type() != 0 {
ts.cond.Wait()
}
ts.mu.Unlock()
if tw.tf.shouldErrLocked(tsOpSync) {
return errors.New("leveldb.testStorage: emulated sync error")
}
err = tw.Writer.Sync()
if err != nil {
tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
}
return
}
func (tw tsWriter) Close() (err error) {
err = tw.Writer.Close()
tw.tf.close("writer", err)
return
}
type tsFile struct {
ts *testStorage
storage.File
}
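// x packs the file number and type into a single key for the open-files map.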
func (tf tsFile) x() uint64 {
return tf.Num()<<typeShift | uint64(tf.Type())
}
func (tf tsFile) shouldErr(op tsOp) bool {
return tf.ts.shouldErr(tf, op)
}
func (tf tsFile) shouldErrLocked(op tsOp) bool {
tf.ts.mu.Lock()
defer tf.ts.mu.Unlock()
return tf.shouldErr(op)
}
func (tf tsFile) checkOpen(m string) error {
ts := tf.ts
if writer, ok := ts.opens[tf.x()]; ok {
if writer {
ts.t.Errorf("E: cannot %s file, num=%d type=%v: a writer still open", m, tf.Num(), tf.Type())
} else {
ts.t.Errorf("E: cannot %s file, num=%d type=%v: a reader still open", m, tf.Num(), tf.Type())
}
return tsErrFileOpen
}
return nil
}
func (tf tsFile) close(m string, err error) {
ts := tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
if _, ok := ts.opens[tf.x()]; !ok {
ts.t.Errorf("E: %s: redudant file closing, num=%d type=%v", m, tf.Num(), tf.Type())
} else if err == nil {
ts.t.Logf("I: %s: file closed, num=%d type=%v", m, tf.Num(), tf.Type())
}
delete(ts.opens, tf.x())
if err != nil {
ts.t.Errorf("E: %s: cannot close file, num=%d type=%v: %v", m, tf.Num(), tf.Type(), err)
}
}
func (tf tsFile) Open() (r storage.Reader, err error) {
ts := tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
err = tf.checkOpen("open")
if err != nil {
return
}
if tf.shouldErr(tsOpOpen) {
err = errors.New("leveldb.testStorage: emulated open error")
return
}
r, err = tf.File.Open()
if err != nil {
if ts.ignoreOpenErr&tf.Type() != 0 {
ts.t.Logf("I: cannot open file, num=%d type=%v: %v (ignored)", tf.Num(), tf.Type(), err)
} else {
ts.t.Errorf("E: cannot open file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
}
} else {
ts.t.Logf("I: file opened, num=%d type=%v", tf.Num(), tf.Type())
ts.opens[tf.x()] = false
r = tsReader{tf, r}
}
return
}
func (tf tsFile) Create() (w storage.Writer, err error) {
ts := tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
err = tf.checkOpen("create")
if err != nil {
return
}
if tf.shouldErr(tsOpCreate) {
err = errors.New("leveldb.testStorage: emulated create error")
return
}
w, err = tf.File.Create()
if err != nil {
ts.t.Errorf("E: cannot create file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
} else {
ts.t.Logf("I: file created, num=%d type=%v", tf.Num(), tf.Type())
ts.opens[tf.x()] = true
w = tsWriter{tf, w}
}
return
}
func (tf tsFile) Replace(newfile storage.File) (err error) {
ts := tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
err = tf.checkOpen("replace")
if err != nil {
return
}
err = tf.File.Replace(newfile.(tsFile).File)
if err != nil {
ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
} else {
ts.t.Logf("I: file replace, num=%d type=%v", tf.Num(), tf.Type())
}
return
}
func (tf tsFile) Remove() (err error) {
ts := tf.ts
ts.mu.Lock()
defer ts.mu.Unlock()
err = tf.checkOpen("remove")
if err != nil {
return
}
err = tf.File.Remove()
if err != nil {
ts.t.Errorf("E: cannot remove file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
} else {
ts.t.Logf("I: file removed, num=%d type=%v", tf.Num(), tf.Type())
}
return
}
type testStorage struct {
t *testing.T
storage.Storage
closeFn func() error
mu sync.Mutex
cond sync.Cond
// Open files, true=writer, false=reader
opens map[uint64]bool
emuDelaySync storage.FileType
ignoreOpenErr storage.FileType
readCnt uint64
readCntEn storage.FileType
emuErr [tsOpNum]storage.FileType
emuErrOnce [tsOpNum]storage.FileType
emuRandErr [tsOpNum]storage.FileType
emuRandErrProb int
emuErrOnceMap map[uint64]uint
emuRandRand *rand.Rand
}
func (ts *testStorage) shouldErr(tf tsFile, op tsOp) bool {
if ts.emuErr[op]&tf.Type() != 0 {
return true
} else if ts.emuRandErr[op]&tf.Type() != 0 || ts.emuErrOnce[op]&tf.Type() != 0 {
sop := uint(1) << op
eop := ts.emuErrOnceMap[tf.x()]
if eop&sop == 0 && (ts.emuRandRand.Int()%ts.emuRandErrProb == 0 || ts.emuErrOnce[op]&tf.Type() != 0) {
ts.emuErrOnceMap[tf.x()] = eop | sop
ts.t.Logf("I: emulated error: file=%d type=%v op=%v", tf.Num(), tf.Type(), op)
return true
}
}
return false
}
func (ts *testStorage) SetEmuErr(t storage.FileType, ops ...tsOp) {
ts.mu.Lock()
for _, op := range ops {
ts.emuErr[op] = t
}
ts.mu.Unlock()
}
func (ts *testStorage) SetEmuErrOnce(t storage.FileType, ops ...tsOp) {
ts.mu.Lock()
for _, op := range ops {
ts.emuErrOnce[op] = t
}
ts.mu.Unlock()
}
func (ts *testStorage) SetEmuRandErr(t storage.FileType, ops ...tsOp) {
ts.mu.Lock()
for _, op := range ops {
ts.emuRandErr[op] = t
}
ts.mu.Unlock()
}
func (ts *testStorage) SetEmuRandErrProb(prob int) {
ts.mu.Lock()
ts.emuRandErrProb = prob
ts.mu.Unlock()
}
func (ts *testStorage) DelaySync(t storage.FileType) {
ts.mu.Lock()
ts.emuDelaySync |= t
ts.cond.Broadcast()
ts.mu.Unlock()
}
func (ts *testStorage) ReleaseSync(t storage.FileType) {
ts.mu.Lock()
ts.emuDelaySync &= ^t
ts.cond.Broadcast()
ts.mu.Unlock()
}
func (ts *testStorage) ReadCounter() uint64 {
ts.mu.Lock()
defer ts.mu.Unlock()
return ts.readCnt
}
func (ts *testStorage) ResetReadCounter() {
ts.mu.Lock()
ts.readCnt = 0
ts.mu.Unlock()
}
func (ts *testStorage) SetReadCounter(t storage.FileType) {
ts.mu.Lock()
ts.readCntEn = t
ts.mu.Unlock()
}
func (ts *testStorage) countRead(t storage.FileType) {
ts.mu.Lock()
if ts.readCntEn&t != 0 {
ts.readCnt++
}
ts.mu.Unlock()
}
func (ts *testStorage) SetIgnoreOpenErr(t storage.FileType) {
ts.ignoreOpenErr = t
}
func (ts *testStorage) Lock() (r util.Releaser, err error) {
r, err = ts.Storage.Lock()
if err != nil {
ts.t.Logf("W: storage locking failed: %v", err)
} else {
ts.t.Log("I: storage locked")
r = tsLock{ts, r}
}
return
}
func (ts *testStorage) Log(str string) {
ts.t.Log("L: " + str)
ts.Storage.Log(str)
}
func (ts *testStorage) GetFile(num uint64, t storage.FileType) storage.File {
return tsFile{ts, ts.Storage.GetFile(num, t)}
}
func (ts *testStorage) GetFiles(t storage.FileType) (ff []storage.File, err error) {
ff0, err := ts.Storage.GetFiles(t)
if err != nil {
ts.t.Errorf("E: get files failed: %v", err)
return
}
ff = make([]storage.File, len(ff0))
for i, f := range ff0 {
ff[i] = tsFile{ts, f}
}
ts.t.Logf("I: get files, type=0x%x count=%d", int(t), len(ff))
return
}
func (ts *testStorage) GetManifest() (f storage.File, err error) {
f0, err := ts.Storage.GetManifest()
if err != nil {
if !os.IsNotExist(err) {
ts.t.Errorf("E: get manifest failed: %v", err)
}
return
}
f = tsFile{ts, f0}
ts.t.Logf("I: get manifest, num=%d", f.Num())
return
}
func (ts *testStorage) SetManifest(f storage.File) error {
tf, ok := f.(tsFile)
if !ok {
ts.t.Error("E: set manifest failed: type assertion failed")
return tsErrInvalidFile
} else if tf.Type() != storage.TypeManifest {
ts.t.Errorf("E: set manifest failed: invalid file type: %s", tf.Type())
return tsErrInvalidFile
}
err := ts.Storage.SetManifest(tf.File)
if err != nil {
ts.t.Errorf("E: set manifest failed: %v", err)
} else {
ts.t.Logf("I: set manifest, num=%d", tf.Num())
}
return err
}
func (ts *testStorage) Close() error {
ts.CloseCheck()
err := ts.Storage.Close()
if err != nil {
ts.t.Errorf("E: closing storage failed: %v", err)
} else {
ts.t.Log("I: storage closed")
}
if ts.closeFn != nil {
if err := ts.closeFn(); err != nil {
ts.t.Errorf("E: close function: %v", err)
}
}
return err
}
func (ts *testStorage) CloseCheck() {
ts.mu.Lock()
if len(ts.opens) == 0 {
ts.t.Log("I: all files are closed")
} else {
ts.t.Errorf("E: %d files still open", len(ts.opens))
for x, writer := range ts.opens {
num, tt := x>>typeShift, storage.FileType(x)&storage.TypeAll
ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer)
}
}
ts.mu.Unlock()
}
func newTestStorage(t *testing.T) *testStorage {
var stor storage.Storage
var closeFn func() error
if tsFS {
for {
tsMU.Lock()
num := tsNum
tsNum++
tsMU.Unlock()
tempdir := tsTempdir
if tempdir == "" {
tempdir = os.TempDir()
}
path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
if _, err := os.Stat(path); err != nil {
stor, err = storage.OpenFile(path)
if err != nil {
t.Fatalf("F: cannot create storage: %v", err)
}
t.Logf("I: storage created: %s", path)
closeFn = func() error {
for _, name := range []string{"LOG.old", "LOG"} {
f, err := os.Open(filepath.Join(path, name))
if err != nil {
continue
}
if log, err := ioutil.ReadAll(f); err != nil {
t.Logf("---------------------- %s ----------------------", name)
t.Logf("cannot read log: %v", err)
t.Logf("---------------------- %s ----------------------", name)
} else if len(log) > 0 {
t.Logf("---------------------- %s ----------------------\n%s", name, string(log))
t.Logf("---------------------- %s ----------------------", name)
}
f.Close()
}
if t.Failed() {
t.Logf("testing failed, test DB preserved at %s", path)
return nil
}
if tsKeepFS {
return nil
}
return os.RemoveAll(path)
}
break
}
}
} else {
stor = storage.NewMemStorage()
}
ts := &testStorage{
t: t,
Storage: stor,
closeFn: closeFn,
opens: make(map[uint64]bool),
emuErrOnceMap: make(map[uint64]uint),
emuRandErrProb: 0x999,
emuRandRand: rand.New(rand.NewSource(0xfacedead)),
}
ts.cond.L = &ts.mu
return ts
}
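The testStorage above layers fault injection and bookkeeping on top of a real or in-memory storage.Storage. A minimal sketch of how a test might drive it follows; it assumes the same package as this file, and the test name, the storage.TypeTable/TypeManifest choices, and the commented-out workload are illustrative rather than taken from the vendored code.

func TestEmulatedFaults(t *testing.T) {
	ts := newTestStorage(t)
	defer ts.Close()
	// Emulate open errors on table files, hold manifest syncs, count table reads.
	ts.SetEmuErr(storage.TypeTable, tsOpOpen)
	ts.DelaySync(storage.TypeManifest)
	ts.SetReadCounter(storage.TypeTable)
	// ... exercise the database against ts here ...
	ts.ReleaseSync(storage.TypeManifest) // let delayed syncs proceed again
	ts.SetEmuErr(0, tsOpOpen)            // clear the emulated open error
	t.Logf("table reads observed: %d", ts.ReadCounter())
}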

View File

@ -1,139 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package table
import (
"encoding/binary"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)
type blockTesting struct {
tr *Reader
b *block
}
func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator {
return t.tr.newBlockIter(t.b, nil, slice, false)
}
var _ = testutil.Defer(func() {
Describe("Block", func() {
Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting {
// Building the block.
bw := &blockWriter{
restartInterval: restartInterval,
scratch: make([]byte, 30),
}
kv.Iterate(func(i int, key, value []byte) {
bw.append(key, value)
})
bw.finish()
// Opening the block.
data := bw.buf.Bytes()
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
return &blockTesting{
tr: &Reader{cmp: comparer.DefaultComparer},
b: &block{
data: data,
restartsLen: restartsLen,
restartsOffset: len(data) - (restartsLen+1)*4,
},
}
}
Describe("read test", func() {
for restartInterval := 1; restartInterval <= 5; restartInterval++ {
Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
kv := &testutil.KeyValue{}
Text := func() string {
return fmt.Sprintf("and %d keys", kv.Len())
}
Test := func() {
// Make block.
br := Build(kv, restartInterval)
// Do testing.
testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil)
}
Describe(Text(), Test)
kv.PutString("", "empty")
Describe(Text(), Test)
kv.PutString("a1", "foo")
Describe(Text(), Test)
kv.PutString("a2", "v")
Describe(Text(), Test)
kv.PutString("a3qqwrkks", "hello")
Describe(Text(), Test)
kv.PutString("a4", "bar")
Describe(Text(), Test)
kv.PutString("a5111111", "v5")
kv.PutString("a6", "")
kv.PutString("a7", "v7")
kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8")
kv.PutString("b", "v9")
kv.PutString("c9", "v9")
kv.PutString("c91", "v9")
kv.PutString("d0", "v9")
Describe(Text(), Test)
})
}
})
Describe("out-of-bound slice test", func() {
kv := &testutil.KeyValue{}
kv.PutString("k1", "v1")
kv.PutString("k2", "v2")
kv.PutString("k3abcdefgg", "v3")
kv.PutString("k4", "v4")
kv.PutString("k5", "v5")
for restartInterval := 1; restartInterval <= 5; restartInterval++ {
Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
// Make block.
bt := Build(kv, restartInterval)
Test := func(r *util.Range) func(done Done) {
return func(done Done) {
iter := bt.TestNewIterator(r)
Expect(iter.Error()).ShouldNot(HaveOccurred())
t := testutil.IteratorTesting{
KeyValue: kv.Clone(),
Iter: iter,
}
testutil.DoIteratorTesting(&t)
iter.Release()
done <- true
}
}
It("Should do iterations and seeks correctly #0",
Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0)
It("Should do iterations and seeks correctly #1",
Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0)
})
}
})
})
})

View File

@ -1,11 +0,0 @@
package table
import (
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
)
func TestTable(t *testing.T) {
testutil.RunSuite(t, "Table Suite")
}

View File

@ -1,122 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package table
import (
"bytes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)
type tableWrapper struct {
*Reader
}
func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) {
return t.Reader.Find(key, false, nil)
}
func (t tableWrapper) TestGet(key []byte) (value []byte, err error) {
return t.Reader.Get(key, nil)
}
func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator {
return t.Reader.NewIterator(slice, nil)
}
var _ = testutil.Defer(func() {
Describe("Table", func() {
Describe("approximate offset test", func() {
var (
buf = &bytes.Buffer{}
o = &opt.Options{
BlockSize: 1024,
Compression: opt.NoCompression,
}
)
// Building the table.
tw := NewWriter(buf, o)
tw.Append([]byte("k01"), []byte("hello"))
tw.Append([]byte("k02"), []byte("hello2"))
tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000))
tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000))
tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000))
tw.Append([]byte("k06"), []byte("hello3"))
tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000))
err := tw.Close()
It("Should be able to approximate offset of a key correctly", func() {
Expect(err).ShouldNot(HaveOccurred())
tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
Expect(err).ShouldNot(HaveOccurred())
CheckOffset := func(key string, expect, threshold int) {
offset, err := tr.OffsetOf([]byte(key))
Expect(err).ShouldNot(HaveOccurred())
Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key)
}
CheckOffset("k0", 0, 0)
CheckOffset("k01a", 0, 0)
CheckOffset("k02", 0, 0)
CheckOffset("k03", 0, 0)
CheckOffset("k04", 10000, 1000)
CheckOffset("k04a", 210000, 1000)
CheckOffset("k05", 210000, 1000)
CheckOffset("k06", 510000, 1000)
CheckOffset("k07", 510000, 1000)
CheckOffset("xyz", 610000, 2000)
})
})
Describe("read test", func() {
Build := func(kv testutil.KeyValue) testutil.DB {
o := &opt.Options{
BlockSize: 512,
BlockRestartInterval: 3,
}
buf := &bytes.Buffer{}
// Building the table.
tw := NewWriter(buf, o)
kv.Iterate(func(i int, key, value []byte) {
tw.Append(key, value)
})
tw.Close()
// Opening the table.
tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
return tableWrapper{tr}
}
Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {
return func() {
db := Build(*kv)
if body != nil {
body(db.(tableWrapper).Reader)
}
testutil.KeyValueTesting(nil, *kv, db, nil, nil)
}
}
testutil.AllKeyValueTesting(nil, Build, nil, nil)
Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) {
It("should have correct blocks number", func() {
indexBlock, err := r.readBlock(r.indexBH, true)
Expect(err).To(BeNil())
Expect(indexBlock.restartsLen).Should(Equal(9))
})
}))
})
})
})

View File

@ -1,63 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
. "github.com/onsi/gomega"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
)
type testingDB struct {
*DB
ro *opt.ReadOptions
wo *opt.WriteOptions
stor *testutil.Storage
}
func (t *testingDB) TestPut(key []byte, value []byte) error {
return t.Put(key, value, t.wo)
}
func (t *testingDB) TestDelete(key []byte) error {
return t.Delete(key, t.wo)
}
func (t *testingDB) TestGet(key []byte) (value []byte, err error) {
return t.Get(key, t.ro)
}
func (t *testingDB) TestHas(key []byte) (ret bool, err error) {
return t.Has(key, t.ro)
}
func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator {
return t.NewIterator(slice, t.ro)
}
func (t *testingDB) TestClose() {
err := t.Close()
ExpectWithOffset(1, err).NotTo(HaveOccurred())
err = t.stor.Close()
ExpectWithOffset(1, err).NotTo(HaveOccurred())
}
func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB {
stor := testutil.NewStorage()
db, err := Open(stor, o)
// FIXME: This may be called from outside It, which may cause panic.
Expect(err).NotTo(HaveOccurred())
return &testingDB{
DB: db,
ro: ro,
wo: wo,
stor: stor,
}
}

View File

@ -1,369 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package util
import (
"bytes"
"io"
"math/rand"
"runtime"
"testing"
)
const N = 10000 // make this bigger for a larger (and slower) test
var data string // test data for write tests
var testBytes []byte // test data; same as data but as a slice.
func init() {
testBytes = make([]byte, N)
for i := 0; i < N; i++ {
testBytes[i] = 'a' + byte(i%26)
}
data = string(testBytes)
}
// Verify that contents of buf match the string s.
func check(t *testing.T, testname string, buf *Buffer, s string) {
bytes := buf.Bytes()
str := buf.String()
if buf.Len() != len(bytes) {
t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
}
if buf.Len() != len(str) {
t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
}
if buf.Len() != len(s) {
t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
}
if string(bytes) != s {
t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
}
}
// Fill buf through n writes of byte slice fub.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
check(t, testname+" (fill 1)", buf, s)
for ; n > 0; n-- {
m, err := buf.Write(fub)
if m != len(fub) {
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
}
if err != nil {
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
}
s += string(fub)
check(t, testname+" (fill 4)", buf, s)
}
return s
}
func TestNewBuffer(t *testing.T) {
buf := NewBuffer(testBytes)
check(t, "NewBuffer", buf, data)
}
// Empty buf through repeated reads into fub.
// The initial contents of buf corresponds to the string s.
func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
check(t, testname+" (empty 1)", buf, s)
for {
n, err := buf.Read(fub)
if n == 0 {
break
}
if err != nil {
t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
}
s = s[n:]
check(t, testname+" (empty 3)", buf, s)
}
check(t, testname+" (empty 4)", buf, "")
}
func TestBasicOperations(t *testing.T) {
var buf Buffer
for i := 0; i < 5; i++ {
check(t, "TestBasicOperations (1)", &buf, "")
buf.Reset()
check(t, "TestBasicOperations (2)", &buf, "")
buf.Truncate(0)
check(t, "TestBasicOperations (3)", &buf, "")
n, err := buf.Write([]byte(data[0:1]))
if n != 1 {
t.Errorf("wrote 1 byte, but n == %d", n)
}
if err != nil {
t.Errorf("err should always be nil, but err == %s", err)
}
check(t, "TestBasicOperations (4)", &buf, "a")
buf.WriteByte(data[1])
check(t, "TestBasicOperations (5)", &buf, "ab")
n, err = buf.Write([]byte(data[2:26]))
if n != 24 {
t.Errorf("wrote 24 bytes, but n == %d", n)
}
check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
buf.Truncate(26)
check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
buf.Truncate(20)
check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
buf.WriteByte(data[1])
c, err := buf.ReadByte()
if err != nil {
t.Error("ReadByte unexpected eof")
}
if c != data[1] {
t.Errorf("ReadByte wrong value c=%v", c)
}
c, err = buf.ReadByte()
if err == nil {
t.Error("ReadByte unexpected not eof")
}
}
}
func TestLargeByteWrites(t *testing.T) {
var buf Buffer
limit := 30
if testing.Short() {
limit = 9
}
for i := 3; i < limit; i += 3 {
s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
}
check(t, "TestLargeByteWrites (3)", &buf, "")
}
func TestLargeByteReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
}
check(t, "TestLargeByteReads (3)", &buf, "")
}
func TestMixedReadsAndWrites(t *testing.T) {
var buf Buffer
s := ""
for i := 0; i < 50; i++ {
wlen := rand.Intn(len(data))
s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
rlen := rand.Intn(len(data))
fub := make([]byte, rlen)
n, _ := buf.Read(fub)
s = s[n:]
}
empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
}
func TestNil(t *testing.T) {
var b *Buffer
if b.String() != "<nil>" {
t.Errorf("expected <nil>; got %q", b.String())
}
}
func TestReadFrom(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
b.ReadFrom(&buf)
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
}
}
func TestWriteTo(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
buf.WriteTo(&b)
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
}
}
func TestNext(t *testing.T) {
b := []byte{0, 1, 2, 3, 4}
tmp := make([]byte, 5)
for i := 0; i <= 5; i++ {
for j := i; j <= 5; j++ {
for k := 0; k <= 6; k++ {
// 0 <= i <= j <= 5; 0 <= k <= 6
// Check that if we start with a buffer
// of length j at offset i and ask for
// Next(k), we get the right bytes.
buf := NewBuffer(b[0:j])
n, _ := buf.Read(tmp[0:i])
if n != i {
t.Fatalf("Read %d returned %d", i, n)
}
bb := buf.Next(k)
want := k
if want > j-i {
want = j - i
}
if len(bb) != want {
t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
}
for l, v := range bb {
if v != byte(l+i) {
t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
}
}
}
}
}
}
var readBytesTests = []struct {
buffer string
delim byte
expected []string
err error
}{
{"", 0, []string{""}, io.EOF},
{"a\x00", 0, []string{"a\x00"}, nil},
{"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
{"hello\x01world", 1, []string{"hello\x01"}, nil},
{"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
{"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
{"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
}
func TestReadBytes(t *testing.T) {
for _, test := range readBytesTests {
buf := NewBuffer([]byte(test.buffer))
var err error
for _, expected := range test.expected {
var bytes []byte
bytes, err = buf.ReadBytes(test.delim)
if string(bytes) != expected {
t.Errorf("expected %q, got %q", expected, bytes)
}
if err != nil {
break
}
}
if err != test.err {
t.Errorf("expected error %v, got %v", test.err, err)
}
}
}
func TestGrow(t *testing.T) {
x := []byte{'x'}
y := []byte{'y'}
tmp := make([]byte, 72)
for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
xBytes := bytes.Repeat(x, startLen)
for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
buf := NewBuffer(xBytes)
// If we read, this affects buf.off, which is good to test.
readBytes, _ := buf.Read(tmp)
buf.Grow(growLen)
yBytes := bytes.Repeat(y, growLen)
// Check no allocation occurs in write, as long as we're single-threaded.
var m1, m2 runtime.MemStats
runtime.ReadMemStats(&m1)
buf.Write(yBytes)
runtime.ReadMemStats(&m2)
if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
t.Errorf("allocation occurred during write")
}
// Check that buffer has correct data.
if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
t.Errorf("bad initial data at %d %d", startLen, growLen)
}
if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
t.Errorf("bad written data at %d %d", startLen, growLen)
}
}
}
}
// Was a bug: used to give EOF reading empty slice at EOF.
func TestReadEmptyAtEOF(t *testing.T) {
b := new(Buffer)
slice := make([]byte, 0)
n, err := b.Read(slice)
if err != nil {
t.Errorf("read error: %v", err)
}
if n != 0 {
t.Errorf("wrong count; got %d want 0", n)
}
}
// Tests that we occasionally compact. Issue 5154.
func TestBufferGrowth(t *testing.T) {
var b Buffer
buf := make([]byte, 1024)
b.Write(buf[0:1])
var cap0 int
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
if i == 0 {
cap0 = cap(b.buf)
}
}
cap1 := cap(b.buf)
// (*Buffer).grow allows for 2x capacity slop before sliding,
// so set our error threshold at 3x.
if cap1 > cap0*3 {
t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
}
}
// From Issue 5154.
func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf[0:1])
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
}
}
}
// Check that we don't compact too often. From Issue 5154.
func BenchmarkBufferFullSmallReads(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf)
for b.Len()+20 < cap(b.buf) {
b.Write(buf[:10])
}
for i := 0; i < 5<<10; i++ {
b.Read(buf[:1])
b.Write(buf[:1])
}
}
}

View File

@ -1,364 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
)
var (
download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
testdata = flag.String("testdata", "testdata", "Directory containing the test data")
)
func roundtrip(b, ebuf, dbuf []byte) error {
e, err := Encode(ebuf, b)
if err != nil {
return fmt.Errorf("encoding error: %v", err)
}
d, err := Decode(dbuf, e)
if err != nil {
return fmt.Errorf("decoding error: %v", err)
}
if !bytes.Equal(b, d) {
return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d)
}
return nil
}
func TestEmpty(t *testing.T) {
if err := roundtrip(nil, nil, nil); err != nil {
t.Fatal(err)
}
}
func TestSmallCopy(t *testing.T) {
for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
for i := 0; i < 32; i++ {
s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
}
}
}
}
}
func TestSmallRand(t *testing.T) {
rng := rand.New(rand.NewSource(27354294))
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i := range b {
b[i] = uint8(rng.Uint32())
}
if err := roundtrip(b, nil, nil); err != nil {
t.Fatal(err)
}
}
}
func TestSmallRegular(t *testing.T) {
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i := range b {
b[i] = uint8(i%10 + 'a')
}
if err := roundtrip(b, nil, nil); err != nil {
t.Fatal(err)
}
}
}
func cmp(a, b []byte) error {
if len(a) != len(b) {
return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
}
for i := range a {
if a[i] != b[i] {
return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
}
}
return nil
}
func TestFramingFormat(t *testing.T) {
// src is comprised of alternating 1e5-sized sequences of random
// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
// because it is larger than maxUncompressedChunkLen (64k).
src := make([]byte, 1e6)
rng := rand.New(rand.NewSource(1))
for i := 0; i < 10; i++ {
if i%2 == 0 {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(rng.Intn(256))
}
} else {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(i)
}
}
}
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(src); err != nil {
t.Fatalf("Write: encoding: %v", err)
}
dst, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Fatalf("ReadAll: decoding: %v", err)
}
if err := cmp(dst, src); err != nil {
t.Fatal(err)
}
}
func TestReaderReset(t *testing.T) {
gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(gold); err != nil {
t.Fatalf("Write: %v", err)
}
encoded, invalid, partial := buf.String(), "invalid", "partial"
r := NewReader(nil)
for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
if s == partial {
r.Reset(strings.NewReader(encoded))
if _, err := r.Read(make([]byte, 101)); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
continue
}
r.Reset(strings.NewReader(s))
got, err := ioutil.ReadAll(r)
switch s {
case encoded:
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if err := cmp(got, gold); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
case invalid:
if err == nil {
t.Errorf("#%d: got nil error, want non-nil", i)
continue
}
}
}
}
func TestWriterReset(t *testing.T) {
gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
var gots, wants [][]byte
const n = 20
w, failed := NewWriter(nil), false
for i := 0; i <= n; i++ {
buf := new(bytes.Buffer)
w.Reset(buf)
want := gold[:len(gold)*i/n]
if _, err := w.Write(want); err != nil {
t.Errorf("#%d: Write: %v", i, err)
failed = true
continue
}
got, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Errorf("#%d: ReadAll: %v", i, err)
failed = true
continue
}
gots = append(gots, got)
wants = append(wants, want)
}
if failed {
return
}
for i := range gots {
if err := cmp(gots[i], wants[i]); err != nil {
t.Errorf("#%d: %v", i, err)
}
}
}
func benchDecode(b *testing.B, src []byte) {
encoded, err := Encode(nil, src)
if err != nil {
b.Fatal(err)
}
// Bandwidth is in amount of uncompressed data.
b.SetBytes(int64(len(src)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
Decode(src, encoded)
}
}
func benchEncode(b *testing.B, src []byte) {
// Bandwidth is in amount of uncompressed data.
b.SetBytes(int64(len(src)))
dst := make([]byte, MaxEncodedLen(len(src)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
Encode(dst, src)
}
}
func readFile(b testing.TB, filename string) []byte {
src, err := ioutil.ReadFile(filename)
if err != nil {
b.Fatalf("failed reading %s: %s", filename, err)
}
if len(src) == 0 {
b.Fatalf("%s has zero length", filename)
}
return src
}
// expand returns a slice of length n containing repeated copies of src.
func expand(src []byte, n int) []byte {
dst := make([]byte, n)
for x := dst; len(x) > 0; {
i := copy(x, src)
x = x[i:]
}
return dst
}
func benchWords(b *testing.B, n int, decode bool) {
// Note: the file is OS-language dependent so the resulting values are not
// directly comparable for non-US-English OS installations.
data := expand(readFile(b, "/usr/share/dict/words"), n)
if decode {
benchDecode(b, data)
} else {
benchEncode(b, data)
}
}
func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
// testFiles' values are copied directly from
// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
// The label field is unused in snappy-go.
var testFiles = []struct {
label string
filename string
}{
{"html", "html"},
{"urls", "urls.10K"},
{"jpg", "fireworks.jpeg"},
{"jpg_200", "fireworks.jpeg"},
{"pdf", "paper-100k.pdf"},
{"html4", "html_x_4"},
{"txt1", "alice29.txt"},
{"txt2", "asyoulik.txt"},
{"txt3", "lcet10.txt"},
{"txt4", "plrabn12.txt"},
{"pb", "geo.protodata"},
{"gaviota", "kppkn.gtb"},
}
// The test data files are present at this canonical URL.
const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
func downloadTestdata(basename string) (errRet error) {
filename := filepath.Join(*testdata, basename)
if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
return nil
}
if !*download {
return fmt.Errorf("test data not found; skipping benchmark without the -download flag")
}
// Download the official snappy C++ implementation reference test data
// files for benchmarking.
if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create testdata: %s", err)
}
f, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to create %s: %s", filename, err)
}
defer f.Close()
defer func() {
if errRet != nil {
os.Remove(filename)
}
}()
url := baseURL + basename
resp, err := http.Get(url)
if err != nil {
return fmt.Errorf("failed to download %s: %s", url, err)
}
defer resp.Body.Close()
if s := resp.StatusCode; s != http.StatusOK {
return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
}
_, err = io.Copy(f, resp.Body)
if err != nil {
return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
}
return nil
}
func benchFile(b *testing.B, n int, decode bool) {
if err := downloadTestdata(testFiles[n].filename); err != nil {
b.Fatalf("failed to download testdata: %s", err)
}
data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
if decode {
benchDecode(b, data)
} else {
benchEncode(b, data)
}
}
// Naming convention is kept similar to what snappy's C++ implementation uses.
func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }

View File

@ -1,105 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ed25519
import (
"bufio"
"bytes"
"compress/gzip"
"encoding/hex"
"io"
"os"
"strings"
"testing"
)
type zeroReader struct{}
func (zeroReader) Read(buf []byte) (int, error) {
for i := range buf {
buf[i] = 0
}
return len(buf), nil
}
func TestSignVerify(t *testing.T) {
var zero zeroReader
public, private, _ := GenerateKey(zero)
message := []byte("test message")
sig := Sign(private, message)
if !Verify(public, message, sig) {
t.Errorf("valid signature rejected")
}
wrongMessage := []byte("wrong message")
if Verify(public, wrongMessage, sig) {
t.Errorf("signature of different message accepted")
}
}
func TestGolden(t *testing.T) {
// sign.input.gz is a selection of test cases from
// http://ed25519.cr.yp.to/python/sign.input
testDataZ, err := os.Open("testdata/sign.input.gz")
if err != nil {
t.Fatal(err)
}
defer testDataZ.Close()
testData, err := gzip.NewReader(testDataZ)
if err != nil {
t.Fatal(err)
}
defer testData.Close()
in := bufio.NewReaderSize(testData, 1<<12)
lineNo := 0
for {
lineNo++
lineBytes, isPrefix, err := in.ReadLine()
if isPrefix {
t.Fatal("bufio buffer too small")
}
if err != nil {
if err == io.EOF {
break
}
t.Fatalf("error reading test data: %s", err)
}
line := string(lineBytes)
parts := strings.Split(line, ":")
if len(parts) != 5 {
t.Fatalf("bad number of parts on line %d", lineNo)
}
privBytes, _ := hex.DecodeString(parts[0])
pubKeyBytes, _ := hex.DecodeString(parts[1])
msg, _ := hex.DecodeString(parts[2])
sig, _ := hex.DecodeString(parts[3])
// The signatures in the test vectors also include the message
// at the end, but we just want R and S.
sig = sig[:SignatureSize]
if l := len(pubKeyBytes); l != PublicKeySize {
t.Fatalf("bad public key length on line %d: got %d bytes", lineNo, l)
}
var priv [PrivateKeySize]byte
copy(priv[:], privBytes)
copy(priv[32:], pubKeyBytes)
sig2 := Sign(&priv, msg)
if !bytes.Equal(sig, sig2[:]) {
t.Errorf("different signature result on line %d: %x vs %x", lineNo, sig, sig2)
}
var pubKey [PublicKeySize]byte
copy(pubKey[:], pubKeyBytes)
if !Verify(&pubKey, msg, sig2) {
t.Errorf("signature failed to verify on line %d", lineNo)
}
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,78 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package extra25519
import (
"bytes"
"crypto/rand"
"testing"
"code.google.com/p/go.crypto/curve25519"
"github.com/agl/ed25519"
)
func TestCurve25519Conversion(t *testing.T) {
public, private, _ := ed25519.GenerateKey(rand.Reader)
var curve25519Public, curve25519Public2, curve25519Private [32]byte
PrivateKeyToCurve25519(&curve25519Private, private)
curve25519.ScalarBaseMult(&curve25519Public, &curve25519Private)
if !PublicKeyToCurve25519(&curve25519Public2, public) {
t.Fatalf("PublicKeyToCurve25519 failed")
}
if !bytes.Equal(curve25519Public[:], curve25519Public2[:]) {
t.Errorf("Values didn't match: curve25519 produced %x, conversion produced %x", curve25519Public[:], curve25519Public2[:])
}
}
func TestElligator(t *testing.T) {
var publicKey, publicKey2, publicKey3, representative, privateKey [32]byte
for i := 0; i < 1000; i++ {
rand.Reader.Read(privateKey[:])
if !ScalarBaseMult(&publicKey, &representative, &privateKey) {
continue
}
RepresentativeToPublicKey(&publicKey2, &representative)
if !bytes.Equal(publicKey[:], publicKey2[:]) {
t.Fatal("The resulting public key doesn't match the initial one.")
}
curve25519.ScalarBaseMult(&publicKey3, &privateKey)
if !bytes.Equal(publicKey[:], publicKey3[:]) {
t.Fatal("The public key doesn't match the value that curve25519 produced.")
}
}
}
func BenchmarkKeyGeneration(b *testing.B) {
var publicKey, representative, privateKey [32]byte
// Find the private key that results in a point that's in the image of the map.
for {
rand.Reader.Read(privateKey[:])
if ScalarBaseMult(&publicKey, &representative, &privateKey) {
break
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
ScalarBaseMult(&publicKey, &representative, &privateKey)
}
}
func BenchmarkMap(b *testing.B) {
var publicKey, representative [32]byte
rand.Reader.Read(representative[:])
b.ResetTimer()
for i := 0; i < b.N; i++ {
RepresentativeToPublicKey(&publicKey, &representative)
}
}

View File

@ -1,146 +0,0 @@
//
// Written by Maxim Khitrov (November 2012)
//
package flowcontrol
import (
"bytes"
"reflect"
"testing"
"time"
)
const (
_50ms = 50 * time.Millisecond
_100ms = 100 * time.Millisecond
_200ms = 200 * time.Millisecond
_300ms = 300 * time.Millisecond
_400ms = 400 * time.Millisecond
_500ms = 500 * time.Millisecond
)
func nextStatus(m *Monitor) Status {
samples := m.samples
for i := 0; i < 30; i++ {
if s := m.Status(); s.Samples != samples {
return s
}
time.Sleep(5 * time.Millisecond)
}
return m.Status()
}
func TestReader(t *testing.T) {
in := make([]byte, 100)
for i := range in {
in[i] = byte(i)
}
b := make([]byte, 100)
r := NewReader(bytes.NewReader(in), 100)
start := time.Now()
// Make sure r implements Limiter
_ = Limiter(r)
// 1st read of 10 bytes is performed immediately
if n, err := r.Read(b); n != 10 || err != nil {
t.Fatalf("r.Read(b) expected 10 (<nil>); got %v (%v)", n, err)
} else if rt := time.Since(start); rt > _50ms {
t.Fatalf("r.Read(b) took too long (%v)", rt)
}
// No new Reads allowed in the current sample
r.SetBlocking(false)
if n, err := r.Read(b); n != 0 || err != nil {
t.Fatalf("r.Read(b) expected 0 (<nil>); got %v (%v)", n, err)
} else if rt := time.Since(start); rt > _50ms {
t.Fatalf("r.Read(b) took too long (%v)", rt)
}
status := [6]Status{0: r.Status()} // No samples in the first status
// 2nd read of 10 bytes blocks until the next sample
r.SetBlocking(true)
if n, err := r.Read(b[10:]); n != 10 || err != nil {
t.Fatalf("r.Read(b[10:]) expected 10 (<nil>); got %v (%v)", n, err)
} else if rt := time.Since(start); rt < _100ms {
t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt)
}
status[1] = r.Status() // 1st sample
status[2] = nextStatus(r.Monitor) // 2nd sample
status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample
if n := r.Done(); n != 20 {
t.Fatalf("r.Done() expected 20; got %v", n)
}
status[4] = r.Status()
status[5] = nextStatus(r.Monitor) // Timeout
start = status[0].Start
// Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress
want := []Status{
Status{true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
Status{true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0},
Status{true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0},
Status{true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0},
Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0},
Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0},
}
for i, s := range status {
if !reflect.DeepEqual(&s, &want[i]) {
t.Errorf("r.Status(%v) expected %v; got %v", i, want[i], s)
}
}
if !bytes.Equal(b[:20], in[:20]) {
t.Errorf("r.Read() input doesn't match output")
}
}
func TestWriter(t *testing.T) {
b := make([]byte, 100)
for i := range b {
b[i] = byte(i)
}
w := NewWriter(&bytes.Buffer{}, 200)
start := time.Now()
// Make sure w implements Limiter
_ = Limiter(w)
// Non-blocking 20-byte write for the first sample returns ErrLimit
w.SetBlocking(false)
if n, err := w.Write(b); n != 20 || err != ErrLimit {
t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err)
} else if rt := time.Since(start); rt > _50ms {
t.Fatalf("w.Write(b) took too long (%v)", rt)
}
// Blocking 80-byte write
w.SetBlocking(true)
if n, err := w.Write(b[20:]); n != 80 || err != nil {
t.Fatalf("w.Write(b[20:]) expected 80 (<nil>); got %v (%v)", n, err)
} else if rt := time.Since(start); rt < _400ms {
t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt)
}
w.SetTransferSize(100)
status := []Status{w.Status(), nextStatus(w.Monitor)}
start = status[0].Start
// Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress
want := []Status{
Status{true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000},
Status{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000},
}
for i, s := range status {
if !reflect.DeepEqual(&s, &want[i]) {
t.Errorf("w.Status(%v) expected %v; got %v", i, want[i], s)
}
}
if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) {
t.Errorf("w.Write() input doesn't match output")
}
}
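For orientation, here is a minimal sketch of how the limiter exercised above might wrap an ordinary io.Reader outside a test; it assumes the same flowcontrol package plus an io import, and the copyWithLimit name and the 64 KiB/s figure are illustrative.

func copyWithLimit(dst io.Writer, src io.Reader) (Status, error) {
	r := NewReader(src, 64*1024) // cap reads at roughly 64 KiB per second
	r.SetBlocking(true)          // wait for the limiter instead of returning ErrLimit
	_, err := io.Copy(dst, r)
	r.Done() // mark the transfer finished so the monitor stops sampling
	return r.Status(), err
}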

View File

@ -1,7 +0,0 @@
package alert
import (
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-logger"
)
var log = logger.New("module", "alert")

View File

@ -1,120 +0,0 @@
package common
import (
"testing"
)
func randBitArray(bits int) (*BitArray, []byte) {
src := RandBytes((bits + 7) / 8)
bA := NewBitArray(bits)
for i := 0; i < len(src); i++ {
for j := 0; j < 8; j++ {
if i*8+j >= bits {
return bA, src
}
setBit := src[i]&(1<<uint(j)) > 0
bA.SetIndex(i*8+j, setBit)
}
}
return bA, src
}
func TestAnd(t *testing.T) {
bA1, _ := randBitArray(51)
bA2, _ := randBitArray(31)
bA3 := bA1.And(bA2)
if bA3.Bits != 31 {
t.Error("Expected min bits", bA3.Bits)
}
if len(bA3.Elems) != len(bA2.Elems) {
t.Error("Expected min elems length")
}
for i := 0; i < bA3.Bits; i++ {
expected := bA1.GetIndex(i) && bA2.GetIndex(i)
if bA3.GetIndex(i) != expected {
t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i))
}
}
}
func TestOr(t *testing.T) {
bA1, _ := randBitArray(51)
bA2, _ := randBitArray(31)
bA3 := bA1.Or(bA2)
if bA3.Bits != 51 {
t.Error("Expected max bits")
}
if len(bA3.Elems) != len(bA1.Elems) {
t.Error("Expected max elems length")
}
for i := 0; i < bA3.Bits; i++ {
expected := bA1.GetIndex(i) || bA2.GetIndex(i)
if bA3.GetIndex(i) != expected {
t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i))
}
}
}
func TestSub1(t *testing.T) {
bA1, _ := randBitArray(31)
bA2, _ := randBitArray(51)
bA3 := bA1.Sub(bA2)
if bA3.Bits != bA1.Bits {
t.Error("Expected bA1 bits")
}
if len(bA3.Elems) != len(bA1.Elems) {
t.Error("Expected bA1 elems length")
}
for i := 0; i < bA3.Bits; i++ {
expected := bA1.GetIndex(i)
if bA2.GetIndex(i) {
expected = false
}
if bA3.GetIndex(i) != expected {
t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i))
}
}
}
func TestSub2(t *testing.T) {
bA1, _ := randBitArray(51)
bA2, _ := randBitArray(31)
bA3 := bA1.Sub(bA2)
if bA3.Bits != bA1.Bits {
t.Error("Expected bA1 bits")
}
if len(bA3.Elems) != len(bA1.Elems) {
t.Error("Expected bA1 elems length")
}
for i := 0; i < bA3.Bits; i++ {
expected := bA1.GetIndex(i)
if i < bA2.Bits && bA2.GetIndex(i) {
expected = false
}
if bA3.GetIndex(i) != expected {
t.Error("Wrong bit from bA3")
}
}
}
func TestPickRandom(t *testing.T) {
for idx := 0; idx < 123; idx++ {
bA1 := NewBitArray(123)
bA1.SetIndex(idx, true)
index, ok := bA1.PickRandom()
if !ok {
t.Fatal("Expected to pick element but got none")
}
if index != idx {
t.Fatalf("Expected to pick element at %v but got wrong index", idx)
}
}
}

View File

@ -1,62 +0,0 @@
package common
import "sync"
// CMapInt is a goroutine-safe map
type CMapInt struct {
m map[int]interface{}
l sync.Mutex
}
func NewCMapInt() *CMapInt {
return &CMapInt{
m: make(map[int]interface{}, 0),
}
}
func (cm *CMapInt) Set(key int, value interface{}) {
cm.l.Lock()
defer cm.l.Unlock()
cm.m[key] = value
}
func (cm *CMapInt) Get(key int) interface{} {
cm.l.Lock()
defer cm.l.Unlock()
return cm.m[key]
}
func (cm *CMapInt) Has(key int) bool {
cm.l.Lock()
defer cm.l.Unlock()
_, ok := cm.m[key]
return ok
}
func (cm *CMapInt) Delete(key int) {
cm.l.Lock()
defer cm.l.Unlock()
delete(cm.m, key)
}
func (cm *CMapInt) Size() int {
cm.l.Lock()
defer cm.l.Unlock()
return len(cm.m)
}
func (cm *CMapInt) Clear() {
cm.l.Lock()
defer cm.l.Unlock()
cm.m = make(map[int]interface{}, 0)
}
func (cm *CMapInt) Values() []interface{} {
cm.l.Lock()
defer cm.l.Unlock()
items := []interface{}{}
for _, v := range cm.m {
items = append(items, v)
}
return items
}

View File

@ -1,71 +0,0 @@
package common
import (
"fmt"
"math/rand"
"testing"
)
func TestMap(t *testing.T) {
const numElements = 10000
const numTimes = 10000000
const numScanners = 10
els := make(map[int]struct{})
for i := 0; i < numElements; i++ {
els[i] = struct{}{}
}
// Remove an element, push back an element.
for i := 0; i < numTimes; i++ {
// Pick an element to remove
rmElIdx := rand.Intn(len(els))
delete(els, rmElIdx)
// Insert a new element
els[rmElIdx] = struct{}{}
if i%100000 == 0 {
fmt.Printf("Pushed %vK elements so far...\n", i/1000)
}
}
}
func TestCMap(t *testing.T) {
const numElements = 10000
const numTimes = 10000000
const numScanners = 10
els := NewCMapInt()
for i := 0; i < numElements; i++ {
els.Set(i, struct{}{})
}
// Launch scanner routines that will rapidly iterate over elements.
for i := 0; i < numScanners; i++ {
go func(scannerID int) {
counter := 0
for {
els.Get(counter)
counter = (counter + 1) % numElements
}
}(i)
}
// Remove an element, push back an element.
for i := 0; i < numTimes; i++ {
// Pick an element to remove
rmElIdx := rand.Intn(els.Size())
els.Delete(rmElIdx)
// Insert a new element
els.Set(rmElIdx, struct{}{})
if i%100000 == 0 {
fmt.Printf("Pushed %vK elements so far...\n", i/1000)
}
}
}

View File

@ -1,69 +0,0 @@
package crypto
import (
"bytes"
"testing"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/ed25519"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-common"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-wire"
)
func TestSignAndValidate(t *testing.T) {
privKey := GenPrivKeyEd25519()
pubKey := privKey.PubKey()
msg := CRandBytes(128)
sig := privKey.Sign(msg)
t.Logf("msg: %X, sig: %X", msg, sig)
// Test the signature
if !pubKey.VerifyBytes(msg, sig) {
t.Errorf("Account message signature verification failed")
}
// Mutate the signature, just one bit.
sigEd := sig.(SignatureEd25519)
sigEd[0] ^= byte(0x01)
sig = Signature(sigEd)
if pubKey.VerifyBytes(msg, sig) {
t.Errorf("Account message signature verification should have failed but passed instead")
}
}
func TestBinaryDecode(t *testing.T) {
privKey := GenPrivKeyEd25519()
pubKey := privKey.PubKey()
msg := CRandBytes(128)
sig := privKey.Sign(msg)
t.Logf("msg: %X, sig: %X", msg, sig)
buf, n, err := new(bytes.Buffer), new(int), new(error)
wire.WriteBinary(struct{ Signature }{sig}, buf, n, err)
if *err != nil {
t.Fatalf("Failed to write Signature: %v", err)
}
if len(buf.Bytes()) != ed25519.SignatureSize+1 {
// 1 byte TypeByte, 64 bytes signature bytes
t.Fatalf("Unexpected signature write size: %v", len(buf.Bytes()))
}
if buf.Bytes()[0] != SignatureTypeEd25519 {
t.Fatalf("Unexpected signature type byte")
}
sigStruct := struct{ Signature }{}
sig2 := wire.ReadBinary(sigStruct, buf, 0, n, err)
if *err != nil {
t.Fatalf("Failed to read Signature: %v", err)
}
// Test the signature
if !pubKey.VerifyBytes(msg, sig2.(struct{ Signature }).Signature.(SignatureEd25519)) {
t.Errorf("Account message signature verification failed")
}
}

View File

@ -1,7 +0,0 @@
package events
import (
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-logger"
)
var log = logger.New("module", "events")

View File

@ -1,206 +0,0 @@
Tendermint Go-Logger
Copyright (C) 2015 Tendermint
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
//--------------------------------------------------------------------------------
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for software and other kinds of works.
The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.
Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and modification follow.
TERMS AND CONDITIONS
0. Definitions.
“This License” refers to version 3 of the GNU General Public License.
“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.
To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.
A “covered work” means either the unmodified Program or a work based on the Program.
To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
1. Source Code.
The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.
A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.
The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.
The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.
The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.
c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:
a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.
“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.
7. Additional Terms.
“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).
However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.
An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.
11. Patents.
A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”.
A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.
In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.
A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.
Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS

@@ -1,206 +0,0 @@
Tendermint Go-Merkle
Copyright (C) 2015 Tendermint
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
//--------------------------------------------------------------------------------
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for software and other kinds of works.
The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.
Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and modification follow.
TERMS AND CONDITIONS
0. Definitions.
“This License” refers to version 3 of the GNU General Public License.
“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.
To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.
A “covered work” means either the unmodified Program or a work based on the Program.
To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
1. Source Code.
The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.
A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.
The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.
The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.
The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.
c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:
a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.
“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.
7. Additional Terms.
“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).
However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.
An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.
11. Patents.
A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”.
A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.
In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.
A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.
Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS

@@ -1,325 +0,0 @@
mode: set
github.com/tendermint/go-merkle/util.go:8.36,10.17 2 0
github.com/tendermint/go-merkle/util.go:13.2,13.25 1 0
github.com/tendermint/go-merkle/util.go:10.17,12.3 1 0
github.com/tendermint/go-merkle/util.go:16.48,18.30 2 0
github.com/tendermint/go-merkle/util.go:22.2,22.27 1 0
github.com/tendermint/go-merkle/util.go:28.2,30.26 2 0
github.com/tendermint/go-merkle/util.go:18.30,20.3 1 0
github.com/tendermint/go-merkle/util.go:22.27,24.3 1 0
github.com/tendermint/go-merkle/util.go:24.3,24.34 1 0
github.com/tendermint/go-merkle/util.go:24.34,26.3 1 0
github.com/tendermint/go-merkle/util.go:30.26,32.3 1 0
github.com/tendermint/go-merkle/util.go:32.3,32.33 1 0
github.com/tendermint/go-merkle/util.go:32.33,34.3 1 0
github.com/tendermint/go-merkle/util.go:38.30,39.11 1 1
github.com/tendermint/go-merkle/util.go:42.2,42.10 1 1
github.com/tendermint/go-merkle/util.go:39.11,41.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:27.64,34.2 1 1
github.com/tendermint/go-merkle/iavl_node.go:38.75,46.22 5 1
github.com/tendermint/go-merkle/iavl_node.go:54.2,54.13 1 1
github.com/tendermint/go-merkle/iavl_node.go:46.22,49.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:49.3,53.3 2 1
github.com/tendermint/go-merkle/iavl_node.go:57.41,58.22 1 1
github.com/tendermint/go-merkle/iavl_node.go:61.2,71.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:58.22,60.3 1 0
github.com/tendermint/go-merkle/iavl_node.go:74.68,75.44 1 1
github.com/tendermint/go-merkle/iavl_node.go:78.2,78.22 1 1
github.com/tendermint/go-merkle/iavl_node.go:75.44,77.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:78.22,80.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:80.3,81.44 1 1
github.com/tendermint/go-merkle/iavl_node.go:81.44,83.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:83.4,85.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:89.88,90.22 1 1
github.com/tendermint/go-merkle/iavl_node.go:90.22,91.45 1 1
github.com/tendermint/go-merkle/iavl_node.go:91.45,93.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:93.4,95.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:96.3,97.44 1 1
github.com/tendermint/go-merkle/iavl_node.go:97.44,99.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:99.4,104.4 4 1
github.com/tendermint/go-merkle/iavl_node.go:108.95,109.22 1 0
github.com/tendermint/go-merkle/iavl_node.go:109.22,110.17 1 0
github.com/tendermint/go-merkle/iavl_node.go:110.17,112.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:112.4,115.4 2 0
github.com/tendermint/go-merkle/iavl_node.go:116.3,120.28 2 0
github.com/tendermint/go-merkle/iavl_node.go:120.28,122.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:122.4,124.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:129.64,130.22 1 1
github.com/tendermint/go-merkle/iavl_node.go:134.2,137.16 4 1
github.com/tendermint/go-merkle/iavl_node.go:141.2,145.33 3 1
github.com/tendermint/go-merkle/iavl_node.go:130.22,132.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:137.16,139.3 1 0
github.com/tendermint/go-merkle/iavl_node.go:149.98,155.22 3 1
github.com/tendermint/go-merkle/iavl_node.go:181.2,181.8 1 1
github.com/tendermint/go-merkle/iavl_node.go:155.22,159.3 2 1
github.com/tendermint/go-merkle/iavl_node.go:159.3,161.27 1 1
github.com/tendermint/go-merkle/iavl_node.go:166.3,166.27 1 1
github.com/tendermint/go-merkle/iavl_node.go:169.3,171.28 2 1
github.com/tendermint/go-merkle/iavl_node.go:176.3,176.28 1 1
github.com/tendermint/go-merkle/iavl_node.go:179.3,179.51 1 1
github.com/tendermint/go-merkle/iavl_node.go:161.27,165.4 3 1
github.com/tendermint/go-merkle/iavl_node.go:166.27,168.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:171.28,175.4 3 1
github.com/tendermint/go-merkle/iavl_node.go:176.28,178.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:186.48,187.22 1 1
github.com/tendermint/go-merkle/iavl_node.go:190.2,190.20 1 1
github.com/tendermint/go-merkle/iavl_node.go:195.2,195.26 1 1
github.com/tendermint/go-merkle/iavl_node.go:199.2,199.27 1 1
github.com/tendermint/go-merkle/iavl_node.go:205.2,206.18 2 1
github.com/tendermint/go-merkle/iavl_node.go:187.22,189.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:190.20,192.3 1 0
github.com/tendermint/go-merkle/iavl_node.go:195.26,198.3 2 1
github.com/tendermint/go-merkle/iavl_node.go:199.27,202.3 2 1
github.com/tendermint/go-merkle/iavl_node.go:210.86,217.22 4 1
github.com/tendermint/go-merkle/iavl_node.go:232.2,232.8 1 1
github.com/tendermint/go-merkle/iavl_node.go:217.22,220.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:220.3,222.27 1 1
github.com/tendermint/go-merkle/iavl_node.go:225.3,227.28 2 1
github.com/tendermint/go-merkle/iavl_node.go:230.3,230.51 1 1
github.com/tendermint/go-merkle/iavl_node.go:222.27,224.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:227.28,229.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:235.110,236.22 1 1
github.com/tendermint/go-merkle/iavl_node.go:236.22,238.14 2 1
github.com/tendermint/go-merkle/iavl_node.go:238.14,246.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:246.4,246.22 1 1
github.com/tendermint/go-merkle/iavl_node.go:246.22,248.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:248.4,256.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:257.3,259.44 2 1
github.com/tendermint/go-merkle/iavl_node.go:266.3,266.14 1 1
github.com/tendermint/go-merkle/iavl_node.go:259.44,262.4 2 1
github.com/tendermint/go-merkle/iavl_node.go:262.4,265.4 2 1
github.com/tendermint/go-merkle/iavl_node.go:266.14,268.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:268.4,271.4 2 1
github.com/tendermint/go-merkle/iavl_node.go:279.90,280.22 1 1
github.com/tendermint/go-merkle/iavl_node.go:280.22,281.45 1 1
github.com/tendermint/go-merkle/iavl_node.go:281.45,283.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:283.4,285.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:286.3,287.44 1 1
github.com/tendermint/go-merkle/iavl_node.go:287.44,291.16 4 1
github.com/tendermint/go-merkle/iavl_node.go:296.4,299.52 4 1
github.com/tendermint/go-merkle/iavl_node.go:291.16,293.5 1 0
github.com/tendermint/go-merkle/iavl_node.go:293.5,293.55 1 1
github.com/tendermint/go-merkle/iavl_node.go:293.55,295.5 1 1
github.com/tendermint/go-merkle/iavl_node.go:300.4,304.16 4 1
github.com/tendermint/go-merkle/iavl_node.go:309.4,311.21 3 1
github.com/tendermint/go-merkle/iavl_node.go:315.4,316.52 2 1
github.com/tendermint/go-merkle/iavl_node.go:304.16,306.5 1 0
github.com/tendermint/go-merkle/iavl_node.go:306.5,306.57 1 1
github.com/tendermint/go-merkle/iavl_node.go:306.57,308.5 1 1
github.com/tendermint/go-merkle/iavl_node.go:311.21,314.5 2 1
github.com/tendermint/go-merkle/iavl_node.go:321.58,322.26 1 1
github.com/tendermint/go-merkle/iavl_node.go:322.26,324.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:324.3,326.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:329.59,330.27 1 1
github.com/tendermint/go-merkle/iavl_node.go:330.27,332.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:332.3,334.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:337.58,349.2 8 1
github.com/tendermint/go-merkle/iavl_node.go:351.57,363.2 8 1
github.com/tendermint/go-merkle/iavl_node.go:366.54,369.2 2 1
github.com/tendermint/go-merkle/iavl_node.go:371.52,373.2 1 1
github.com/tendermint/go-merkle/iavl_node.go:375.64,377.17 2 1
github.com/tendermint/go-merkle/iavl_node.go:389.2,389.18 1 1
github.com/tendermint/go-merkle/iavl_node.go:402.2,402.13 1 1
github.com/tendermint/go-merkle/iavl_node.go:377.17,378.46 1 1
github.com/tendermint/go-merkle/iavl_node.go:378.46,381.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:381.4,387.4 3 1
github.com/tendermint/go-merkle/iavl_node.go:389.18,390.47 1 1
github.com/tendermint/go-merkle/iavl_node.go:390.47,393.4 1 1
github.com/tendermint/go-merkle/iavl_node.go:393.4,399.4 3 1
github.com/tendermint/go-merkle/iavl_node.go:405.75,407.10 2 1
github.com/tendermint/go-merkle/iavl_node.go:410.2,410.21 1 1
github.com/tendermint/go-merkle/iavl_node.go:420.2,420.14 1 1
github.com/tendermint/go-merkle/iavl_node.go:407.10,409.3 1 0
github.com/tendermint/go-merkle/iavl_node.go:410.21,412.11 2 1
github.com/tendermint/go-merkle/iavl_node.go:415.3,416.11 2 1
github.com/tendermint/go-merkle/iavl_node.go:412.11,414.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:416.11,418.4 1 0
github.com/tendermint/go-merkle/iavl_node.go:424.50,425.22 1 1
github.com/tendermint/go-merkle/iavl_node.go:428.2,428.35 1 1
github.com/tendermint/go-merkle/iavl_node.go:425.22,427.3 1 1
github.com/tendermint/go-merkle/iavl_node.go:432.50,433.22 1 0
github.com/tendermint/go-merkle/iavl_node.go:436.2,436.36 1 0
github.com/tendermint/go-merkle/iavl_node.go:433.22,435.3 1 0
github.com/tendermint/go-merkle/iavl_node.go:442.85,444.17 2 1
github.com/tendermint/go-merkle/iavl_node.go:447.2,448.54 2 1
github.com/tendermint/go-merkle/iavl_node.go:444.17,446.3 1 0
github.com/tendermint/go-merkle/iavl_node.go:452.88,455.17 3 1
github.com/tendermint/go-merkle/iavl_node.go:458.2,458.45 1 1
github.com/tendermint/go-merkle/iavl_node.go:455.17,457.3 1 0
github.com/tendermint/go-merkle/iavl_proof.go:18.76,19.53 1 1
github.com/tendermint/go-merkle/iavl_proof.go:22.2,22.57 1 1
github.com/tendermint/go-merkle/iavl_proof.go:25.2,25.44 1 1
github.com/tendermint/go-merkle/iavl_proof.go:28.2,30.42 2 1
github.com/tendermint/go-merkle/iavl_proof.go:35.2,35.42 1 1
github.com/tendermint/go-merkle/iavl_proof.go:19.53,21.3 1 1
github.com/tendermint/go-merkle/iavl_proof.go:22.57,24.3 1 1
github.com/tendermint/go-merkle/iavl_proof.go:25.44,27.3 1 1
github.com/tendermint/go-merkle/iavl_proof.go:30.42,33.3 1 1
github.com/tendermint/go-merkle/iavl_proof.go:45.64,51.27 6 1
github.com/tendermint/go-merkle/iavl_proof.go:58.2,58.16 1 1
github.com/tendermint/go-merkle/iavl_proof.go:62.2,63.24 2 1
github.com/tendermint/go-merkle/iavl_proof.go:51.27,54.3 2 1
github.com/tendermint/go-merkle/iavl_proof.go:54.3,57.3 2 1
github.com/tendermint/go-merkle/iavl_proof.go:58.16,60.3 1 0
github.com/tendermint/go-merkle/iavl_proof.go:71.45,79.16 8 1
github.com/tendermint/go-merkle/iavl_proof.go:83.2,84.24 2 1
github.com/tendermint/go-merkle/iavl_proof.go:79.16,81.3 1 0
github.com/tendermint/go-merkle/iavl_proof.go:87.100,88.22 1 1
github.com/tendermint/go-merkle/iavl_proof.go:88.22,89.45 1 1
github.com/tendermint/go-merkle/iavl_proof.go:89.45,93.18 4 1
github.com/tendermint/go-merkle/iavl_proof.go:96.4,97.18 2 1
github.com/tendermint/go-merkle/iavl_proof.go:100.4,105.15 3 1
github.com/tendermint/go-merkle/iavl_proof.go:93.18,95.5 1 0
github.com/tendermint/go-merkle/iavl_proof.go:97.18,99.5 1 0
github.com/tendermint/go-merkle/iavl_proof.go:106.4,108.4 1 0
github.com/tendermint/go-merkle/iavl_proof.go:109.3,110.44 1 1
github.com/tendermint/go-merkle/iavl_proof.go:110.44,112.15 2 1
github.com/tendermint/go-merkle/iavl_proof.go:115.4,122.15 3 1
github.com/tendermint/go-merkle/iavl_proof.go:112.15,114.5 1 0
github.com/tendermint/go-merkle/iavl_proof.go:123.4,125.15 2 1
github.com/tendermint/go-merkle/iavl_proof.go:128.4,135.15 3 1
github.com/tendermint/go-merkle/iavl_proof.go:125.15,127.5 1 0
github.com/tendermint/go-merkle/iavl_proof.go:141.63,142.19 1 1
github.com/tendermint/go-merkle/iavl_proof.go:145.2,150.14 4 1
github.com/tendermint/go-merkle/iavl_proof.go:142.19,144.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:24.87,25.15 1 1
github.com/tendermint/go-merkle/iavl_tree.go:25.15,31.3 1 1
github.com/tendermint/go-merkle/iavl_tree.go:31.3,38.3 1 1
github.com/tendermint/go-merkle/iavl_tree.go:43.32,44.19 1 0
github.com/tendermint/go-merkle/iavl_tree.go:52.2,52.39 1 0
github.com/tendermint/go-merkle/iavl_tree.go:63.2,68.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:44.19,51.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:52.39,58.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:58.3,58.47 1 0
github.com/tendermint/go-merkle/iavl_tree.go:58.47,62.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:71.31,72.19 1 1
github.com/tendermint/go-merkle/iavl_tree.go:75.2,75.20 1 1
github.com/tendermint/go-merkle/iavl_tree.go:72.19,74.3 1 1
github.com/tendermint/go-merkle/iavl_tree.go:78.34,79.19 1 0
github.com/tendermint/go-merkle/iavl_tree.go:82.2,82.22 1 0
github.com/tendermint/go-merkle/iavl_tree.go:79.19,81.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:85.46,86.19 1 1
github.com/tendermint/go-merkle/iavl_tree.go:89.2,89.27 1 1
github.com/tendermint/go-merkle/iavl_tree.go:86.19,88.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:92.75,93.19 1 1
github.com/tendermint/go-merkle/iavl_tree.go:97.2,98.16 2 1
github.com/tendermint/go-merkle/iavl_tree.go:93.19,96.3 2 1
github.com/tendermint/go-merkle/iavl_tree.go:101.34,102.19 1 1
github.com/tendermint/go-merkle/iavl_tree.go:105.2,106.13 2 1
github.com/tendermint/go-merkle/iavl_tree.go:102.19,104.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:109.50,110.19 1 1
github.com/tendermint/go-merkle/iavl_tree.go:113.2,113.32 1 1
github.com/tendermint/go-merkle/iavl_tree.go:110.19,112.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:116.34,117.19 1 1
github.com/tendermint/go-merkle/iavl_tree.go:120.2,120.23 1 1
github.com/tendermint/go-merkle/iavl_tree.go:117.19,119.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:125.38,126.20 1 1
github.com/tendermint/go-merkle/iavl_tree.go:126.20,128.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:128.3,130.3 1 1
github.com/tendermint/go-merkle/iavl_tree.go:133.72,134.19 1 1
github.com/tendermint/go-merkle/iavl_tree.go:137.2,137.27 1 1
github.com/tendermint/go-merkle/iavl_tree.go:134.19,136.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:140.79,141.19 1 0
github.com/tendermint/go-merkle/iavl_tree.go:144.2,144.36 1 0
github.com/tendermint/go-merkle/iavl_tree.go:141.19,143.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:147.78,148.19 1 1
github.com/tendermint/go-merkle/iavl_tree.go:151.2,152.14 2 1
github.com/tendermint/go-merkle/iavl_tree.go:155.2,155.42 1 1
github.com/tendermint/go-merkle/iavl_tree.go:160.2,160.20 1 1
github.com/tendermint/go-merkle/iavl_tree.go:148.19,150.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:152.14,154.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:155.42,157.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:157.3,159.3 1 1
github.com/tendermint/go-merkle/iavl_tree.go:163.93,164.19 1 1
github.com/tendermint/go-merkle/iavl_tree.go:167.2,167.54 1 1
github.com/tendermint/go-merkle/iavl_tree.go:164.19,166.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:167.54,168.23 1 1
github.com/tendermint/go-merkle/iavl_tree.go:168.23,170.4 1 1
github.com/tendermint/go-merkle/iavl_tree.go:170.4,172.4 1 1
github.com/tendermint/go-merkle/iavl_tree.go:191.50,198.2 1 1
github.com/tendermint/go-merkle/iavl_tree.go:200.64,205.8 4 1
github.com/tendermint/go-merkle/iavl_tree.go:205.8,209.3 2 1
github.com/tendermint/go-merkle/iavl_tree.go:209.3,212.20 2 1
github.com/tendermint/go-merkle/iavl_tree.go:216.3,220.17 5 1
github.com/tendermint/go-merkle/iavl_tree.go:223.3,226.14 4 1
github.com/tendermint/go-merkle/iavl_tree.go:212.20,215.4 2 0
github.com/tendermint/go-merkle/iavl_tree.go:220.17,222.4 1 0
github.com/tendermint/go-merkle/iavl_tree.go:230.58,233.22 3 1
github.com/tendermint/go-merkle/iavl_tree.go:236.2,236.20 1 1
github.com/tendermint/go-merkle/iavl_tree.go:243.2,245.16 3 1
github.com/tendermint/go-merkle/iavl_tree.go:248.2,250.21 3 1
github.com/tendermint/go-merkle/iavl_tree.go:233.22,235.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:236.20,238.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:245.16,247.3 1 0
github.com/tendermint/go-merkle/iavl_tree.go:253.46,258.42 3 1
github.com/tendermint/go-merkle/iavl_tree.go:258.42,261.3 2 1
github.com/tendermint/go-merkle/simple_tree.go:38.64,44.16 6 1
github.com/tendermint/go-merkle/simple_tree.go:47.2,47.24 1 1
github.com/tendermint/go-merkle/simple_tree.go:44.16,46.3 1 0
github.com/tendermint/go-merkle/simple_tree.go:50.51,52.21 1 1
github.com/tendermint/go-merkle/simple_tree.go:53.2,54.13 1 0
github.com/tendermint/go-merkle/simple_tree.go:55.2,56.19 1 1
github.com/tendermint/go-merkle/simple_tree.go:57.2,60.46 3 1
github.com/tendermint/go-merkle/simple_tree.go:65.57,67.29 2 0
github.com/tendermint/go-merkle/simple_tree.go:70.2,70.37 1 0
github.com/tendermint/go-merkle/simple_tree.go:67.29,69.3 1 0
github.com/tendermint/go-merkle/simple_tree.go:74.52,77.17 3 0
github.com/tendermint/go-merkle/simple_tree.go:80.2,80.24 1 0
github.com/tendermint/go-merkle/simple_tree.go:77.17,79.3 1 0
github.com/tendermint/go-merkle/simple_tree.go:84.55,86.29 2 1
github.com/tendermint/go-merkle/simple_tree.go:90.2,90.37 1 1
github.com/tendermint/go-merkle/simple_tree.go:86.29,89.3 2 1
github.com/tendermint/go-merkle/simple_tree.go:94.57,97.2 2 0
github.com/tendermint/go-merkle/simple_tree.go:111.32,114.40 3 0
github.com/tendermint/go-merkle/simple_tree.go:119.2,119.17 1 0
github.com/tendermint/go-merkle/simple_tree.go:122.2,122.24 1 0
github.com/tendermint/go-merkle/simple_tree.go:114.40,116.3 1 0
github.com/tendermint/go-merkle/simple_tree.go:116.3,118.3 1 0
github.com/tendermint/go-merkle/simple_tree.go:119.17,121.3 1 0
github.com/tendermint/go-merkle/simple_tree.go:127.41,127.61 1 0
github.com/tendermint/go-merkle/simple_tree.go:128.41,128.77 1 0
github.com/tendermint/go-merkle/simple_tree.go:129.41,129.80 1 0
github.com/tendermint/go-merkle/simple_tree.go:130.41,130.60 1 0
github.com/tendermint/go-merkle/simple_tree.go:132.61,134.22 2 0
github.com/tendermint/go-merkle/simple_tree.go:137.2,139.30 3 0
github.com/tendermint/go-merkle/simple_tree.go:142.2,142.17 1 0
github.com/tendermint/go-merkle/simple_tree.go:134.22,136.3 1 0
github.com/tendermint/go-merkle/simple_tree.go:139.30,141.3 1 0
github.com/tendermint/go-merkle/simple_tree.go:152.91,156.31 4 1
github.com/tendermint/go-merkle/simple_tree.go:161.2,161.8 1 1
github.com/tendermint/go-merkle/simple_tree.go:156.31,160.3 1 1
github.com/tendermint/go-merkle/simple_tree.go:166.92,168.25 2 1
github.com/tendermint/go-merkle/simple_tree.go:171.2,171.42 1 1
github.com/tendermint/go-merkle/simple_tree.go:174.2,174.13 1 1
github.com/tendermint/go-merkle/simple_tree.go:168.25,170.3 1 1
github.com/tendermint/go-merkle/simple_tree.go:171.42,173.3 1 1
github.com/tendermint/go-merkle/simple_tree.go:177.40,179.2 1 0
github.com/tendermint/go-merkle/simple_tree.go:181.61,187.2 1 0
github.com/tendermint/go-merkle/simple_tree.go:191.95,193.20 1 1
github.com/tendermint/go-merkle/simple_tree.go:196.2,196.15 1 1
github.com/tendermint/go-merkle/simple_tree.go:193.20,195.3 1 0
github.com/tendermint/go-merkle/simple_tree.go:197.2,199.13 2 0
github.com/tendermint/go-merkle/simple_tree.go:200.2,201.28 1 1
github.com/tendermint/go-merkle/simple_tree.go:204.3,204.18 1 1
github.com/tendermint/go-merkle/simple_tree.go:205.2,206.28 1 1
github.com/tendermint/go-merkle/simple_tree.go:209.3,210.22 2 1
github.com/tendermint/go-merkle/simple_tree.go:201.28,203.4 1 1
github.com/tendermint/go-merkle/simple_tree.go:206.28,208.4 1 1
github.com/tendermint/go-merkle/simple_tree.go:210.22,212.23 2 1
github.com/tendermint/go-merkle/simple_tree.go:215.4,215.77 1 1
github.com/tendermint/go-merkle/simple_tree.go:212.23,214.5 1 1
github.com/tendermint/go-merkle/simple_tree.go:216.4,218.24 2 1
github.com/tendermint/go-merkle/simple_tree.go:221.4,221.78 1 1
github.com/tendermint/go-merkle/simple_tree.go:218.24,220.5 1 1
github.com/tendermint/go-merkle/simple_tree.go:240.53,243.17 2 1
github.com/tendermint/go-merkle/simple_tree.go:253.2,253.20 1 1
github.com/tendermint/go-merkle/simple_tree.go:243.17,244.22 1 1
github.com/tendermint/go-merkle/simple_tree.go:251.3,251.19 1 1
github.com/tendermint/go-merkle/simple_tree.go:244.22,246.4 1 1
github.com/tendermint/go-merkle/simple_tree.go:246.4,246.30 1 1
github.com/tendermint/go-merkle/simple_tree.go:246.30,248.4 1 1
github.com/tendermint/go-merkle/simple_tree.go:248.4,249.9 1 1
github.com/tendermint/go-merkle/simple_tree.go:258.95,260.20 1 1
github.com/tendermint/go-merkle/simple_tree.go:261.2,262.18 1 0
github.com/tendermint/go-merkle/simple_tree.go:263.2,265.42 2 1
github.com/tendermint/go-merkle/simple_tree.go:266.2,275.40 9 1
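For readers unfamiliar with the format, the deleted file above is a Go coverage profile of the kind produced by go test -coverprofile. After the "mode: set" header, each line follows the pattern

	file.go:startLine.startCol,endLine.endCol numStatements count

and in "set" mode the trailing count is simply 0 or 1, recording whether that block of statements was executed at all. For example, the line

	github.com/tendermint/go-merkle/util.go:38.30,39.11 1 1

describes a one-statement block spanning util.go lines 38-39 that the tests did execute, while a trailing 0 marks a block the tests never reached.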

View File

@ -1,337 +0,0 @@
package merkle
import (
"bytes"
"fmt"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-common"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-common/test"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-db"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-wire"
"runtime"
"testing"
)
const testReadLimit = 1 << 20 // Some reasonable limit for wire.Read*() lmt
func randstr(length int) string {
return RandStr(length)
}
// Convenience for a new node
func N(l, r interface{}) *IAVLNode {
var left, right *IAVLNode
if _, ok := l.(*IAVLNode); ok {
left = l.(*IAVLNode)
} else {
left = NewIAVLNode(l, "")
}
if _, ok := r.(*IAVLNode); ok {
right = r.(*IAVLNode)
} else {
right = NewIAVLNode(r, "")
}
n := &IAVLNode{
key: right.lmd(nil).key,
value: "",
leftNode: left,
rightNode: right,
}
n.calcHeightAndSize(nil)
return n
}
// Setup a deep node
func T(n *IAVLNode) *IAVLTree {
t := NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, nil)
n.hashWithCount(t)
t.root = n
return t
}
// Convenience for simple printing of keys & tree structure
func P(n *IAVLNode) string {
if n.height == 0 {
return fmt.Sprintf("%v", n.key)
} else {
return fmt.Sprintf("(%v %v)", P(n.leftNode), P(n.rightNode))
}
}
func TestUnit(t *testing.T) {
expectHash := func(tree *IAVLTree, hashCount int) {
// ensure number of new hash calculations is as expected.
hash, count := tree.HashWithCount()
if count != hashCount {
t.Fatalf("Expected %v new hashes, got %v", hashCount, count)
}
// nuke hashes and reconstruct hash, ensure it's the same.
tree.root.traverse(tree, func(node *IAVLNode) bool {
node.hash = nil
return false
})
// ensure that the new hash after nuking is the same as the old.
newHash, _ := tree.HashWithCount()
if bytes.Compare(hash, newHash) != 0 {
t.Fatalf("Expected hash %v but got %v after nuking", hash, newHash)
}
}
expectSet := func(tree *IAVLTree, i int, repr string, hashCount int) {
origNode := tree.root
updated := tree.Set(i, "")
// ensure node was added & structure is as expected.
if updated == true || P(tree.root) != repr {
t.Fatalf("Adding %v to %v:\nExpected %v\nUnexpectedly got %v updated:%v",
i, P(origNode), repr, P(tree.root), updated)
}
// ensure hash calculation requirements
expectHash(tree, hashCount)
tree.root = origNode
}
expectRemove := func(tree *IAVLTree, i int, repr string, hashCount int) {
origNode := tree.root
value, removed := tree.Remove(i)
// ensure node was removed & structure is as expected.
if value != "" || !removed || P(tree.root) != repr {
t.Fatalf("Removing %v from %v:\nExpected %v\nUnexpectedly got %v value:%v removed:%v",
i, P(origNode), repr, P(tree.root), value, removed)
}
// ensure hash calculation requirements
expectHash(tree, hashCount)
tree.root = origNode
}
//////// Test Set cases:
// Case 1:
t1 := T(N(4, 20))
expectSet(t1, 8, "((4 8) 20)", 3)
expectSet(t1, 25, "(4 (20 25))", 3)
t2 := T(N(4, N(20, 25)))
expectSet(t2, 8, "((4 8) (20 25))", 3)
expectSet(t2, 30, "((4 20) (25 30))", 4)
t3 := T(N(N(1, 2), 6))
expectSet(t3, 4, "((1 2) (4 6))", 4)
expectSet(t3, 8, "((1 2) (6 8))", 3)
t4 := T(N(N(1, 2), N(N(5, 6), N(7, 9))))
expectSet(t4, 8, "(((1 2) (5 6)) ((7 8) 9))", 5)
expectSet(t4, 10, "(((1 2) (5 6)) (7 (9 10)))", 5)
//////// Test Remove cases:
t10 := T(N(N(1, 2), 3))
expectRemove(t10, 2, "(1 3)", 1)
expectRemove(t10, 3, "(1 2)", 0)
t11 := T(N(N(N(1, 2), 3), N(4, 5)))
expectRemove(t11, 4, "((1 2) (3 5))", 2)
expectRemove(t11, 3, "((1 2) (4 5))", 1)
}
func TestIntegration(t *testing.T) {
type record struct {
key string
value string
}
records := make([]*record, 400)
var tree *IAVLTree = NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, nil)
randomRecord := func() *record {
return &record{randstr(20), randstr(20)}
}
for i := range records {
r := randomRecord()
records[i] = r
//t.Log("New record", r)
//PrintIAVLNode(tree.root)
updated := tree.Set(r.key, "")
if updated {
t.Error("should have not been updated")
}
updated = tree.Set(r.key, r.value)
if !updated {
t.Error("should have been updated")
}
if tree.Size() != i+1 {
t.Error("size was wrong", tree.Size(), i+1)
}
}
for _, r := range records {
if has := tree.Has(r.key); !has {
t.Error("Missing key", r.key)
}
if has := tree.Has(randstr(12)); has {
t.Error("Table has extra key")
}
if _, val := tree.Get(r.key); val.(string) != r.value {
t.Error("wrong value")
}
}
for i, x := range records {
if val, removed := tree.Remove(x.key); !removed {
t.Error("Wasn't removed")
} else if val != x.value {
t.Error("Wrong value")
}
for _, r := range records[i+1:] {
if has := tree.Has(r.key); !has {
t.Error("Missing key", r.key)
}
if has := tree.Has(randstr(12)); has {
t.Error("Table has extra key")
}
_, val := tree.Get(r.key)
if val != r.value {
t.Error("wrong value")
}
}
if tree.Size() != len(records)-(i+1) {
t.Error("size was wrong", tree.Size(), (len(records) - (i + 1)))
}
}
}
func TestPersistence(t *testing.T) {
db := db.NewMemDB()
// Create some random key value pairs
records := make(map[string]string)
for i := 0; i < 10000; i++ {
records[randstr(20)] = randstr(20)
}
// Construct some tree and save it
t1 := NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, db)
for key, value := range records {
t1.Set(key, value)
}
t1.Save()
hash, _ := t1.HashWithCount()
// Load a tree
t2 := NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, db)
t2.Load(hash)
for key, value := range records {
_, t2value := t2.Get(key)
if t2value != value {
t.Fatalf("Invalid value. Expected %v, got %v", value, t2value)
}
}
}
func testProof(t *testing.T, proof *IAVLProof, keyBytes, valueBytes, rootHash []byte) {
// Proof must verify.
if !proof.Verify(keyBytes, valueBytes, rootHash) {
t.Errorf("Invalid proof. Verification failed.")
return
}
// Write/Read then verify.
proofBytes := wire.BinaryBytes(proof)
n, err := int(0), error(nil)
proof2 := wire.ReadBinary(&IAVLProof{}, bytes.NewBuffer(proofBytes), 0, &n, &err).(*IAVLProof)
if err != nil {
t.Errorf("Failed to read IAVLProof from bytes: %v", err)
return
}
if !proof2.Verify(keyBytes, valueBytes, rootHash) {
// t.Log(Fmt("%X\n%X\n", proofBytes, wire.BinaryBytes(proof2)))
t.Errorf("Invalid proof after write/read. Verification failed.")
return
}
// Random mutations must not verify
for i := 0; i < 5; i++ {
badProofBytes := MutateByteSlice(proofBytes)
n, err := int(0), error(nil)
badProof := wire.ReadBinary(&IAVLProof{}, bytes.NewBuffer(badProofBytes), testReadLimit, &n, &err).(*IAVLProof)
if err != nil {
continue // This is fine.
}
if badProof.Verify(keyBytes, valueBytes, rootHash) {
t.Errorf("Proof was still valid after a random mutation:\n%X\n%X", proofBytes, badProofBytes)
}
}
}
func TestIAVLProof(t *testing.T) {
// Convenient wrapper around wire.BasicCodec.
toBytes := func(o interface{}) []byte {
buf, n, err := new(bytes.Buffer), int(0), error(nil)
wire.BasicCodec.Encode(o, buf, &n, &err)
if err != nil {
panic(Fmt("Failed to encode thing: %v", err))
}
return buf.Bytes()
}
// Construct some random tree
db := db.NewMemDB()
var tree *IAVLTree = NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 100, db)
for i := 0; i < 1000; i++ {
key, value := randstr(20), randstr(20)
tree.Set(key, value)
}
// Persist the items so far
tree.Save()
// Add more items so it's not all persisted
for i := 0; i < 100; i++ {
key, value := randstr(20), randstr(20)
tree.Set(key, value)
}
// Now for each item, construct a proof and verify
tree.Iterate(func(key interface{}, value interface{}) bool {
proof := tree.ConstructProof(key)
if !bytes.Equal(proof.RootHash, tree.Hash()) {
t.Errorf("Invalid proof. Expected root %X, got %X", tree.Hash(), proof.RootHash)
}
testProof(t, proof, toBytes(key), toBytes(value), tree.Hash())
return false
})
}
func BenchmarkImmutableAvlTree(b *testing.B) {
b.StopTimer()
t := NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, nil)
// 23000ns/op, 43000ops/s
// for i := 0; i < 10000000; i++ {
for i := 0; i < 1000000; i++ {
t.Set(RandInt64(), "")
}
fmt.Println("ok, starting")
runtime.GC()
b.StartTimer()
for i := 0; i < b.N; i++ {
ri := RandInt64()
t.Set(ri, "")
t.Remove(ri)
}
}

View File

@ -1,87 +0,0 @@
package merkle
import (
"bytes"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-common"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-common/test"
"testing"
)
type testItem []byte
func (tI testItem) Hash() []byte {
return []byte(tI)
}
func TestSimpleProof(t *testing.T) {
total := 100
items := make([]Hashable, total)
for i := 0; i < total; i++ {
items[i] = testItem(RandBytes(32))
}
rootHash := SimpleHashFromHashables(items)
rootHash2, proofs := SimpleProofsFromHashables(items)
if !bytes.Equal(rootHash, rootHash2) {
t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2)
}
// For each item, check the trail.
for i, item := range items {
itemHash := item.Hash()
proof := proofs[i]
// Verify success
ok := proof.Verify(i, total, itemHash, rootHash)
if !ok {
t.Errorf("Verification failed for index %v.", i)
}
// Wrong item index should make it fail
{
ok = proof.Verify((i+1)%total, total, itemHash, rootHash)
if ok {
t.Errorf("Expected verification to fail for wrong index %v.", i)
}
}
// Trail too long should make it fail
origAunts := proof.Aunts
proof.Aunts = append(proof.Aunts, RandBytes(32))
{
ok = proof.Verify(i, total, itemHash, rootHash)
if ok {
t.Errorf("Expected verification to fail for wrong trail length.")
}
}
proof.Aunts = origAunts
// Trail too short should make it fail
proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1]
{
ok = proof.Verify(i, total, itemHash, rootHash)
if ok {
t.Errorf("Expected verification to fail for wrong trail length.")
}
}
proof.Aunts = origAunts
// Mutating the itemHash should make it fail.
ok = proof.Verify(i, total, MutateByteSlice(itemHash), rootHash)
if ok {
t.Errorf("Expected verification to fail for mutated leaf hash")
}
// Mutating the rootHash should make it fail.
ok = proof.Verify(i, total, itemHash, MutateByteSlice(rootHash))
if ok {
t.Errorf("Expected verification to fail for mutated root hash")
}
}
}

View File

@ -1,21 +0,0 @@
package merkle
type Tree interface {
Size() (size int)
Height() (height int8)
Has(key interface{}) (has bool)
Get(key interface{}) (index int, value interface{})
GetByIndex(index int) (key interface{}, value interface{})
Set(key interface{}, value interface{}) (updated bool)
Remove(key interface{}) (value interface{}, removed bool)
HashWithCount() (hash []byte, count int)
Hash() (hash []byte)
Save() (hash []byte)
Load(hash []byte)
Copy() Tree
Iterate(func(key interface{}, value interface{}) (stop bool)) (stopped bool)
}
type Hashable interface {
Hash() []byte
}
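For orientation, the IAVLTree exercised by the tests above is an implementation of this Tree interface. The following is a minimal, hypothetical sketch of driving it through the interface; it is not part of the deleted files, and it assumes the same wire.BasicCodec construction and imports (fmt, go-wire) used in the tests, and that *IAVLTree satisfies Tree.

// Hypothetical usage sketch, not part of the original repository.
func exampleTreeUsage() {
	var tree Tree = NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, nil)
	tree.Set("alpha", "1")
	tree.Set("beta", "2")
	if !tree.Has("alpha") {
		panic("expected key to be present")
	}
	_, value := tree.Get("beta") // Get returns (index, value)
	fmt.Printf("beta => %v, size=%v, hash=%X\n", value, tree.Size(), tree.Hash())
	// Iterate walks the tree; returning true from the callback stops early.
	tree.Iterate(func(key interface{}, value interface{}) bool {
		fmt.Printf("%v: %v\n", key, value)
		return false
	})
}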

View File

@ -1,161 +0,0 @@
package p2p
import (
"fmt"
"io/ioutil"
"math/rand"
"testing"
)
func createTempFileName(prefix string) string {
f, err := ioutil.TempFile("", prefix)
if err != nil {
panic(err)
}
fname := f.Name()
err = f.Close()
if err != nil {
panic(err)
}
return fname
}
func TestEmpty(t *testing.T) {
fname := createTempFileName("addrbook_test")
// t.Logf("New tempfile name: %v", fname)
// Save an empty book & load it
book := NewAddrBook(fname)
book.saveToFile(fname)
book = NewAddrBook(fname)
book.loadFromFile(fname)
if book.Size() != 0 {
t.Errorf("Expected 0 addresses, found %v", book.Size())
}
}
func randIPv4Address() *NetAddress {
for {
ip := fmt.Sprintf("%v.%v.%v.%v",
rand.Intn(254)+1,
rand.Intn(255),
rand.Intn(255),
rand.Intn(255),
)
port := rand.Intn(65535-1) + 1
addr := NewNetAddressString(fmt.Sprintf("%v:%v", ip, port))
if addr.Routable() {
return addr
}
}
}
func TestSaveAddresses(t *testing.T) {
fname := createTempFileName("addrbook_test")
//t.Logf("New tempfile name: %v", fname)
// Create some random addresses
randAddrs := []struct {
addr *NetAddress
src *NetAddress
}{}
for i := 0; i < 100; i++ {
addr := randIPv4Address()
src := randIPv4Address()
randAddrs = append(randAddrs, struct {
addr *NetAddress
src *NetAddress
}{
addr: addr,
src: src,
})
}
// Create the book & populate & save
book := NewAddrBook(fname)
for _, addrSrc := range randAddrs {
book.AddAddress(addrSrc.addr, addrSrc.src)
}
if book.Size() != 100 {
t.Errorf("Expected 100 addresses, found %v", book.Size())
}
book.saveToFile(fname)
// Reload the book
book = NewAddrBook(fname)
book.loadFromFile(fname)
// Test ...
if book.Size() != 100 {
t.Errorf("Expected 100 addresses, found %v", book.Size())
}
for _, addrSrc := range randAddrs {
addr := addrSrc.addr
src := addrSrc.src
ka := book.addrLookup[addr.String()]
if ka == nil {
t.Fatalf("Expected to find KnownAddress %v but wasn't there.", addr)
}
if !(ka.Addr.Equals(addr) && ka.Src.Equals(src)) {
t.Fatalf("KnownAddress doesn't match addr & src")
}
}
}
func TestPromoteToOld(t *testing.T) {
fname := createTempFileName("addrbook_test")
t.Logf("New tempfile name: %v", fname)
// Create some random addresses
randAddrs := []struct {
addr *NetAddress
src *NetAddress
}{}
for i := 0; i < 100; i++ {
addr := randIPv4Address()
src := randIPv4Address()
randAddrs = append(randAddrs, struct {
addr *NetAddress
src *NetAddress
}{
addr: addr,
src: src,
})
}
// Create the book & populate & save
book := NewAddrBook(fname)
for _, addrSrc := range randAddrs {
book.AddAddress(addrSrc.addr, addrSrc.src)
}
// Attempt all addresses.
for _, addrSrc := range randAddrs {
book.MarkAttempt(addrSrc.addr)
}
// Promote half of them
for i, addrSrc := range randAddrs {
if i%2 == 0 {
book.MarkGood(addrSrc.addr)
}
}
book.saveToFile(fname)
// Reload the book
book = NewAddrBook(fname)
book.loadFromFile(fname)
// Test ...
if book.Size() != 100 {
t.Errorf("Expected 100 addresses, found %v", book.Size())
}
// TODO: do more testing :)
selection := book.GetSelection()
t.Logf("selection: %v", selection)
}

View File

@ -1,7 +0,0 @@
package p2p
import (
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-logger"
)
var log = logger.New("module", "p2p")

View File

@ -1,67 +0,0 @@
package p2p
import (
"math/rand"
"testing"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-common"
)
// Returns a dummy peer with a random key and random addresses
func randPeer() *Peer {
return &Peer{
Key: RandStr(12),
NodeInfo: &NodeInfo{
RemoteAddr: Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
ListenAddr: Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
},
}
}
func TestAddRemoveOne(t *testing.T) {
peerSet := NewPeerSet()
peer := randPeer()
err := peerSet.Add(peer)
if err != nil {
t.Errorf("Failed to add new peer")
}
if peerSet.Size() != 1 {
t.Errorf("Failed to add new peer and increment size")
}
peerSet.Remove(peer)
if peerSet.Has(peer.Key) {
t.Errorf("Failed to remove peer")
}
if peerSet.Size() != 0 {
t.Errorf("Failed to remove peer and decrement size")
}
}
func TestAddRemoveMany(t *testing.T) {
peerSet := NewPeerSet()
peers := []*Peer{}
N := 100
for i := 0; i < N; i++ {
peer := randPeer()
if err := peerSet.Add(peer); err != nil {
t.Errorf("Failed to add new peer")
}
if peerSet.Size() != i+1 {
t.Errorf("Failed to add new peer and increment size")
}
peers = append(peers, peer)
}
for i, peer := range peers {
peerSet.Remove(peer)
if peerSet.Has(peer.Key) {
t.Errorf("Failed to remove peer")
}
if peerSet.Size() != len(peers)-i-1 {
t.Errorf("Failed to remove peer and decrement size")
}
}
}

View File

@ -1,202 +0,0 @@
package p2p
import (
"bytes"
"io"
"testing"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-common"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-crypto"
)
type dummyConn struct {
*io.PipeReader
*io.PipeWriter
}
func (drw dummyConn) Close() (err error) {
err2 := drw.PipeWriter.CloseWithError(io.EOF)
err1 := drw.PipeReader.Close()
if err2 != nil {
return err2
}
return err1
}
// Each returned ReadWriteCloser is akin to a net.Conn
func makeDummyConnPair() (fooConn, barConn dummyConn) {
barReader, fooWriter := io.Pipe()
fooReader, barWriter := io.Pipe()
return dummyConn{fooReader, fooWriter}, dummyConn{barReader, barWriter}
}
func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) {
fooConn, barConn := makeDummyConnPair()
fooPrvKey := crypto.GenPrivKeyEd25519()
fooPubKey := fooPrvKey.PubKey().(crypto.PubKeyEd25519)
barPrvKey := crypto.GenPrivKeyEd25519()
barPubKey := barPrvKey.PubKey().(crypto.PubKeyEd25519)
Parallel(
func() {
var err error
fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
if err != nil {
tb.Errorf("Failed to establish SecretConnection for foo: %v", err)
return
}
remotePubBytes := fooSecConn.RemotePubKey()
if !bytes.Equal(remotePubBytes[:], barPubKey[:]) {
tb.Errorf("Unexpected fooSecConn.RemotePubKey. Expected %v, got %v",
barPubKey, fooSecConn.RemotePubKey())
}
},
func() {
var err error
barSecConn, err = MakeSecretConnection(barConn, barPrvKey)
if barSecConn == nil {
tb.Errorf("Failed to establish SecretConnection for bar: %v", err)
return
}
remotePubBytes := barSecConn.RemotePubKey()
if !bytes.Equal(remotePubBytes[:], fooPubKey[:]) {
tb.Errorf("Unexpected barSecConn.RemotePubKey. Expected %v, got %v",
fooPubKey, barSecConn.RemotePubKey())
}
})
return
}
func TestSecretConnectionHandshake(t *testing.T) {
fooSecConn, barSecConn := makeSecretConnPair(t)
fooSecConn.Close()
barSecConn.Close()
}
func TestSecretConnectionReadWrite(t *testing.T) {
fooConn, barConn := makeDummyConnPair()
fooWrites, barWrites := []string{}, []string{}
fooReads, barReads := []string{}, []string{}
// Pre-generate the things to write (for foo & bar)
for i := 0; i < 100; i++ {
fooWrites = append(fooWrites, RandStr((RandInt()%(dataMaxSize*5))+1))
barWrites = append(barWrites, RandStr((RandInt()%(dataMaxSize*5))+1))
}
// A helper that will run with (fooConn, fooWrites, fooReads) and vice versa
genNodeRunner := func(nodeConn dummyConn, nodeWrites []string, nodeReads *[]string) func() {
return func() {
// Node handshake
nodePrvKey := crypto.GenPrivKeyEd25519()
nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
if err != nil {
t.Errorf("Failed to establish SecretConnection for node: %v", err)
return
}
// In parallel, handle reads and writes
Parallel(
func() {
// Node writes
for _, nodeWrite := range nodeWrites {
n, err := nodeSecretConn.Write([]byte(nodeWrite))
if err != nil {
t.Errorf("Failed to write to nodeSecretConn: %v", err)
return
}
if n != len(nodeWrite) {
t.Errorf("Failed to write all bytes. Expected %v, wrote %v", len(nodeWrite), n)
return
}
}
nodeConn.PipeWriter.Close()
},
func() {
// Node reads
readBuffer := make([]byte, dataMaxSize)
for {
n, err := nodeSecretConn.Read(readBuffer)
if err == io.EOF {
return
} else if err != nil {
t.Errorf("Failed to read from nodeSecretConn: %v", err)
return
}
*nodeReads = append(*nodeReads, string(readBuffer[:n]))
}
nodeConn.PipeReader.Close()
})
}
}
// Run foo & bar in parallel
Parallel(
genNodeRunner(fooConn, fooWrites, &fooReads),
genNodeRunner(barConn, barWrites, &barReads),
)
// A helper to ensure that the writes and reads match.
// Additionally, small writes (<= dataMaxSize) must be atomically read.
compareWritesReads := func(writes []string, reads []string) {
for {
// Pop next write & corresponding reads
var read, write string = "", writes[0]
var readCount = 0
for _, readChunk := range reads {
read += readChunk
readCount += 1
if len(write) <= len(read) {
break
}
if len(write) <= dataMaxSize {
break // atomicity of small writes
}
}
// Compare
if write != read {
t.Errorf("Expected to read %X, got %X", write, read)
}
// Iterate
writes = writes[1:]
reads = reads[readCount:]
if len(writes) == 0 {
break
}
}
}
compareWritesReads(fooWrites, barReads)
compareWritesReads(barWrites, fooReads)
}
func BenchmarkSecretConnection(b *testing.B) {
b.StopTimer()
fooSecConn, barSecConn := makeSecretConnPair(b)
fooWriteText := RandStr(dataMaxSize)
// Consume reads from bar's reader
go func() {
readBuffer := make([]byte, dataMaxSize)
for {
_, err := barSecConn.Read(readBuffer)
if err == io.EOF {
return
} else if err != nil {
b.Fatalf("Failed to read from barSecConn: %v", err)
}
}
}()
b.StartTimer()
for i := 0; i < b.N; i++ {
_, err := fooSecConn.Write([]byte(fooWriteText))
if err != nil {
b.Fatalf("Failed to write to fooSecConn: %v", err)
}
}
b.StopTimer()
fooSecConn.Close()
//barSecConn.Close() race condition
}

View File

@ -1,234 +0,0 @@
package p2p
import (
"bytes"
"sync"
"testing"
"time"
. "github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-common"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-crypto"
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-wire"
)
type PeerMessage struct {
PeerKey string
Bytes []byte
Counter int
}
type TestReactor struct {
BaseReactor
mtx sync.Mutex
channels []*ChannelDescriptor
peersAdded []*Peer
peersRemoved []*Peer
logMessages bool
msgsCounter int
msgsReceived map[byte][]PeerMessage
}
func NewTestReactor(channels []*ChannelDescriptor, logMessages bool) *TestReactor {
tr := &TestReactor{
channels: channels,
logMessages: logMessages,
msgsReceived: make(map[byte][]PeerMessage),
}
tr.BaseReactor = *NewBaseReactor(log, "TestReactor", tr)
return tr
}
func (tr *TestReactor) GetChannels() []*ChannelDescriptor {
return tr.channels
}
func (tr *TestReactor) AddPeer(peer *Peer) {
tr.mtx.Lock()
defer tr.mtx.Unlock()
tr.peersAdded = append(tr.peersAdded, peer)
}
func (tr *TestReactor) RemovePeer(peer *Peer, reason interface{}) {
tr.mtx.Lock()
defer tr.mtx.Unlock()
tr.peersRemoved = append(tr.peersRemoved, peer)
}
func (tr *TestReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
if tr.logMessages {
tr.mtx.Lock()
defer tr.mtx.Unlock()
//fmt.Printf("Received: %X, %X\n", chID, msgBytes)
tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.Key, msgBytes, tr.msgsCounter})
tr.msgsCounter++
}
}
//-----------------------------------------------------------------------------
// convenience method for creating two switches connected to each other.
func makeSwitchPair(t testing.TB, initSwitch func(*Switch) *Switch) (*Switch, *Switch) {
s1PrivKey := crypto.GenPrivKeyEd25519()
s2PrivKey := crypto.GenPrivKeyEd25519()
// Create two switches that will be interconnected.
s1 := initSwitch(NewSwitch())
s1.SetNodeInfo(&NodeInfo{
PubKey: s1PrivKey.PubKey().(crypto.PubKeyEd25519),
Moniker: "switch1",
Network: "testing",
Version: "123.123.123",
})
s1.SetNodePrivKey(s1PrivKey)
s2 := initSwitch(NewSwitch())
s2.SetNodeInfo(&NodeInfo{
PubKey: s2PrivKey.PubKey().(crypto.PubKeyEd25519),
Moniker: "switch2",
Network: "testing",
Version: "123.123.123",
})
s2.SetNodePrivKey(s2PrivKey)
// Start switches and reactors
s1.Start()
s2.Start()
// Create a listener for s1
l := NewDefaultListener("tcp", ":8001", true)
// Dial the listener & add the connection to s2.
lAddr := l.ExternalAddress()
connOut, err := lAddr.Dial()
if err != nil {
t.Fatalf("Could not connect to listener address %v", lAddr)
} else {
t.Logf("Created a connection to listener address %v", lAddr)
}
connIn, ok := <-l.Connections()
if !ok {
t.Fatalf("Could not get inbound connection from listener")
}
go s1.AddPeerWithConnection(connIn, false) // AddPeer is blocking, requires handshake.
s2.AddPeerWithConnection(connOut, true)
// Wait for things to happen, peers to get added...
time.Sleep(100 * time.Millisecond)
// Close the server, no longer needed.
l.Stop()
return s1, s2
}
func TestSwitches(t *testing.T) {
s1, s2 := makeSwitchPair(t, func(sw *Switch) *Switch {
// Make two reactors of two channels each
sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{ID: byte(0x00), Priority: 10},
&ChannelDescriptor{ID: byte(0x01), Priority: 10},
}, true))
sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{ID: byte(0x02), Priority: 10},
&ChannelDescriptor{ID: byte(0x03), Priority: 10},
}, true))
return sw
})
defer s1.Stop()
defer s2.Stop()
// Let's send a message from s1 to s2.
if s1.Peers().Size() != 1 {
t.Errorf("Expected exactly 1 peer in s1, got %v", s1.Peers().Size())
}
if s2.Peers().Size() != 1 {
t.Errorf("Expected exactly 1 peer in s2, got %v", s2.Peers().Size())
}
ch0Msg := "channel zero"
ch1Msg := "channel foo"
ch2Msg := "channel bar"
s1.Broadcast(byte(0x00), ch0Msg)
s1.Broadcast(byte(0x01), ch1Msg)
s1.Broadcast(byte(0x02), ch2Msg)
// Wait for things to settle...
time.Sleep(5000 * time.Millisecond)
// Check message on ch0
ch0Msgs := s2.Reactor("foo").(*TestReactor).msgsReceived[byte(0x00)]
if len(ch0Msgs) != 1 {
t.Errorf("Expected to have received 1 message in ch0")
}
if !bytes.Equal(ch0Msgs[0].Bytes, wire.BinaryBytes(ch0Msg)) {
t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch0Msg), ch0Msgs[0].Bytes)
}
// Check message on ch1
ch1Msgs := s2.Reactor("foo").(*TestReactor).msgsReceived[byte(0x01)]
if len(ch1Msgs) != 1 {
t.Errorf("Expected to have received 1 message in ch1")
}
if !bytes.Equal(ch1Msgs[0].Bytes, wire.BinaryBytes(ch1Msg)) {
t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch1Msg), ch1Msgs[0].Bytes)
}
// Check message on ch2
ch2Msgs := s2.Reactor("bar").(*TestReactor).msgsReceived[byte(0x02)]
if len(ch2Msgs) != 1 {
t.Errorf("Expected to have received 1 message in ch2")
}
if !bytes.Equal(ch2Msgs[0].Bytes, wire.BinaryBytes(ch2Msg)) {
t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch2Msg), ch2Msgs[0].Bytes)
}
}
func BenchmarkSwitches(b *testing.B) {
b.StopTimer()
s1, s2 := makeSwitchPair(b, func(sw *Switch) *Switch {
// Make two reactors of two channels each
sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{ID: byte(0x00), Priority: 10},
&ChannelDescriptor{ID: byte(0x01), Priority: 10},
}, false))
sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{ID: byte(0x02), Priority: 10},
&ChannelDescriptor{ID: byte(0x03), Priority: 10},
}, false))
return sw
})
defer s1.Stop()
defer s2.Stop()
// Allow time for goroutines to boot up
time.Sleep(1000 * time.Millisecond)
b.StartTimer()
numSuccess, numFailure := 0, 0
// Broadcast a test message on each of the four channels in turn
for i := 0; i < b.N; i++ {
chID := byte(i % 4)
successChan := s1.Broadcast(chID, "test data")
for s := range successChan {
if s {
numSuccess += 1
} else {
numFailure += 1
}
}
}
log.Warn(Fmt("success: %v, failure: %v", numSuccess, numFailure))
// Allow everything to flush before stopping switches & closing connections.
b.StopTimer()
time.Sleep(1000 * time.Millisecond)
}

View File

@ -1,7 +0,0 @@
package upnp
import (
"github.com/tendermint/netmon/Godeps/_workspace/src/github.com/tendermint/go-logger"
)
var log = logger.New("module", "upnp")

View File

@ -1,206 +0,0 @@
Tendermint Go-Process
Copyright (C) 2015 Tendermint
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
//--------------------------------------------------------------------------------
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for software and other kinds of works.
The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.
Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and modification follow.
TERMS AND CONDITIONS
0. Definitions.
“This License” refers to version 3 of the GNU General Public License.
“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.
To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.
A “covered work” means either the unmodified Program or a work based on the Program.
To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
1. Source Code.
The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.
A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.
The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.
The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.
The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.
c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:
a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.
“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.
7. Additional Terms.
“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).
However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.
An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.
11. Patents.
A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”.
A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.
In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.
A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.
Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
