author     Christopher Speller <crspeller@gmail.com>  2016-05-12 23:56:07 -0400
committer  Christopher Speller <crspeller@gmail.com>  2016-05-12 23:56:07 -0400
commit     38ee83e45b4de7edf89bf9f0ef629eb4c6ad0fa8 (patch)
tree       a4fde09672192b97d453ad605b030bd5a10c5a45 /vendor/gopkg.in
parent     84d2482ddbff9564c9ad75b2d30af66e3ddfd44d (diff)
Moving to glide
Diffstat (limited to 'vendor/gopkg.in')
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/ber_test.go  168
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/header_test.go  135
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/identifier_test.go  344
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/length_test.go  158
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/suite_test.go  182
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc1.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc10.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc11.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc12.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc13.ber  bin 0 -> 11 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc14.ber  bin 0 -> 7 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc15.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc16.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc17.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc18.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc19.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc2.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc20.ber  bin 0 -> 11 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc21.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc22.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc23.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc24.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc25.ber  bin 0 -> 5 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc26.ber  bin 0 -> 5 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc27.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc28.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc29.ber  bin 0 -> 3 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc3.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc30.ber  bin 0 -> 5 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc31.ber  bin 0 -> 4 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc32.ber  bin 0 -> 2 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc33.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc34.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc35.ber  bin 0 -> 16 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc36.ber  bin 0 -> 20 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc37.ber  bin 0 -> 14 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc38.ber  bin 0 -> 16 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc39.ber  bin 0 -> 2 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc4.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc40.ber  bin 0 -> 2 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc41.ber  bin 0 -> 16 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc42.ber  bin 0 -> 14 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc43.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc44.ber  bin 0 -> 2 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc45.ber  bin 0 -> 2 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc46.ber  bin 0 -> 11 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc47.ber  bin 0 -> 16 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc48.ber  bin 0 -> 16 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc5.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc6.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc7.ber  1
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc8.ber  bin 0 -> 5 bytes
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/tests/tc9.ber  1
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/example_test.go  42
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go  229
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/inotify_test.go  344
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go  147
-rw-r--r--  vendor/gopkg.in/fsnotify.v1/integration_test.go  1237
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/common_test.go  65
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/delayer_test.go  65
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/doc.go  2
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/examples/README.md  12
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/examples/custom/main.go  90
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/examples/interval-many/main.go  79
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/main.go  74
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/siege-urls  4
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/examples/interval/main.go  69
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/examples/memstats/main.go  97
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/examples/memstats/test-file  bin 0 -> 65536 bytes
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/examples/rate-limit/main.go  101
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/interval_test.go  114
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/memstats_test.go  64
-rwxr-xr-x  vendor/gopkg.in/throttled/throttled.v1/misc/pre-commit  38
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/rate_test.go  101
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/store/doc.go  2
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/store/mem_test.go  43
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/store/redis_test.go  66
-rw-r--r--  vendor/gopkg.in/throttled/throttled.v1/varyby_test.go  56
-rw-r--r--  vendor/gopkg.in/yaml.v2/decode_test.go  988
-rw-r--r--  vendor/gopkg.in/yaml.v2/encode_test.go  501
-rw-r--r--  vendor/gopkg.in/yaml.v2/suite_test.go  12
81 files changed, 5652 insertions, 2 deletions
diff --git a/vendor/gopkg.in/asn1-ber.v1/ber_test.go b/vendor/gopkg.in/asn1-ber.v1/ber_test.go
new file mode 100644
index 000000000..bbd22db6d
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/ber_test.go
@@ -0,0 +1,168 @@
+package ber
+
+import (
+ "bytes"
+ "math"
+
+ "io"
+ "testing"
+)
+
+func TestEncodeDecodeInteger(t *testing.T) {
+ for _, v := range []int64{0, 10, 128, 1024, math.MaxInt64, -1, -100, -128, -1024, math.MinInt64} {
+ enc := encodeInteger(v)
+ dec, err := parseInt64(enc)
+ if err != nil {
+ t.Fatalf("Error decoding %d : %s", v, err)
+ }
+ if v != dec {
+ t.Error("TestEncodeDecodeInteger failed for %d (got %d)", v, dec)
+ }
+
+ }
+}
+
+func TestBoolean(t *testing.T) {
+ var value bool = true
+
+ packet := NewBoolean(ClassUniversal, TypePrimitive, TagBoolean, value, "first Packet, True")
+
+ newBoolean, ok := packet.Value.(bool)
+ if !ok || newBoolean != value {
+ t.Error("error during creating packet")
+ }
+
+ encodedPacket := packet.Bytes()
+
+ newPacket := DecodePacket(encodedPacket)
+
+ newBoolean, ok = newPacket.Value.(bool)
+ if !ok || newBoolean != value {
+ t.Error("error during decoding packet")
+ }
+
+}
+
+func TestInteger(t *testing.T) {
+ var value int64 = 10
+
+ packet := NewInteger(ClassUniversal, TypePrimitive, TagInteger, value, "Integer, 10")
+
+ {
+ newInteger, ok := packet.Value.(int64)
+ if !ok || newInteger != value {
+ t.Error("error creating packet")
+ }
+ }
+
+ encodedPacket := packet.Bytes()
+
+ newPacket := DecodePacket(encodedPacket)
+
+ {
+ newInteger, ok := newPacket.Value.(int64)
+ if !ok || int64(newInteger) != value {
+ t.Error("error decoding packet")
+ }
+ }
+}
+
+func TestString(t *testing.T) {
+ var value string = "Hic sunt dracones"
+
+ packet := NewString(ClassUniversal, TypePrimitive, TagOctetString, value, "String")
+
+ newValue, ok := packet.Value.(string)
+ if !ok || newValue != value {
+ t.Error("error during creating packet")
+ }
+
+ encodedPacket := packet.Bytes()
+
+ newPacket := DecodePacket(encodedPacket)
+
+ newValue, ok = newPacket.Value.(string)
+ if !ok || newValue != value {
+ t.Error("error during decoding packet")
+ }
+
+}
+
+func TestSequenceAndAppendChild(t *testing.T) {
+
+ values := []string{
+ "HIC SVNT LEONES",
+ "Iñtërnâtiônàlizætiøn",
+ "Terra Incognita",
+ }
+
+ sequence := NewSequence("a sequence")
+ for _, s := range values {
+ sequence.AppendChild(NewString(ClassUniversal, TypePrimitive, TagOctetString, s, "String"))
+ }
+
+ if len(sequence.Children) != len(values) {
+ t.Errorf("wrong length for children array should be %d, got %d", len(values), len(sequence.Children))
+ }
+
+ encodedSequence := sequence.Bytes()
+
+ decodedSequence := DecodePacket(encodedSequence)
+ if len(decodedSequence.Children) != len(values) {
+ t.Errorf("wrong length for children array should be %d => %d", len(values), len(decodedSequence.Children))
+ }
+
+ for i, s := range values {
+ if decodedSequence.Children[i].Value.(string) != s {
+ t.Errorf("expected %d to be %q, got %q", i, s, decodedSequence.Children[i].Value.(string))
+ }
+ }
+}
+
+func TestReadPacket(t *testing.T) {
+ packet := NewString(ClassUniversal, TypePrimitive, TagOctetString, "Ad impossibilia nemo tenetur", "string")
+ var buffer io.ReadWriter
+ buffer = new(bytes.Buffer)
+
+ buffer.Write(packet.Bytes())
+
+ newPacket, err := ReadPacket(buffer)
+ if err != nil {
+ t.Error("error during ReadPacket", err)
+ }
+ newPacket.ByteValue = nil
+ if !bytes.Equal(newPacket.ByteValue, packet.ByteValue) {
+ t.Error("packets should be the same")
+ }
+}
+
+func TestBinaryInteger(t *testing.T) {
+ // data src : http://luca.ntop.org/Teaching/Appunti/asn1.html 5.7
+ var data = []struct {
+ v int64
+ e []byte
+ }{
+ {v: 0, e: []byte{0x02, 0x01, 0x00}},
+ {v: 127, e: []byte{0x02, 0x01, 0x7F}},
+ {v: 128, e: []byte{0x02, 0x02, 0x00, 0x80}},
+ {v: 256, e: []byte{0x02, 0x02, 0x01, 0x00}},
+ {v: -128, e: []byte{0x02, 0x01, 0x80}},
+ {v: -129, e: []byte{0x02, 0x02, 0xFF, 0x7F}},
+ {v: math.MaxInt64, e: []byte{0x02, 0x08, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
+ {v: math.MinInt64, e: []byte{0x02, 0x08, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ }
+
+ for _, d := range data {
+ if b := NewInteger(ClassUniversal, TypePrimitive, TagInteger, int64(d.v), "").Bytes(); !bytes.Equal(d.e, b) {
+ t.Errorf("Wrong binary generated for %d : got % X, expected % X", d.v, b, d.e)
+ }
+ }
+}
+
+func TestBinaryOctetString(t *testing.T) {
+ // data src : http://luca.ntop.org/Teaching/Appunti/asn1.html 5.10
+
+ if !bytes.Equal([]byte{0x04, 0x08, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, NewString(ClassUniversal, TypePrimitive, TagOctetString, "\x01\x23\x45\x67\x89\xab\xcd\xef", "").Bytes()) {
+ t.Error("wrong binary generated")
+ }
+}
diff --git a/vendor/gopkg.in/asn1-ber.v1/header_test.go b/vendor/gopkg.in/asn1-ber.v1/header_test.go
new file mode 100644
index 000000000..cac1e2e2b
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/header_test.go
@@ -0,0 +1,135 @@
+package ber
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+func TestReadHeader(t *testing.T) {
+ testcases := map[string]struct {
+ Data []byte
+ ExpectedIdentifier Identifier
+ ExpectedLength int
+ ExpectedBytesRead int
+ ExpectedError string
+ }{
+ "empty": {
+ Data: []byte{},
+ ExpectedIdentifier: Identifier{},
+ ExpectedLength: 0,
+ ExpectedBytesRead: 0,
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ },
+
+ "valid short form": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString),
+ 127,
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagCharacterString,
+ },
+ ExpectedLength: 127,
+ ExpectedBytesRead: 2,
+ ExpectedError: "",
+ },
+
+ "valid long form": {
+ Data: []byte{
+ // 2-byte encoding of tag
+ byte(ClassUniversal) | byte(TypePrimitive) | byte(HighTag),
+ byte(TagCharacterString),
+
+ // 2-byte encoding of length
+ LengthLongFormBitmask | 1,
+ 127,
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagCharacterString,
+ },
+ ExpectedLength: 127,
+ ExpectedBytesRead: 4,
+ ExpectedError: "",
+ },
+
+ "valid indefinite length": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString),
+ LengthLongFormBitmask,
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagCharacterString,
+ },
+ ExpectedLength: LengthIndefinite,
+ ExpectedBytesRead: 2,
+ ExpectedError: "",
+ },
+
+ "invalid indefinite length": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString),
+ LengthLongFormBitmask,
+ },
+ ExpectedIdentifier: Identifier{},
+ ExpectedLength: 0,
+ ExpectedBytesRead: 2,
+ ExpectedError: "indefinite length used with primitive type",
+ },
+ }
+
+ for k, tc := range testcases {
+ reader := bytes.NewBuffer(tc.Data)
+ identifier, length, read, err := readHeader(reader)
+
+ if err != nil {
+ if tc.ExpectedError == "" {
+ t.Errorf("%s: unexpected error: %v", k, err)
+ } else if err.Error() != tc.ExpectedError {
+ t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
+ }
+ } else if tc.ExpectedError != "" {
+ t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
+ continue
+ }
+
+ if read != tc.ExpectedBytesRead {
+ t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
+ }
+
+ if identifier.ClassType != tc.ExpectedIdentifier.ClassType {
+ t.Errorf("%s: expected class type %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.ClassType,
+ ClassMap[tc.ExpectedIdentifier.ClassType],
+ identifier.ClassType,
+ ClassMap[identifier.ClassType],
+ )
+ }
+ if identifier.TagType != tc.ExpectedIdentifier.TagType {
+ t.Errorf("%s: expected tag type %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.TagType,
+ TypeMap[tc.ExpectedIdentifier.TagType],
+ identifier.TagType,
+ TypeMap[identifier.TagType],
+ )
+ }
+ if identifier.Tag != tc.ExpectedIdentifier.Tag {
+ t.Errorf("%s: expected tag %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.Tag,
+ tagMap[tc.ExpectedIdentifier.Tag],
+ identifier.Tag,
+ tagMap[identifier.Tag],
+ )
+ }
+
+ if length != tc.ExpectedLength {
+ t.Errorf("%s: expected length %d, got %d", k, tc.ExpectedLength, length)
+ }
+ }
+}
diff --git a/vendor/gopkg.in/asn1-ber.v1/identifier_test.go b/vendor/gopkg.in/asn1-ber.v1/identifier_test.go
new file mode 100644
index 000000000..7169362e2
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/identifier_test.go
@@ -0,0 +1,344 @@
+package ber
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "testing"
+)
+
+func TestReadIdentifier(t *testing.T) {
+ testcases := map[string]struct {
+ Data []byte
+
+ ExpectedIdentifier Identifier
+ ExpectedBytesRead int
+ ExpectedError string
+ }{
+ "empty": {
+ Data: []byte{},
+ ExpectedBytesRead: 0,
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ },
+
+ "universal primitive eoc": {
+ Data: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagEOC)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagEOC,
+ },
+ ExpectedBytesRead: 1,
+ },
+ "universal primitive character string": {
+ Data: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagCharacterString,
+ },
+ ExpectedBytesRead: 1,
+ },
+
+ "universal constructed bit string": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBitString)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagBitString,
+ },
+ ExpectedBytesRead: 1,
+ },
+ "universal constructed character string": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagCharacterString,
+ },
+ ExpectedBytesRead: 1,
+ },
+
+ "application constructed object descriptor": {
+ Data: []byte{byte(ClassApplication) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassApplication,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytesRead: 1,
+ },
+ "context constructed object descriptor": {
+ Data: []byte{byte(ClassContext) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassContext,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytesRead: 1,
+ },
+ "private constructed object descriptor": {
+ Data: []byte{byte(ClassPrivate) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassPrivate,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytesRead: 1,
+ },
+
+ "high-tag-number tag missing bytes": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag)},
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ ExpectedBytesRead: 1,
+ },
+ "high-tag-number tag invalid first byte": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), 0x0},
+ ExpectedError: "invalid first high-tag-number tag byte",
+ ExpectedBytesRead: 2,
+ },
+ "high-tag-number tag invalid first byte with continue bit": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), byte(HighTagContinueBitmask)},
+ ExpectedError: "invalid first high-tag-number tag byte",
+ ExpectedBytesRead: 2,
+ },
+ "high-tag-number tag continuation missing bytes": {
+ Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), byte(HighTagContinueBitmask | 0x1)},
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ ExpectedBytesRead: 2,
+ },
+ "high-tag-number tag overflow": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(HighTagContinueBitmask | 0x1),
+ byte(0x1),
+ },
+ ExpectedError: "high-tag-number tag overflow",
+ ExpectedBytesRead: 11,
+ },
+ "max high-tag-number tag": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(0x7f),
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: Tag(0x7FFFFFFFFFFFFFFF), // 01111111...(63)...11111b
+ },
+ ExpectedBytesRead: 10,
+ },
+ "high-tag-number encoding of low-tag value": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(TagObjectDescriptor),
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytesRead: 2,
+ },
+ "max high-tag-number tag ignores extra data": {
+ Data: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(0x7f),
+ byte(0x01), // extra data, shouldn't be read
+ byte(0x02), // extra data, shouldn't be read
+ byte(0x03), // extra data, shouldn't be read
+ },
+ ExpectedIdentifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: Tag(0x7FFFFFFFFFFFFFFF), // 01111111...(63)...11111b
+ },
+ ExpectedBytesRead: 10,
+ },
+ }
+
+ for k, tc := range testcases {
+ reader := bytes.NewBuffer(tc.Data)
+ identifier, read, err := readIdentifier(reader)
+
+ if err != nil {
+ if tc.ExpectedError == "" {
+ t.Errorf("%s: unexpected error: %v", k, err)
+ } else if err.Error() != tc.ExpectedError {
+ t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
+ }
+ } else if tc.ExpectedError != "" {
+ t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
+ continue
+ }
+
+ if read != tc.ExpectedBytesRead {
+ t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
+ }
+
+ if identifier.ClassType != tc.ExpectedIdentifier.ClassType {
+ t.Errorf("%s: expected class type %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.ClassType,
+ ClassMap[tc.ExpectedIdentifier.ClassType],
+ identifier.ClassType,
+ ClassMap[identifier.ClassType],
+ )
+ }
+ if identifier.TagType != tc.ExpectedIdentifier.TagType {
+ t.Errorf("%s: expected tag type %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.TagType,
+ TypeMap[tc.ExpectedIdentifier.TagType],
+ identifier.TagType,
+ TypeMap[identifier.TagType],
+ )
+ }
+ if identifier.Tag != tc.ExpectedIdentifier.Tag {
+ t.Errorf("%s: expected tag %d (%s), got %d (%s)", k,
+ tc.ExpectedIdentifier.Tag,
+ tagMap[tc.ExpectedIdentifier.Tag],
+ identifier.Tag,
+ tagMap[identifier.Tag],
+ )
+ }
+ }
+}
+
+func TestEncodeIdentifier(t *testing.T) {
+ testcases := map[string]struct {
+ Identifier Identifier
+ ExpectedBytes []byte
+ }{
+ "universal primitive eoc": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagEOC,
+ },
+ ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagEOC)},
+ },
+ "universal primitive character string": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypePrimitive,
+ Tag: TagCharacterString,
+ },
+ ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString)},
+ },
+
+ "universal constructed bit string": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagBitString,
+ },
+ ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBitString)},
+ },
+ "universal constructed character string": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagCharacterString,
+ },
+ ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString)},
+ },
+
+ "application constructed object descriptor": {
+ Identifier: Identifier{
+ ClassType: ClassApplication,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytes: []byte{byte(ClassApplication) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ },
+ "context constructed object descriptor": {
+ Identifier: Identifier{
+ ClassType: ClassContext,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytes: []byte{byte(ClassContext) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ },
+ "private constructed object descriptor": {
+ Identifier: Identifier{
+ ClassType: ClassPrivate,
+ TagType: TypeConstructed,
+ Tag: TagObjectDescriptor,
+ },
+ ExpectedBytes: []byte{byte(ClassPrivate) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
+ },
+
+ "max low-tag-number tag": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagBMPString,
+ },
+ ExpectedBytes: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBMPString),
+ },
+ },
+
+ "min high-tag-number tag": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: TagBMPString + 1,
+ },
+ ExpectedBytes: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(TagBMPString + 1),
+ },
+ },
+
+ "max high-tag-number tag": {
+ Identifier: Identifier{
+ ClassType: ClassUniversal,
+ TagType: TypeConstructed,
+ Tag: Tag(math.MaxInt64),
+ },
+ ExpectedBytes: []byte{
+ byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(HighTagContinueBitmask | 0x7f),
+ byte(0x7f),
+ },
+ },
+ }
+
+ for k, tc := range testcases {
+ b := encodeIdentifier(tc.Identifier)
+ if bytes.Compare(tc.ExpectedBytes, b) != 0 {
+ t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, tc.ExpectedBytes, b)
+ }
+ }
+}
diff --git a/vendor/gopkg.in/asn1-ber.v1/length_test.go b/vendor/gopkg.in/asn1-ber.v1/length_test.go
new file mode 100644
index 000000000..afe0e8037
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/length_test.go
@@ -0,0 +1,158 @@
+package ber
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "testing"
+)
+
+func TestReadLength(t *testing.T) {
+ testcases := map[string]struct {
+ Data []byte
+
+ ExpectedLength int
+ ExpectedBytesRead int
+ ExpectedError string
+ }{
+ "empty": {
+ Data: []byte{},
+ ExpectedBytesRead: 0,
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ },
+ "invalid first byte": {
+ Data: []byte{0xFF},
+ ExpectedBytesRead: 1,
+ ExpectedError: "invalid length byte 0xff",
+ },
+
+ "indefinite form": {
+ Data: []byte{LengthLongFormBitmask},
+ ExpectedLength: LengthIndefinite,
+ ExpectedBytesRead: 1,
+ },
+
+ "short-definite-form zero length": {
+ Data: []byte{0},
+ ExpectedLength: 0,
+ ExpectedBytesRead: 1,
+ },
+ "short-definite-form length 1": {
+ Data: []byte{1},
+ ExpectedLength: 1,
+ ExpectedBytesRead: 1,
+ },
+ "short-definite-form max length": {
+ Data: []byte{127},
+ ExpectedLength: 127,
+ ExpectedBytesRead: 1,
+ },
+
+ "long-definite-form missing bytes": {
+ Data: []byte{LengthLongFormBitmask | 1},
+ ExpectedBytesRead: 1,
+ ExpectedError: io.ErrUnexpectedEOF.Error(),
+ },
+ "long-definite-form overflow": {
+ Data: []byte{LengthLongFormBitmask | 9},
+ ExpectedBytesRead: 1,
+ ExpectedError: "long-form length overflow",
+ },
+ "long-definite-form zero length": {
+ Data: []byte{LengthLongFormBitmask | 1, 0x0},
+ ExpectedLength: 0,
+ ExpectedBytesRead: 2,
+ },
+ "long-definite-form length 127": {
+ Data: []byte{LengthLongFormBitmask | 1, 127},
+ ExpectedLength: 127,
+ ExpectedBytesRead: 2,
+ },
+ "long-definite-form max length": {
+ Data: []byte{
+ LengthLongFormBitmask | 8,
+ 0x7F,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ },
+ ExpectedLength: math.MaxInt64,
+ ExpectedBytesRead: 9,
+ },
+ }
+
+ for k, tc := range testcases {
+ reader := bytes.NewBuffer(tc.Data)
+ length, read, err := readLength(reader)
+
+ if err != nil {
+ if tc.ExpectedError == "" {
+ t.Errorf("%s: unexpected error: %v", k, err)
+ } else if err.Error() != tc.ExpectedError {
+ t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
+ }
+ } else if tc.ExpectedError != "" {
+ t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
+ continue
+ }
+
+ if read != tc.ExpectedBytesRead {
+ t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
+ }
+
+ if length != tc.ExpectedLength {
+ t.Errorf("%s: expected length %d, got %d", k, tc.ExpectedLength, length)
+ }
+ }
+}
+
+func TestEncodeLength(t *testing.T) {
+ testcases := map[string]struct {
+ Length int
+ ExpectedBytes []byte
+ }{
+ "0": {
+ Length: 0,
+ ExpectedBytes: []byte{0},
+ },
+ "1": {
+ Length: 1,
+ ExpectedBytes: []byte{1},
+ },
+
+ "max short-form length": {
+ Length: 127,
+ ExpectedBytes: []byte{127},
+ },
+ "min long-form length": {
+ Length: 128,
+ ExpectedBytes: []byte{LengthLongFormBitmask | 1, 128},
+ },
+
+ "max long-form length": {
+ Length: math.MaxInt64,
+ ExpectedBytes: []byte{
+ LengthLongFormBitmask | 8,
+ 0x7F,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ },
+ },
+ }
+
+ for k, tc := range testcases {
+ b := encodeLength(tc.Length)
+ if bytes.Compare(tc.ExpectedBytes, b) != 0 {
+ t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, tc.ExpectedBytes, b)
+ }
+ }
+}
diff --git a/vendor/gopkg.in/asn1-ber.v1/suite_test.go b/vendor/gopkg.in/asn1-ber.v1/suite_test.go
new file mode 100644
index 000000000..ace8e6705
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/suite_test.go
@@ -0,0 +1,182 @@
+package ber
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+var errEOF = io.ErrUnexpectedEOF.Error()
+
+// Tests from http://www.strozhevsky.com/free_docs/free_asn1_testsuite_descr.pdf
+// Source files and descriptions at http://www.strozhevsky.com/free_docs/TEST_SUITE.zip
+var testcases = []struct {
+ // File contains the path to the BER-encoded file
+ File string
+ // Error indicates whether a decoding error is expected
+ Error string
+ // AbnormalEncoding indicates whether a normalized re-encoding is expected to differ from the original source
+ AbnormalEncoding bool
+ // IndefiniteEncoding indicates the source file used indefinite-length encoding, so the re-encoding is expected to differ (since the length is known)
+ IndefiniteEncoding bool
+}{
+ // Common blocks
+ {File: "tests/tc1.ber", Error: "high-tag-number tag overflow"},
+ {File: "tests/tc2.ber", Error: errEOF},
+ {File: "tests/tc3.ber", Error: errEOF},
+ {File: "tests/tc4.ber", Error: "invalid length byte 0xff"},
+ {File: "tests/tc5.ber", Error: "", AbnormalEncoding: true},
+ // Real numbers (some expected failures are disabled until support is added)
+ {File: "tests/tc6.ber", Error: ""}, // Error: "REAL value +0 must be encoded with zero-length value block"},
+ {File: "tests/tc7.ber", Error: ""}, // Error: "REAL value -0 must be encoded as a special value"},
+ {File: "tests/tc8.ber", Error: ""},
+ {File: "tests/tc9.ber", Error: ""}, // Error: "Bits 6 and 5 of information octet for REAL are equal to 11"
+ {File: "tests/tc10.ber", Error: ""},
+ {File: "tests/tc11.ber", Error: ""}, // Error: "Incorrect NR form"
+ {File: "tests/tc12.ber", Error: ""}, // Error: "Encoding of "special value" not from ASN.1 standard"
+ {File: "tests/tc13.ber", Error: errEOF},
+ {File: "tests/tc14.ber", Error: errEOF},
+ {File: "tests/tc15.ber", Error: ""}, // Error: "Too big value of exponent"
+ {File: "tests/tc16.ber", Error: ""}, // Error: "Too big value of mantissa"
+ {File: "tests/tc17.ber", Error: ""}, // Error: "Too big values for exponent and mantissa + using of "scaling factor" value"
+ // Integers
+ {File: "tests/tc18.ber", Error: ""},
+ {File: "tests/tc19.ber", Error: errEOF},
+ {File: "tests/tc20.ber", Error: ""},
+ // Object identifiers
+ {File: "tests/tc21.ber", Error: ""},
+ {File: "tests/tc22.ber", Error: ""},
+ {File: "tests/tc23.ber", Error: errEOF},
+ {File: "tests/tc24.ber", Error: ""},
+ // Booleans
+ {File: "tests/tc25.ber", Error: ""},
+ {File: "tests/tc26.ber", Error: ""},
+ {File: "tests/tc27.ber", Error: errEOF},
+ {File: "tests/tc28.ber", Error: ""},
+ {File: "tests/tc29.ber", Error: ""},
+ // Null
+ {File: "tests/tc30.ber", Error: ""},
+ {File: "tests/tc31.ber", Error: errEOF},
+ {File: "tests/tc32.ber", Error: ""},
+ // Bitstring (some expected failures are disabled until support is added)
+ {File: "tests/tc33.ber", Error: ""}, // Error: "Too big value for "unused bits""
+ {File: "tests/tc34.ber", Error: errEOF},
+ {File: "tests/tc35.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of different from BIT STRING types as internal types for constructive encoding"
+ {File: "tests/tc36.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of "unused bits" in internal BIT STRINGs with constructive form of encoding"
+ {File: "tests/tc37.ber", Error: ""},
+ {File: "tests/tc38.ber", Error: "", IndefiniteEncoding: true},
+ {File: "tests/tc39.ber", Error: ""},
+ {File: "tests/tc40.ber", Error: ""},
+ // Octet string (some expected failures are disabled until support is added)
+ {File: "tests/tc41.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of different from OCTET STRING types as internal types for constructive encoding"
+ {File: "tests/tc42.ber", Error: errEOF},
+ {File: "tests/tc43.ber", Error: errEOF},
+ {File: "tests/tc44.ber", Error: ""},
+ {File: "tests/tc45.ber", Error: ""},
+ // Bitstring
+ {File: "tests/tc46.ber", Error: "indefinite length used with primitive type"},
+ {File: "tests/tc47.ber", Error: "eoc child not allowed with definite length"},
+ {File: "tests/tc48.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of more than 7 "unused bits" in BIT STRING with constrictive encoding form"
+}
+
+func TestSuiteDecodePacket(t *testing.T) {
+ // Debug = true
+ for _, tc := range testcases {
+ file := tc.File
+
+ dataIn, err := ioutil.ReadFile(file)
+ if err != nil {
+ t.Errorf("%s: %v", file, err)
+ continue
+ }
+
+ // fmt.Printf("%s: decode %d\n", file, len(dataIn))
+ packet, err := DecodePacketErr(dataIn)
+ if err != nil {
+ if tc.Error == "" {
+ t.Errorf("%s: unexpected error during DecodePacket: %v", file, err)
+ } else if tc.Error != err.Error() {
+ t.Errorf("%s: expected error %q during DecodePacket, got %q", file, tc.Error, err)
+ }
+ continue
+ }
+ if tc.Error != "" {
+ t.Errorf("%s: expected error %q, got none", file, tc.Error)
+ continue
+ }
+
+ dataOut := packet.Bytes()
+ if tc.AbnormalEncoding || tc.IndefiniteEncoding {
+ // Abnormal encodings and encodings that used indefinite length should re-encode differently
+ if bytes.Equal(dataOut, dataIn) {
+ t.Errorf("%s: data should have been re-encoded differently", file)
+ }
+ } else if !bytes.Equal(dataOut, dataIn) {
+ // Make sure the serialized data matches the source
+ t.Errorf("%s: data should be the same", file)
+ }
+
+ packet, err = DecodePacketErr(dataOut)
+ if err != nil {
+ t.Errorf("%s: unexpected error: %v", file, err)
+ continue
+ }
+
+ // Make sure the re-serialized data matches our original serialization
+ dataOut2 := packet.Bytes()
+ if !bytes.Equal(dataOut, dataOut2) {
+ t.Errorf("%s: data should be the same", file)
+ }
+ }
+}
+
+func TestSuiteReadPacket(t *testing.T) {
+ for _, tc := range testcases {
+ file := tc.File
+
+ dataIn, err := ioutil.ReadFile(file)
+ if err != nil {
+ t.Errorf("%s: %v", file, err)
+ continue
+ }
+
+ buffer := bytes.NewBuffer(dataIn)
+ packet, err := ReadPacket(buffer)
+ if err != nil {
+ if tc.Error == "" {
+ t.Errorf("%s: unexpected error during ReadPacket: %v", file, err)
+ } else if tc.Error != err.Error() {
+ t.Errorf("%s: expected error %q during ReadPacket, got %q", file, tc.Error, err)
+ }
+ continue
+ }
+ if tc.Error != "" {
+ t.Errorf("%s: expected error %q, got none", file, tc.Error)
+ continue
+ }
+
+ dataOut := packet.Bytes()
+ if tc.AbnormalEncoding || tc.IndefiniteEncoding {
+ // Abnormal encodings and encodings that used indefinite length should re-encode differently
+ if bytes.Equal(dataOut, dataIn) {
+ t.Errorf("%s: data should have been re-encoded differently", file)
+ }
+ } else if !bytes.Equal(dataOut, dataIn) {
+ // Make sure the serialized data matches the source
+ t.Errorf("%s: data should be the same", file)
+ }
+
+ packet, err = DecodePacketErr(dataOut)
+ if err != nil {
+ t.Errorf("%s: unexpected error: %v", file, err)
+ continue
+ }
+
+ // Make sure the re-serialized data matches our original serialization
+ dataOut2 := packet.Bytes()
+ if !bytes.Equal(dataOut, dataOut2) {
+ t.Errorf("%s: data should be the same", file)
+ }
+ }
+}
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc1.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc1.ber
new file mode 100644
index 000000000..5c6ba1c6a
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc1.ber
@@ -0,0 +1 @@
+@ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc10.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc10.ber
new file mode 100644
index 000000000..f733125d4
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc10.ber
@@ -0,0 +1 @@
+  \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc11.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc11.ber
new file mode 100644
index 000000000..cc4a609c8
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc11.ber
@@ -0,0 +1 @@
+  015625 \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc12.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc12.ber
new file mode 100644
index 000000000..dbb538d69
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc12.ber
@@ -0,0 +1 @@
+ I \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc13.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc13.ber
new file mode 100644
index 000000000..f4f438e0d
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc13.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc14.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc14.ber
new file mode 100644
index 000000000..b6f2fd3a4
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc14.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc15.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc15.ber
new file mode 100644
index 000000000..3d6da6764
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc15.ber
@@ -0,0 +1 @@
+  \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc16.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc16.ber
new file mode 100644
index 000000000..68634f5f3
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc16.ber
@@ -0,0 +1 @@
+  \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc17.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc17.ber
new file mode 100644
index 000000000..adb9e3320
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc17.ber
@@ -0,0 +1 @@
+   \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc18.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc18.ber
new file mode 100644
index 000000000..fb6843f7f
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc18.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc19.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc19.ber
new file mode 100644
index 000000000..03afaa5de
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc19.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc2.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc2.ber
new file mode 100644
index 000000000..7e785773c
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc2.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc20.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc20.ber
new file mode 100644
index 000000000..a976464b9
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc20.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc21.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc21.ber
new file mode 100644
index 000000000..d6c2f9aa7
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc21.ber
@@ -0,0 +1 @@
+Q \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc22.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc22.ber
new file mode 100644
index 000000000..d1d70afab
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc22.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc23.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc23.ber
new file mode 100644
index 000000000..0e8d18f62
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc23.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc24.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc24.ber
new file mode 100644
index 000000000..10565aefa
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc24.ber
@@ -0,0 +1 @@
+`HO Jc/ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc25.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc25.ber
new file mode 100644
index 000000000..1e1140524
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc25.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc26.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc26.ber
new file mode 100644
index 000000000..d28653b3b
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc26.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc27.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc27.ber
new file mode 100644
index 000000000..c8c781144
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc27.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc28.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc28.ber
new file mode 100644
index 000000000..415fe23ed
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc28.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc29.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc29.ber
new file mode 100644
index 000000000..4076f4487
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc29.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc3.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc3.ber
new file mode 100644
index 000000000..c05c900b6
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc3.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc30.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc30.ber
new file mode 100644
index 000000000..72bcf80f4
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc30.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc31.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc31.ber
new file mode 100644
index 000000000..1fcc4f254
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc31.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc32.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc32.ber
new file mode 100644
index 000000000..19b3e940a
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc32.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc33.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc33.ber
new file mode 100644
index 000000000..6ea70c4d2
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc33.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc34.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc34.ber
new file mode 100644
index 000000000..61337095d
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc34.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc35.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc35.ber
new file mode 100644
index 000000000..d27eb301a
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc35.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc36.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc36.ber
new file mode 100644
index 000000000..e5baaeacd
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc36.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc37.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc37.ber
new file mode 100644
index 000000000..d0b1cfbe1
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc37.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc38.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc38.ber
new file mode 100644
index 000000000..090bce74b
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc38.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc39.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc39.ber
new file mode 100644
index 000000000..d9d01199b
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc39.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc4.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc4.ber
new file mode 100644
index 000000000..2b888baac
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc4.ber
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc40.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc40.ber
new file mode 100644
index 000000000..15294a501
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc40.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc41.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc41.ber
new file mode 100644
index 000000000..276836b65
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc41.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc42.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc42.ber
new file mode 100644
index 000000000..21cbfd10f
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc42.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc43.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc43.ber
new file mode 100644
index 000000000..98dbd7419
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc43.ber
@@ -0,0 +1 @@
+$ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc44.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc44.ber
new file mode 100644
index 000000000..d825e1ad7
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc44.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc45.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc45.ber
new file mode 100644
index 000000000..7b861b02c
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc45.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc46.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc46.ber
new file mode 100644
index 000000000..e78deee34
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc46.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc47.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc47.ber
new file mode 100644
index 000000000..190bb86f6
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc47.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc48.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc48.ber
new file mode 100644
index 000000000..f7f111ae6
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc48.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc5.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc5.ber
new file mode 100644
index 000000000..45e0a0093
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc5.ber
@@ -0,0 +1 @@
+@ \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc6.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc6.ber
new file mode 100644
index 000000000..cee1aaf0c
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc6.ber
@@ -0,0 +1 @@
+ +0.E-5 \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc7.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc7.ber
new file mode 100644
index 000000000..d5ae68572
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc7.ber
@@ -0,0 +1 @@
+ -0.E-5 \ No newline at end of file
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc8.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc8.ber
new file mode 100644
index 000000000..cb32a09cb
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc8.ber
Binary files differ
diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc9.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc9.ber
new file mode 100644
index 000000000..50b43a510
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/tests/tc9.ber
@@ -0,0 +1 @@
+  \ No newline at end of file
diff --git a/vendor/gopkg.in/fsnotify.v1/example_test.go b/vendor/gopkg.in/fsnotify.v1/example_test.go
new file mode 100644
index 000000000..700502cb3
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/example_test.go
@@ -0,0 +1,42 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package fsnotify_test
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func ExampleNewWatcher() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event := <-watcher.Events:
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err := <-watcher.Errors:
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go b/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
new file mode 100644
index 000000000..26623efef
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
@@ -0,0 +1,229 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+type testFd [2]int
+
+func makeTestFd(t *testing.T) testFd {
+ var tfd testFd
+ errno := unix.Pipe(tfd[:])
+ if errno != nil {
+ t.Fatalf("Failed to create pipe: %v", errno)
+ }
+ return tfd
+}
+
+func (tfd testFd) fd() int {
+ return tfd[0]
+}
+
+func (tfd testFd) closeWrite(t *testing.T) {
+ errno := unix.Close(tfd[1])
+ if errno != nil {
+ t.Fatalf("Failed to close write end of pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) put(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := unix.Write(tfd[1], buf)
+ if errno != nil {
+ t.Fatalf("Failed to write to pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) get(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := unix.Read(tfd[0], buf)
+ if errno != nil {
+ t.Fatalf("Failed to read from pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) close() {
+ unix.Close(tfd[1])
+ unix.Close(tfd[0])
+}
+
+func makePoller(t *testing.T) (testFd, *fdPoller) {
+ tfd := makeTestFd(t)
+ poller, err := newFdPoller(tfd.fd())
+ if err != nil {
+ t.Fatalf("Failed to create poller: %v", err)
+ }
+ return tfd, poller
+}
+
+func TestPollerWithBadFd(t *testing.T) {
+ _, err := newFdPoller(-1)
+ if err != unix.EBADF {
+ t.Fatalf("Expected EBADF, got: %v", err)
+ }
+}
+
+func TestPollerWithData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+ tfd.get(t)
+}
+
+func TestPollerWithWakeup(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerWithClose(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.closeWrite(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+}
+
+func TestPollerWithWakeupAndData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+
+ // both data and wakeup
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ // data is still in the buffer, wakeup is cleared
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ tfd.get(t)
+ // data is gone, only wakeup now
+ err = poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerConcurrent(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ oks := make(chan bool)
+ live := make(chan bool)
+ defer close(live)
+ go func() {
+ defer close(oks)
+ for {
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ oks <- ok
+ if !<-live {
+ return
+ }
+ }
+ }()
+
+ // Try a write
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.put(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+ live <- true
+
+ // Try a wakeup
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ if <-oks {
+ t.Fatalf("expected false")
+ }
+ live <- true
+
+ // Try a close
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.closeWrite(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_test.go b/vendor/gopkg.in/fsnotify.v1/inotify_test.go
new file mode 100644
index 000000000..2527cad1f
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/inotify_test.go
@@ -0,0 +1,344 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestInotifyCloseRightAway(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Close immediately; it won't even reach the first unix.Read.
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLater(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Wait until readEvents has reached unix.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+ w.Add(testDir)
+
+ // Wait until readEvents has reached unix.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseAfterRead(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add .")
+ }
+
+ // Generate an event.
+ os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
+
+ // Wait for readEvents to read the event, then close the watcher.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func isWatcherReallyClosed(t *testing.T, w *Watcher) {
+ select {
+ case err, ok := <-w.Errors:
+ if ok {
+ t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
+ }
+ default:
+ t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
+ }
+
+ select {
+ case _, ok := <-w.Events:
+ if ok {
+ t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
+ }
+ default:
+ t.Fatalf("w.Events would have blocked; readEvents is still alive!")
+ }
+}
+
+func TestInotifyCloseCreate(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+ h, err := os.Create(filepath.Join(testDir, "testfile"))
+ if err != nil {
+ t.Fatalf("Failed to create file in testdir: %v", err)
+ }
+ h.Close()
+ select {
+ case _ = <-w.Events:
+ case err := <-w.Errors:
+ t.Fatalf("Error from watcher: %v", err)
+ case <-time.After(50 * time.Millisecond):
+ t.Fatalf("Took too long to wait for event")
+ }
+
+ // At this point, we've received one event, so the goroutine is ready.
+ // It's also blocking on unix.Read.
+ // Now we try to swap the file descriptor under its nose.
+ w.Close()
+ w, err = NewWatcher()
+ defer w.Close()
+ if err != nil {
+ t.Fatalf("Failed to create second watcher: %v", err)
+ }
+
+ <-time.After(50 * time.Millisecond)
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Error adding testDir again: %v", err)
+ }
+}
+
+func TestInotifyStress(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ killchan := make(chan struct{})
+ defer close(killchan)
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+
+ proc, err := os.FindProcess(os.Getpid())
+ if err != nil {
+ t.Fatalf("Error finding process: %v", err)
+ }
+
+ go func() {
+ for {
+ select {
+ case <-time.After(5 * time.Millisecond):
+ err := proc.Signal(unix.SIGUSR1)
+ if err != nil {
+ t.Fatalf("Signal failed: %v", err)
+ }
+ case <-killchan:
+ return
+ }
+ }
+ }()
+
+ go func() {
+ for {
+ select {
+ case <-time.After(11 * time.Millisecond):
+ err := w.poller.wake()
+ if err != nil {
+ t.Fatalf("Wake failed: %v", err)
+ }
+ case <-killchan:
+ return
+ }
+ }
+ }()
+
+ go func() {
+ for {
+ select {
+ case <-killchan:
+ return
+ default:
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+ time.Sleep(time.Millisecond)
+ err = os.Remove(testFile)
+ if err != nil {
+ t.Fatalf("Remove failed: %v", err)
+ }
+ }
+ }
+ }()
+
+ creates := 0
+ removes := 0
+ after := time.After(5 * time.Second)
+ for {
+ select {
+ case <-after:
+ if creates-removes > 1 || creates-removes < -1 {
+ t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
+ }
+ if creates < 50 {
+ t.Fatalf("Expected at least 50 creates, got %d", creates)
+ }
+ return
+ case err := <-w.Errors:
+ t.Fatalf("Got an error from watcher: %v", err)
+ case evt := <-w.Events:
+ if evt.Name != testFile {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ if evt.Op == Remove {
+ removes++
+ }
+ }
+ }
+}
+
+func TestInotifyRemoveTwice(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testFile)
+ if err != nil {
+ t.Fatalf("Failed to add testFile: %v", err)
+ }
+
+ err = os.Remove(testFile)
+ if err != nil {
+ t.Fatalf("Failed to remove testFile: %v", err)
+ }
+
+ err = w.Remove(testFile)
+ if err == nil {
+ t.Fatalf("no error on removing invalid file")
+ }
+ s1 := fmt.Sprintf("%s", err)
+
+ err = w.Remove(testFile)
+ if err == nil {
+ t.Fatalf("no error on removing invalid file")
+ }
+ s2 := fmt.Sprintf("%s", err)
+
+ if s1 != s2 {
+ t.Fatalf("receive different error - %s / %s", s1, s2)
+ }
+}
+
+func TestInotifyInnerMapLength(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testFile)
+ if err != nil {
+ t.Fatalf("Failed to add testFile: %v", err)
+ }
+ go func() {
+ for err := range w.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ err = os.Remove(testFile)
+ if err != nil {
+ t.Fatalf("Failed to remove testFile: %v", err)
+ }
+ _ = <-w.Events // consume Remove event
+ <-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if len(w.watches) != 0 {
+ t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+ }
+ if len(w.paths) != 0 {
+ t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+ }
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go b/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
new file mode 100644
index 000000000..5564554f7
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
@@ -0,0 +1,147 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fsnotify
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// testExchangedataForWatcher tests the watcher with the exchangedata operation on OS X.
+//
+// This is widely used for atomic saves on OS X, e.g. by TextMate and by Apple's NSDocument.
+//
+// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
+// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
+func testExchangedataForWatcher(t *testing.T, watchDir bool) {
+ // Create directory to watch
+ testDir1 := tempMkdir(t)
+
+ // For the intermediate file
+ testDir2 := tempMkdir(t)
+
+ defer os.RemoveAll(testDir1)
+ defer os.RemoveAll(testDir2)
+
+ resolvedFilename := "TestFsnotifyEvents.file"
+
+ // TextMate does:
+ //
+ // 1. exchangedata (intermediate, resolved)
+ // 2. unlink intermediate
+ //
+ // Let's try to simulate that:
+ resolved := filepath.Join(testDir1, resolvedFilename)
+ intermediate := filepath.Join(testDir2, resolvedFilename+"~")
+
+ // Make sure we create the file before we start watching
+ createAndSyncFile(t, resolved)
+
+ watcher := newWatcher(t)
+
+ // Test both variants in isolation
+ if watchDir {
+ addWatch(t, watcher, testDir1)
+ } else {
+ addWatch(t, watcher, resolved)
+ }
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var removeReceived counter
+ var createReceived counter
+
+ done := make(chan bool)
+
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(resolved) {
+ if event.Op&Remove == Remove {
+ removeReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ }
+ t.Logf("event received: %s", event)
+ }
+ done <- true
+ }()
+
+ // Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
+ for i := 1; i <= 3; i++ {
+ // The intermediate file is created in a folder outside the watcher
+ createAndSyncFile(t, intermediate)
+
+ // 1. Swap
+ if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
+ t.Fatalf("[%d] exchangedata failed: %s", i, err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+
+ // 2. Delete the intermediate file
+ err := os.Remove(intermediate)
+
+ if err != nil {
+ t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+	// The events will be (CHMOD + REMOVE + CREATE) X 3. Let's focus on the last two:
+ if removeReceived.value() < 3 {
+ t.Fatal("fsnotify remove events have not been received after 500 ms")
+ }
+
+ if createReceived.value() < 3 {
+ t.Fatal("fsnotify create events have not been received after 500 ms")
+ }
+
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+// TestExchangedataInWatchedDir tests the exchangedata operation on a file in a watched directory.
+func TestExchangedataInWatchedDir(t *testing.T) {
+ testExchangedataForWatcher(t, true)
+}
+
+// TestExchangedataInWatchedFile tests the exchangedata operation on a watched file.
+func TestExchangedataInWatchedFile(t *testing.T) {
+ testExchangedataForWatcher(t, false)
+}
+
+func createAndSyncFile(t *testing.T, filepath string) {
+ f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating %s failed: %s", filepath, err)
+ }
+ f1.Sync()
+ f1.Close()
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/integration_test.go b/vendor/gopkg.in/fsnotify.v1/integration_test.go
new file mode 100644
index 000000000..8b7e9d3ec
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/integration_test.go
@@ -0,0 +1,1237 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+package fsnotify
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+// An atomic counter
+type counter struct {
+ val int32
+}
+
+func (c *counter) increment() {
+ atomic.AddInt32(&c.val, 1)
+}
+
+func (c *counter) value() int32 {
+ return atomic.LoadInt32(&c.val)
+}
+
+func (c *counter) reset() {
+ atomic.StoreInt32(&c.val, 0)
+}
+
+// tempMkdir makes a temporary directory
+func tempMkdir(t *testing.T) string {
+ dir, err := ioutil.TempDir("", "fsnotify")
+ if err != nil {
+ t.Fatalf("failed to create test directory: %s", err)
+ }
+ return dir
+}
+
+// tempMkFile makes a temporary file.
+func tempMkFile(t *testing.T, dir string) string {
+ f, err := ioutil.TempFile(dir, "fsnotify")
+ if err != nil {
+ t.Fatalf("failed to create test file: %v", err)
+ }
+ defer f.Close()
+ return f.Name()
+}
+
+// newWatcher initializes an fsnotify Watcher instance.
+func newWatcher(t *testing.T) *Watcher {
+ watcher, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("NewWatcher() failed: %s", err)
+ }
+ return watcher
+}
+
+// addWatch adds a watch for a directory
+func addWatch(t *testing.T, watcher *Watcher, dir string) {
+ if err := watcher.Add(dir); err != nil {
+ t.Fatalf("watcher.Add(%q) failed: %s", dir, err)
+ }
+}
+
+func TestFsnotifyMultipleOperations(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory that's not watched
+ testDirToMoveFiles := tempMkdir(t)
+ defer os.RemoveAll(testDirToMoveFiles)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+ testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived, renameReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Rename == Rename {
+ renameReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // Modify the file outside of the watched dir
+ f, err = os.Open(testFileRenamed)
+ if err != nil {
+ t.Fatalf("open test renamed file failed: %s", err)
+ }
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file that was moved
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ rReceived := renameReceived.value()
+ if dReceived+rReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyMultipleCreates(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived < 3 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDirOnly(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ // This should NOT add any events to the fsnotify event queue
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+ os.Remove(testFileAlreadyExists)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 1 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDeleteWatchedDir(t *testing.T) {
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFileAlreadyExists)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var deleteReceived counter
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ }()
+
+ os.RemoveAll(testDir)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ dReceived := deleteReceived.value()
+ if dReceived < 2 {
+ t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived)
+ }
+}
+
+func TestFsnotifySubDir(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile")
+ testSubDir := filepath.Join(testDir, "sub")
+ testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile")
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) {
+ t.Logf("event received: %s", event)
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ addWatch(t, watcher, testDir)
+
+ // Create sub-directory
+ if err := os.Mkdir(testSubDir, 0777); err != nil {
+ t.Fatalf("failed to create test sub-directory: %s", err)
+ }
+
+ // Create a file
+ var f *os.File
+ f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ // Create a file (Should not see this! we are not watching subdir)
+ var fs *os.File
+ fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ fs.Sync()
+ fs.Close()
+
+ time.Sleep(200 * time.Millisecond)
+
+	// Make sure we receive deletes for both the file and the sub-directory
+ os.RemoveAll(testSubDir)
+ os.Remove(testFile1)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyRename(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var renameReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+ if event.Op&Rename == Rename {
+ renameReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFile)
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if renameReceived.value() == 0 {
+ t.Fatal("fsnotify rename events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestFsnotifyRenameToCreate(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory to get file
+ testDirFrom := tempMkdir(t)
+ defer os.RemoveAll(testDirFrom)
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if createReceived.value() == 0 {
+ t.Fatal("fsnotify create events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestFsnotifyRenameToOverwrite(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9", "windows":
+ t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS)
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory to get file
+ testDirFrom := tempMkdir(t)
+ defer os.RemoveAll(testDirFrom)
+
+ testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Create a file
+ var fr *os.File
+ fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ fr.Sync()
+ fr.Close()
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var eventReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testFileRenamed) {
+ eventReceived.increment()
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if eventReceived.value() == 0 {
+ t.Fatal("fsnotify events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestRemovalOfWatch(t *testing.T) {
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ addWatch(t, watcher, testDir)
+ if err := watcher.Remove(testDir); err != nil {
+ t.Fatalf("Could not remove the watch: %v\n", err)
+ }
+
+ go func() {
+ select {
+ case ev := <-watcher.Events:
+ t.Fatalf("We received event: %v\n", ev)
+ case <-time.After(500 * time.Millisecond):
+ t.Log("No event received, as expected.")
+ }
+ }()
+
+ time.Sleep(200 * time.Millisecond)
+	// Modify the file now that the watch has been removed
+ f, err := os.Open(testFileAlreadyExists)
+ if err != nil {
+ t.Fatalf("Open test file failed: %s", err)
+ }
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+ if err := os.Chmod(testFileAlreadyExists, 0700); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+ time.Sleep(400 * time.Millisecond)
+}
+
+func TestFsnotifyAttrib(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("attributes don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ // The modifyReceived counter counts IsModify events that are not IsAttrib,
+ // and the attribReceived counts IsAttrib events (which are also IsModify as
+ // a consequence).
+ var modifyReceived counter
+ var attribReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Chmod == Chmod {
+ attribReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFile)
+
+ if err := os.Chmod(testFile, 0700); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	// Creating/writing a file also changes the mtime, so IsAttrib should be set to true here
+ time.Sleep(500 * time.Millisecond)
+ if modifyReceived.value() != 0 {
+ t.Fatal("received an unexpected modify event when creating a test file")
+ }
+ if attribReceived.value() == 0 {
+ t.Fatal("fsnotify attribute events have not received after 500 ms")
+ }
+
+ // Modifying the contents of the file does not set the attrib flag (although eg. the mtime
+ // might have been modified).
+ modifyReceived.reset()
+ attribReceived.reset()
+
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
+ if err != nil {
+ t.Fatalf("reopening test file failed: %s", err)
+ }
+
+ f.WriteString("more data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(500 * time.Millisecond)
+
+ if modifyReceived.value() != 1 {
+ t.Fatal("didn't receive a modify event after changing test file contents")
+ }
+
+ if attribReceived.value() != 0 {
+ t.Fatal("did receive an unexpected attrib event after changing test file contents")
+ }
+
+ modifyReceived.reset()
+ attribReceived.reset()
+
+ // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
+ // of the file are not changed though)
+ if err := os.Chmod(testFile, 0600); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+
+ time.Sleep(500 * time.Millisecond)
+
+ if attribReceived.value() != 1 {
+ t.Fatal("didn't receive an attribute change after 500ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(1e9):
+ t.Fatal("event stream was not closed after 1 second")
+ }
+
+ os.Remove(testFile)
+}
+
+func TestFsnotifyClose(t *testing.T) {
+ watcher := newWatcher(t)
+ watcher.Close()
+
+ var done int32
+ go func() {
+ watcher.Close()
+ atomic.StoreInt32(&done, 1)
+ }()
+
+ time.Sleep(50e6) // 50 ms
+ if atomic.LoadInt32(&done) == 0 {
+ t.Fatal("double Close() test failed: second Close() call didn't return")
+ }
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ if err := watcher.Add(testDir); err == nil {
+ t.Fatal("expected error on Watch() after Close(), got nil")
+ }
+}
+
+func TestFsnotifyFakeSymlink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ var errorsReceived counter
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for errors := range watcher.Errors {
+ t.Logf("Received error: %s", errors)
+ errorsReceived.increment()
+ }
+ }()
+
+ // Count the CREATE events received
+ var createEventsReceived, otherEventsReceived counter
+ go func() {
+ for ev := range watcher.Events {
+ t.Logf("event received: %s", ev)
+ if ev.Op&Create == Create {
+ createEventsReceived.increment()
+ } else {
+ otherEventsReceived.increment()
+ }
+ }
+ }()
+
+ addWatch(t, watcher, testDir)
+
+ if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil {
+ t.Fatalf("Failed to create bogus symlink: %s", err)
+ }
+ t.Logf("Created bogus symlink")
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+	// Should not be an error, just no events for broken links (we are watching nothing)
+ if errorsReceived.value() > 0 {
+ t.Fatal("fsnotify errors have been received.")
+ }
+ if otherEventsReceived.value() > 0 {
+ t.Fatal("fsnotify other events received on the broken link")
+ }
+
+ // Except for 1 create event (for the link itself)
+ if createEventsReceived.value() == 0 {
+ t.Fatal("fsnotify create events were not received after 500 ms")
+ }
+ if createEventsReceived.value() > 1 {
+ t.Fatal("fsnotify more create events received than expected")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+}
+
+func TestCyclicSymlink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ link := path.Join(testDir, "link")
+ if err := os.Symlink(".", link); err != nil {
+ t.Fatalf("could not make symlink: %v", err)
+ }
+ addWatch(t, watcher, testDir)
+
+ var createEventsReceived counter
+ go func() {
+ for ev := range watcher.Events {
+ if ev.Op&Create == Create {
+ createEventsReceived.increment()
+ }
+ }
+ }()
+
+ if err := os.Remove(link); err != nil {
+ t.Fatalf("Error removing link: %v", err)
+ }
+
+ // It would be nice to be able to expect a delete event here, but kqueue has
+ // no way for us to get events on symlinks themselves, because opening them
+ // opens an fd to the file to which they point.
+
+ if err := ioutil.WriteFile(link, []byte("foo"), 0700); err != nil {
+ t.Fatalf("could not make symlink: %v", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+ if got := createEventsReceived.value(); got == 0 {
+ t.Errorf("want at least 1 create event got %v", got)
+ }
+
+ watcher.Close()
+}
+
+// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race.
+// See https://codereview.appspot.com/103300045/
+// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race
+func TestConcurrentRemovalOfWatch(t *testing.T) {
+ if runtime.GOOS != "darwin" {
+ t.Skip("regression test for race only present on darwin")
+ }
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ addWatch(t, watcher, testDir)
+
+ // Test that RemoveWatch can be invoked concurrently, with no data races.
+ removed1 := make(chan struct{})
+ go func() {
+ defer close(removed1)
+ watcher.Remove(testDir)
+ }()
+ removed2 := make(chan struct{})
+ go func() {
+ close(removed2)
+ watcher.Remove(testDir)
+ }()
+ <-removed1
+ <-removed2
+}
+
+func TestClose(t *testing.T) {
+ // Regression test for #59 bad file descriptor from Close
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ watcher := newWatcher(t)
+ if err := watcher.Add(testDir); err != nil {
+ t.Fatalf("Expected no error on Add, got %v", err)
+ }
+ err := watcher.Close()
+ if err != nil {
+ t.Fatalf("Expected no error on Close, got %v.", err)
+ }
+}
+
+// TestRemoveWithClose tests if one can handle Remove events and, at the same
+// time, close the Watcher object without any data races.
+func TestRemoveWithClose(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ const fileN = 200
+ tempFiles := make([]string, 0, fileN)
+ for i := 0; i < fileN; i++ {
+ tempFiles = append(tempFiles, tempMkFile(t, testDir))
+ }
+ watcher := newWatcher(t)
+ if err := watcher.Add(testDir); err != nil {
+ t.Fatalf("Expected no error on Add, got %v", err)
+ }
+ startC, stopC := make(chan struct{}), make(chan struct{})
+ errC := make(chan error)
+ go func() {
+ for {
+ select {
+ case <-watcher.Errors:
+ case <-watcher.Events:
+ case <-stopC:
+ return
+ }
+ }
+ }()
+ go func() {
+ <-startC
+ for _, fileName := range tempFiles {
+ os.Remove(fileName)
+ }
+ }()
+ go func() {
+ <-startC
+ errC <- watcher.Close()
+ }()
+ close(startC)
+ defer close(stopC)
+ if err := <-errC; err != nil {
+ t.Fatalf("Expected no error on Close, got %v.", err)
+ }
+}
+
+func testRename(file1, file2 string) error {
+ switch runtime.GOOS {
+ case "windows", "plan9":
+ return os.Rename(file1, file2)
+ default:
+ cmd := exec.Command("mv", file1, file2)
+ return cmd.Run()
+ }
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/common_test.go b/vendor/gopkg.in/throttled/throttled.v1/common_test.go
new file mode 100644
index 000000000..ddb57fb1c
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/common_test.go
@@ -0,0 +1,65 @@
+package throttled
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "sync"
+ "time"
+
+ "github.com/PuerkitoBio/boom/commands"
+)
+
+type stats struct {
+ sync.Mutex
+ ok int
+ dropped int
+ ts []time.Time
+
+ body func()
+}
+
+func (s *stats) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if s.body != nil {
+ s.body()
+ }
+ s.Lock()
+ defer s.Unlock()
+ s.ts = append(s.ts, time.Now())
+ s.ok++
+ w.WriteHeader(200)
+}
+
+func (s *stats) DeniedHTTP(w http.ResponseWriter, r *http.Request) {
+ s.Lock()
+ defer s.Unlock()
+ s.dropped++
+ w.WriteHeader(deniedStatus)
+}
+
+func (s *stats) Stats() (int, int, []time.Time) {
+ s.Lock()
+ defer s.Unlock()
+ return s.ok, s.dropped, s.ts
+}
+
+func runTest(h http.Handler, b ...commands.Boom) []*commands.Report {
+ srv := httptest.NewServer(h)
+ defer srv.Close()
+
+ var rpts []*commands.Report
+ var wg sync.WaitGroup
+ var mu sync.Mutex
+ wg.Add(len(b))
+ for i, bo := range b {
+ bo.Req.Url = srv.URL + fmt.Sprintf("/%d", i)
+ go func(bo commands.Boom) {
+ mu.Lock()
+ defer mu.Unlock()
+ rpts = append(rpts, bo.Run())
+ wg.Done()
+ }(bo)
+ }
+ wg.Wait()
+ return rpts
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/delayer_test.go b/vendor/gopkg.in/throttled/throttled.v1/delayer_test.go
new file mode 100644
index 000000000..822978e5d
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/delayer_test.go
@@ -0,0 +1,65 @@
+package throttled
+
+import (
+ "testing"
+ "time"
+)
+
+func TestDelayer(t *testing.T) {
+ cases := []struct {
+ in Delayer
+ out time.Duration
+ }{
+ 0: {PerSec(1), time.Second},
+ 1: {PerSec(2), 500 * time.Millisecond},
+ 2: {PerSec(4), 250 * time.Millisecond},
+ 3: {PerSec(5), 200 * time.Millisecond},
+ 4: {PerSec(10), 100 * time.Millisecond},
+ 5: {PerSec(100), 10 * time.Millisecond},
+ 6: {PerSec(3), 333333333 * time.Nanosecond},
+ 7: {PerMin(1), time.Minute},
+ 8: {PerMin(2), 30 * time.Second},
+ 9: {PerMin(4), 15 * time.Second},
+ 10: {PerMin(5), 12 * time.Second},
+ 11: {PerMin(10), 6 * time.Second},
+ 12: {PerMin(60), time.Second},
+ 13: {PerHour(1), time.Hour},
+ 14: {PerHour(2), 30 * time.Minute},
+ 15: {PerHour(4), 15 * time.Minute},
+ 16: {PerHour(60), time.Minute},
+ 17: {PerHour(120), 30 * time.Second},
+ 18: {D(time.Second), time.Second},
+ 19: {D(5 * time.Minute), 5 * time.Minute},
+ 20: {PerSec(200), 5 * time.Millisecond},
+ 21: {PerDay(24), time.Hour},
+ }
+ for i, c := range cases {
+ got := c.in.Delay()
+ if got != c.out {
+ t.Errorf("%d: expected %s, got %s", i, c.out, got)
+ }
+ }
+}
+
+func TestQuota(t *testing.T) {
+ cases := []struct {
+ q Quota
+ reqs int
+ win time.Duration
+ }{
+ 0: {PerSec(10), 10, time.Second},
+ 1: {PerMin(30), 30, time.Minute},
+ 2: {PerHour(124), 124, time.Hour},
+ 3: {PerDay(1), 1, 24 * time.Hour},
+ 4: {Q{148, 17 * time.Second}, 148, 17 * time.Second},
+ }
+ for i, c := range cases {
+ r, w := c.q.Quota()
+ if r != c.reqs {
+ t.Errorf("%d: expected %d requests, got %d", i, c.reqs, r)
+ }
+ if w != c.win {
+ t.Errorf("%d: expected %s window, got %s", i, c.win, w)
+ }
+ }
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/doc.go b/vendor/gopkg.in/throttled/throttled.v1/doc.go
index acf5213b0..a2c8d4c75 100644
--- a/vendor/gopkg.in/throttled/throttled.v1/doc.go
+++ b/vendor/gopkg.in/throttled/throttled.v1/doc.go
@@ -74,4 +74,4 @@
// The BSD 3-clause license. Copyright (c) 2014 Martin Angers and Contributors.
// http://opensource.org/licenses/BSD-3-Clause
//
-package throttled
+package throttled // import "gopkg.in/throttled/throttled.v1"
diff --git a/vendor/gopkg.in/throttled/throttled.v1/examples/README.md b/vendor/gopkg.in/throttled/throttled.v1/examples/README.md
new file mode 100644
index 000000000..6b12dad20
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/examples/README.md
@@ -0,0 +1,12 @@
+# Examples
+
+This directory contains examples for all the throttlers implemented by the throttled package, as well as an example of a custom limiter.
+
+* custom/ : implements a custom limiter that allows requests to path /a on even seconds, and to path /b on odd seconds.
+* interval-many/ : implements a common interval throttler to control two different handlers, one for path /a and another for path /b, so that requests to any one of the handlers go through at the specified interval.
+* interval-vary/ : implements an interval throttler that varies by path, so that requests to each different path go through at the specified interval.
+* interval/ : implements an interval throttler so that any request goes through at the specified interval, regardless of path or any other criteria.
+* memstats/ : implements a memory-usage throttler that limits access based on current memory statistics.
+* rate-limit/ : implements a rate-limiter throttler that varies by path, so that the number of requests allowed is counted based on the requested path.
+
+Each example app supports a number of command-line flags. Run the example with the -h flag to display usage and defaults.
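+
+For instance, a quick way to try the interval example might look like this (a sketch only: it assumes a working Go toolchain, uses the flags defined in interval/main.go, and relies on the examples listening on port 9000):
+
+    cd interval/
+    go run main.go -delay 100ms -bursts 5 -output v
+    # in another terminal, issue a few requests:
+    curl -i http://localhost:9000/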
diff --git a/vendor/gopkg.in/throttled/throttled.v1/examples/custom/main.go b/vendor/gopkg.in/throttled/throttled.v1/examples/custom/main.go
new file mode 100644
index 000000000..b3fe993e8
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/examples/custom/main.go
@@ -0,0 +1,90 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "sync"
+ "time"
+
+ "gopkg.in/throttled/throttled.v1"
+)
+
+var (
+ delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
+ output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
+)
+
+// Custom limiter: allow requests to the /a path on even seconds only, and
+// allow access to the /b path on odd seconds only.
+//
+// Yes this is absurd. A more realistic case could be to allow requests to some
+// contest page only during a limited time window.
+type customLimiter struct {
+}
+
+func (c *customLimiter) Start() {
+ // No-op
+}
+
+func (c *customLimiter) Limit(w http.ResponseWriter, r *http.Request) (<-chan bool, error) {
+ s := time.Now().Second()
+ ch := make(chan bool, 1)
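+	// Allow /a only on even seconds and /b only on odd seconds, as described in the customLimiter comment above.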
+ ok := (r.URL.Path == "/a" && s%2 == 0) || (r.URL.Path == "/b" && s%2 != 0)
+ ch <- ok
+ if *output == "v" {
+ log.Printf("Custom Limiter: Path=%s, Second=%d; ok? %v", r.URL.Path, s, ok)
+ }
+ return ch, nil
+}
+
+func main() {
+ flag.Parse()
+
+ var h http.Handler
+ var ok, ko int
+ var mu sync.Mutex
+
+ // Keep the start time to print since-time
+ start := time.Now()
+ // Create the custom throttler using our custom limiter
+ t := throttled.Custom(&customLimiter{})
+ // Set its denied handler
+ t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ko" {
+ log.Printf("KO: %s", time.Since(start))
+ }
+ throttled.DefaultDeniedHandler.ServeHTTP(w, r)
+ mu.Lock()
+ defer mu.Unlock()
+ ko++
+ })
+ // Throttle the OK handler
+ rand.Seed(time.Now().Unix())
+ h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ok" {
+ log.Printf("ok: %s", time.Since(start))
+ }
+ if *delayRes > 0 {
+ wait := time.Duration(rand.Intn(int(*delayRes)))
+ time.Sleep(wait)
+ }
+ w.WriteHeader(200)
+ mu.Lock()
+ defer mu.Unlock()
+ ok++
+ }))
+
+ // Print stats once in a while
+ go func() {
+ for _ = range time.Tick(10 * time.Second) {
+ mu.Lock()
+ log.Printf("ok: %d, ko: %d", ok, ko)
+ mu.Unlock()
+ }
+ }()
+ fmt.Println("server listening on port 9000")
+ http.ListenAndServe(":9000", h)
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/examples/interval-many/main.go b/vendor/gopkg.in/throttled/throttled.v1/examples/interval-many/main.go
new file mode 100644
index 000000000..51a4ca023
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/examples/interval-many/main.go
@@ -0,0 +1,79 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "sync"
+ "time"
+
+ "gopkg.in/throttled/throttled.v1"
+)
+
+var (
+ delay = flag.Duration("delay", 200*time.Millisecond, "delay between calls")
+ bursts = flag.Int("bursts", 10, "number of bursts allowed")
+ delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
+ output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
+)
+
+func main() {
+ flag.Parse()
+
+ var ok, ko int
+ var mu sync.Mutex
+
+ // Keep start time to log since-time
+ start := time.Now()
+
+ // Create the interval throttle
+ t := throttled.Interval(throttled.D(*delay), *bursts, nil, 0)
+ // Set its denied handler
+ t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ko" {
+ log.Printf("%s: KO: %s", r.URL.Path, time.Since(start))
+ }
+ throttled.DefaultDeniedHandler.ServeHTTP(w, r)
+ mu.Lock()
+ defer mu.Unlock()
+ ko++
+ })
+ // Create OK handlers
+ rand.Seed(time.Now().Unix())
+ makeHandler := func(ix int) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ok" {
+ log.Printf("handler %d: %s: ok: %s", ix, r.URL.Path, time.Since(start))
+ }
+ if *delayRes > 0 {
+ wait := time.Duration(rand.Intn(int(*delayRes)))
+ time.Sleep(wait)
+ }
+ w.WriteHeader(200)
+ mu.Lock()
+ defer mu.Unlock()
+ ok++
+ })
+ }
+ // Throttle them using the same interval throttler
+ h1 := t.Throttle(makeHandler(1))
+ h2 := t.Throttle(makeHandler(2))
+
+ // Handle two paths
+ mux := http.NewServeMux()
+ mux.Handle("/a", h1)
+ mux.Handle("/b", h2)
+
+ // Print stats once in a while
+ go func() {
+ for _ = range time.Tick(10 * time.Second) {
+ mu.Lock()
+ log.Printf("ok: %d, ko: %d", ok, ko)
+ mu.Unlock()
+ }
+ }()
+ fmt.Println("server listening on port 9000")
+ http.ListenAndServe(":9000", mux)
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/main.go b/vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/main.go
new file mode 100644
index 000000000..f43cdc122
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/main.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "sync"
+ "time"
+
+ "gopkg.in/throttled/throttled.v1"
+)
+
+var (
+ delay = flag.Duration("delay", 200*time.Millisecond, "delay between calls")
+ bursts = flag.Int("bursts", 10, "number of bursts allowed")
+ maxkeys = flag.Int("max-keys", 1000, "maximum number of keys")
+ delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
+ output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
+)
+
+func main() {
+ flag.Parse()
+
+ var h http.Handler
+ var ok, ko int
+ var mu sync.Mutex
+
+ // Keep the start time to print since-time
+ start := time.Now()
+
+ // Create the interval throttler
+ t := throttled.Interval(throttled.D(*delay), *bursts, &throttled.VaryBy{
+ Path: true,
+ }, *maxkeys)
+ // Set the denied handler
+ t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ko" {
+ log.Printf("KO: %s", time.Since(start))
+ }
+ throttled.DefaultDeniedHandler.ServeHTTP(w, r)
+ mu.Lock()
+ defer mu.Unlock()
+ ko++
+ })
+
+ // Throttle the OK handler
+ rand.Seed(time.Now().Unix())
+ h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ok" {
+ log.Printf("%s: ok: %s", r.URL.Path, time.Since(start))
+ }
+ if *delayRes > 0 {
+ wait := time.Duration(rand.Intn(int(*delayRes)))
+ time.Sleep(wait)
+ }
+ w.WriteHeader(200)
+ mu.Lock()
+ defer mu.Unlock()
+ ok++
+ }))
+
+ // Print stats once in a while
+ go func() {
+ for _ = range time.Tick(10 * time.Second) {
+ mu.Lock()
+ log.Printf("ok: %d, ko: %d", ok, ko)
+ mu.Unlock()
+ }
+ }()
+ fmt.Println("server listening on port 9000")
+ http.ListenAndServe(":9000", h)
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/siege-urls b/vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/siege-urls
new file mode 100644
index 000000000..9a2d0d312
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/siege-urls
@@ -0,0 +1,4 @@
+http://localhost:9000/a
+http://localhost:9000/b
+http://localhost:9000/c
+
diff --git a/vendor/gopkg.in/throttled/throttled.v1/examples/interval/main.go b/vendor/gopkg.in/throttled/throttled.v1/examples/interval/main.go
new file mode 100644
index 000000000..ef8ee2cb8
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/examples/interval/main.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "sync"
+ "time"
+
+ "gopkg.in/throttled/throttled.v1"
+)
+
+var (
+ delay = flag.Duration("delay", 200*time.Millisecond, "delay between calls")
+ bursts = flag.Int("bursts", 10, "number of bursts allowed")
+ delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
+ output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
+)
+
+func main() {
+ flag.Parse()
+
+ var h http.Handler
+ var ok, ko int
+ var mu sync.Mutex
+
+ // Keep the start time to print since-time
+ start := time.Now()
+ // Create the interval throttler
+ t := throttled.Interval(throttled.D(*delay), *bursts, nil, 0)
+ // Set its denied handler
+ t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ko" {
+ log.Printf("KO: %s", time.Since(start))
+ }
+ throttled.DefaultDeniedHandler.ServeHTTP(w, r)
+ mu.Lock()
+ defer mu.Unlock()
+ ko++
+ })
+ // Throttle the OK handler
+ rand.Seed(time.Now().Unix())
+ h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ok" {
+ log.Printf("ok: %s", time.Since(start))
+ }
+ if *delayRes > 0 {
+ wait := time.Duration(rand.Intn(int(*delayRes)))
+ time.Sleep(wait)
+ }
+ w.WriteHeader(200)
+ mu.Lock()
+ defer mu.Unlock()
+ ok++
+ }))
+
+ // Print stats once in a while
+ go func() {
+ for _ = range time.Tick(10 * time.Second) {
+ mu.Lock()
+ log.Printf("ok: %d, ko: %d", ok, ko)
+ mu.Unlock()
+ }
+ }()
+ fmt.Println("server listening on port 9000")
+ http.ListenAndServe(":9000", h)
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/examples/memstats/main.go b/vendor/gopkg.in/throttled/throttled.v1/examples/memstats/main.go
new file mode 100644
index 000000000..50d4cc69b
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/examples/memstats/main.go
@@ -0,0 +1,97 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "net/http"
+ "runtime"
+ "sync"
+ "time"
+
+ "gopkg.in/throttled/throttled.v1"
+)
+
+var (
+ numgc = flag.Int("gc", 0, "number of GC runs")
+ mallocs = flag.Int("mallocs", 0, "number of mallocs")
+ total = flag.Int("total", 0, "total number of bytes allocated")
+ allocs = flag.Int("allocs", 0, "number of bytes allocated")
+ refrate = flag.Duration("refresh", 0, "refresh rate of the memory stats")
+ delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
+ output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
+)
+
+func main() {
+ flag.Parse()
+
+ var h http.Handler
+ var ok, ko int
+ var mu sync.Mutex
+
+ // Keep the start time so elapsed time can be printed
+ start := time.Now()
+ // Create the thresholds struct
+ thresh := throttled.MemThresholds(&runtime.MemStats{
+ NumGC: uint32(*numgc),
+ Mallocs: uint64(*mallocs),
+ TotalAlloc: uint64(*total),
+ Alloc: uint64(*allocs),
+ })
+ if *output != "q" {
+ log.Printf("thresholds: NumGC: %d, Mallocs: %d, Alloc: %dKb, Total: %dKb", thresh.NumGC, thresh.Mallocs, thresh.Alloc/1024, thresh.TotalAlloc/1024)
+ }
+ // Create the MemStats throttler
+ t := throttled.MemStats(thresh, *refrate)
+ // Set its denied handler
+ t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ko" {
+ log.Printf("KO: %s", time.Since(start))
+ }
+ throttled.DefaultDeniedHandler.ServeHTTP(w, r)
+ mu.Lock()
+ defer mu.Unlock()
+ ko++
+ })
+
+ // Throttle the OK handler
+ rand.Seed(time.Now().Unix())
+ h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ok" {
+ log.Printf("ok: %s", time.Since(start))
+ }
+ if *delayRes > 0 {
+ wait := time.Duration(rand.Intn(int(*delayRes)))
+ time.Sleep(wait)
+ }
+ // Read the whole file into memory so the handler actually uses 64Kb (instead of streaming to w)
+ b, err := ioutil.ReadFile("test-file")
+ if err != nil {
+ throttled.Error(w, r, err)
+ return
+ }
+ _, err = w.Write(b)
+ if err != nil {
+ throttled.Error(w, r, err)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ ok++
+ }))
+
+ // Print stats once in a while
+ go func() {
+ var mem runtime.MemStats
+ for range time.Tick(10 * time.Second) {
+ mu.Lock()
+ runtime.ReadMemStats(&mem)
+ log.Printf("ok: %d, ko: %d", ok, ko)
+ log.Printf("TotalAllocs: %d Kb, Allocs: %d Kb, Mallocs: %d, NumGC: %d", mem.TotalAlloc/1024, mem.Alloc/1024, mem.Mallocs, mem.NumGC)
+ mu.Unlock()
+ }
+ }()
+ fmt.Println("server listening on port 9000")
+ http.ListenAndServe(":9000", h)
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/examples/memstats/test-file b/vendor/gopkg.in/throttled/throttled.v1/examples/memstats/test-file
new file mode 100644
index 000000000..c97c12f9b
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/examples/memstats/test-file
Binary files differ
diff --git a/vendor/gopkg.in/throttled/throttled.v1/examples/rate-limit/main.go b/vendor/gopkg.in/throttled/throttled.v1/examples/rate-limit/main.go
new file mode 100644
index 000000000..b00119f63
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/examples/rate-limit/main.go
@@ -0,0 +1,101 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/garyburd/redigo/redis"
+ "gopkg.in/throttled/throttled.v1"
+ "gopkg.in/throttled/throttled.v1/store"
+)
+
+var (
+ requests = flag.Int("requests", 10, "number of requests allowed in the time window")
+ window = flag.Duration("window", time.Minute, "time window for the limit of requests")
+ storeType = flag.String("store", "mem", "store to use, one of `mem` or `redis` (on default localhost port)")
+ delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
+ output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
+)
+
+func main() {
+ flag.Parse()
+
+ var h http.Handler
+ var ok, ko int
+ var mu sync.Mutex
+ var st throttled.Store
+
+ // Keep the start time so elapsed time can be printed
+ start := time.Now()
+ // Create the rate-limit store
+ switch *storeType {
+ case "mem":
+ st = store.NewMemStore(0)
+ case "redis":
+ st = store.NewRedisStore(setupRedis(), "throttled:", 0)
+ default:
+ log.Fatalf("unsupported store: %s", *storeType)
+ }
+ // Create the rate-limit throttler, varying on path
+ t := throttled.RateLimit(throttled.Q{Requests: *requests, Window: *window}, &throttled.VaryBy{
+ Path: true,
+ }, st)
+
+ // Set its denied handler
+ t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ko" {
+ log.Printf("KO: %s", time.Since(start))
+ }
+ throttled.DefaultDeniedHandler.ServeHTTP(w, r)
+ mu.Lock()
+ defer mu.Unlock()
+ ko++
+ })
+
+ // Throttle the OK handler
+ rand.Seed(time.Now().Unix())
+ h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if *output == "v" || *output == "ok" {
+ log.Printf("ok: %s", time.Since(start))
+ }
+ if *delayRes > 0 {
+ wait := time.Duration(rand.Intn(int(*delayRes)))
+ time.Sleep(wait)
+ }
+ w.WriteHeader(200)
+ mu.Lock()
+ defer mu.Unlock()
+ ok++
+ }))
+
+ // Print stats once in a while
+ go func() {
+ for range time.Tick(10 * time.Second) {
+ mu.Lock()
+ log.Printf("ok: %d, ko: %d", ok, ko)
+ mu.Unlock()
+ }
+ }()
+ fmt.Println("server listening on port 9000")
+ http.ListenAndServe(":9000", h)
+}
+
+func setupRedis() *redis.Pool {
+ pool := &redis.Pool{
+ MaxIdle: 3,
+ IdleTimeout: 30 * time.Second,
+ Dial: func() (redis.Conn, error) {
+ return redis.Dial("tcp", ":6379")
+ },
+ TestOnBorrow: func(c redis.Conn, t time.Time) error {
+ _, err := c.Do("PING")
+ return err
+ },
+ }
+ return pool
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/interval_test.go b/vendor/gopkg.in/throttled/throttled.v1/interval_test.go
new file mode 100644
index 000000000..bc584e134
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/interval_test.go
@@ -0,0 +1,114 @@
+package throttled
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/PuerkitoBio/boom/commands"
+)
+
+func TestInterval(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+ cases := []struct {
+ n int
+ c int
+ rps int
+ bursts int
+ }{
+ 0: {60, 10, 20, 100},
+ 1: {300, 20, 100, 100},
+ 2: {10, 10, 1, 10},
+ 3: {1000, 100, 1000, 100},
+ }
+ for i, c := range cases {
+ // Setup the stats handler
+ st := &stats{}
+ // Create the throttler
+ th := Interval(PerSec(c.rps), c.bursts, nil, 0)
+ th.DeniedHandler = http.HandlerFunc(st.DeniedHTTP)
+ b := commands.Boom{
+ Req: &commands.ReqOpts{},
+ N: c.n,
+ C: c.c,
+ Output: "quiet",
+ }
+ // Run the test
+ rpts := runTest(th.Throttle(st), b)
+ // Assert results
+ for _, rpt := range rpts {
+ assertRPS(t, i, c.rps, rpt)
+ }
+ assertStats(t, i, st, rpts)
+ }
+}
+
+func TestIntervalVary(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+ cases := []struct {
+ n int
+ c int
+ urls int
+ rps int
+ bursts int
+ }{
+ 0: {60, 10, 3, 20, 100},
+ 1: {300, 20, 3, 100, 100},
+ 2: {10, 10, 3, 1, 10},
+ 3: {500, 10, 2, 1000, 100},
+ }
+ for i, c := range cases {
+ // Setup the stats handler
+ st := &stats{}
+ // Create the throttler
+ th := Interval(PerSec(c.rps), c.bursts, nil, 0)
+ th.DeniedHandler = http.HandlerFunc(st.DeniedHTTP)
+ var booms []commands.Boom
+ for j := 0; j < c.urls; j++ {
+ booms = append(booms, commands.Boom{
+ Req: &commands.ReqOpts{},
+ N: c.n,
+ C: c.c,
+ Output: "quiet",
+ })
+ }
+ // Run the test
+ rpts := runTest(th.Throttle(st), booms...)
+ // Assert results
+ for _, rpt := range rpts {
+ assertRPS(t, i, c.rps, rpt)
+ }
+ assertStats(t, i, st, rpts)
+ }
+}
+
+func assertRPS(t *testing.T, ix int, exp int, rpt *commands.Report) {
+ wigglef := 0.2 * float64(exp)
+ if rpt.SuccessRPS < float64(exp)-wigglef || rpt.SuccessRPS > float64(exp)+wigglef {
+ t.Errorf("%d: expected RPS to be around %d, got %f", ix, exp, rpt.SuccessRPS)
+ }
+}
+
+func assertStats(t *testing.T, ix int, st *stats, rpts []*commands.Report) {
+ ok, ko, _ := st.Stats()
+ var twos, fives, max int
+ for _, rpt := range rpts {
+ twos += rpt.StatusCodeDist[200]
+ fives += rpt.StatusCodeDist[deniedStatus]
+ if len(rpt.StatusCodeDist) > max {
+ max = len(rpt.StatusCodeDist)
+ }
+ }
+ if ok != twos {
+ t.Errorf("%d: expected %d status 200, got %d", ix, twos, ok)
+ }
+ if ko != fives {
+ t.Errorf("%d: expected %d status 429, got %d", ix, fives, ko)
+ }
+ if max > 2 {
+ t.Errorf("%d: expected at most 2 different status codes, got %d", ix, max)
+ }
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/memstats_test.go b/vendor/gopkg.in/throttled/throttled.v1/memstats_test.go
new file mode 100644
index 000000000..2b8faa721
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/memstats_test.go
@@ -0,0 +1,64 @@
+package throttled
+
+import (
+ "net/http"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/PuerkitoBio/boom/commands"
+)
+
+func TestMemStats(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+ cases := []struct {
+ n int
+ c int
+ gc uint32
+ total uint64
+ rate time.Duration
+ }{
+ 0: {1000, 10, 3, 0, 0},
+ 1: {200, 10, 0, 600000, 0},
+ 2: {500, 10, 2, 555555, 10 * time.Millisecond},
+ }
+ for i, c := range cases {
+ // Setup the stats handler
+ st := &stats{}
+ // Create the throttler
+ limit := MemThresholds(&runtime.MemStats{NumGC: c.gc, TotalAlloc: c.total})
+ th := MemStats(limit, c.rate)
+ th.DeniedHandler = http.HandlerFunc(st.DeniedHTTP)
+ // Run the test
+ b := commands.Boom{
+ Req: &commands.ReqOpts{},
+ N: c.n,
+ C: c.c,
+ Output: "quiet",
+ }
+ rpts := runTest(th.Throttle(st), b)
+ // Assert results
+ assertStats(t, i, st, rpts)
+ assertMem(t, i, limit)
+ }
+}
+
+func assertMem(t *testing.T, ix int, limit *runtime.MemStats) {
+ var mem runtime.MemStats
+ runtime.ReadMemStats(&mem)
+ if mem.NumGC < limit.NumGC {
+ t.Errorf("%d: expected gc to be at least %d, got %d", ix, limit.NumGC, mem.NumGC)
+ }
+ if mem.TotalAlloc < limit.TotalAlloc {
+ t.Errorf("%d: expected total alloc to be at least %dKb, got %dKb", ix, limit.TotalAlloc/1024, mem.TotalAlloc/1024)
+ }
+}
+
+func BenchmarkReadMemStats(b *testing.B) {
+ var mem runtime.MemStats
+ for i := 0; i < b.N; i++ {
+ runtime.ReadMemStats(&mem)
+ }
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/misc/pre-commit b/vendor/gopkg.in/throttled/throttled.v1/misc/pre-commit
new file mode 100755
index 000000000..88b61bfde
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/misc/pre-commit
@@ -0,0 +1,38 @@
+#!/bin/sh
+# Copyright 2012 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# git gofmt pre-commit hook
+#
+# To use, store as .git/hooks/pre-commit inside your repository and make sure
+# it has execute permissions.
+#
+# This script does not handle file names that contain spaces.
+
+# golint is purely informational: it does not fail the hook with a non-zero exit code when it finds
+# something, because it may report many false positives. Its output is printed for information only.
+echo "lint result (informational only):"
+echo
+golint .
+
+# go vet returns 1 if an error was found. Exit the hook with this exit code.
+go vet ./...
+vetres=$?
+
+# Check for gofmt problems and report if any.
+gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '.go$')
+[ -z "$gofiles" ] && echo "EXIT $vetres" && exit $vetres
+
+unformatted=$(gofmt -l $gofiles)
+[ -z "$unformatted" ] && echo "EXIT $vetres" && exit $vetres
+
+# Some files are not gofmt'd. Print message and fail.
+
+echo >&2 "Go files must be formatted with gofmt. Please run:"
+for fn in $unformatted; do
+ echo >&2 " gofmt -w $PWD/$fn"
+done
+
+echo "EXIT 1"
+exit 1
diff --git a/vendor/gopkg.in/throttled/throttled.v1/rate_test.go b/vendor/gopkg.in/throttled/throttled.v1/rate_test.go
new file mode 100644
index 000000000..67dea74b1
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/rate_test.go
@@ -0,0 +1,101 @@
+package throttled
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "testing"
+ "time"
+)
+
+const deniedStatus = 429
+
+// Simple memory store for tests, unsafe for concurrent access
+type mapStore struct {
+ cnt map[string]int
+ ts map[string]time.Time
+}
+
+func newMapStore() *mapStore {
+ return &mapStore{
+ make(map[string]int),
+ make(map[string]time.Time),
+ }
+}
+func (ms *mapStore) Incr(key string, window time.Duration) (int, int, error) {
+ if _, ok := ms.cnt[key]; !ok {
+ return 0, 0, ErrNoSuchKey
+ }
+ ms.cnt[key]++
+ ts := ms.ts[key]
+ return ms.cnt[key], RemainingSeconds(ts, window), nil
+}
+func (ms *mapStore) Reset(key string, win time.Duration) error {
+ ms.cnt[key] = 1
+ ms.ts[key] = time.Now().UTC()
+ return nil
+}
+
+func TestRateLimit(t *testing.T) {
+ quota := Q{5, 5 * time.Second}
+ cases := []struct {
+ limit, remain, reset, status int
+ }{
+ 0: {5, 4, 5, 200},
+ 1: {5, 3, 4, 200},
+ 2: {5, 2, 4, 200},
+ 3: {5, 1, 3, 200},
+ 4: {5, 0, 3, 200},
+ 5: {5, 0, 2, deniedStatus},
+ }
+ // Limit the requests to 2 per second
+ th := Interval(PerSec(2), 0, nil, 0)
+ // Rate limit
+ rl := RateLimit(quota, nil, newMapStore())
+ // Create the stats
+ st := &stats{}
+ // Create the handler
+ h := th.Throttle(rl.Throttle(st))
+
+ // Start the server
+ srv := httptest.NewServer(h)
+ defer srv.Close()
+ for i, c := range cases {
+ callRateLimited(t, i, c.limit, c.remain, c.reset, c.status, srv.URL)
+ }
+ // Wait 3 seconds and call again, should start a new window
+ time.Sleep(3 * time.Second)
+ callRateLimited(t, len(cases), 5, 4, 5, 200, srv.URL)
+}
+
+func callRateLimited(t *testing.T, i, limit, remain, reset, status int, url string) {
+ res, err := http.Get(url)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ // Assert status code
+ if status != res.StatusCode {
+ t.Errorf("%d: expected status %d, got %d", i, status, res.StatusCode)
+ }
+ // Assert headers
+ if v := res.Header.Get("X-RateLimit-Limit"); v != strconv.Itoa(limit) {
+ t.Errorf("%d: expected limit header to be %d, got %s", i, limit, v)
+ }
+ if v := res.Header.Get("X-RateLimit-Remaining"); v != strconv.Itoa(remain) {
+ t.Errorf("%d: expected remain header to be %d, got %s", i, remain, v)
+ }
+ // Allow 1 second wiggle room
+ v := res.Header.Get("X-RateLimit-Reset")
+ vi, _ := strconv.Atoi(v)
+ if vi < reset-1 || vi > reset+1 {
+ t.Errorf("%d: expected reset header to be close to %d, got %d", i, reset, vi)
+ }
+ if status == deniedStatus {
+ v := res.Header.Get("Retry-After")
+ vi, _ := strconv.Atoi(v)
+ if vi < reset-1 || vi > reset+1 {
+ t.Errorf("%d: expected retry after header to be close to %d, got %d", i, reset, vi)
+ }
+ }
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/store/doc.go b/vendor/gopkg.in/throttled/throttled.v1/store/doc.go
index adb4618d3..8e33f2c98 100644
--- a/vendor/gopkg.in/throttled/throttled.v1/store/doc.go
+++ b/vendor/gopkg.in/throttled/throttled.v1/store/doc.go
@@ -1,2 +1,2 @@
// Package store offers a memory-based and a Redis-based throttled.Store implementation.
-package store
+package store // import "gopkg.in/throttled/throttled.v1/store"
diff --git a/vendor/gopkg.in/throttled/throttled.v1/store/mem_test.go b/vendor/gopkg.in/throttled/throttled.v1/store/mem_test.go
new file mode 100644
index 000000000..e8ef8d0da
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/store/mem_test.go
@@ -0,0 +1,43 @@
+package store
+
+import (
+ "testing"
+ "time"
+)
+
+func TestMemStore(t *testing.T) {
+ st := NewMemStore(0)
+ win := time.Second
+
+ // Reset stores a key with count of 1, current timestamp
+ err := st.Reset("k", time.Second)
+ if err != nil {
+ t.Errorf("expected reset to return nil, got %s", err)
+ }
+ cnt, sec1, _ := st.Incr("k", win)
+ if cnt != 2 {
+ t.Errorf("expected reset+incr to set count to 2, got %d", cnt)
+ }
+
+ // Incr increments the key, keeps same timestamp
+ cnt, sec2, err := st.Incr("k", win)
+ if err != nil {
+ t.Errorf("expected 2nd incr to return nil error, got %s", err)
+ }
+ if cnt != 3 {
+ t.Errorf("expected 2nd incr to return 3, got %d", cnt)
+ }
+ if sec1 != sec2 {
+ t.Errorf("expected 2nd incr to return %d secs, got %d", sec1, sec2)
+ }
+
+ // Reset on existing key brings it back to 1, new timestamp
+ err = st.Reset("k", win)
+ if err != nil {
+ t.Errorf("expected reset on existing key to return nil, got %s", err)
+ }
+ cnt, _, _ = st.Incr("k", win)
+ if cnt != 2 {
+ t.Errorf("expected last reset+incr to return 2, got %d", cnt)
+ }
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/store/redis_test.go b/vendor/gopkg.in/throttled/throttled.v1/store/redis_test.go
new file mode 100644
index 000000000..a282d6d25
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/store/redis_test.go
@@ -0,0 +1,66 @@
+package store
+
+import (
+ "testing"
+ "time"
+
+ "github.com/garyburd/redigo/redis"
+)
+
+func getPool() *redis.Pool {
+ pool := &redis.Pool{
+ MaxIdle: 3,
+ IdleTimeout: 30 * time.Second,
+ Dial: func() (redis.Conn, error) {
+ return redis.Dial("tcp", ":6379")
+ },
+ TestOnBorrow: func(c redis.Conn, t time.Time) error {
+ _, err := c.Do("PING")
+ return err
+ },
+ }
+ return pool
+}
+
+func TestRedisStore(t *testing.T) {
+ pool := getPool()
+ c := pool.Get()
+ if _, err := redis.String(c.Do("PING")); err != nil {
+ c.Close()
+ t.Skip("redis server not available on localhost port 6379")
+ }
+ st := NewRedisStore(pool, "throttled:", 1)
+ win := 2 * time.Second
+
+ // Incr increments the key, even if it does not exist
+ cnt, secs, err := st.Incr("k", win)
+ if err != nil {
+ t.Errorf("expected initial incr to return nil error, got %s", err)
+ }
+ if cnt != 1 {
+ t.Errorf("expected initial incr to return 1, got %d", cnt)
+ }
+ if secs != int(win.Seconds()) {
+ t.Errorf("expected initial incr to return %d secs, got %d", int(win.Seconds()), secs)
+ }
+
+ // Waiting a second diminishes the remaining seconds
+ time.Sleep(time.Second)
+ _, sec2, _ := st.Incr("k", win)
+ if sec2 != secs-1 {
+ t.Errorf("expected 2nd incr after a 1s sleep to return %d secs, got %d", secs-1, sec2)
+ }
+
+ // Wait until the window expires; Incr should then reset the count to 1 with the initial secs
+ time.Sleep(1100 * time.Millisecond)
+ cnt, sec3, err := st.Incr("k", win)
+ if err != nil {
+ t.Errorf("expected last incr to return nil error, got %s", err)
+ }
+ if cnt != 1 {
+ t.Errorf("expected last incr to return 1, got %d", cnt)
+ }
+ if sec3 != int(win.Seconds()) {
+ t.Errorf("expected last incr to return %d secs, got %d", int(win.Seconds()), sec3)
+ }
+}
diff --git a/vendor/gopkg.in/throttled/throttled.v1/varyby_test.go b/vendor/gopkg.in/throttled/throttled.v1/varyby_test.go
new file mode 100644
index 000000000..91b7ae0ae
--- /dev/null
+++ b/vendor/gopkg.in/throttled/throttled.v1/varyby_test.go
@@ -0,0 +1,56 @@
+package throttled
+
+import (
+ "net/http"
+ "net/url"
+ "testing"
+)
+
+func TestVaryBy(t *testing.T) {
+ u, err := url.Parse("http://localhost/test/path?q=s")
+ if err != nil {
+ panic(err)
+ }
+ ck := &http.Cookie{Name: "ssn", Value: "test"}
+ cases := []struct {
+ vb *VaryBy
+ r *http.Request
+ k string
+ }{
+ 0: {nil, &http.Request{}, ""},
+ 1: {&VaryBy{RemoteAddr: true}, &http.Request{RemoteAddr: "::"}, "::\n"},
+ 2: {
+ &VaryBy{Method: true, Path: true},
+ &http.Request{Method: "POST", URL: u},
+ "post\n/test/path\n",
+ },
+ 3: {
+ &VaryBy{Headers: []string{"Content-length"}},
+ &http.Request{Header: http.Header{"Content-Type": []string{"text/plain"}, "Content-Length": []string{"123"}}},
+ "123\n",
+ },
+ 4: {
+ &VaryBy{Separator: ",", Method: true, Headers: []string{"Content-length"}, Params: []string{"q", "user"}},
+ &http.Request{Method: "GET", Header: http.Header{"Content-Type": []string{"text/plain"}, "Content-Length": []string{"123"}}, Form: url.Values{"q": []string{"s"}, "pwd": []string{"secret"}, "user": []string{"test"}}},
+ "get,123,s,test,",
+ },
+ 5: {
+ &VaryBy{Cookies: []string{"ssn"}},
+ &http.Request{Header: http.Header{"Cookie": []string{ck.String()}}},
+ "test\n",
+ },
+ 6: {
+ &VaryBy{Cookies: []string{"ssn"}, RemoteAddr: true, Custom: func(r *http.Request) string {
+ return "blah"
+ }},
+ &http.Request{Header: http.Header{"Cookie": []string{ck.String()}}},
+ "blah",
+ },
+ }
+ for i, c := range cases {
+ got := c.vb.Key(c.r)
+ if got != c.k {
+ t.Errorf("%d: expected '%s' (%d), got '%s' (%d)", i, c.k, len(c.k), got, len(got))
+ }
+ }
+}
diff --git a/vendor/gopkg.in/yaml.v2/decode_test.go b/vendor/gopkg.in/yaml.v2/decode_test.go
new file mode 100644
index 000000000..c159760b6
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode_test.go
@@ -0,0 +1,988 @@
+package yaml_test
+
+import (
+ "errors"
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v2"
+ "math"
+ "net"
+ "reflect"
+ "strings"
+ "time"
+)
+
+var unmarshalIntTest = 123
+
+var unmarshalTests = []struct {
+ data string
+ value interface{}
+}{
+ {
+ "",
+ &struct{}{},
+ }, {
+ "{}", &struct{}{},
+ }, {
+ "v: hi",
+ map[string]string{"v": "hi"},
+ }, {
+ "v: hi", map[string]interface{}{"v": "hi"},
+ }, {
+ "v: true",
+ map[string]string{"v": "true"},
+ }, {
+ "v: true",
+ map[string]interface{}{"v": true},
+ }, {
+ "v: 10",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 0b10",
+ map[string]interface{}{"v": 2},
+ }, {
+ "v: 0xA",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 4294967296",
+ map[string]int64{"v": 4294967296},
+ }, {
+ "v: 0.1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .Inf",
+ map[string]interface{}{"v": math.Inf(+1)},
+ }, {
+ "v: -.Inf",
+ map[string]interface{}{"v": math.Inf(-1)},
+ }, {
+ "v: -10",
+ map[string]interface{}{"v": -10},
+ }, {
+ "v: -.1",
+ map[string]interface{}{"v": -0.1},
+ },
+
+ // Simple values.
+ {
+ "123",
+ &unmarshalIntTest,
+ },
+
+ // Floats from spec
+ {
+ "canonical: 6.8523e+5",
+ map[string]interface{}{"canonical": 6.8523e+5},
+ }, {
+ "expo: 685.230_15e+03",
+ map[string]interface{}{"expo": 685.23015e+03},
+ }, {
+ "fixed: 685_230.15",
+ map[string]interface{}{"fixed": 685230.15},
+ }, {
+ "neginf: -.inf",
+ map[string]interface{}{"neginf": math.Inf(-1)},
+ }, {
+ "fixed: 685_230.15",
+ map[string]float64{"fixed": 685230.15},
+ },
+ //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
+ //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
+
+ // Bools from spec
+ {
+ "canonical: y",
+ map[string]interface{}{"canonical": true},
+ }, {
+ "answer: NO",
+ map[string]interface{}{"answer": false},
+ }, {
+ "logical: True",
+ map[string]interface{}{"logical": true},
+ }, {
+ "option: on",
+ map[string]interface{}{"option": true},
+ }, {
+ "option: on",
+ map[string]bool{"option": true},
+ },
+ // Ints from spec
+ {
+ "canonical: 685230",
+ map[string]interface{}{"canonical": 685230},
+ }, {
+ "decimal: +685_230",
+ map[string]interface{}{"decimal": 685230},
+ }, {
+ "octal: 02472256",
+ map[string]interface{}{"octal": 685230},
+ }, {
+ "hexa: 0x_0A_74_AE",
+ map[string]interface{}{"hexa": 685230},
+ }, {
+ "bin: 0b1010_0111_0100_1010_1110",
+ map[string]interface{}{"bin": 685230},
+ }, {
+ "bin: -0b101010",
+ map[string]interface{}{"bin": -42},
+ }, {
+ "decimal: +685_230",
+ map[string]int{"decimal": 685230},
+ },
+
+ //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
+
+ // Nulls from spec
+ {
+ "empty:",
+ map[string]interface{}{"empty": nil},
+ }, {
+ "canonical: ~",
+ map[string]interface{}{"canonical": nil},
+ }, {
+ "english: null",
+ map[string]interface{}{"english": nil},
+ }, {
+ "~: null key",
+ map[interface{}]string{nil: "null key"},
+ }, {
+ "empty:",
+ map[string]*bool{"empty": nil},
+ },
+
+ // Flow sequence
+ {
+ "seq: [A,B]",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq: [A,B,C,]",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq: [A,1,C]",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+ // Block sequence
+ {
+ "seq:\n - A\n - B",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq:\n - A\n - B\n - C",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+
+ // Literal block scalar
+ {
+ "scalar: | # Comment\n\n literal\n\n \ttext\n\n",
+ map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
+ },
+
+ // Folded block scalar
+ {
+ "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
+ map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
+ },
+
+ // Map inside interface with no type hints.
+ {
+ "a: {b: c}",
+ map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ },
+
+ // Structs and type conversions.
+ {
+ "hello: world",
+ &struct{ Hello string }{"world"},
+ }, {
+ "a: {b: c}",
+ &struct{ A struct{ B string } }{struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A map[string]string }{map[string]string{"b": "c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *map[string]string }{&map[string]string{"b": "c"}},
+ }, {
+ "a:",
+ &struct{ A map[string]string }{},
+ }, {
+ "a: 1",
+ &struct{ A int }{1},
+ }, {
+ "a: 1",
+ &struct{ A float64 }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A int }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A uint }{1},
+ }, {
+ "a: [1, 2]",
+ &struct{ A []int }{[]int{1, 2}},
+ }, {
+ "a: 1",
+ &struct{ B int }{0},
+ }, {
+ "a: 1",
+ &struct {
+ B int "a"
+ }{1},
+ }, {
+ "a: y",
+ &struct{ A bool }{true},
+ },
+
+ // Some cross type conversions
+ {
+ "v: 42",
+ map[string]uint{"v": 42},
+ }, {
+ "v: -42",
+ map[string]uint{},
+ }, {
+ "v: 4294967296",
+ map[string]uint64{"v": 4294967296},
+ }, {
+ "v: -4294967296",
+ map[string]uint64{},
+ },
+
+ // int
+ {
+ "int_max: 2147483647",
+ map[string]int{"int_max": math.MaxInt32},
+ },
+ {
+ "int_min: -2147483648",
+ map[string]int{"int_min": math.MinInt32},
+ },
+ {
+ "int_overflow: 9223372036854775808", // math.MaxInt64 + 1
+ map[string]int{},
+ },
+
+ // int64
+ {
+ "int64_max: 9223372036854775807",
+ map[string]int64{"int64_max": math.MaxInt64},
+ },
+ {
+ "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
+ map[string]int64{"int64_max_base2": math.MaxInt64},
+ },
+ {
+ "int64_min: -9223372036854775808",
+ map[string]int64{"int64_min": math.MinInt64},
+ },
+ {
+ "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
+ map[string]int64{"int64_neg_base2": -math.MaxInt64},
+ },
+ {
+ "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
+ map[string]int64{},
+ },
+
+ // uint
+ {
+ "uint_min: 0",
+ map[string]uint{"uint_min": 0},
+ },
+ {
+ "uint_max: 4294967295",
+ map[string]uint{"uint_max": math.MaxUint32},
+ },
+ {
+ "uint_underflow: -1",
+ map[string]uint{},
+ },
+
+ // uint64
+ {
+ "uint64_min: 0",
+ map[string]uint{"uint64_min": 0},
+ },
+ {
+ "uint64_max: 18446744073709551615",
+ map[string]uint64{"uint64_max": math.MaxUint64},
+ },
+ {
+ "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
+ map[string]uint64{"uint64_max_base2": math.MaxUint64},
+ },
+ {
+ "uint64_maxint64: 9223372036854775807",
+ map[string]uint64{"uint64_maxint64": math.MaxInt64},
+ },
+ {
+ "uint64_underflow: -1",
+ map[string]uint64{},
+ },
+
+ // float32
+ {
+ "float32_max: 3.40282346638528859811704183484516925440e+38",
+ map[string]float32{"float32_max": math.MaxFloat32},
+ },
+ {
+ "float32_nonzero: 1.401298464324817070923729583289916131280e-45",
+ map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
+ },
+ {
+ "float32_maxuint64: 18446744073709551615",
+ map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
+ },
+ {
+ "float32_maxuint64+1: 18446744073709551616",
+ map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
+ },
+
+ // float64
+ {
+ "float64_max: 1.797693134862315708145274237317043567981e+308",
+ map[string]float64{"float64_max": math.MaxFloat64},
+ },
+ {
+ "float64_nonzero: 4.940656458412465441765687928682213723651e-324",
+ map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
+ },
+ {
+ "float64_maxuint64: 18446744073709551615",
+ map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
+ },
+ {
+ "float64_maxuint64+1: 18446744073709551616",
+ map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
+ },
+
+ // Overflow cases.
+ {
+ "v: 4294967297",
+ map[string]int32{},
+ }, {
+ "v: 128",
+ map[string]int8{},
+ },
+
+ // Quoted values.
+ {
+ "'1': '\"2\"'",
+ map[interface{}]interface{}{"1": "\"2\""},
+ }, {
+ "v:\n- A\n- 'B\n\n C'\n",
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ },
+
+ // Explicit tags.
+ {
+ "v: !!float '1.1'",
+ map[string]interface{}{"v": 1.1},
+ }, {
+ "v: !!null ''",
+ map[string]interface{}{"v": nil},
+ }, {
+ "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
+ map[string]interface{}{"v": 1},
+ },
+
+ // Anchors and aliases.
+ {
+ "a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
+ &struct{ A, B, C, D int }{1, 2, 1, 2},
+ }, {
+ "a: &a {c: 1}\nb: *a",
+ &struct {
+ A, B struct {
+ C int
+ }
+ }{struct{ C int }{1}, struct{ C int }{1}},
+ }, {
+ "a: &a [1, 2]\nb: *a",
+ &struct{ B []int }{[]int{1, 2}},
+ }, {
+ "b: *a\na: &a {c: 1}",
+ &struct {
+ A, B struct {
+ C int
+ }
+ }{struct{ C int }{1}, struct{ C int }{1}},
+ },
+
+ // Bug #1133337
+ {
+ "foo: ''",
+ map[string]*string{"foo": new(string)},
+ }, {
+ "foo: null",
+ map[string]string{"foo": ""},
+ }, {
+ "foo: null",
+ map[string]interface{}{"foo": nil},
+ },
+
+ // Ignored field
+ {
+ "a: 1\nb: 2\n",
+ &struct {
+ A int
+ B int "-"
+ }{1, 0},
+ },
+
+ // Bug #1191981
+ {
+ "" +
+ "%YAML 1.1\n" +
+ "--- !!str\n" +
+ `"Generic line break (no glyph)\n\` + "\n" +
+ ` Generic line break (glyphed)\n\` + "\n" +
+ ` Line separator\u2028\` + "\n" +
+ ` Paragraph separator\u2029"` + "\n",
+ "" +
+ "Generic line break (no glyph)\n" +
+ "Generic line break (glyphed)\n" +
+ "Line separator\u2028Paragraph separator\u2029",
+ },
+
+ // Struct inlining
+ {
+ "a: 1\nb: 2\nc: 3\n",
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ },
+
+ // Map inlining
+ {
+ "a: 1\nb: 2\nc: 3\n",
+ &struct {
+ A int
+ C map[string]int `yaml:",inline"`
+ }{1, map[string]int{"b": 2, "c": 3}},
+ },
+
+ // bug 1243827
+ {
+ "a: -b_c",
+ map[string]interface{}{"a": "-b_c"},
+ },
+ {
+ "a: +b_c",
+ map[string]interface{}{"a": "+b_c"},
+ },
+ {
+ "a: 50cent_of_dollar",
+ map[string]interface{}{"a": "50cent_of_dollar"},
+ },
+
+ // Duration
+ {
+ "a: 3s",
+ map[string]time.Duration{"a": 3 * time.Second},
+ },
+
+ // Issue #24.
+ {
+ "a: <foo>",
+ map[string]string{"a": "<foo>"},
+ },
+
+ // Base 60 floats are obsolete and unsupported.
+ {
+ "a: 1:1\n",
+ map[string]string{"a": "1:1"},
+ },
+
+ // Binary data.
+ {
+ "a: !!binary gIGC\n",
+ map[string]string{"a": "\x80\x81\x82"},
+ }, {
+ "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+ map[string]string{"a": strings.Repeat("\x90", 54)},
+ }, {
+ "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
+ map[string]string{"a": strings.Repeat("\x00", 52)},
+ },
+
+ // Ordered maps.
+ {
+ "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
+ &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
+ },
+
+ // Issue #39.
+ {
+ "a:\n b:\n c: d\n",
+ map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
+ },
+
+ // Custom map type.
+ {
+ "a: {b: c}",
+ M{"a": M{"b": "c"}},
+ },
+
+ // Support encoding.TextUnmarshaler.
+ {
+ "a: 1.2.3.4\n",
+ map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
+ },
+ {
+ "a: 2015-02-24T18:19:39Z\n",
+ map[string]time.Time{"a": time.Unix(1424801979, 0)},
+ },
+
+ // Decode empty lists as zero-length slices.
+ {
+ "a: []",
+ &struct{ A []int }{[]int{}},
+ },
+
+ // UTF-16-LE
+ {
+ "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00",
+ M{"ñoño": "very yes"},
+ },
+ // UTF-16-LE with surrogate.
+ {
+ "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00",
+ M{"ñoño": "very yes 🟔"},
+ },
+
+ // UTF-16-BE
+ {
+ "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n",
+ M{"ñoño": "very yes"},
+ },
+ // UTF-16-BE with surrogate.
+ {
+ "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n",
+ M{"ñoño": "very yes 🟔"},
+ },
+}
+
+type M map[interface{}]interface{}
+
+type inlineB struct {
+ B int
+ inlineC `yaml:",inline"`
+}
+
+type inlineC struct {
+ C int
+}
+
+func (s *S) TestUnmarshal(c *C) {
+ for _, item := range unmarshalTests {
+ t := reflect.ValueOf(item.value).Type()
+ var value interface{}
+ switch t.Kind() {
+ case reflect.Map:
+ value = reflect.MakeMap(t).Interface()
+ case reflect.String:
+ value = reflect.New(t).Interface()
+ case reflect.Ptr:
+ value = reflect.New(t.Elem()).Interface()
+ default:
+ c.Fatalf("missing case for %s", t)
+ }
+ err := yaml.Unmarshal([]byte(item.data), value)
+ if _, ok := err.(*yaml.TypeError); !ok {
+ c.Assert(err, IsNil)
+ }
+ if t.Kind() == reflect.String {
+ c.Assert(*value.(*string), Equals, item.value)
+ } else {
+ c.Assert(value, DeepEquals, item.value)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalNaN(c *C) {
+ value := map[string]interface{}{}
+ err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
+ c.Assert(err, IsNil)
+ c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
+}
+
+var unmarshalErrorTests = []struct {
+ data, error string
+}{
+ {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
+ {"v: [A,", "yaml: line 1: did not find expected node content"},
+ {"v:\n- [A,", "yaml: line 2: did not find expected node content"},
+ {"a: *b\n", "yaml: unknown anchor 'b' referenced"},
+ {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"},
+ {"value: -", "yaml: block sequence entries are not allowed in this context"},
+ {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
+ {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
+ {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
+}
+
+func (s *S) TestUnmarshalErrors(c *C) {
+ for _, item := range unmarshalErrorTests {
+ var value interface{}
+ err := yaml.Unmarshal([]byte(item.data), &value)
+ c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
+ }
+}
+
+var unmarshalerTests = []struct {
+ data, tag string
+ value interface{}
+}{
+ {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
+ {"_: [1,A]", "!!seq", []interface{}{1, "A"}},
+ {"_: 10", "!!int", 10},
+ {"_: null", "!!null", nil},
+ {`_: BAR!`, "!!str", "BAR!"},
+ {`_: "BAR!"`, "!!str", "BAR!"},
+ {"_: !!foo 'BAR!'", "!!foo", "BAR!"},
+}
+
+var unmarshalerResult = map[int]error{}
+
+type unmarshalerType struct {
+ value interface{}
+}
+
+func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
+ if err := unmarshal(&o.value); err != nil {
+ return err
+ }
+ if i, ok := o.value.(int); ok {
+ if result, ok := unmarshalerResult[i]; ok {
+ return result
+ }
+ }
+ return nil
+}
+
+type unmarshalerPointer struct {
+ Field *unmarshalerType "_"
+}
+
+type unmarshalerValue struct {
+ Field unmarshalerType "_"
+}
+
+func (s *S) TestUnmarshalerPointerField(c *C) {
+ for _, item := range unmarshalerTests {
+ obj := &unmarshalerPointer{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ if item.value == nil {
+ c.Assert(obj.Field, IsNil)
+ } else {
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalerValueField(c *C) {
+ for _, item := range unmarshalerTests {
+ obj := &unmarshalerValue{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+}
+
+func (s *S) TestUnmarshalerWholeDocument(c *C) {
+ obj := &unmarshalerType{}
+ err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
+ c.Assert(err, IsNil)
+ value, ok := obj.value.(map[interface{}]interface{})
+ c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
+ c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
+}
+
+func (s *S) TestUnmarshalerTypeError(c *C) {
+ unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
+ unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
+ defer func() {
+ delete(unmarshalerResult, 2)
+ delete(unmarshalerResult, 4)
+ }()
+
+ type T struct {
+ Before int
+ After int
+ M map[string]*unmarshalerType
+ }
+ var v T
+ data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
+ err := yaml.Unmarshal([]byte(data), &v)
+ c.Assert(err, ErrorMatches, ""+
+ "yaml: unmarshal errors:\n"+
+ " line 1: cannot unmarshal !!str `A` into int\n"+
+ " foo\n"+
+ " bar\n"+
+ " line 1: cannot unmarshal !!str `B` into int")
+ c.Assert(v.M["abc"], NotNil)
+ c.Assert(v.M["def"], IsNil)
+ c.Assert(v.M["ghi"], NotNil)
+ c.Assert(v.M["jkl"], IsNil)
+
+ c.Assert(v.M["abc"].value, Equals, 1)
+ c.Assert(v.M["ghi"].value, Equals, 3)
+}
+
+type proxyTypeError struct{}
+
+func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ var a int32
+ var b int64
+ if err := unmarshal(&s); err != nil {
+ panic(err)
+ }
+ if s == "a" {
+ if err := unmarshal(&b); err == nil {
+ panic("should have failed")
+ }
+ return unmarshal(&a)
+ }
+ if err := unmarshal(&a); err == nil {
+ panic("should have failed")
+ }
+ return unmarshal(&b)
+}
+
+func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
+ type T struct {
+ Before int
+ After int
+ M map[string]*proxyTypeError
+ }
+ var v T
+ data := `{before: A, m: {abc: a, def: b}, after: B}`
+ err := yaml.Unmarshal([]byte(data), &v)
+ c.Assert(err, ErrorMatches, ""+
+ "yaml: unmarshal errors:\n"+
+ " line 1: cannot unmarshal !!str `A` into int\n"+
+ " line 1: cannot unmarshal !!str `a` into int32\n"+
+ " line 1: cannot unmarshal !!str `b` into int64\n"+
+ " line 1: cannot unmarshal !!str `B` into int")
+}
+
+type failingUnmarshaler struct{}
+
+var failingErr = errors.New("failingErr")
+
+func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ return failingErr
+}
+
+func (s *S) TestUnmarshalerError(c *C) {
+ err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
+ c.Assert(err, Equals, failingErr)
+}
+
+type sliceUnmarshaler []int
+
+func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var slice []int
+ err := unmarshal(&slice)
+ if err == nil {
+ *su = slice
+ return nil
+ }
+
+ var intVal int
+ err = unmarshal(&intVal)
+ if err == nil {
+ *su = []int{intVal}
+ return nil
+ }
+
+ return err
+}
+
+func (s *S) TestUnmarshalerRetry(c *C) {
+ var su sliceUnmarshaler
+ err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su)
+ c.Assert(err, IsNil)
+ c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3}))
+
+ err = yaml.Unmarshal([]byte("1"), &su)
+ c.Assert(err, IsNil)
+ c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1}))
+}
+
+// From http://yaml.org/type/merge.html
+var mergeTests = `
+anchors:
+ list:
+ - &CENTER { "x": 1, "y": 2 }
+ - &LEFT { "x": 0, "y": 2 }
+ - &BIG { "r": 10 }
+ - &SMALL { "r": 1 }
+
+# All the following maps are equal:
+
+plain:
+ # Explicit keys
+ "x": 1
+ "y": 2
+ "r": 10
+ label: center/big
+
+mergeOne:
+ # Merge one map
+ << : *CENTER
+ "r": 10
+ label: center/big
+
+mergeMultiple:
+ # Merge multiple maps
+ << : [ *CENTER, *BIG ]
+ label: center/big
+
+override:
+ # Override
+ << : [ *BIG, *LEFT, *SMALL ]
+ "x": 1
+ label: center/big
+
+shortTag:
+ # Explicit short merge tag
+ !!merge "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+longTag:
+ # Explicit merge long tag
+ !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+inlineMap:
+ # Inlined map
+ << : {"x": 1, "y": 2, "r": 10}
+ label: center/big
+
+inlineSequenceMap:
+ # Inlined map in sequence
+ << : [ *CENTER, {"r": 10} ]
+ label: center/big
+`
+
+func (s *S) TestMerge(c *C) {
+ var want = map[interface{}]interface{}{
+ "x": 1,
+ "y": 2,
+ "r": 10,
+ "label": "center/big",
+ }
+
+ var m map[interface{}]interface{}
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
+ }
+}
+
+func (s *S) TestMergeStruct(c *C) {
+ type Data struct {
+ X, Y, R int
+ Label string
+ }
+ want := Data{1, 2, 10, "center/big"}
+
+ var m map[string]Data
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, Equals, want, Commentf("test %q failed", name))
+ }
+}
+
+var unmarshalNullTests = []func() interface{}{
+ func() interface{} { var v interface{}; v = "v"; return &v },
+ func() interface{} { var s = "s"; return &s },
+ func() interface{} { var s = "s"; sptr := &s; return &sptr },
+ func() interface{} { var i = 1; return &i },
+ func() interface{} { var i = 1; iptr := &i; return &iptr },
+ func() interface{} { m := map[string]int{"s": 1}; return &m },
+ func() interface{} { m := map[string]int{"s": 1}; return m },
+}
+
+func (s *S) TestUnmarshalNull(c *C) {
+ for _, test := range unmarshalNullTests {
+ item := test()
+ zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
+ err := yaml.Unmarshal([]byte("null"), item)
+ c.Assert(err, IsNil)
+ if reflect.TypeOf(item).Kind() == reflect.Map {
+ c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
+ } else {
+ c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalSliceOnPreset(c *C) {
+ // Issue #48.
+ v := struct{ A []int }{[]int{1}}
+ yaml.Unmarshal([]byte("a: [2]"), &v)
+ c.Assert(v.A, DeepEquals, []int{2})
+}
+
+//var data []byte
+//func init() {
+// var err error
+// data, err = ioutil.ReadFile("/tmp/file.yaml")
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkUnmarshal(c *C) {
+// var err error
+// for i := 0; i < c.N; i++ {
+// var v map[string]interface{}
+// err = yaml.Unmarshal(data, &v)
+// }
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkMarshal(c *C) {
+// var v map[string]interface{}
+// yaml.Unmarshal(data, &v)
+// c.ResetTimer()
+// for i := 0; i < c.N; i++ {
+// yaml.Marshal(&v)
+// }
+//}
diff --git a/vendor/gopkg.in/yaml.v2/encode_test.go b/vendor/gopkg.in/yaml.v2/encode_test.go
new file mode 100644
index 000000000..84099bd38
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode_test.go
@@ -0,0 +1,501 @@
+package yaml_test
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v2"
+ "net"
+ "os"
+)
+
+var marshalIntTest = 123
+
+var marshalTests = []struct {
+ value interface{}
+ data string
+}{
+ {
+ nil,
+ "null\n",
+ }, {
+ &struct{}{},
+ "{}\n",
+ }, {
+ map[string]string{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]interface{}{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]string{"v": "true"},
+ "v: \"true\"\n",
+ }, {
+ map[string]string{"v": "false"},
+ "v: \"false\"\n",
+ }, {
+ map[string]interface{}{"v": true},
+ "v: true\n",
+ }, {
+ map[string]interface{}{"v": false},
+ "v: false\n",
+ }, {
+ map[string]interface{}{"v": 10},
+ "v: 10\n",
+ }, {
+ map[string]interface{}{"v": -10},
+ "v: -10\n",
+ }, {
+ map[string]uint{"v": 42},
+ "v: 42\n",
+ }, {
+ map[string]interface{}{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]int64{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]uint64{"v": 4294967296},
+ "v: 4294967296\n",
+ }, {
+ map[string]interface{}{"v": "10"},
+ "v: \"10\"\n",
+ }, {
+ map[string]interface{}{"v": 0.1},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": float64(0.1)},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": -0.1},
+ "v: -0.1\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(+1)},
+ "v: .inf\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(-1)},
+ "v: -.inf\n",
+ }, {
+ map[string]interface{}{"v": math.NaN()},
+ "v: .nan\n",
+ }, {
+ map[string]interface{}{"v": nil},
+ "v: null\n",
+ }, {
+ map[string]interface{}{"v": ""},
+ "v: \"\"\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B"}},
+ "v:\n- A\n- B\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ "v:\n- A\n- |-\n B\n C\n",
+ }, {
+ map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
+ "v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
+ }, {
+ map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ "a:\n b: c\n",
+ }, {
+ map[string]interface{}{"a": "-"},
+ "a: '-'\n",
+ },
+
+ // Simple values.
+ {
+ &marshalIntTest,
+ "123\n",
+ },
+
+ // Structures
+ {
+ &struct{ Hello string }{"world"},
+ "hello: world\n",
+ }, {
+ &struct {
+ A struct {
+ B string
+ }
+ }{struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{&struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{},
+ "a: null\n",
+ }, {
+ &struct{ A int }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A []int }{[]int{1, 2}},
+ "a:\n- 1\n- 2\n",
+ }, {
+ &struct {
+ B int "a"
+ }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A bool }{true},
+ "a: true\n",
+ },
+
+ // Conditional flag
+ {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{1, 0},
+ "a: 1\n",
+ }, {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{0, 0},
+ "{}\n",
+ }, {
+ &struct {
+ A *struct{ X, y int } "a,omitempty,flow"
+ }{&struct{ X, y int }{1, 2}},
+ "a: {x: 1}\n",
+ }, {
+ &struct {
+ A *struct{ X, y int } "a,omitempty,flow"
+ }{nil},
+ "{}\n",
+ }, {
+ &struct {
+ A *struct{ X, y int } "a,omitempty,flow"
+ }{&struct{ X, y int }{}},
+ "a: {x: 0}\n",
+ }, {
+ &struct {
+ A struct{ X, y int } "a,omitempty,flow"
+ }{struct{ X, y int }{1, 2}},
+ "a: {x: 1}\n",
+ }, {
+ &struct {
+ A struct{ X, y int } "a,omitempty,flow"
+ }{struct{ X, y int }{0, 1}},
+ "{}\n",
+ }, {
+ &struct {
+ A float64 "a,omitempty"
+ B float64 "b,omitempty"
+ }{1, 0},
+ "a: 1\n",
+ },
+
+ // Flow flag
+ {
+ &struct {
+ A []int "a,flow"
+ }{[]int{1, 2}},
+ "a: [1, 2]\n",
+ }, {
+ &struct {
+ A map[string]string "a,flow"
+ }{map[string]string{"b": "c", "d": "e"}},
+ "a: {b: c, d: e}\n",
+ }, {
+ &struct {
+ A struct {
+ B, D string
+ } "a,flow"
+ }{struct{ B, D string }{"c", "e"}},
+ "a: {b: c, d: e}\n",
+ },
+
+ // Unexported field
+ {
+ &struct {
+ u int
+ A int
+ }{0, 1},
+ "a: 1\n",
+ },
+
+ // Ignored field
+ {
+ &struct {
+ A int
+ B int "-"
+ }{1, 2},
+ "a: 1\n",
+ },
+
+ // Struct inlining
+ {
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ "a: 1\nb: 2\nc: 3\n",
+ },
+
+ // Map inlining
+ {
+ &struct {
+ A int
+ C map[string]int `yaml:",inline"`
+ }{1, map[string]int{"b": 2, "c": 3}},
+ "a: 1\nb: 2\nc: 3\n",
+ },
+
+ // Duration
+ {
+ map[string]time.Duration{"a": 3 * time.Second},
+ "a: 3s\n",
+ },
+
+ // Issue #24: bug in map merging logic.
+ {
+ map[string]string{"a": "<foo>"},
+ "a: <foo>\n",
+ },
+
+ // Issue #34: marshal unsupported base 60 floats quoted for compatibility
+ // with old YAML 1.1 parsers.
+ {
+ map[string]string{"a": "1:1"},
+ "a: \"1:1\"\n",
+ },
+
+ // Binary data.
+ {
+ map[string]string{"a": "\x00"},
+ "a: \"\\0\"\n",
+ }, {
+ map[string]string{"a": "\x80\x81\x82"},
+ "a: !!binary gIGC\n",
+ }, {
+ map[string]string{"a": strings.Repeat("\x90", 54)},
+ "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+ },
+
+ // Ordered maps.
+ {
+ &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
+ "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
+ },
+
+ // Encode unicode as utf-8 rather than in escaped form.
+ {
+ map[string]string{"a": "你好"},
+ "a: 你好\n",
+ },
+
+ // Support encoding.TextMarshaler.
+ {
+ map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
+ "a: 1.2.3.4\n",
+ },
+ {
+ map[string]time.Time{"a": time.Unix(1424801979, 0)},
+ "a: 2015-02-24T18:19:39Z\n",
+ },
+
+ // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
+ {
+ map[string]string{"a": "b: c"},
+ "a: 'b: c'\n",
+ },
+
+ // Strings containing a hash mark ('#') should be quoted
+ {
+ map[string]string{"a": "Hello #comment"},
+ "a: 'Hello #comment'\n",
+ },
+ {
+ map[string]string{"a": "你好 #comment"},
+ "a: '你好 #comment'\n",
+ },
+}
+
+func (s *S) TestMarshal(c *C) {
+ defer os.Setenv("TZ", os.Getenv("TZ"))
+ os.Setenv("TZ", "UTC")
+ for _, item := range marshalTests {
+ data, err := yaml.Marshal(item.value)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, item.data)
+ }
+}
+
+var marshalErrorTests = []struct {
+ value interface{}
+ error string
+ panic string
+}{{
+ value: &struct {
+ B int
+ inlineB ",inline"
+ }{1, inlineB{2, inlineC{3}}},
+ panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
+}, {
+ value: &struct {
+ A int
+ B map[string]int ",inline"
+ }{1, map[string]int{"a": 2}},
+ panic: `Can't have key "a" in inlined map; conflicts with struct field`,
+}}
+
+func (s *S) TestMarshalErrors(c *C) {
+ for _, item := range marshalErrorTests {
+ if item.panic != "" {
+ c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
+ } else {
+ _, err := yaml.Marshal(item.value)
+ c.Assert(err, ErrorMatches, item.error)
+ }
+ }
+}
+
+func (s *S) TestMarshalTypeCache(c *C) {
+ var data []byte
+ var err error
+ func() {
+ type T struct{ A int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ func() {
+ type T struct{ B int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ c.Assert(string(data), Equals, "b: 0\n")
+}
+
+var marshalerTests = []struct {
+ data string
+ value interface{}
+}{
+ {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
+ {"_:\n- 1\n- A\n", []interface{}{1, "A"}},
+ {"_: 10\n", 10},
+ {"_: null\n", nil},
+ {"_: BAR!\n", "BAR!"},
+}
+
+type marshalerType struct {
+ value interface{}
+}
+
+func (o marshalerType) MarshalText() ([]byte, error) {
+ panic("MarshalText called on type with MarshalYAML")
+}
+
+func (o marshalerType) MarshalYAML() (interface{}, error) {
+ return o.value, nil
+}
+
+type marshalerValue struct {
+ Field marshalerType "_"
+}
+
+func (s *S) TestMarshaler(c *C) {
+ for _, item := range marshalerTests {
+ obj := &marshalerValue{}
+ obj.Field.value = item.value
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, string(item.data))
+ }
+}
+
+func (s *S) TestMarshalerWholeDocument(c *C) {
+ obj := &marshalerType{}
+ obj.value = map[string]string{"hello": "world!"}
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, "hello: world!\n")
+}
+
+type failingMarshaler struct{}
+
+func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
+ return nil, failingErr
+}
+
+func (s *S) TestMarshalerError(c *C) {
+ _, err := yaml.Marshal(&failingMarshaler{})
+ c.Assert(err, Equals, failingErr)
+}
+
+func (s *S) TestSortedOutput(c *C) {
+ order := []interface{}{
+ false,
+ true,
+ 1,
+ uint(1),
+ 1.0,
+ 1.1,
+ 1.2,
+ 2,
+ uint(2),
+ 2.0,
+ 2.1,
+ "",
+ ".1",
+ ".2",
+ ".a",
+ "1",
+ "2",
+ "a!10",
+ "a/2",
+ "a/10",
+ "a~10",
+ "ab/1",
+ "b/1",
+ "b/01",
+ "b/2",
+ "b/02",
+ "b/3",
+ "b/03",
+ "b1",
+ "b01",
+ "b3",
+ "c2.10",
+ "c10.2",
+ "d1",
+ "d12",
+ "d12a",
+ }
+ m := make(map[interface{}]int)
+ for _, k := range order {
+ m[k] = 1
+ }
+ data, err := yaml.Marshal(m)
+ c.Assert(err, IsNil)
+ out := "\n" + string(data)
+ last := 0
+ for i, k := range order {
+ repr := fmt.Sprint(k)
+ if s, ok := k.(string); ok {
+ if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
+ repr = `"` + repr + `"`
+ }
+ }
+ index := strings.Index(out, "\n"+repr+":")
+ if index == -1 {
+ c.Fatalf("%#v is not in the output: %#v", k, out)
+ }
+ if index < last {
+ c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
+ }
+ last = index
+ }
+}
diff --git a/vendor/gopkg.in/yaml.v2/suite_test.go b/vendor/gopkg.in/yaml.v2/suite_test.go
new file mode 100644
index 000000000..c5cf1ed4f
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/suite_test.go
@@ -0,0 +1,12 @@
+package yaml_test
+
+import (
+ . "gopkg.in/check.v1"
+ "testing"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type S struct{}
+
+var _ = Suite(&S{})