author    Christopher Speller <crspeller@gmail.com>    2017-03-13 12:54:22 -0400
committer GitHub <noreply@github.com>    2017-03-13 12:54:22 -0400
commit    c281ee3b61e8ab53ff118866d72618ae8cce582b (patch)
tree      776e7bdf6c8bfbb9a1dee5976496ab065959991f /vendor/github.com
parent    3ada7a41a7fb13abef19dd63dc56b720900dbaa9 (diff)
Updating server dependencies. Also adding github.com/jaytaylor/html2text and gopkg.in/gomail.v2 (#5748)
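
For the newly vendored html2text dependency, the snippet below is a minimal sketch (not part of this commit) of how a server might produce a plaintext alternative for an HTML notification email. `html2text.FromString` is taken from the package README vendored in this commit; the `htmlBody` value is a hypothetical placeholder.

```go
// Sketch only: generate a plaintext fallback from an HTML email body
// using the newly vendored github.com/jaytaylor/html2text package.
package main

import (
	"fmt"

	"github.com/jaytaylor/html2text"
)

func main() {
	// Hypothetical HTML body; real callers would pass their rendered template.
	htmlBody := `<h1>Welcome</h1><p>See <a href="https://example.com">the docs</a> for details.</p>`

	// FromString is the entry point documented in the vendored README below.
	textBody, err := html2text.FromString(htmlBody)
	if err != nil {
		// On conversion failure, fall back to the raw HTML rather than failing the send.
		textBody = htmlBody
	}
	fmt.Println(textBody)
}
```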
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/goamz/goamz/aws/aws.go | 1
-rw-r--r--  vendor/github.com/goamz/goamz/aws/regions.go | 23
-rw-r--r--  vendor/github.com/goamz/goamz/sqs/sqs.go | 2
-rw-r--r--  vendor/github.com/golang/protobuf/descriptor/descriptor.go | 2
-rw-r--r--  vendor/github.com/golang/protobuf/descriptor/descriptor_test.go | 2
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile | 6
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go | 7
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden | 7
-rw-r--r--  vendor/github.com/jaytaylor/html2text/.gitignore | 24
-rw-r--r--  vendor/github.com/jaytaylor/html2text/.travis.yml | 14
-rw-r--r--  vendor/github.com/jaytaylor/html2text/LICENSE | 22
-rw-r--r--  vendor/github.com/jaytaylor/html2text/README.md | 116
-rw-r--r--  vendor/github.com/jaytaylor/html2text/html2text.go | 312
-rw-r--r--  vendor/github.com/jaytaylor/html2text/html2text_test.go | 674
-rwxr-xr-x  vendor/github.com/jaytaylor/html2text/testdata/utf8.html | 22
-rwxr-xr-x  vendor/github.com/jaytaylor/html2text/testdata/utf8_with_bom.xhtml | 24
-rw-r--r--  vendor/github.com/lib/pq/README.md | 2
-rw-r--r--  vendor/github.com/lib/pq/conn.go | 92
-rw-r--r--  vendor/github.com/lib/pq/conn_go18.go | 35
-rw-r--r--  vendor/github.com/lib/pq/conn_test.go | 108
-rw-r--r--  vendor/github.com/lib/pq/go18_test.go | 15
-rw-r--r--  vendor/github.com/lib/pq/listen_example/doc.go | 24
-rw-r--r--  vendor/github.com/lib/pq/notify_test.go | 24
-rw-r--r--  vendor/github.com/lib/pq/oid/types.go | 12
-rw-r--r--  vendor/github.com/lib/pq/ssl.go | 119
-rw-r--r--  vendor/github.com/lib/pq/ssl_permissions.go | 16
-rw-r--r--  vendor/github.com/lib/pq/ssl_test.go | 186
-rw-r--r--  vendor/github.com/lib/pq/ssl_windows.go | 10
-rw-r--r--  vendor/github.com/miekg/dns/README.md | 4
-rw-r--r--  vendor/github.com/miekg/dns/client.go | 20
-rw-r--r--  vendor/github.com/miekg/dns/client_test.go | 63
-rw-r--r--  vendor/github.com/miekg/dns/clientconfig.go | 32
-rw-r--r--  vendor/github.com/miekg/dns/clientconfig_test.go | 37
-rw-r--r--  vendor/github.com/miekg/dns/compress_generate.go | 184
-rw-r--r--  vendor/github.com/miekg/dns/dane.go | 9
-rw-r--r--  vendor/github.com/miekg/dns/dns_test.go | 17
-rw-r--r--  vendor/github.com/miekg/dns/dnssec.go | 21
-rw-r--r--  vendor/github.com/miekg/dns/dnssec_keygen.go | 16
-rw-r--r--  vendor/github.com/miekg/dns/dnssec_keyscan.go | 2
-rw-r--r--  vendor/github.com/miekg/dns/edns.go | 12
-rw-r--r--  vendor/github.com/miekg/dns/idn/punycode.go | 3
-rw-r--r--  vendor/github.com/miekg/dns/msg.go | 188
-rw-r--r--  vendor/github.com/miekg/dns/nsecx.go | 11
-rw-r--r--  vendor/github.com/miekg/dns/parse_test.go | 6
-rw-r--r--  vendor/github.com/miekg/dns/reverse.go | 4
-rw-r--r--  vendor/github.com/miekg/dns/scan.go | 8
-rw-r--r--  vendor/github.com/miekg/dns/scan_rr.go | 116
-rw-r--r--  vendor/github.com/miekg/dns/server_test.go | 4
-rw-r--r--  vendor/github.com/miekg/dns/sig0.go | 5
-rw-r--r--  vendor/github.com/miekg/dns/smimea.go | 8
-rw-r--r--  vendor/github.com/miekg/dns/tsig.go | 3
-rw-r--r--  vendor/github.com/miekg/dns/types.go | 2
-rw-r--r--  vendor/github.com/miekg/dns/types_generate.go | 2
-rw-r--r--  vendor/github.com/miekg/dns/udp.go | 26
-rw-r--r--  vendor/github.com/miekg/dns/udp_linux.go | 25
-rw-r--r--  vendor/github.com/miekg/dns/udp_other.go | 6
-rw-r--r--  vendor/github.com/miekg/dns/udp_plan9.go | 34
-rw-r--r--  vendor/github.com/miekg/dns/udp_windows.go | 9
-rw-r--r--  vendor/github.com/miekg/dns/update_test.go | 36
-rw-r--r--  vendor/github.com/miekg/dns/zcompress.go | 119
-rw-r--r--  vendor/github.com/miekg/dns/ztypes.go | 58
-rw-r--r--  vendor/github.com/prometheus/client_model/AUTHORS.md | 13
-rw-r--r--  vendor/github.com/prometheus/client_model/CONTRIBUTING.md | 8
-rw-r--r--  vendor/github.com/prometheus/client_model/MAINTAINERS.md | 1
-rw-r--r--  vendor/github.com/prometheus/common/AUTHORS.md | 11
-rw-r--r--  vendor/github.com/prometheus/common/CONTRIBUTING.md | 6
-rw-r--r--  vendor/github.com/prometheus/common/MAINTAINERS.md | 1
-rw-r--r--  vendor/github.com/prometheus/common/model/value.go | 5
-rw-r--r--  vendor/github.com/prometheus/common/model/value_test.go | 53
-rw-r--r--  vendor/github.com/prometheus/procfs/AUTHORS.md | 21
-rw-r--r--  vendor/github.com/prometheus/procfs/CONTRIBUTING.md | 6
-rw-r--r--  vendor/github.com/prometheus/procfs/MAINTAINERS.md | 1
-rw-r--r--  vendor/github.com/prometheus/procfs/README.md | 1
-rw-r--r--  vendor/github.com/prometheus/procfs/buddyinfo.go | 95
-rw-r--r--  vendor/github.com/prometheus/procfs/buddyinfo_test.go | 64
-rw-r--r--  vendor/github.com/prometheus/procfs/fixtures/buddyinfo/short/buddyinfo | 3
-rw-r--r--  vendor/github.com/prometheus/procfs/fixtures/buddyinfo/sizemismatch/buddyinfo | 3
-rw-r--r--  vendor/github.com/prometheus/procfs/fixtures/buddyinfo/valid/buddyinfo | 3
-rw-r--r--  vendor/github.com/prometheus/procfs/fixtures/fs/xfs/stat | 23
-rw-r--r--  vendor/github.com/prometheus/procfs/fs.go | 13
-rw-r--r--  vendor/github.com/prometheus/procfs/fs_test.go | 13
-rw-r--r--  vendor/github.com/prometheus/procfs/ipvs_test.go | 16
-rw-r--r--  vendor/github.com/prometheus/procfs/mdstat_test.go | 14
-rw-r--r--  vendor/github.com/prometheus/procfs/mountstats.go | 4
-rw-r--r--  vendor/github.com/prometheus/procfs/mountstats_test.go | 9
-rw-r--r--  vendor/github.com/prometheus/procfs/xfs/parse.go | 361
-rw-r--r--  vendor/github.com/prometheus/procfs/xfs/parse_test.go | 446
-rw-r--r--  vendor/github.com/prometheus/procfs/xfs/xfs.go | 158
-rw-r--r--  vendor/github.com/spf13/cobra/.travis.yml | 5
-rw-r--r--  vendor/github.com/spf13/cobra/README.md | 3
-rw-r--r--  vendor/github.com/spf13/cobra/command.go | 62
-rw-r--r--  vendor/github.com/spf13/cobra/doc/man_docs.go | 4
-rw-r--r--  vendor/github.com/spf13/cobra/doc/md_docs.go | 4
-rw-r--r--  vendor/github.com/spf13/cobra/doc/util.go | 2
-rw-r--r--  vendor/github.com/spf13/cobra/doc/yaml_docs.go | 4
-rw-r--r--  vendor/github.com/tylerb/graceful/README.md | 5
-rw-r--r--  vendor/github.com/tylerb/graceful/graceful.go | 1
-rw-r--r--  vendor/github.com/xenolf/lego/README.md | 6
-rw-r--r--  vendor/github.com/xenolf/lego/acme/client.go | 9
-rw-r--r--  vendor/github.com/xenolf/lego/acme/client_test.go | 71
-rw-r--r--  vendor/github.com/xenolf/lego/acme/error.go | 15
-rw-r--r--  vendor/github.com/xenolf/lego/acme/http.go | 10
-rw-r--r--  vendor/github.com/xenolf/lego/acme/jws.go | 93
-rw-r--r--  vendor/github.com/xenolf/lego/providers/http/memcached/memcached.go | 3
104 files changed, 3818 insertions, 822 deletions
diff --git a/vendor/github.com/goamz/goamz/aws/aws.go b/vendor/github.com/goamz/goamz/aws/aws.go
index 49eed632e..a4d359160 100644
--- a/vendor/github.com/goamz/goamz/aws/aws.go
+++ b/vendor/github.com/goamz/goamz/aws/aws.go
@@ -71,6 +71,7 @@ var Regions = map[string]Region{
EUCentral.Name: EUCentral,
EUWest.Name: EUWest,
USEast.Name: USEast,
+ USEast2.Name: USEast2,
USWest.Name: USWest,
USWest2.Name: USWest2,
USGovWest.Name: USGovWest,
diff --git a/vendor/github.com/goamz/goamz/aws/regions.go b/vendor/github.com/goamz/goamz/aws/regions.go
index 94d79d46d..c75d41e11 100644
--- a/vendor/github.com/goamz/goamz/aws/regions.go
+++ b/vendor/github.com/goamz/goamz/aws/regions.go
@@ -46,6 +46,29 @@ var USEast = Region{
"https://streams.dynamodb.us-east-1.amazonaws.com",
}
+var USEast2 = Region{
+ "us-east-2",
+ "https://ec2.us-east-2.amazonaws.com",
+ "https://s3.amazonaws.com",
+ "",
+ true,
+ true,
+ "",
+ "",
+ "https://sns.us-east-2.amazonaws.com",
+ "https://sqs.us-east-2.amazonaws.com",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.us-east-2.amazonaws.com",
+ "https://dynamodb.us-east-2.amazonaws.com",
+ ServiceInfo{"https://monitoring.us-east-2.amazonaws.com", V2Signature},
+ "https://autoscaling.us-east-1.amazonaws.com",
+ ServiceInfo{"https://rds.us-east-2.amazonaws.com", V2Signature},
+ "https://sts.amazonaws.com",
+ "https://cloudformation.us-east-2.amazonaws.com",
+ "https://ecs.us-east-2.amazonaws.com",
+ "https://streams.dynamodb.us-east-2.amazonaws.com",
+}
+
var USWest = Region{
"us-west-1",
"https://ec2.us-west-1.amazonaws.com",
diff --git a/vendor/github.com/goamz/goamz/sqs/sqs.go b/vendor/github.com/goamz/goamz/sqs/sqs.go
index 23f1951ab..b54b698a8 100644
--- a/vendor/github.com/goamz/goamz/sqs/sqs.go
+++ b/vendor/github.com/goamz/goamz/sqs/sqs.go
@@ -48,6 +48,8 @@ func NewFrom(accessKey, secretKey, region string) (*SQS, error) {
switch region {
case "us.east", "us.east.1":
aws_region = aws.USEast
+ case "us.east.2":
+ aws_region = aws.USEast2
case "us.west", "us.west.1":
aws_region = aws.USWest
case "us.west.2":
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
index 83607ff6c..ac7e51bfb 100644
--- a/vendor/github.com/golang/protobuf/descriptor/descriptor.go
+++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
@@ -43,7 +43,7 @@ import (
"io/ioutil"
"github.com/golang/protobuf/proto"
- protobuf "google.golang.org/genproto/protobuf"
+ protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
)
// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go
index 282a1e3a7..27b0729cb 100644
--- a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go
+++ b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go
@@ -6,7 +6,7 @@ import (
"github.com/golang/protobuf/descriptor"
tpb "github.com/golang/protobuf/proto/testdata"
- protobuf "google.golang.org/genproto/protobuf"
+ protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
)
func TestMessage(t *testing.T) {
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
index b1ac45c77..a0bf9fefd 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
@@ -45,9 +45,9 @@ my_test/test.pb.go: my_test/test.proto
golden:
make -B my_test/test.pb.go
- sed -i '/return.*fileDescriptor/d' my_test/test.pb.go
- sed -i '/^var fileDescriptor/,/^}/d' my_test/test.pb.go
- sed -i '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go
+ sed -i -e '/return.*fileDescriptor/d' my_test/test.pb.go
+ sed -i -e '/^var fileDescriptor/,/^}/d' my_test/test.pb.go
+ sed -i -e '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go
gofmt -w my_test/test.pb.go
diff -w my_test/test.pb.go my_test/test.pb.go.golden
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
index 9ec3e1292..d8717d57d 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
@@ -319,7 +319,7 @@ func (m *Reply) GetCompactKeys() []int32 {
type Reply_Entry struct {
KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"`
Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"`
- XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"`
+ XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@@ -389,6 +389,7 @@ var E_ReplyExtensions_Time = &proto.ExtensionDesc{
Field: 101,
Name: "my.test.ReplyExtensions.time",
Tag: "fixed64,101,opt,name=time",
+ Filename: "my_test/test.proto",
}
var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
@@ -397,6 +398,7 @@ var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
Field: 105,
Name: "my.test.ReplyExtensions.carrot",
Tag: "bytes,105,opt,name=carrot",
+ Filename: "my_test/test.proto",
}
var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
@@ -405,6 +407,7 @@ var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
Field: 101,
Name: "my.test.ReplyExtensions.donut",
Tag: "bytes,101,opt,name=donut",
+ Filename: "my_test/test.proto",
}
type OtherReplyExtensions struct {
@@ -832,6 +835,7 @@ var E_Tag = &proto.ExtensionDesc{
Field: 103,
Name: "my.test.tag",
Tag: "bytes,103,opt,name=tag",
+ Filename: "my_test/test.proto",
}
var E_Donut = &proto.ExtensionDesc{
@@ -840,6 +844,7 @@ var E_Donut = &proto.ExtensionDesc{
Field: 106,
Name: "my.test.donut",
Tag: "bytes,106,opt,name=donut",
+ Filename: "my_test/test.proto",
}
func init() {
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
index 9ec3e1292..d8717d57d 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
@@ -319,7 +319,7 @@ func (m *Reply) GetCompactKeys() []int32 {
type Reply_Entry struct {
KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"`
Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"`
- XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"`
+ XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@@ -389,6 +389,7 @@ var E_ReplyExtensions_Time = &proto.ExtensionDesc{
Field: 101,
Name: "my.test.ReplyExtensions.time",
Tag: "fixed64,101,opt,name=time",
+ Filename: "my_test/test.proto",
}
var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
@@ -397,6 +398,7 @@ var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
Field: 105,
Name: "my.test.ReplyExtensions.carrot",
Tag: "bytes,105,opt,name=carrot",
+ Filename: "my_test/test.proto",
}
var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
@@ -405,6 +407,7 @@ var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
Field: 101,
Name: "my.test.ReplyExtensions.donut",
Tag: "bytes,101,opt,name=donut",
+ Filename: "my_test/test.proto",
}
type OtherReplyExtensions struct {
@@ -832,6 +835,7 @@ var E_Tag = &proto.ExtensionDesc{
Field: 103,
Name: "my.test.tag",
Tag: "bytes,103,opt,name=tag",
+ Filename: "my_test/test.proto",
}
var E_Donut = &proto.ExtensionDesc{
@@ -840,6 +844,7 @@ var E_Donut = &proto.ExtensionDesc{
Field: 106,
Name: "my.test.donut",
Tag: "bytes,106,opt,name=donut",
+ Filename: "my_test/test.proto",
}
func init() {
diff --git a/vendor/github.com/jaytaylor/html2text/.gitignore b/vendor/github.com/jaytaylor/html2text/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/jaytaylor/html2text/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/jaytaylor/html2text/.travis.yml b/vendor/github.com/jaytaylor/html2text/.travis.yml
new file mode 100644
index 000000000..6c7f48efd
--- /dev/null
+++ b/vendor/github.com/jaytaylor/html2text/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+go:
+ - tip
+ - 1.8
+ - 1.7
+ - 1.6
+ - 1.5
+ - 1.4
+ - 1.3
+ - 1.2
+notifications:
+ email:
+ on_success: change
+ on_failure: always
diff --git a/vendor/github.com/jaytaylor/html2text/LICENSE b/vendor/github.com/jaytaylor/html2text/LICENSE
new file mode 100644
index 000000000..24dc4abec
--- /dev/null
+++ b/vendor/github.com/jaytaylor/html2text/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Jay Taylor
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/jaytaylor/html2text/README.md b/vendor/github.com/jaytaylor/html2text/README.md
new file mode 100644
index 000000000..ac1124739
--- /dev/null
+++ b/vendor/github.com/jaytaylor/html2text/README.md
@@ -0,0 +1,116 @@
+# html2text
+
+[![Documentation](https://godoc.org/github.com/jaytaylor/html2text?status.svg)](https://godoc.org/github.com/jaytaylor/html2text)
+[![Build Status](https://travis-ci.org/jaytaylor/html2text.svg?branch=master)](https://travis-ci.org/jaytaylor/html2text)
+[![Report Card](https://goreportcard.com/badge/github.com/jaytaylor/html2text)](https://goreportcard.com/report/github.com/jaytaylor/html2text)
+
+### Converts HTML into text
+
+
+## Introduction
+
+Ensure your emails are readable by all!
+
+Turns HTML into raw text, useful for sending fancy HTML emails with a equivalently nicely formatted TXT document as a fallback (e.g. for people who don't allow HTML emails or have other display issues).
+
+html2text is a simple golang package for rendering HTML into plaintext.
+
+There are still lots of improvements to be had, but FWIW this has worked fine for my [basic] HTML-2-text needs.
+
+It requires go 1.x or newer ;)
+
+
+## Download the package
+
+```bash
+go get github.com/jaytaylor/html2text
+```
+
+## Example usage
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaytaylor/html2text"
+)
+
+func main() {
+ inputHtml := `
+ <html>
+ <head>
+ <title>My Mega Service</title>
+ <link rel=\"stylesheet\" href=\"main.css\">
+ <style type=\"text/css\">body { color: #fff; }</style>
+ </head>
+
+ <body>
+ <div class="logo">
+ <a href="http://mymegaservice.com/"><img src="/logo-image.jpg" alt="Mega Service"/></a>
+ </div>
+
+ <h1>Welcome to your new account on my service!</h1>
+
+ <p>
+ Here is some more information:
+
+ <ul>
+ <li>Link 1: <a href="https://example.com">Example.com</a></li>
+ <li>Link 2: <a href="https://example2.com">Example2.com</a></li>
+ <li>Something else</li>
+ </ul>
+ </p>
+ </body>
+ </html>
+ `
+
+ text, err := html2text.FromString(inputHtml)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(text)
+}
+```
+
+Output:
+```
+Mega Service ( http://mymegaservice.com/ )
+
+******************************************
+Welcome to your new account on my service!
+******************************************
+
+Here is some more information:
+
+* Link 1: Example.com ( https://example.com )
+* Link 2: Example2.com ( https://example2.com )
+* Something else
+```
+
+
+## Unit-tests
+
+Running the unit-tests is straightforward and standard:
+
+```bash
+go test
+```
+
+
+# License
+
+Permissive MIT license.
+
+
+## Contact
+
+You are more than welcome to open issues and send pull requests if you find a bug or want a new feature.
+
+If you appreciate this library please feel free to drop me a line and tell me! It's always nice to hear from people who have benefitted from my work.
+
+Email: jay at (my github username).com
+
+Twitter: [@jtaylor](https://twitter.com/jtaylor)
+
diff --git a/vendor/github.com/jaytaylor/html2text/html2text.go b/vendor/github.com/jaytaylor/html2text/html2text.go
new file mode 100644
index 000000000..2a013a039
--- /dev/null
+++ b/vendor/github.com/jaytaylor/html2text/html2text.go
@@ -0,0 +1,312 @@
+package html2text
+
+import (
+ "bytes"
+ "io"
+ "regexp"
+ "strings"
+ "unicode"
+
+ "github.com/ssor/bom"
+
+ "golang.org/x/net/html"
+ "golang.org/x/net/html/atom"
+)
+
+var (
+ spacingRe = regexp.MustCompile(`[ \r\n\t]+`)
+ newlineRe = regexp.MustCompile(`\n\n+`)
+)
+
+type textifyTraverseCtx struct {
+ Buf bytes.Buffer
+
+ prefix string
+ blockquoteLevel int
+ lineLength int
+ endsWithSpace bool
+ endsWithNewline bool
+ justClosedDiv bool
+}
+
+func (ctx *textifyTraverseCtx) traverse(node *html.Node) error {
+ switch node.Type {
+ default:
+ return ctx.traverseChildren(node)
+
+ case html.TextNode:
+ data := strings.Trim(spacingRe.ReplaceAllString(node.Data, " "), " ")
+ return ctx.emit(data)
+
+ case html.ElementNode:
+ return ctx.handleElementNode(node)
+ }
+}
+
+func (ctx *textifyTraverseCtx) handleElementNode(node *html.Node) error {
+ ctx.justClosedDiv = false
+ switch node.DataAtom {
+ case atom.Br:
+ return ctx.emit("\n")
+
+ case atom.H1, atom.H2, atom.H3:
+ subCtx := textifyTraverseCtx{}
+ if err := subCtx.traverseChildren(node); err != nil {
+ return err
+ }
+
+ str := subCtx.Buf.String()
+ dividerLen := 0
+ for _, line := range strings.Split(str, "\n") {
+ if lineLen := len([]rune(line)); lineLen-1 > dividerLen {
+ dividerLen = lineLen - 1
+ }
+ }
+ divider := ""
+ if node.DataAtom == atom.H1 {
+ divider = strings.Repeat("*", dividerLen)
+ } else {
+ divider = strings.Repeat("-", dividerLen)
+ }
+
+ if node.DataAtom == atom.H3 {
+ return ctx.emit("\n\n" + str + "\n" + divider + "\n\n")
+ }
+ return ctx.emit("\n\n" + divider + "\n" + str + "\n" + divider + "\n\n")
+
+ case atom.Blockquote:
+ ctx.blockquoteLevel++
+ ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel) + " "
+ if err := ctx.emit("\n"); err != nil {
+ return err
+ }
+ if ctx.blockquoteLevel == 1 {
+ if err := ctx.emit("\n"); err != nil {
+ return err
+ }
+ }
+ if err := ctx.traverseChildren(node); err != nil {
+ return err
+ }
+ ctx.blockquoteLevel--
+ ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel)
+ if ctx.blockquoteLevel > 0 {
+ ctx.prefix += " "
+ }
+ return ctx.emit("\n\n")
+
+ case atom.Div:
+ if ctx.lineLength > 0 {
+ if err := ctx.emit("\n"); err != nil {
+ return err
+ }
+ }
+ if err := ctx.traverseChildren(node); err != nil {
+ return err
+ }
+ var err error
+ if ctx.justClosedDiv == false {
+ err = ctx.emit("\n")
+ }
+ ctx.justClosedDiv = true
+ return err
+
+ case atom.Li:
+ if err := ctx.emit("* "); err != nil {
+ return err
+ }
+
+ if err := ctx.traverseChildren(node); err != nil {
+ return err
+ }
+
+ return ctx.emit("\n")
+
+ case atom.B, atom.Strong:
+ subCtx := textifyTraverseCtx{}
+ subCtx.endsWithSpace = true
+ if err := subCtx.traverseChildren(node); err != nil {
+ return err
+ }
+ str := subCtx.Buf.String()
+ return ctx.emit("*" + str + "*")
+
+ case atom.A:
+ // If image is the only child, take its alt text as the link text
+ if img := node.FirstChild; img != nil && node.LastChild == img && img.DataAtom == atom.Img {
+ if altText := getAttrVal(img, "alt"); altText != "" {
+ ctx.emit(altText)
+ }
+ } else if err := ctx.traverseChildren(node); err != nil {
+ return err
+ }
+
+ hrefLink := ""
+ if attrVal := getAttrVal(node, "href"); attrVal != "" {
+ attrVal = ctx.normalizeHrefLink(attrVal)
+ if attrVal != "" {
+ hrefLink = "( " + attrVal + " )"
+ }
+ }
+
+ return ctx.emit(hrefLink)
+
+ case atom.P, atom.Ul, atom.Table:
+ if err := ctx.emit("\n\n"); err != nil {
+ return err
+ }
+
+ if err := ctx.traverseChildren(node); err != nil {
+ return err
+ }
+
+ return ctx.emit("\n\n")
+
+ case atom.Tr:
+ if err := ctx.traverseChildren(node); err != nil {
+ return err
+ }
+
+ return ctx.emit("\n")
+
+ case atom.Style, atom.Script, atom.Head:
+ // Ignore the subtree
+ return nil
+
+ default:
+ return ctx.traverseChildren(node)
+ }
+}
+func (ctx *textifyTraverseCtx) traverseChildren(node *html.Node) error {
+ for c := node.FirstChild; c != nil; c = c.NextSibling {
+ if err := ctx.traverse(c); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ctx *textifyTraverseCtx) emit(data string) error {
+ if len(data) == 0 {
+ return nil
+ }
+ lines := ctx.breakLongLines(data)
+ var err error
+ for _, line := range lines {
+ runes := []rune(line)
+ startsWithSpace := unicode.IsSpace(runes[0])
+ if !startsWithSpace && !ctx.endsWithSpace {
+ ctx.Buf.WriteByte(' ')
+ ctx.lineLength++
+ }
+ ctx.endsWithSpace = unicode.IsSpace(runes[len(runes)-1])
+ for _, c := range line {
+ _, err = ctx.Buf.WriteString(string(c))
+ if err != nil {
+ return err
+ }
+ ctx.lineLength++
+ if c == '\n' {
+ ctx.lineLength = 0
+ if ctx.prefix != "" {
+ _, err = ctx.Buf.WriteString(ctx.prefix)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (ctx *textifyTraverseCtx) breakLongLines(data string) []string {
+ // only break lines when we are in blockquotes
+ if ctx.blockquoteLevel == 0 {
+ return []string{data}
+ }
+ var ret []string
+ runes := []rune(data)
+ l := len(runes)
+ existing := ctx.lineLength
+ if existing >= 74 {
+ ret = append(ret, "\n")
+ existing = 0
+ }
+ for l+existing > 74 {
+ i := 74 - existing
+ for i >= 0 && !unicode.IsSpace(runes[i]) {
+ i--
+ }
+ if i == -1 {
+ // no spaces, so go the other way
+ i = 74 - existing
+ for i < l && !unicode.IsSpace(runes[i]) {
+ i++
+ }
+ }
+ ret = append(ret, string(runes[:i])+"\n")
+ for i < l && unicode.IsSpace(runes[i]) {
+ i++
+ }
+ runes = runes[i:]
+ l = len(runes)
+ existing = 0
+ }
+ if len(runes) > 0 {
+ ret = append(ret, string(runes))
+ }
+ return ret
+}
+
+func (ctx *textifyTraverseCtx) normalizeHrefLink(link string) string {
+ link = strings.TrimSpace(link)
+ link = strings.TrimPrefix(link, "mailto:")
+ return link
+}
+
+func getAttrVal(node *html.Node, attrName string) string {
+ for _, attr := range node.Attr {
+ if attr.Key == attrName {
+ return attr.Val
+ }
+ }
+
+ return ""
+}
+
+func FromHtmlNode(doc *html.Node) (string, error) {
+ ctx := textifyTraverseCtx{
+ Buf: bytes.Buffer{},
+ }
+ if err := ctx.traverse(doc); err != nil {
+ return "", err
+ }
+
+ text := strings.TrimSpace(newlineRe.ReplaceAllString(
+ strings.Replace(ctx.Buf.String(), "\n ", "\n", -1), "\n\n"))
+ return text, nil
+
+}
+
+func FromReader(reader io.Reader) (string, error) {
+ newReader, err := bom.NewReaderWithoutBom(reader)
+ if err != nil {
+ return "", err
+ }
+ doc, err := html.Parse(newReader)
+ if err != nil {
+ return "", err
+ }
+ return FromHtmlNode(doc)
+}
+
+func FromString(input string) (string, error) {
+ bs := bom.CleanBom([]byte(input))
+ text, err := FromReader(bytes.NewReader(bs))
+ if err != nil {
+ return "", err
+ }
+ return text, nil
+}
diff --git a/vendor/github.com/jaytaylor/html2text/html2text_test.go b/vendor/github.com/jaytaylor/html2text/html2text_test.go
new file mode 100644
index 000000000..b30d68ac9
--- /dev/null
+++ b/vendor/github.com/jaytaylor/html2text/html2text_test.go
@@ -0,0 +1,674 @@
+package html2text
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "path"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+const (
+ destPath = "testdata"
+)
+
+func TestParseUTF8(t *testing.T) {
+ htmlFiles := []struct {
+ file string
+ keywordShouldNotExist string
+ keywordShouldExist string
+ }{
+ {
+ "utf8.html",
+ "学习之道:美国公认学习第一书title",
+ "次世界冠军赛上,我几近疯狂",
+ },
+ {
+ "utf8_with_bom.xhtml",
+ "1892年波兰文版序言title",
+ "种新的波兰文本已成为必要",
+ },
+ }
+
+ for _, htmlFile := range htmlFiles {
+ bs, err := ioutil.ReadFile(path.Join(destPath, htmlFile.file))
+ if err != nil {
+ t.Fatal(err)
+ }
+ text, err := FromReader(bytes.NewReader(bs))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !strings.Contains(text, htmlFile.keywordShouldExist) {
+ t.Fatalf("keyword %s should exists in file %s", htmlFile.keywordShouldExist, htmlFile.file)
+ }
+ if strings.Contains(text, htmlFile.keywordShouldNotExist) {
+ t.Fatalf("keyword %s should not exists in file %s", htmlFile.keywordShouldNotExist, htmlFile.file)
+ }
+ }
+}
+
+func TestStrippingWhitespace(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ "test text",
+ "test text",
+ },
+ {
+ " \ttext\ntext\n",
+ "text text",
+ },
+ {
+ " \na \n\t \n \n a \t",
+ "a a",
+ },
+ {
+ "test text",
+ "test text",
+ },
+ {
+ "test&nbsp;&nbsp;&nbsp; text&nbsp;",
+ "test    text",
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+}
+
+func TestParagraphsAndBreaks(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ "Test text",
+ "Test text",
+ },
+ {
+ "Test text<br>",
+ "Test text",
+ },
+ {
+ "Test text<br>Test",
+ "Test text\nTest",
+ },
+ {
+ "<p>Test text</p>",
+ "Test text",
+ },
+ {
+ "<p>Test text</p><p>Test text</p>",
+ "Test text\n\nTest text",
+ },
+ {
+ "\n<p>Test text</p>\n\n\n\t<p>Test text</p>\n",
+ "Test text\n\nTest text",
+ },
+ {
+ "\n<p>Test text<br/>Test text</p>\n",
+ "Test text\nTest text",
+ },
+ {
+ "\n<p>Test text<br> \tTest text<br></p>\n",
+ "Test text\nTest text",
+ },
+ {
+ "Test text<br><BR />Test text",
+ "Test text\n\nTest text",
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+}
+
+func TestTables(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ "<table><tr><td></td><td></td></tr></table>",
+ "",
+ },
+ {
+ "<table><tr><td>cell1</td><td>cell2</td></tr></table>",
+ "cell1 cell2",
+ },
+ {
+ "<table><tr><td>row1</td></tr><tr><td>row2</td></tr></table>",
+ "row1\nrow2",
+ },
+ {
+ `<table>
+ <tr><td>cell1-1</td><td>cell1-2</td></tr>
+ <tr><td>cell2-1</td><td>cell2-2</td></tr>
+ </table>`,
+ "cell1-1 cell1-2\ncell2-1 cell2-2",
+ },
+ {
+ "_<table><tr><td>cell</td></tr></table>_",
+ "_\n\ncell\n\n_",
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+}
+
+func TestStrippingLists(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ "<ul></ul>",
+ "",
+ },
+ {
+ "<ul><li>item</li></ul>_",
+ "* item\n\n_",
+ },
+ {
+ "<li class='123'>item 1</li> <li>item 2</li>\n_",
+ "* item 1\n* item 2\n_",
+ },
+ {
+ "<li>item 1</li> \t\n <li>item 2</li> <li> item 3</li>\n_",
+ "* item 1\n* item 2\n* item 3\n_",
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+}
+
+func TestLinks(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ `<a></a>`,
+ ``,
+ },
+ {
+ `<a href=""></a>`,
+ ``,
+ },
+ {
+ `<a href="http://example.com/"></a>`,
+ `( http://example.com/ )`,
+ },
+ {
+ `<a href="">Link</a>`,
+ `Link`,
+ },
+ {
+ `<a href="http://example.com/">Link</a>`,
+ `Link ( http://example.com/ )`,
+ },
+ {
+ `<a href="http://example.com/"><span class="a">Link</span></a>`,
+ `Link ( http://example.com/ )`,
+ },
+ {
+ "<a href='http://example.com/'>\n\t<span class='a'>Link</span>\n\t</a>",
+ `Link ( http://example.com/ )`,
+ },
+ {
+ "<a href='mailto:contact@example.org'>Contact Us</a>",
+ `Contact Us ( contact@example.org )`,
+ },
+ {
+ "<a href=\"http://example.com:80/~user?aaa=bb&amp;c=d,e,f#foo\">Link</a>",
+ `Link ( http://example.com:80/~user?aaa=bb&c=d,e,f#foo )`,
+ },
+ {
+ "<a title='title' href=\"http://example.com/\">Link</a>",
+ `Link ( http://example.com/ )`,
+ },
+ {
+ "<a href=\" http://example.com/ \"> Link </a>",
+ `Link ( http://example.com/ )`,
+ },
+ {
+ "<a href=\"http://example.com/a/\">Link A</a> <a href=\"http://example.com/b/\">Link B</a>",
+ `Link A ( http://example.com/a/ ) Link B ( http://example.com/b/ )`,
+ },
+ {
+ "<a href=\"%%LINK%%\">Link</a>",
+ `Link ( %%LINK%% )`,
+ },
+ {
+ "<a href=\"[LINK]\">Link</a>",
+ `Link ( [LINK] )`,
+ },
+ {
+ "<a href=\"{LINK}\">Link</a>",
+ `Link ( {LINK} )`,
+ },
+ {
+ "<a href=\"[[!unsubscribe]]\">Link</a>",
+ `Link ( [[!unsubscribe]] )`,
+ },
+ {
+ "<p>This is <a href=\"http://www.google.com\" >link1</a> and <a href=\"http://www.google.com\" >link2 </a> is next.</p>",
+ `This is link1 ( http://www.google.com ) and link2 ( http://www.google.com ) is next.`,
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+}
+
+func TestImageAltTags(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ `<img />`,
+ ``,
+ },
+ {
+ `<img src="http://example.ru/hello.jpg" />`,
+ ``,
+ },
+ {
+ `<img alt="Example"/>`,
+ ``,
+ },
+ {
+ `<img src="http://example.ru/hello.jpg" alt="Example"/>`,
+ ``,
+ },
+ // Images do matter if they are in a link
+ {
+ `<a href="http://example.com/"><img src="http://example.ru/hello.jpg" alt="Example"/></a>`,
+ `Example ( http://example.com/ )`,
+ },
+ {
+ `<a href="http://example.com/"><img src="http://example.ru/hello.jpg" alt="Example"></a>`,
+ `Example ( http://example.com/ )`,
+ },
+ {
+ `<a href='http://example.com/'><img src='http://example.ru/hello.jpg' alt='Example'/></a>`,
+ `Example ( http://example.com/ )`,
+ },
+ {
+ `<a href='http://example.com/'><img src='http://example.ru/hello.jpg' alt='Example'></a>`,
+ `Example ( http://example.com/ )`,
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+}
+
+func TestHeadings(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ "<h1>Test</h1>",
+ "****\nTest\n****",
+ },
+ {
+ "\t<h1>\nTest</h1> ",
+ "****\nTest\n****",
+ },
+ {
+ "\t<h1>\nTest line 1<br>Test 2</h1> ",
+ "***********\nTest line 1\nTest 2\n***********",
+ },
+ {
+ "<h1>Test</h1> <h1>Test</h1>",
+ "****\nTest\n****\n\n****\nTest\n****",
+ },
+ {
+ "<h2>Test</h2>",
+ "----\nTest\n----",
+ },
+ {
+ "<h1><a href='http://example.com/'>Test</a></h1>",
+ "****************************\nTest ( http://example.com/ )\n****************************",
+ },
+ {
+ "<h3> <span class='a'>Test </span></h3>",
+ "Test\n----",
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+
+}
+
+func TestBold(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ "<b>Test</b>",
+ "*Test*",
+ },
+ {
+ "\t<b>Test</b> ",
+ "*Test*",
+ },
+ {
+ "\t<b>Test line 1<br>Test 2</b> ",
+ "*Test line 1\nTest 2*",
+ },
+ {
+ "<b>Test</b> <b>Test</b>",
+ "*Test* *Test*",
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+
+}
+
+func TestDiv(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ "<div>Test</div>",
+ "Test",
+ },
+ {
+ "\t<div>Test</div> ",
+ "Test",
+ },
+ {
+ "<div>Test line 1<div>Test 2</div></div>",
+ "Test line 1\nTest 2",
+ },
+ {
+ "Test 1<div>Test 2</div> <div>Test 3</div>Test 4",
+ "Test 1\nTest 2\nTest 3\nTest 4",
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+
+}
+
+func TestBlockquotes(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ "<div>level 0<blockquote>level 1<br><blockquote>level 2</blockquote>level 1</blockquote><div>level 0</div></div>",
+ "level 0\n> \n> level 1\n> \n>> level 2\n> \n> level 1\n\nlevel 0",
+ },
+ {
+ "<blockquote>Test</blockquote>Test",
+ "> \n> Test\n\nTest",
+ },
+ {
+ "\t<blockquote> \nTest<br></blockquote> ",
+ "> \n> Test\n>",
+ },
+ {
+ "\t<blockquote> \nTest line 1<br>Test 2</blockquote> ",
+ "> \n> Test line 1\n> Test 2",
+ },
+ {
+ "<blockquote>Test</blockquote> <blockquote>Test</blockquote> Other Test",
+ "> \n> Test\n\n> \n> Test\n\nOther Test",
+ },
+ {
+ "<blockquote>Lorem ipsum Commodo id consectetur pariatur ea occaecat minim aliqua ad sit consequat quis ex commodo Duis incididunt eu mollit consectetur fugiat voluptate dolore in pariatur in commodo occaecat Ut occaecat velit esse labore aute quis commodo non sit dolore officia Excepteur cillum amet cupidatat culpa velit labore ullamco dolore mollit elit in aliqua dolor irure do</blockquote>",
+ "> \n> Lorem ipsum Commodo id consectetur pariatur ea occaecat minim aliqua ad\n> sit consequat quis ex commodo Duis incididunt eu mollit consectetur fugiat\n> voluptate dolore in pariatur in commodo occaecat Ut occaecat velit esse\n> labore aute quis commodo non sit dolore officia Excepteur cillum amet\n> cupidatat culpa velit labore ullamco dolore mollit elit in aliqua dolor\n> irure do",
+ },
+ {
+ "<blockquote>Lorem<b>ipsum</b><b>Commodo</b><b>id</b><b>consectetur</b><b>pariatur</b><b>ea</b><b>occaecat</b><b>minim</b><b>aliqua</b><b>ad</b><b>sit</b><b>consequat</b><b>quis</b><b>ex</b><b>commodo</b><b>Duis</b><b>incididunt</b><b>eu</b><b>mollit</b><b>consectetur</b><b>fugiat</b><b>voluptate</b><b>dolore</b><b>in</b><b>pariatur</b><b>in</b><b>commodo</b><b>occaecat</b><b>Ut</b><b>occaecat</b><b>velit</b><b>esse</b><b>labore</b><b>aute</b><b>quis</b><b>commodo</b><b>non</b><b>sit</b><b>dolore</b><b>officia</b><b>Excepteur</b><b>cillum</b><b>amet</b><b>cupidatat</b><b>culpa</b><b>velit</b><b>labore</b><b>ullamco</b><b>dolore</b><b>mollit</b><b>elit</b><b>in</b><b>aliqua</b><b>dolor</b><b>irure</b><b>do</b></blockquote>",
+ "> \n> Lorem *ipsum* *Commodo* *id* *consectetur* *pariatur* *ea* *occaecat* *minim*\n> *aliqua* *ad* *sit* *consequat* *quis* *ex* *commodo* *Duis* *incididunt* *eu*\n> *mollit* *consectetur* *fugiat* *voluptate* *dolore* *in* *pariatur* *in* *commodo*\n> *occaecat* *Ut* *occaecat* *velit* *esse* *labore* *aute* *quis* *commodo*\n> *non* *sit* *dolore* *officia* *Excepteur* *cillum* *amet* *cupidatat* *culpa*\n> *velit* *labore* *ullamco* *dolore* *mollit* *elit* *in* *aliqua* *dolor* *irure*\n> *do*",
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+
+}
+
+func TestIgnoreStylesScriptsHead(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {
+ "<style>Test</style>",
+ "",
+ },
+ {
+ "<style type=\"text/css\">body { color: #fff; }</style>",
+ "",
+ },
+ {
+ "<link rel=\"stylesheet\" href=\"main.css\">",
+ "",
+ },
+ {
+ "<script>Test</script>",
+ "",
+ },
+ {
+ "<script src=\"main.js\"></script>",
+ "",
+ },
+ {
+ "<script type=\"text/javascript\" src=\"main.js\"></script>",
+ "",
+ },
+ {
+ "<script type=\"text/javascript\">Test</script>",
+ "",
+ },
+ {
+ "<script type=\"text/ng-template\" id=\"template.html\"><a href=\"http://google.com\">Google</a></script>",
+ "",
+ },
+ {
+ "<script type=\"bla-bla-bla\" id=\"template.html\">Test</script>",
+ "",
+ },
+ {
+ `<html><head><title>Title</title></head><body></body></html>`,
+ "",
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertString(t, testCase.input, testCase.output)
+ }
+}
+
+func TestText(t *testing.T) {
+ testCases := []struct {
+ input string
+ expr string
+ }{
+ {
+ `<li>
+ <a href="/new" data-ga-click="Header, create new repository, icon:repo"><span class="octicon octicon-repo"></span> New repository</a>
+ </li>`,
+ `\* New repository \( /new \)`,
+ },
+ {
+ `hi
+
+ <br>
+
+ hello <a href="https://google.com">google</a>
+ <br><br>
+ test<p>List:</p>
+
+ <ul>
+ <li><a href="foo">Foo</a></li>
+ <li><a href="http://www.microshwhat.com/bar/soapy">Barsoap</a></li>
+ <li>Baz</li>
+ </ul>
+`,
+ `hi
+hello google \( https://google.com \)
+
+test
+
+List:
+
+\* Foo \( foo \)
+\* Barsoap \( http://www.microshwhat.com/bar/soapy \)
+\* Baz`,
+ },
+ // Malformed input html.
+ {
+ `hi
+
+ hello <a href="https://google.com">google</a>
+
+ test<p>List:</p>
+
+ <ul>
+ <li><a href="foo">Foo</a>
+ <li><a href="/
+ bar/baz">Bar</a>
+ <li>Baz</li>
+ </ul>
+ `,
+ `hi hello google \( https://google.com \) test
+
+List:
+
+\* Foo \( foo \)
+\* Bar \( /\n[ \t]+bar/baz \)
+\* Baz`,
+ },
+ }
+
+ for _, testCase := range testCases {
+ assertRegexp(t, testCase.input, testCase.expr)
+ }
+}
+
+type StringMatcher interface {
+ MatchString(string) bool
+ String() string
+}
+
+type RegexpStringMatcher string
+
+func (m RegexpStringMatcher) MatchString(str string) bool {
+ return regexp.MustCompile(string(m)).MatchString(str)
+}
+func (m RegexpStringMatcher) String() string {
+ return string(m)
+}
+
+type ExactStringMatcher string
+
+func (m ExactStringMatcher) MatchString(str string) bool {
+ return string(m) == str
+}
+func (m ExactStringMatcher) String() string {
+ return string(m)
+}
+
+func assertRegexp(t *testing.T, input string, outputRE string) {
+ assertPlaintext(t, input, RegexpStringMatcher(outputRE))
+}
+
+func assertString(t *testing.T, input string, output string) {
+ assertPlaintext(t, input, ExactStringMatcher(output))
+}
+
+func assertPlaintext(t *testing.T, input string, matcher StringMatcher) {
+ text, err := FromString(input)
+ if err != nil {
+ t.Error(err)
+ }
+ if !matcher.MatchString(text) {
+ t.Errorf("Input did not match expression\n"+
+ "Input:\n>>>>\n%s\n<<<<\n\n"+
+ "Output:\n>>>>\n%s\n<<<<\n\n"+
+ "Expected output:\n>>>>\n%s\n<<<<\n\n",
+ input, text, matcher.String())
+ } else {
+ t.Logf("input:\n\n%s\n\n\n\noutput:\n\n%s\n", input, text)
+ }
+}
+
+func Example() {
+ inputHtml := `
+ <html>
+ <head>
+ <title>My Mega Service</title>
+ <link rel=\"stylesheet\" href=\"main.css\">
+ <style type=\"text/css\">body { color: #fff; }</style>
+ </head>
+
+ <body>
+ <div class="logo">
+ <a href="http://mymegaservice.com/"><img src="/logo-image.jpg" alt="Mega Service"/></a>
+ </div>
+
+ <h1>Welcome to your new account on my service!</h1>
+
+ <p>
+ Here is some more information:
+
+ <ul>
+ <li>Link 1: <a href="https://example.com">Example.com</a></li>
+ <li>Link 2: <a href="https://example2.com">Example2.com</a></li>
+ <li>Something else</li>
+ </ul>
+ </p>
+ </body>
+ </html>
+ `
+
+ text, err := FromString(inputHtml)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(text)
+
+ // Output:
+ // Mega Service ( http://mymegaservice.com/ )
+ //
+ // ******************************************
+ // Welcome to your new account on my service!
+ // ******************************************
+ //
+ // Here is some more information:
+ //
+ // * Link 1: Example.com ( https://example.com )
+ // * Link 2: Example2.com ( https://example2.com )
+ // * Something else
+}
diff --git a/vendor/github.com/jaytaylor/html2text/testdata/utf8.html b/vendor/github.com/jaytaylor/html2text/testdata/utf8.html
new file mode 100755
index 000000000..53d401ce9
--- /dev/null
+++ b/vendor/github.com/jaytaylor/html2text/testdata/utf8.html
@@ -0,0 +1,22 @@
+<?xml version='1.0' encoding='utf-8'?>
+<html xmlns="http://www.w3.org/1999/xhtml">
+
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <title>学习之道:美国公认学习第一书title</title>
+ <link href="stylesheet.css" rel="stylesheet" type="text/css" />
+ <link href="page_styles.css" rel="stylesheet" type="text/css" />
+</head>
+
+<body class="calibre">
+ <p id="filepos9452" class="calibre_"><span class="calibre6"><span class="bold">写在前面的话</span></span>
+ </p>
+ <p class="calibre_12">在台湾的那次世界冠军赛上,我几近疯狂,直至两年后的今天,我仍沉浸在这次的经历中。这是我生平第一次如此深入地审视我自己,甚至是第一次尝试审视自己。这个过程令人很是兴奋,同时也有点感觉怪异。我重新认识了自我,看到了自己的另外一面,自己从未发觉的另外一面。为了生存,为了取胜,我成了一名角斗士,彻头彻尾,简单纯粹。我并没有意识到这一角色早已在我的心中生根发芽,呼之欲出。也许,他的出现已是不可避免。</p>
+ <p class="calibre_7">而我这全新的一面,与我一直熟识的那个乔希,那个曾经害怕黑暗的孩子,那个象棋手,那个狂热于雨水、反复诵读杰克·克鲁亚克作品的年轻人之间,又有什么样的联系呢?这些都是我正在努力弄清楚的问题。</p>
+ <p class="calibre_7">自台湾赛事之后,我急切非常,一心想要回到训练中去,摆脱自己已经达到巅峰的想法。在过去的两年中,我已经重新开始。这是一个新的起点。前方的路还很长,有待进一步的探索。</p>
+ <p class="calibre_7">这本书的创作耗费了相当多的时间和精力。在成长的过程中,我在我的小房间里从未想过等待我的会是这样的战斗。在创作中,我的思想逐渐成熟;爱恋从分崩离析,到失而复得,世界冠军头衔从失之交臂,到囊中取物。如果说在我人生的第一个二十九年中,我学到了什么,那就是,我们永远无法预测结局,无论是重要的比赛、冒险,还是轰轰烈烈的爱情。我们唯一可以肯定的只有,出乎意料。不管我们做了多么万全的准备,在生活的真实场景中,我们总是会处于陌生的境地。我们也许会无法冷静,失去理智,感觉似乎整个世界都在针对我们。在这个时候,我们所要做的是要付出加倍的努力,要表现得比预想得更好。我认为,关键在于准备好随机应变,准备好在所能想象的高压下发挥出创造力。</p>
+ <p class="calibre_7">读者朋友们,我非常希望你们在读过这本书后,可以得到启发,甚至会得到触动,从而能够根据各自的天赋与特长,去实现自己的梦想。这就是我写作此书的目的。我在字里行间所传达的理念曾经使我受益匪浅,我很希望它们可以为大家提供一个基本的框架和方向。如果我的方法言之有理,那么就请接受它,琢磨它,并加之自己的见解。忘记我的那些数字。真正的掌握需要通过自己发现一些最能够引起共鸣的信息,并将其彻底地融合进来,直至成为一体,这样我们才能随心所欲地驾驭它。</p>
+ <div class="mbp_pagebreak" id="calibre_pb_4"></div>
+</body>
+
+</html> \ No newline at end of file
diff --git a/vendor/github.com/jaytaylor/html2text/testdata/utf8_with_bom.xhtml b/vendor/github.com/jaytaylor/html2text/testdata/utf8_with_bom.xhtml
new file mode 100755
index 000000000..68f0ee707
--- /dev/null
+++ b/vendor/github.com/jaytaylor/html2text/testdata/utf8_with_bom.xhtml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="zh-CN">
+
+<head>
+ <meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8" />
+ <title>1892年波兰文版序言title</title>
+ <link rel="stylesheet" href="css/stylesheet.css" type="text/css" />
+</head>
+
+<body>
+ <div id="page30" />
+ <h2 id="CHP2-6">1892年波兰文版序言<a id="wzyy_18_30" href="#wz_18_30"><sup>[18]</sup></a></h2>
+ <p>出版共产主义宣言的一种新的波兰文本已成为必要,这一事实,引起了许多感想。</p>
+ <p>首先值得注意的是,近来宣言在一定程度上已成为欧洲大陆大工业发展的一种尺度。一个国家的大工业越发展,该国工人中想认清自己作为工人阶级在有产阶级面前所处地位的要求就越增加,他们中间的社会主义运动也越扩大,因而对宣言的需求也越增长。这样,根据宣言用某国文字销行的份数,不仅能够相当确切地断定该国工人运动的状况,而且还能够相当确切地断定该国大工业发展的程度。</p>
+ <p>因此,波兰文的新版本标志着波兰工业的决定性进步。从十年前发表的上一个版本以来确实有了这种进步,对此丝毫不容置疑。俄国的波兰,会议的波兰<a id="wzyy_19_30" href="#wz_19_30"><sup>[19]</sup></a>,成了俄罗斯帝国巨大的工业区。俄国大工业是零星分散的,一部分在芬兰湾沿岸,一部分在中央区(莫斯科和弗拉基米尔),第三部分在黑海和亚速海沿岸,还有另一些散布在别处;而波兰工业则紧缩于相对狭小的地区,享受到由这种积聚引起的长处与短处。这种长处是竞争着的俄罗斯工厂主所承认的,他们要求实行保护关税以对付波兰,尽管他们渴望使波兰人俄罗斯化。这种短处,对波兰工厂主与俄罗斯政府来说,表现在社会主义思想在波兰工人中间的迅速传播和对宣言需求的增长。</p>
+ <p>但是,波兰工业的迅速发展——它超过了俄国工业——本身<a id="page31" />是波兰人民的坚强生命力的一个新证明,是波兰人民临近的民族复兴的一个新保证。而一个独立强盛的波兰的复兴,不只是一件同波兰人有关、而且是同我们大家有关的事情。只有当每个民族在自己内部完全自主时,欧洲各民族间真诚的国际合作才是可能的。1848年革命在无产阶级旗帜下,使无产阶级的战士最终只作了资产阶级的工作,这次革命通过自己遗嘱的执行者路易·波拿巴和俾斯麦也实现了意大利、德国和匈牙利的独立。然而波兰,它从1792年以来为革命做的比所有这三个国家总共做的还要多,而当它1863年失败于强大十倍的俄军的时候,人们却把它抛弃不顾了。贵族既未能保持住、也未能重新争得波兰的独立;今天波兰的独立对资产阶级至少是无所谓的。然而波兰的独立对于欧洲各民族和谐的合作是必需的。这种独立只有年轻的波兰无产阶级才能争得,而且在它的手中会很好地保持住。因为欧洲所有其余的工人都象波兰工人自己一样也需要波兰的独立。</p>
+ <p>弗·恩格斯</p>
+ <p>1892年2月10日于伦敦</p>
+ <div id="page74" />
+ <div><a id="wz_18_30" href="#wzyy_18_30">[18]</a> 恩格斯用德文为《宣言》新的波兰文本写了这篇序言。1892年由波兰社会主义者在伦敦办的《黎明》杂志社出版。序言寄出后,恩格斯写信给门德尔森(1892年2月11日),信中说,他很愿意学会波兰文,并且深入研究波兰工人运动的发展,以便能够为《宣言》的下一版写一篇更详细的序言。——第20页</div>
+ <div><a id="wz_19_30" href="#wzyy_19_30">[19]</a> 指维也纳会议的波兰,即根据1814—1815年维也纳会议的决定,以波兰王国的正式名义割给俄国的那部分波兰土地。——第20页</div>
+</body>
+
+</html> \ No newline at end of file
diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md
index 5eb9e1445..7670fc87a 100644
--- a/vendor/github.com/lib/pq/README.md
+++ b/vendor/github.com/lib/pq/README.md
@@ -1,6 +1,6 @@
# pq - A pure Go postgres driver for Go's database/sql package
-[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq)
+[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
## Install
diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go
index 3c8f77cb6..4b2fb4462 100644
--- a/vendor/github.com/lib/pq/conn.go
+++ b/vendor/github.com/lib/pq/conn.go
@@ -133,7 +133,7 @@ type conn struct {
// Handle driver-side settings in parsed connection string.
func (c *conn) handleDriverSettings(o values) (err error) {
boolSetting := func(key string, val *bool) error {
- if value := o.Get(key); value != "" {
+ if value, ok := o[key]; ok {
if value == "yes" {
*val = true
} else if value == "no" {
@@ -158,8 +158,7 @@ func (c *conn) handleDriverSettings(o values) (err error) {
func (c *conn) handlePgpass(o values) {
// if a password was supplied, do not process .pgpass
- _, ok := o["password"]
- if ok {
+ if _, ok := o["password"]; ok {
return
}
filename := os.Getenv("PGPASSFILE")
@@ -187,11 +186,11 @@ func (c *conn) handlePgpass(o values) {
}
defer file.Close()
scanner := bufio.NewScanner(io.Reader(file))
- hostname := o.Get("host")
+ hostname := o["host"]
ntw, _ := network(o)
- port := o.Get("port")
- db := o.Get("dbname")
- username := o.Get("user")
+ port := o["port"]
+ db := o["dbname"]
+ username := o["user"]
// From: https://github.com/tg/pgpass/blob/master/reader.go
getFields := func(s string) []string {
fs := make([]string, 0, 5)
@@ -256,13 +255,13 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
// * Very low precedence defaults applied in every situation
// * Environment variables
// * Explicitly passed connection information
- o.Set("host", "localhost")
- o.Set("port", "5432")
+ o["host"] = "localhost"
+ o["port"] = "5432"
// N.B.: Extra float digits should be set to 3, but that breaks
// Postgres 8.4 and older, where the max is 2.
- o.Set("extra_float_digits", "2")
+ o["extra_float_digits"] = "2"
for k, v := range parseEnviron(os.Environ()) {
- o.Set(k, v)
+ o[k] = v
}
if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") {
@@ -277,9 +276,9 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
}
// Use the "fallback" application name if necessary
- if fallback := o.Get("fallback_application_name"); fallback != "" {
- if !o.Isset("application_name") {
- o.Set("application_name", fallback)
+ if fallback, ok := o["fallback_application_name"]; ok {
+ if _, ok := o["application_name"]; !ok {
+ o["application_name"] = fallback
}
}
@@ -290,29 +289,29 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
// parsing its value is not worth it. Instead, we always explicitly send
// client_encoding as a separate run-time parameter, which should override
// anything set in options.
- if enc := o.Get("client_encoding"); enc != "" && !isUTF8(enc) {
+ if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
return nil, errors.New("client_encoding must be absent or 'UTF8'")
}
- o.Set("client_encoding", "UTF8")
+ o["client_encoding"] = "UTF8"
// DateStyle needs a similar treatment.
- if datestyle := o.Get("datestyle"); datestyle != "" {
+ if datestyle, ok := o["datestyle"]; ok {
if datestyle != "ISO, MDY" {
panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v",
"ISO, MDY", datestyle))
}
} else {
- o.Set("datestyle", "ISO, MDY")
+ o["datestyle"] = "ISO, MDY"
}
// If a user is not provided by any other means, the last
// resort is to use the current operating system provided user
// name.
- if o.Get("user") == "" {
+ if _, ok := o["user"]; !ok {
u, err := userCurrent()
if err != nil {
return nil, err
} else {
- o.Set("user", u)
+ o["user"] = u
}
}
@@ -335,7 +334,7 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
cn.startup(o)
// reset the deadline, in case one was set (see dial)
- if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" {
+ if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
err = cn.c.SetDeadline(time.Time{})
}
return cn, err
@@ -349,7 +348,7 @@ func dial(d Dialer, o values) (net.Conn, error) {
}
// Zero or not specified means wait indefinitely.
- if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" {
+ if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
seconds, err := strconv.ParseInt(timeout, 10, 0)
if err != nil {
return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err)
@@ -371,31 +370,18 @@ func dial(d Dialer, o values) (net.Conn, error) {
}
func network(o values) (string, string) {
- host := o.Get("host")
+ host := o["host"]
if strings.HasPrefix(host, "/") {
- sockPath := path.Join(host, ".s.PGSQL."+o.Get("port"))
+ sockPath := path.Join(host, ".s.PGSQL."+o["port"])
return "unix", sockPath
}
- return "tcp", net.JoinHostPort(host, o.Get("port"))
+ return "tcp", net.JoinHostPort(host, o["port"])
}
type values map[string]string
-func (vs values) Set(k, v string) {
- vs[k] = v
-}
-
-func (vs values) Get(k string) (v string) {
- return vs[k]
-}
-
-func (vs values) Isset(k string) bool {
- _, ok := vs[k]
- return ok
-}
-
// scanner implements a tokenizer for libpq-style option strings.
type scanner struct {
s []rune
@@ -466,7 +452,7 @@ func parseOpts(name string, o values) error {
// Skip any whitespace after the =
if r, ok = s.SkipSpaces(); !ok {
// If we reach the end here, the last value is just an empty string as per libpq.
- o.Set(string(keyRunes), "")
+ o[string(keyRunes)] = ""
break
}
@@ -501,7 +487,7 @@ func parseOpts(name string, o values) error {
}
}
- o.Set(string(keyRunes), string(valRunes))
+ o[string(keyRunes)] = string(valRunes)
}
return nil
@@ -665,6 +651,12 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) {
cn: cn,
}
}
+ // Set the result and tag to the last command complete if there wasn't a
+ // query already run. Although queries usually return from here and cede
+ // control to Next, a query with zero results does not.
+ if t == 'C' && res.colNames == nil {
+ res.result, res.tag = cn.parseComplete(r.string())
+ }
res.done = true
case 'Z':
cn.processReadyForQuery(r)
@@ -1119,7 +1111,7 @@ func (cn *conn) auth(r *readBuf, o values) {
// OK
case 3:
w := cn.writeBuf('p')
- w.string(o.Get("password"))
+ w.string(o["password"])
cn.send(w)
t, r := cn.recv()
@@ -1133,7 +1125,7 @@ func (cn *conn) auth(r *readBuf, o values) {
case 5:
s := string(r.next(4))
w := cn.writeBuf('p')
- w.string("md5" + md5s(md5s(o.Get("password")+o.Get("user"))+s))
+ w.string("md5" + md5s(md5s(o["password"]+o["user"])+s))
cn.send(w)
t, r := cn.recv()
@@ -1333,6 +1325,8 @@ type rows struct {
colFmts []format
done bool
rb readBuf
+ result driver.Result
+ tag string
}
func (rs *rows) Close() error {
@@ -1356,6 +1350,17 @@ func (rs *rows) Columns() []string {
return rs.colNames
}
+func (rs *rows) Result() driver.Result {
+ if rs.result == nil {
+ return emptyRows
+ }
+ return rs.result
+}
+
+func (rs *rows) Tag() string {
+ return rs.tag
+}
+
func (rs *rows) Next(dest []driver.Value) (err error) {
if rs.done {
return io.EOF
@@ -1373,6 +1378,9 @@ func (rs *rows) Next(dest []driver.Value) (err error) {
case 'E':
err = parseError(&rs.rb)
case 'C', 'I':
+ if t == 'C' {
+ rs.result, rs.tag = conn.parseComplete(rs.rb.string())
+ }
continue
case 'Z':
conn.processReadyForQuery(&rs.rb)
diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go
index 0aca1d002..43cc35f7b 100644
--- a/vendor/github.com/lib/pq/conn_go18.go
+++ b/vendor/github.com/lib/pq/conn_go18.go
@@ -14,10 +14,7 @@ func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.Na
for i, nv := range args {
list[i] = nv.Value
}
- var closed chan<- struct{}
- if ctx.Done() != nil {
- closed = watchCancel(ctx, cn.cancel)
- }
+ closed := cn.watchCancel(ctx)
r, err := cn.query(query, list)
if err != nil {
return nil, err
@@ -33,8 +30,7 @@ func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.Nam
list[i] = nv.Value
}
- if ctx.Done() != nil {
- closed := watchCancel(ctx, cn.cancel)
+ if closed := cn.watchCancel(ctx); closed != nil {
defer close(closed)
}
@@ -53,22 +49,23 @@ func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx,
if err != nil {
return nil, err
}
- if ctx.Done() != nil {
- cn.txnClosed = watchCancel(ctx, cn.cancel)
- }
+ cn.txnClosed = cn.watchCancel(ctx)
return tx, nil
}
-func watchCancel(ctx context.Context, cancel func()) chan<- struct{} {
- closed := make(chan struct{})
- go func() {
- select {
- case <-ctx.Done():
- cancel()
- case <-closed:
- }
- }()
- return closed
+func (cn *conn) watchCancel(ctx context.Context) chan<- struct{} {
+ if done := ctx.Done(); done != nil {
+ closed := make(chan struct{})
+ go func() {
+ select {
+ case <-done:
+ cn.cancel()
+ case <-closed:
+ }
+ }()
+ return closed
+ }
+ return nil
}
func (cn *conn) cancel() {
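
For context on the watchCancel refactor above: from application code this path is reached through database/sql's context-aware methods. The following is a minimal sketch, assuming a reachable Postgres server and a placeholder DSN; it mirrors what the TestContextCancelExec test further below exercises.

```go
// Sketch only: cancelling an in-flight query via context with lib/pq.
// The DSN is a placeholder and assumes a reachable Postgres server.
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	// When ctx expires, pq's watchCancel goroutine calls cn.cancel(),
	// which asks the server to abandon the in-flight pg_sleep query.
	if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
		log.Println("expected a cancellation error, got none")
	}
}
```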
diff --git a/vendor/github.com/lib/pq/conn_test.go b/vendor/github.com/lib/pq/conn_test.go
index 183e6dcd6..c9135b727 100644
--- a/vendor/github.com/lib/pq/conn_test.go
+++ b/vendor/github.com/lib/pq/conn_test.go
@@ -191,7 +191,7 @@ localhost:*:*:*:pass_C
pgpass.Close()
assertPassword := func(extra values, expected string) {
- o := &values{
+ o := values{
"host": "localhost",
"sslmode": "disable",
"connect_timeout": "20",
@@ -203,11 +203,11 @@ localhost:*:*:*:pass_C
"datestyle": "ISO, MDY",
}
for k, v := range extra {
- (*o)[k] = v
+ o[k] = v
}
- (&conn{}).handlePgpass(*o)
- if o.Get("password") != expected {
- t.Fatalf("For %v expected %s got %s", extra, expected, o.Get("password"))
+ (&conn{}).handlePgpass(o)
+ if pw := o["password"]; pw != expected {
+ t.Fatalf("For %v expected %s got %s", extra, expected, pw)
}
}
// wrong permissions for the pgpass file means it should be ignored
@@ -686,17 +686,28 @@ func TestCloseBadConn(t *testing.T) {
if err := cn.Close(); err != nil {
t.Fatal(err)
}
+
+ // During the Go 1.9 cycle, https://github.com/golang/go/commit/3792db5
+ // changed this error from
+ //
+ // net.errClosing = errors.New("use of closed network connection")
+ //
+ // to
+ //
+ // internal/poll.ErrClosing = errors.New("use of closed file or network connection")
+ const errClosing = "use of closed"
+
// Verify write after closing fails.
if _, err := nc.Write(nil); err == nil {
t.Fatal("expected error")
- } else if !strings.Contains(err.Error(), "use of closed network connection") {
- t.Fatalf("expected use of closed network connection error, got %s", err)
+ } else if !strings.Contains(err.Error(), errClosing) {
+ t.Fatalf("expected %s error, got %s", errClosing, err)
}
// Verify second close fails.
if err := cn.Close(); err == nil {
t.Fatal("expected error")
- } else if !strings.Contains(err.Error(), "use of closed network connection") {
- t.Fatalf("expected use of closed network connection error, got %s", err)
+ } else if !strings.Contains(err.Error(), errClosing) {
+ t.Fatalf("expected %s error, got %s", errClosing, err)
}
}
@@ -1493,3 +1504,82 @@ func TestQuoteIdentifier(t *testing.T) {
}
}
}
+
+func TestRowsResultTag(t *testing.T) {
+ type ResultTag interface {
+ Result() driver.Result
+ Tag() string
+ }
+
+ tests := []struct {
+ query string
+ tag string
+ ra int64
+ }{
+ {
+ query: "CREATE TEMP TABLE temp (a int)",
+ tag: "CREATE TABLE",
+ },
+ {
+ query: "INSERT INTO temp VALUES (1), (2)",
+ tag: "INSERT",
+ ra: 2,
+ },
+ {
+ query: "SELECT 1",
+ },
+ // A SELECT anywhere should take precedence.
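REMOVED_PLACEHOLDER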
+ {
+ query: "SELECT 1; INSERT INTO temp VALUES (1), (2)",
+ },
+ {
+ query: "INSERT INTO temp VALUES (1), (2); SELECT 1",
+ },
+ // Multiple statements that don't return rows should return the last tag.
+ {
+ query: "CREATE TEMP TABLE t (a int); DROP TABLE t",
+ tag: "DROP TABLE",
+ },
+ // Ensure a rows-returning query in any position among various tag-returning
+ // statements will prefer the rows.
+ {
+ query: "SELECT 1; CREATE TEMP TABLE t (a int); DROP TABLE t",
+ },
+ {
+ query: "CREATE TEMP TABLE t (a int); SELECT 1; DROP TABLE t",
+ },
+ {
+ query: "CREATE TEMP TABLE t (a int); DROP TABLE t; SELECT 1",
+ },
+ // Verify that a no-results query doesn't set the tag.
+ {
+ query: "CREATE TEMP TABLE t (a int); SELECT 1 WHERE FALSE; DROP TABLE t;",
+ },
+ }
+
+ // If this is the only test run, this will correct the connection string.
+ openTestConn(t).Close()
+
+ conn, err := Open("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer conn.Close()
+ q := conn.(driver.Queryer)
+
+ for _, test := range tests {
+ if rows, err := q.Query(test.query, nil); err != nil {
+ t.Fatalf("%s: %s", test.query, err)
+ } else {
+ r := rows.(ResultTag)
+ if tag := r.Tag(); tag != test.tag {
+ t.Fatalf("%s: unexpected tag %q", test.query, tag)
+ }
+ res := r.Result()
+ if ra, _ := res.RowsAffected(); ra != test.ra {
+ t.Fatalf("%s: unexpected rows affected: %d", test.query, ra)
+ }
+ rows.Close()
+ }
+ }
+}
diff --git a/vendor/github.com/lib/pq/go18_test.go b/vendor/github.com/lib/pq/go18_test.go
index 15546d865..5d17e4d92 100644
--- a/vendor/github.com/lib/pq/go18_test.go
+++ b/vendor/github.com/lib/pq/go18_test.go
@@ -79,10 +79,7 @@ func TestContextCancelExec(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.ExecContext has begun.
- go func() {
- time.Sleep(time.Millisecond * 10)
- cancel()
- }()
+ defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
// Not canceled until after the exec has started.
if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
@@ -106,10 +103,7 @@ func TestContextCancelQuery(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
// Delay execution for just a bit until db.QueryContext has begun.
- go func() {
- time.Sleep(time.Millisecond * 10)
- cancel()
- }()
+ defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
// Not canceled until after the exec has started.
if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
@@ -137,10 +131,7 @@ func TestContextCancelBegin(t *testing.T) {
}
// Delay execution for just a bit until tx.Exec has begun.
- go func() {
- time.Sleep(time.Millisecond * 10)
- cancel()
- }()
+ defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
// Not canceled until after the exec has started.
if _, err := tx.Exec("select pg_sleep(1)"); err == nil {
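
These three tests now use the same one-liner to schedule a delayed cancel and release the timer when the test returns. The pattern in isolation, assuming only the standard context and time packages:

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Cancel after 10ms; Stop releases the timer if the test finishes sooner.
    timer := time.AfterFunc(10*time.Millisecond, cancel)
    defer timer.Stop()
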
diff --git a/vendor/github.com/lib/pq/listen_example/doc.go b/vendor/github.com/lib/pq/listen_example/doc.go
index 5bc99f5c1..80f0a9b97 100644
--- a/vendor/github.com/lib/pq/listen_example/doc.go
+++ b/vendor/github.com/lib/pq/listen_example/doc.go
@@ -51,21 +51,15 @@ mechanism to avoid polling the database while waiting for more work to arrive.
}
func waitForNotification(l *pq.Listener) {
- for {
- select {
- case <-l.Notify:
- fmt.Println("received notification, new work available")
- return
- case <-time.After(90 * time.Second):
- go func() {
- l.Ping()
- }()
- // Check if there's more work available, just in case it takes
- // a while for the Listener to notice connection loss and
- // reconnect.
- fmt.Println("received no work for 90 seconds, checking for new work")
- return
- }
+ select {
+ case <-l.Notify:
+ fmt.Println("received notification, new work available")
+ case <-time.After(90 * time.Second):
+ go l.Ping()
+ // Check if there's more work available, just in case it takes
+ // a while for the Listener to notice connection loss and
+ // reconnect.
+ fmt.Println("received no work for 90 seconds, checking for new work")
}
}
diff --git a/vendor/github.com/lib/pq/notify_test.go b/vendor/github.com/lib/pq/notify_test.go
index fe8941a4e..82a77e1eb 100644
--- a/vendor/github.com/lib/pq/notify_test.go
+++ b/vendor/github.com/lib/pq/notify_test.go
@@ -7,7 +7,6 @@ import (
"os"
"runtime"
"sync"
- "sync/atomic"
"testing"
"time"
)
@@ -235,15 +234,10 @@ func TestConnExecDeadlock(t *testing.T) {
// calls Close on the net.Conn; equivalent to a network failure
l.Close()
- var done int32 = 0
- go func() {
- time.Sleep(10 * time.Second)
- if atomic.LoadInt32(&done) != 1 {
- panic("timed out")
- }
- }()
+ defer time.AfterFunc(10*time.Second, func() {
+ panic("timed out")
+ }).Stop()
wg.Wait()
- atomic.StoreInt32(&done, 1)
}
// Test for ListenerConn being closed while a slow query is executing
@@ -271,15 +265,11 @@ func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- var done int32 = 0
- go func() {
- time.Sleep(10 * time.Second)
- if atomic.LoadInt32(&done) != 1 {
- panic("timed out")
- }
- }()
+
+ defer time.AfterFunc(10*time.Second, func() {
+ panic("timed out")
+ }).Stop()
wg.Wait()
- atomic.StoreInt32(&done, 1)
}
func TestNotifyExtra(t *testing.T) {
diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go
index 03df05a61..a3390c23a 100644
--- a/vendor/github.com/lib/pq/oid/types.go
+++ b/vendor/github.com/lib/pq/oid/types.go
@@ -18,6 +18,7 @@ const (
T_xid Oid = 28
T_cid Oid = 29
T_oidvector Oid = 30
+ T_pg_ddl_command Oid = 32
T_pg_type Oid = 71
T_pg_attribute Oid = 75
T_pg_proc Oid = 81
@@ -28,6 +29,7 @@ const (
T_pg_node_tree Oid = 194
T__json Oid = 199
T_smgr Oid = 210
+ T_index_am_handler Oid = 325
T_point Oid = 600
T_lseg Oid = 601
T_path Oid = 602
@@ -133,6 +135,9 @@ const (
T__uuid Oid = 2951
T_txid_snapshot Oid = 2970
T_fdw_handler Oid = 3115
+ T_pg_lsn Oid = 3220
+ T__pg_lsn Oid = 3221
+ T_tsm_handler Oid = 3310
T_anyenum Oid = 3500
T_tsvector Oid = 3614
T_tsquery Oid = 3615
@@ -144,6 +149,8 @@ const (
T__regconfig Oid = 3735
T_regdictionary Oid = 3769
T__regdictionary Oid = 3770
+ T_jsonb Oid = 3802
+ T__jsonb Oid = 3807
T_anyrange Oid = 3831
T_event_trigger Oid = 3838
T_int4range Oid = 3904
@@ -158,4 +165,9 @@ const (
T__daterange Oid = 3913
T_int8range Oid = 3926
T__int8range Oid = 3927
+ T_pg_shseclabel Oid = 4066
+ T_regnamespace Oid = 4089
+ T__regnamespace Oid = 4090
+ T_regrole Oid = 4096
+ T__regrole Oid = 4097
)
diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go
index b282ebd92..7deb30436 100644
--- a/vendor/github.com/lib/pq/ssl.go
+++ b/vendor/github.com/lib/pq/ssl.go
@@ -15,7 +15,7 @@ import (
func ssl(o values) func(net.Conn) net.Conn {
verifyCaOnly := false
tlsConf := tls.Config{}
- switch mode := o.Get("sslmode"); mode {
+ switch mode := o["sslmode"]; mode {
// "require" is the default.
case "", "require":
// We must skip TLS's own verification since it requires full
@@ -23,15 +23,19 @@ func ssl(o values) func(net.Conn) net.Conn {
tlsConf.InsecureSkipVerify = true
// From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
- // Note: For backwards compatibility with earlier versions of PostgreSQL, if a
- // root CA file exists, the behavior of sslmode=require will be the same as
- // that of verify-ca, meaning the server certificate is validated against the
- // CA. Relying on this behavior is discouraged, and applications that need
- // certificate validation should always use verify-ca or verify-full.
- if _, err := os.Stat(o.Get("sslrootcert")); err == nil {
- verifyCaOnly = true
- } else {
- o.Set("sslrootcert", "")
+ //
+ // Note: For backwards compatibility with earlier versions of
+ // PostgreSQL, if a root CA file exists, the behavior of
+ // sslmode=require will be the same as that of verify-ca, meaning the
+ // server certificate is validated against the CA. Relying on this
+ // behavior is discouraged, and applications that need certificate
+ // validation should always use verify-ca or verify-full.
+ if sslrootcert, ok := o["sslrootcert"]; ok {
+ if _, err := os.Stat(sslrootcert); err == nil {
+ verifyCaOnly = true
+ } else {
+ delete(o, "sslrootcert")
+ }
}
case "verify-ca":
// We must skip TLS's own verification since it requires full
@@ -39,7 +43,7 @@ func ssl(o values) func(net.Conn) net.Conn {
tlsConf.InsecureSkipVerify = true
verifyCaOnly = true
case "verify-full":
- tlsConf.ServerName = o.Get("host")
+ tlsConf.ServerName = o["host"]
case "disable":
return nil
default:
@@ -64,38 +68,43 @@ func ssl(o values) func(net.Conn) net.Conn {
// in the user's home directory. The configured files must exist and have
// the correct permissions.
func sslClientCertificates(tlsConf *tls.Config, o values) {
- sslkey := o.Get("sslkey")
- sslcert := o.Get("sslcert")
-
- var cinfo, kinfo os.FileInfo
- var err error
+ // user.Current() might fail when cross-compiling. We have to ignore the
+ // error and continue without home directory defaults, since we wouldn't
+ // know from where to load them.
+ user, _ := user.Current()
+
+ // In libpq, the client certificate is only loaded if the setting is not blank.
+ //
+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
+ sslcert := o["sslcert"]
+ if len(sslcert) == 0 && user != nil {
+ sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
+ }
+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
+ if len(sslcert) == 0 {
+ return
+ }
+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
+ if _, err := os.Stat(sslcert); os.IsNotExist(err) {
+ return
+ } else if err != nil {
+ panic(err)
+ }
- if sslcert != "" && sslkey != "" {
- // Check that both files exist. Note that we don't do any more extensive
- // checks than this (such as checking that the paths aren't directories);
- // LoadX509KeyPair() will take care of the rest.
- cinfo, err = os.Stat(sslcert)
- if err != nil {
- panic(err)
- }
+ // In libpq, the ssl key is only loaded if the setting is not blank.
+ //
+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
+ sslkey := o["sslkey"]
+ if len(sslkey) == 0 && user != nil {
+ sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
+ }
- kinfo, err = os.Stat(sslkey)
- if err != nil {
+ if len(sslkey) > 0 {
+ if err := sslKeyPermissions(sslkey); err != nil {
panic(err)
}
- } else {
- // Automatically find certificates from ~/.postgresql
- sslcert, sslkey, cinfo, kinfo = sslHomeCertificates()
-
- if cinfo == nil || kinfo == nil {
- // No certificates to load
- return
- }
}
- // The files must also have the correct permissions
- sslCertificatePermissions(cinfo, kinfo)
-
cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
if err != nil {
panic(err)
@@ -105,7 +114,10 @@ func sslClientCertificates(tlsConf *tls.Config, o values) {
// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
func sslCertificateAuthority(tlsConf *tls.Config, o values) {
- if sslrootcert := o.Get("sslrootcert"); sslrootcert != "" {
+ // In libpq, the root certificate is only loaded if the setting is not blank.
+ //
+ // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
+ if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 {
tlsConf.RootCAs = x509.NewCertPool()
cert, err := ioutil.ReadFile(sslrootcert)
@@ -113,41 +125,12 @@ func sslCertificateAuthority(tlsConf *tls.Config, o values) {
panic(err)
}
- ok := tlsConf.RootCAs.AppendCertsFromPEM(cert)
- if !ok {
+ if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
errorf("couldn't parse pem in sslrootcert")
}
}
}
-// sslHomeCertificates returns the path and stats of certificates in the current
-// user's home directory.
-func sslHomeCertificates() (cert, key string, cinfo, kinfo os.FileInfo) {
- user, err := user.Current()
-
- if err != nil {
- // user.Current() might fail when cross-compiling. We have to ignore the
- // error and continue without client certificates, since we wouldn't know
- // from where to load them.
- return
- }
-
- cert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
- key = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
-
- cinfo, err = os.Stat(cert)
- if err != nil {
- cinfo = nil
- }
-
- kinfo, err = os.Stat(key)
- if err != nil {
- kinfo = nil
- }
-
- return
-}
-
// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
// verifies the presented certificate against the CA, i.e. the one specified in
// sslrootcert or the system CA if sslrootcert was not specified.
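
In connection-string terms, the behaviour documented above means sslmode=require is silently upgraded to verify-ca whenever the named root certificate file exists, and client certificates default to ~/.postgresql/ when sslcert and sslkey are blank. A hedged usage sketch (host, paths and credentials are illustrative; assumes the usual database/sql, log imports and the blank lib/pq import):

    db, err := sql.Open("postgres",
        "host=db.example.com user=app dbname=app "+
            "sslmode=require sslrootcert=/etc/app/root.crt") // validated against the CA if the file exists
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
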
diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go
index 33076a8da..3b7c3a2a3 100644
--- a/vendor/github.com/lib/pq/ssl_permissions.go
+++ b/vendor/github.com/lib/pq/ssl_permissions.go
@@ -4,13 +4,17 @@ package pq
import "os"
-// sslCertificatePermissions checks the permissions on user-supplied certificate
-// files. The key file should have very little access.
+// sslKeyPermissions checks the permissions on user-supplied ssl key files.
+// The key file should have very little access.
//
// libpq does not check key file permissions on Windows.
-func sslCertificatePermissions(cert, key os.FileInfo) {
- kmode := key.Mode()
- if kmode != kmode&0600 {
- panic(ErrSSLKeyHasWorldPermissions)
+func sslKeyPermissions(sslkey string) error {
+ info, err := os.Stat(sslkey)
+ if err != nil {
+ return err
}
+ if info.Mode().Perm()&0077 != 0 {
+ return ErrSSLKeyHasWorldPermissions
+ }
+ return nil
}
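
A standalone sketch of the same check outside the driver: reject keys that are readable or writable by group or others (mode bits 0077), which is what libpq enforces on non-Windows systems:

    func keyPermsOK(path string) error {
        info, err := os.Stat(path)
        if err != nil {
            return err
        }
        if info.Mode().Perm()&0077 != 0 {
            return fmt.Errorf("%s: permissions %#o are too open", path, info.Mode().Perm())
        }
        return nil
    }
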
diff --git a/vendor/github.com/lib/pq/ssl_test.go b/vendor/github.com/lib/pq/ssl_test.go
index f70a5fd57..3eafbfd20 100644
--- a/vendor/github.com/lib/pq/ssl_test.go
+++ b/vendor/github.com/lib/pq/ssl_test.go
@@ -6,7 +6,6 @@ import (
_ "crypto/sha256"
"crypto/x509"
"database/sql"
- "fmt"
"os"
"path/filepath"
"testing"
@@ -42,10 +41,13 @@ func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) {
}
func checkSSLSetup(t *testing.T, conninfo string) {
- db, err := openSSLConn(t, conninfo)
- if err == nil {
- db.Close()
- t.Fatalf("expected error with conninfo=%q", conninfo)
+ _, err := openSSLConn(t, conninfo)
+ if pge, ok := err.(*Error); ok {
+ if pge.Code.Name() != "invalid_authorization_specification" {
+ t.Fatalf("unexpected error code '%s'", pge.Code.Name())
+ }
+ } else {
+ t.Fatalf("expected %T, got %v", (*Error)(nil), err)
}
}
@@ -150,120 +152,128 @@ func TestSSLVerifyCA(t *testing.T) {
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
// Not OK according to the system CA
- _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest")
- if err == nil {
- t.Fatal("expected error")
+ {
+ _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest")
+ if _, ok := err.(x509.UnknownAuthorityError); !ok {
+ t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err)
+ }
}
- _, ok := err.(x509.UnknownAuthorityError)
- if !ok {
- t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
+
+ // Still not OK according to the system CA; empty sslrootcert is treated as unspecified.
+ {
+ _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest sslrootcert=''")
+ if _, ok := err.(x509.UnknownAuthorityError); !ok {
+ t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err)
+ }
}
rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
rootCert := "sslrootcert=" + rootCertPath + " "
// No match on Common Name, but that's OK
- _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest")
- if err != nil {
+ if _, err := openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest"); err != nil {
t.Fatal(err)
}
// Everything OK
- _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest")
- if err != nil {
+ if _, err := openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest"); err != nil {
t.Fatal(err)
}
}
-func getCertConninfo(t *testing.T, source string) string {
- var sslkey string
- var sslcert string
-
- certpath := os.Getenv("PQSSLCERTTEST_PATH")
-
- switch source {
- case "missingkey":
- sslkey = "/tmp/filedoesnotexist"
- sslcert = filepath.Join(certpath, "postgresql.crt")
- case "missingcert":
- sslkey = filepath.Join(certpath, "postgresql.key")
- sslcert = "/tmp/filedoesnotexist"
- case "certtwice":
- sslkey = filepath.Join(certpath, "postgresql.crt")
- sslcert = filepath.Join(certpath, "postgresql.crt")
- case "valid":
- sslkey = filepath.Join(certpath, "postgresql.key")
- sslcert = filepath.Join(certpath, "postgresql.crt")
- default:
- t.Fatalf("invalid source %q", source)
- }
- return fmt.Sprintf("sslmode=require user=pqgosslcert sslkey=%s sslcert=%s", sslkey, sslcert)
-}
-
// Authenticate over SSL using client certificates
func TestSSLClientCertificates(t *testing.T) {
maybeSkipSSLTests(t)
// Environment sanity check: should fail without SSL
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
- // Should also fail without a valid certificate
- db, err := openSSLConn(t, "sslmode=require user=pqgosslcert")
- if err == nil {
- db.Close()
- t.Fatal("expected error")
- }
- pge, ok := err.(*Error)
- if !ok {
- t.Fatal("expected pq.Error")
+ const baseinfo = "sslmode=require user=pqgosslcert"
+
+ // Certificate not specified, should fail
+ {
+ _, err := openSSLConn(t, baseinfo)
+ if pge, ok := err.(*Error); ok {
+ if pge.Code.Name() != "invalid_authorization_specification" {
+ t.Fatalf("unexpected error code '%s'", pge.Code.Name())
+ }
+ } else {
+ t.Fatalf("expected %T, got %v", (*Error)(nil), err)
+ }
}
- if pge.Code.Name() != "invalid_authorization_specification" {
- t.Fatalf("unexpected error code %q", pge.Code.Name())
+
+ // Empty certificate specified, should fail
+ {
+ _, err := openSSLConn(t, baseinfo+" sslcert=''")
+ if pge, ok := err.(*Error); ok {
+ if pge.Code.Name() != "invalid_authorization_specification" {
+ t.Fatalf("unexpected error code '%s'", pge.Code.Name())
+ }
+ } else {
+ t.Fatalf("expected %T, got %v", (*Error)(nil), err)
+ }
}
- // Should work
- db, err = openSSLConn(t, getCertConninfo(t, "valid"))
- if err != nil {
- t.Fatal(err)
+ // Non-existent certificate specified, should fail
+ {
+ _, err := openSSLConn(t, baseinfo+" sslcert=/tmp/filedoesnotexist")
+ if pge, ok := err.(*Error); ok {
+ if pge.Code.Name() != "invalid_authorization_specification" {
+ t.Fatalf("unexpected error code '%s'", pge.Code.Name())
+ }
+ } else {
+ t.Fatalf("expected %T, got %v", (*Error)(nil), err)
+ }
}
- rows, err := db.Query("SELECT 1")
- if err != nil {
- t.Fatal(err)
+
+ certpath, ok := os.LookupEnv("PQSSLCERTTEST_PATH")
+ if !ok {
+ t.Fatalf("PQSSLCERTTEST_PATH not present in environment")
}
- rows.Close()
-}
-// Test errors with ssl certificates
-func TestSSLClientCertificatesMissingFiles(t *testing.T) {
- maybeSkipSSLTests(t)
- // Environment sanity check: should fail without SSL
- checkSSLSetup(t, "sslmode=disable user=pqgossltest")
+ sslcert := filepath.Join(certpath, "postgresql.crt")
- // Key missing, should fail
- _, err := openSSLConn(t, getCertConninfo(t, "missingkey"))
- if err == nil {
- t.Fatal("expected error")
- }
- // should be a PathError
- _, ok := err.(*os.PathError)
- if !ok {
- t.Fatalf("expected PathError, got %#+v", err)
+ // Cert present, key not specified, should fail
+ {
+ _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert)
+ if _, ok := err.(*os.PathError); !ok {
+ t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
+ }
}
- // Cert missing, should fail
- _, err = openSSLConn(t, getCertConninfo(t, "missingcert"))
- if err == nil {
- t.Fatal("expected error")
+ // Cert present, empty key specified, should fail
+ {
+ _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=''")
+ if _, ok := err.(*os.PathError); !ok {
+ t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
+ }
}
- // should be a PathError
- _, ok = err.(*os.PathError)
- if !ok {
- t.Fatalf("expected PathError, got %#+v", err)
+
+ // Cert present, non-existent key, should fail
+ {
+ _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=/tmp/filedoesnotexist")
+ if _, ok := err.(*os.PathError); !ok {
+ t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
+ }
}
- // Key has wrong permissions, should fail
- _, err = openSSLConn(t, getCertConninfo(t, "certtwice"))
- if err == nil {
- t.Fatal("expected error")
+ // Key has wrong permissions (passing the cert as the key), should fail
+ if _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslcert); err != ErrSSLKeyHasWorldPermissions {
+ t.Fatalf("expected %s, got %#+v", ErrSSLKeyHasWorldPermissions, err)
}
- if err != ErrSSLKeyHasWorldPermissions {
- t.Fatalf("expected ErrSSLKeyHasWorldPermissions, got %#+v", err)
+
+ sslkey := filepath.Join(certpath, "postgresql.key")
+
+ // Should work
+ if db, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslkey); err != nil {
+ t.Fatal(err)
+ } else {
+ rows, err := db.Query("SELECT 1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := rows.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if err := db.Close(); err != nil {
+ t.Fatal(err)
+ }
}
}
diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go
index 529daed22..5d2c763ce 100644
--- a/vendor/github.com/lib/pq/ssl_windows.go
+++ b/vendor/github.com/lib/pq/ssl_windows.go
@@ -2,8 +2,8 @@
package pq
-import "os"
-
-// sslCertificatePermissions checks the permissions on user-supplied certificate
-// files. In libpq, this is a no-op on Windows.
-func sslCertificatePermissions(cert, key os.FileInfo) {}
+// sslKeyPermissions checks the permissions on user-supplied ssl key files.
+// The key file should have very little access.
+//
+// libpq does not check key file permissions on Windows.
+func sslKeyPermissions(string) error { return nil }
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
index 0c1f1b6a9..2acc7f1ff 100644
--- a/vendor/github.com/miekg/dns/README.md
+++ b/vendor/github.com/miekg/dns/README.md
@@ -25,6 +25,7 @@ two versions of Go, currently: 1.6 and 1.7.
A not-so-up-to-date-list-that-may-be-actually-current:
+* https://github.com/coredns/coredns
* https://cloudflare.com
* https://github.com/abh/geodns
* http://www.statdns.com/
@@ -54,6 +55,8 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/mehrdadrad/mylg
* https://github.com/bamarni/dockness
* https://github.com/fffaraz/microdns
+* http://quilt.io
+* https://github.com/ipdcode/hades (JD.COM)
Send pull request if you want to be listed here.
@@ -142,6 +145,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 6975 - Algorithm Understanding in DNSSEC
* 7043 - EUI48/EUI64 records
* 7314 - DNS (EDNS) EXPIRE Option
+* 7828 - edns-tcp-keepalive EDNS0 Option
* 7553 - URI record
* 7858 - DNS over TLS: Initiation and Performance Considerations (draft)
* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies)
diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go
index 0db7f7bf6..d54d6422e 100644
--- a/vendor/github.com/miekg/dns/client.go
+++ b/vendor/github.com/miekg/dns/client.go
@@ -121,12 +121,12 @@ func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
return c.exchange(m, a)
})
+ if r != nil && shared {
+ r = r.Copy()
+ }
if err != nil {
return r, rtt, err
}
- if shared {
- return r.Copy(), rtt, nil
- }
return r, rtt, nil
}
@@ -300,6 +300,18 @@ func tcpMsgLen(t io.Reader) (int, error) {
if err != nil {
return 0, err
}
+
+ // As seen with my local router/switch, the above read sometimes returns
+ // only 1 byte, resulting in a ShortRead. Just spell out the second read
+ // here (instead of a loop) and fetch the remaining byte.
+ if n == 1 {
+ n1, err := t.Read(p[1:])
+ if err != nil {
+ return 0, err
+ }
+ n += n1
+ }
+
if n != 2 {
return 0, ErrShortRead
}
@@ -400,7 +412,7 @@ func (co *Conn) Write(p []byte) (n int, err error) {
n, err := io.Copy(w, bytes.NewReader(p))
return int(n), err
}
- n, err = co.Conn.(*net.UDPConn).Write(p)
+ n, err = co.Conn.Write(p)
return n, err
}
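
The retry above handles a transport that delivers the two length-prefix bytes one at a time. The standard-library equivalent, which folds that retry into a single call, would be io.ReadFull; a sketch against the same io.Reader t:

    var p [2]byte
    if _, err := io.ReadFull(t, p[:]); err != nil {
        return 0, err // io.ReadFull turns a short read into an error
    }
    return int(binary.BigEndian.Uint16(p[:])), nil
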
diff --git a/vendor/github.com/miekg/dns/client_test.go b/vendor/github.com/miekg/dns/client_test.go
index 850bcfcda..dee585f36 100644
--- a/vendor/github.com/miekg/dns/client_test.go
+++ b/vendor/github.com/miekg/dns/client_test.go
@@ -5,6 +5,7 @@ import (
"fmt"
"net"
"strconv"
+ "sync"
"testing"
"time"
)
@@ -77,8 +78,8 @@ func TestClientTLSSync(t *testing.T) {
}
}
-func TestClientSyncBadId(t *testing.T) {
- HandleFunc("miek.nl.", HelloServerBadId)
+func TestClientSyncBadID(t *testing.T) {
+ HandleFunc("miek.nl.", HelloServerBadID)
defer HandleRemove("miek.nl.")
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
@@ -450,3 +451,61 @@ func TestTimeout(t *testing.T) {
t.Errorf("exchange took longer (%v) than specified Timeout (%v)", length, timeout)
}
}
+
+// Check that responses from deduplicated requests aren't shared between callers
+func TestConcurrentExchanges(t *testing.T) {
+ cases := make([]*Msg, 2)
+ cases[0] = new(Msg)
+ cases[1] = new(Msg)
+ cases[1].Truncated = true
+ for _, m := range cases {
+ block := make(chan struct{})
+ waiting := make(chan struct{})
+
+ handler := func(w ResponseWriter, req *Msg) {
+ r := m.Copy()
+ r.SetReply(req)
+
+ waiting <- struct{}{}
+ <-block
+ w.WriteMsg(r)
+ }
+
+ HandleFunc("miek.nl.", handler)
+ defer HandleRemove("miek.nl.")
+
+ s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unable to run test server: %s", err)
+ }
+ defer s.Shutdown()
+
+ m := new(Msg)
+ m.SetQuestion("miek.nl.", TypeSRV)
+ c := &Client{
+ SingleInflight: true,
+ }
+ r := make([]*Msg, 2)
+
+ var wg sync.WaitGroup
+ wg.Add(len(r))
+ for i := 0; i < len(r); i++ {
+ go func(i int) {
+ r[i], _, _ = c.Exchange(m.Copy(), addrstr)
+ wg.Done()
+ }(i)
+ }
+ select {
+ case <-waiting:
+ case <-time.After(time.Second):
+ t.FailNow()
+ }
+ close(block)
+ wg.Wait()
+
+ if r[0] == r[1] {
+ t.Log("Got same response object, expected non-shared responses")
+ t.Fail()
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go
index cfa9ad0b2..0a1f5a92c 100644
--- a/vendor/github.com/miekg/dns/clientconfig.go
+++ b/vendor/github.com/miekg/dns/clientconfig.go
@@ -97,3 +97,35 @@ func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
}
return c, nil
}
+
+// NameList returns all of the names that should be queried based on the
+// config. It is based on Go's net/dns name building, but it does not
+// check the length of the resulting names.
+func (c *ClientConfig) NameList(name string) []string {
+ // if this domain is already fully qualified, no append needed.
+ if IsFqdn(name) {
+ return []string{name}
+ }
+
+ // Check to see if the name has more labels than Ndots. Do this before making
+ // the domain fully qualified.
+ hasNdots := CountLabel(name) > c.Ndots
+ // Make the domain fully qualified.
+ name = Fqdn(name)
+
+ // Make a list of names based off search.
+ names := []string{}
+
+ // If name has enough dots, try that first.
+ if hasNdots {
+ names = append(names, name)
+ }
+ for _, s := range c.Search {
+ names = append(names, Fqdn(name+s))
+ }
+ // If we didn't have enough dots, try after suffixes.
+ if !hasNdots {
+ names = append(names, name)
+ }
+ return names
+}
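
A usage sketch for the new NameList helper (config values are illustrative): with Ndots set to 1 and a single search suffix, a name without enough dots tries the search suffix first and the bare name last.

    cfg := &dns.ClientConfig{Search: []string{"example.com"}, Ndots: 1}
    for _, qname := range cfg.NameList("www") {
        fmt.Println(qname) // "www.example.com." then "www."
    }
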
diff --git a/vendor/github.com/miekg/dns/clientconfig_test.go b/vendor/github.com/miekg/dns/clientconfig_test.go
index 63bc5c814..7755a8a6f 100644
--- a/vendor/github.com/miekg/dns/clientconfig_test.go
+++ b/vendor/github.com/miekg/dns/clientconfig_test.go
@@ -48,3 +48,40 @@ func testConfig(t *testing.T, data string) {
func TestNameserver(t *testing.T) { testConfig(t, normal) }
func TestMissingFinalNewLine(t *testing.T) { testConfig(t, missingNewline) }
+
+func TestNameList(t *testing.T) {
+ cfg := ClientConfig{
+ Ndots: 1,
+ }
+ // fqdn should be only result returned
+ names := cfg.NameList("miek.nl.")
+ if len(names) != 1 {
+ t.Errorf("NameList returned != 1 names: %v", names)
+ } else if names[0] != "miek.nl." {
+ t.Errorf("NameList didn't return sent fqdn domain: %v", names[0])
+ }
+
+ cfg.Search = []string{
+ "test",
+ }
+ // Sent domain has NDots and search
+ names = cfg.NameList("miek.nl")
+ if len(names) != 2 {
+ t.Errorf("NameList returned != 2 names: %v", names)
+ } else if names[0] != "miek.nl." {
+ t.Errorf("NameList didn't return sent domain first: %v", names[0])
+ } else if names[1] != "miek.nl.test." {
+ t.Errorf("NameList didn't return search last: %v", names[1])
+ }
+
+ cfg.Ndots = 2
+ // Sent domain has less than NDots and search
+ names = cfg.NameList("miek.nl")
+ if len(names) != 2 {
+ t.Errorf("NameList returned != 2 names: %v", names)
+ } else if names[0] != "miek.nl.test." {
+ t.Errorf("NameList didn't return search first: %v", names[0])
+ } else if names[1] != "miek.nl." {
+ t.Errorf("NameList didn't return sent domain last: %v", names[1])
+ }
+}
diff --git a/vendor/github.com/miekg/dns/compress_generate.go b/vendor/github.com/miekg/dns/compress_generate.go
new file mode 100644
index 000000000..1a301e9f3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/compress_generate.go
@@ -0,0 +1,184 @@
+//+build ignore
+
+// compress_generate.go is meant to be run with go generate. It uses
+// go/{importer,types} to track down all the RR struct types. For each type
+// it looks for (compressible) names; if any are found, it adds that type to
+// compressionLenHelperType and compressionLenSearchType, which "fake" the
+// compression so that Len() is fast.
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "go/importer"
+ "go/types"
+ "log"
+ "os"
+)
+
+var packageHdr = `
+// *** DO NOT MODIFY ***
+// AUTOGENERATED BY go generate from compress_generate.go
+
+package dns
+
+`
+
+// getTypeStruct will take a type and the package scope, and return the
+// (innermost) struct if the type is considered a RR type (currently defined as
+// those structs beginning with a RR_Header, could be redefined as implementing
+// the RR interface). The bool return value indicates if embedded structs were
+// resolved.
+func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
+ st, ok := t.Underlying().(*types.Struct)
+ if !ok {
+ return nil, false
+ }
+ if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
+ return st, false
+ }
+ if st.Field(0).Anonymous() {
+ st, _ := getTypeStruct(st.Field(0).Type(), scope)
+ return st, true
+ }
+ return nil, false
+}
+
+func main() {
+ // Import and type-check the package
+ pkg, err := importer.Default().Import("github.com/miekg/dns")
+ fatalIfErr(err)
+ scope := pkg.Scope()
+
+ domainTypes := map[string]bool{} // Types that have a domain name in them (either compressible or not).
+ cdomainTypes := map[string]bool{} // Types that have a compressible domain name in them (subset of domainType)
+ for _, name := range scope.Names() {
+ o := scope.Lookup(name)
+ if o == nil || !o.Exported() {
+ continue
+ }
+ st, _ := getTypeStruct(o.Type(), scope)
+ if st == nil {
+ continue
+ }
+ if name == "PrivateRR" {
+ continue
+ }
+
+ if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
+ log.Fatalf("Constant Type%s does not exist.", o.Name())
+ }
+
+ for i := 1; i < st.NumFields(); i++ {
+ if _, ok := st.Field(i).Type().(*types.Slice); ok {
+ if st.Tag(i) == `dns:"domain-name"` {
+ domainTypes[o.Name()] = true
+ }
+ if st.Tag(i) == `dns:"cdomain-name"` {
+ cdomainTypes[o.Name()] = true
+ domainTypes[o.Name()] = true
+ }
+ continue
+ }
+
+ switch {
+ case st.Tag(i) == `dns:"domain-name"`:
+ domainTypes[o.Name()] = true
+ case st.Tag(i) == `dns:"cdomain-name"`:
+ cdomainTypes[o.Name()] = true
+ domainTypes[o.Name()] = true
+ }
+ }
+ }
+
+ b := &bytes.Buffer{}
+ b.WriteString(packageHdr)
+
+ // compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
+
+ fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n")
+ fmt.Fprint(b, "switch x := r.(type) {\n")
+ for name, _ := range domainTypes {
+ o := scope.Lookup(name)
+ st, _ := getTypeStruct(o.Type(), scope)
+
+ fmt.Fprintf(b, "case *%s:\n", name)
+ for i := 1; i < st.NumFields(); i++ {
+ out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) }
+
+ if _, ok := st.Field(i).Type().(*types.Slice); ok {
+ switch st.Tag(i) {
+ case `dns:"domain-name"`:
+ fallthrough
+ case `dns:"cdomain-name"`:
+ // For HIP we need to slice over the elements in this slice.
+ fmt.Fprintf(b, `for i := range x.%s {
+ compressionLenHelper(c, x.%s[i])
+ }
+`, st.Field(i).Name(), st.Field(i).Name())
+ }
+ continue
+ }
+
+ switch {
+ case st.Tag(i) == `dns:"cdomain-name"`:
+ fallthrough
+ case st.Tag(i) == `dns:"domain-name"`:
+ out(st.Field(i).Name())
+ }
+ }
+ }
+ fmt.Fprintln(b, "}\n}\n\n")
+
+ // compressionLenSearchType - search cdomain-tags types for compressible names.
+
+ fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n")
+ fmt.Fprint(b, "switch x := r.(type) {\n")
+ for name, _ := range cdomainTypes {
+ o := scope.Lookup(name)
+ st, _ := getTypeStruct(o.Type(), scope)
+
+ fmt.Fprintf(b, "case *%s:\n", name)
+ j := 1
+ for i := 1; i < st.NumFields(); i++ {
+ out := func(s string, j int) {
+ fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name())
+ }
+
+ // There are no slice types with names that can be compressed.
+
+ switch {
+ case st.Tag(i) == `dns:"cdomain-name"`:
+ out(st.Field(i).Name(), j)
+ j++
+ }
+ }
+ k := "k1"
+ ok := "ok1"
+ for i := 2; i < j; i++ {
+ k += fmt.Sprintf(" + k%d", i)
+ ok += fmt.Sprintf(" && ok%d", i)
+ }
+ fmt.Fprintf(b, "return %s, %s\n", k, ok)
+ }
+ fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n")
+
+ // gofmt
+ res, err := format.Source(b.Bytes())
+ if err != nil {
+ b.WriteTo(os.Stderr)
+ log.Fatal(err)
+ }
+
+ f, err := os.Create("zcompress.go")
+ fatalIfErr(err)
+ defer f.Close()
+ f.Write(res)
+}
+
+func fatalIfErr(err error) {
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go
index cdaa833ff..8c4a14ef1 100644
--- a/vendor/github.com/miekg/dns/dane.go
+++ b/vendor/github.com/miekg/dns/dane.go
@@ -6,7 +6,6 @@ import (
"crypto/x509"
"encoding/hex"
"errors"
- "io"
)
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
@@ -23,20 +22,20 @@ func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (st
h := sha256.New()
switch selector {
case 0:
- io.WriteString(h, string(cert.Raw))
+ h.Write(cert.Raw)
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
- io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
+ h.Write(cert.RawSubjectPublicKeyInfo)
return hex.EncodeToString(h.Sum(nil)), nil
}
case 2:
h := sha512.New()
switch selector {
case 0:
- io.WriteString(h, string(cert.Raw))
+ h.Write(cert.Raw)
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
- io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
+ h.Write(cert.RawSubjectPublicKeyInfo)
return hex.EncodeToString(h.Sum(nil)), nil
}
}
diff --git a/vendor/github.com/miekg/dns/dns_test.go b/vendor/github.com/miekg/dns/dns_test.go
index ad68533fd..dbfe25328 100644
--- a/vendor/github.com/miekg/dns/dns_test.go
+++ b/vendor/github.com/miekg/dns/dns_test.go
@@ -310,6 +310,23 @@ func TestMsgLengthCompressionMalformed(t *testing.T) {
m.Len() // Should not crash.
}
+func TestMsgCompressLength2(t *testing.T) {
+ msg := new(Msg)
+ msg.Compress = true
+ msg.SetQuestion(Fqdn("bliep."), TypeANY)
+ msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "blaat.", Rrtype: 0x21, Class: 0x1, Ttl: 0x3c}, Port: 0x4c57, Target: "foo.bar."})
+ msg.Extra = append(msg.Extra, &A{Hdr: RR_Header{Name: "foo.bar.", Rrtype: 0x1, Class: 0x1, Ttl: 0x3c}, A: net.IP{0xac, 0x11, 0x0, 0x3}})
+ predicted := msg.Len()
+ buf, err := msg.Pack()
+ if err != nil {
+ t.Error(err)
+ }
+ if predicted != len(buf) {
+ t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d",
+ msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
+ }
+}
+
func TestToRFC3597(t *testing.T) {
a, _ := NewRR("miek.nl. IN A 10.0.1.1")
x := new(RFC3597)
diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go
index f5f3fbdd8..07262011e 100644
--- a/vendor/github.com/miekg/dns/dnssec.go
+++ b/vendor/github.com/miekg/dns/dnssec.go
@@ -43,7 +43,7 @@ const (
PRIVATEOID uint8 = 254
)
-// Map for algorithm names.
+// AlgorithmToString is a map of algorithm IDs to algorithm names.
var AlgorithmToString = map[uint8]string{
RSAMD5: "RSAMD5",
DH: "DH",
@@ -61,10 +61,10 @@ var AlgorithmToString = map[uint8]string{
PRIVATEOID: "PRIVATEOID",
}
-// Map of algorithm strings.
+// StringToAlgorithm is the reverse of AlgorithmToString.
var StringToAlgorithm = reverseInt8(AlgorithmToString)
-// Map of algorithm crypto hashes.
+// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
var AlgorithmToHash = map[uint8]crypto.Hash{
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
RSASHA1: crypto.SHA1,
@@ -85,7 +85,7 @@ const (
SHA512 // Experimental
)
-// Map for hash names.
+// HashToString is a map of hash IDs to names.
var HashToString = map[uint8]string{
SHA1: "SHA1",
SHA256: "SHA256",
@@ -94,7 +94,7 @@ var HashToString = map[uint8]string{
SHA512: "SHA512",
}
-// Map of hash strings.
+// StringToHash is a map of names to hash IDs.
var StringToHash = reverseInt8(HashToString)
// DNSKEY flag values.
@@ -208,9 +208,6 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
// "|" denotes concatenation
// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
- // digest buffer
- digest := append(owner, wire...) // another copy
-
var hash crypto.Hash
switch h {
case SHA1:
@@ -226,7 +223,8 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
}
s := hash.New()
- s.Write(digest)
+ s.Write(owner)
+ s.Write(wire)
ds.Digest = hex.EncodeToString(s.Sum(nil))
return ds
}
@@ -297,7 +295,6 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
if err != nil {
return err
}
- signdata = append(signdata, wire...)
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
@@ -306,6 +303,7 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
h := hash.New()
h.Write(signdata)
+ h.Write(wire)
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
if err != nil {
@@ -415,7 +413,6 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
if err != nil {
return err
}
- signeddata = append(signeddata, wire...)
sigbuf := rr.sigBuf() // Get the binary signature data
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
@@ -438,6 +435,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
h := hash.New()
h.Write(signeddata)
+ h.Write(wire)
return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
case ECDSAP256SHA256, ECDSAP384SHA384:
@@ -452,6 +450,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
h := hash.New()
h.Write(signeddata)
+ h.Write(wire)
if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
return nil
}
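
The change above replaces append-then-hash with two successive Write calls, avoiding an extra copy of the signed data; hash.Hash.Write never returns an error, so the resulting digest is identical. The idiom in isolation (sha256 is used as an example hash, signeddata and wire are placeholder byte slices):

    h := sha256.New()
    h.Write(signeddata)
    h.Write(wire)
    digest := h.Sum(nil)
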
diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go
index 229a07937..5e4b7741a 100644
--- a/vendor/github.com/miekg/dns/dnssec_keygen.go
+++ b/vendor/github.com/miekg/dns/dnssec_keygen.go
@@ -121,17 +121,17 @@ func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
// RFC 3110: Section 2. RSA Public KEY Resource Records
func exponentToBuf(_E int) []byte {
var buf []byte
- i := big.NewInt(int64(_E))
- if len(i.Bytes()) < 256 {
- buf = make([]byte, 1)
- buf[0] = uint8(len(i.Bytes()))
+ i := big.NewInt(int64(_E)).Bytes()
+ if len(i) < 256 {
+ buf = make([]byte, 1, 1+len(i))
+ buf[0] = uint8(len(i))
} else {
- buf = make([]byte, 3)
+ buf = make([]byte, 3, 3+len(i))
buf[0] = 0
- buf[1] = uint8(len(i.Bytes()) >> 8)
- buf[2] = uint8(len(i.Bytes()))
+ buf[1] = uint8(len(i) >> 8)
+ buf[2] = uint8(len(i))
}
- buf = append(buf, i.Bytes()...)
+ buf = append(buf, i...)
return buf
}
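
As a worked example of the encoding above, an exponent of 65537 occupies three bytes, so the short form applies: a single length octet followed by the exponent bytes (values are illustrative):

    e := big.NewInt(65537).Bytes()            // [0x01, 0x00, 0x01]
    buf := append([]byte{byte(len(e))}, e...) // [0x03, 0x01, 0x00, 0x01]
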
diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go
index 9ff3a617e..4f8d830b8 100644
--- a/vendor/github.com/miekg/dns/dnssec_keyscan.go
+++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go
@@ -36,7 +36,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
return nil, ErrPrivKey
}
// TODO(mg): check if the pubkey matches the private key
- algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0])
+ algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
if err != nil {
return nil, ErrPrivKey
}
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
index 465b85f37..dbff3714c 100644
--- a/vendor/github.com/miekg/dns/edns.go
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -157,7 +157,7 @@ type EDNS0 interface {
String() string
}
-// The nsid EDNS0 option is used to retrieve a nameserver
+// EDNS0_NSID option is used to retrieve a nameserver
// identifier. When sending a request Nsid must be set to the empty string
// The identifier is an opaque string encoded as hex.
// Basic use pattern for creating an nsid option:
@@ -197,7 +197,7 @@ func (e *EDNS0_NSID) String() string { return string(e.Nsid) }
// e := new(dns.EDNS0_SUBNET)
// e.Code = dns.EDNS0SUBNET
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
-// e.SourceNetMask = 32 // 32 for IPV4, 128 for IPv6
+// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
// e.SourceScope = 0
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
@@ -301,7 +301,7 @@ func (e *EDNS0_SUBNET) String() (s string) {
return
}
-// The Cookie EDNS0 option
+// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
@@ -543,15 +543,15 @@ func (e *EDNS0_LOCAL) unpack(b []byte) error {
return nil
}
+// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
+// the TCP connection alive. See RFC 7828.
type EDNS0_TCP_KEEPALIVE struct {
Code uint16 // Always EDNSTCPKEEPALIVE
Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present;
Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
}
-func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 {
- return EDNS0TCPKEEPALIVE
-}
+func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
if e.Timeout != 0 && e.Length != 2 {
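
A hedged sketch of attaching the option documented above to an OPT record, following the construction pattern shown for the other EDNS0 options in this file (field values are illustrative; the timeout is in units of 100 ms):

    o := new(dns.OPT)
    o.Hdr.Name = "."
    o.Hdr.Rrtype = dns.TypeOPT
    e := new(dns.EDNS0_TCP_KEEPALIVE)
    e.Code = dns.EDNS0TCPKEEPALIVE
    e.Length = 2
    e.Timeout = 100 // 10 seconds
    o.Option = append(o.Option, e)
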
diff --git a/vendor/github.com/miekg/dns/idn/punycode.go b/vendor/github.com/miekg/dns/idn/punycode.go
index 7e5c263fc..1d03bf6ae 100644
--- a/vendor/github.com/miekg/dns/idn/punycode.go
+++ b/vendor/github.com/miekg/dns/idn/punycode.go
@@ -242,11 +242,8 @@ func encode(input []byte) []byte {
t, k, cp rune // weight and codepoint calculation
)
- s := &bytes.Buffer{}
for h := basiclen; h < fulllen; n, delta = n+1, delta+1 {
nextltr = next(b, n)
- s.Truncate(0)
- s.WriteRune(nextltr)
delta, n = delta+(nextltr-n)*rune(h+1), nextltr
for _, ltr = range b {
diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
index 0d8cc6fb3..b5c074f05 100644
--- a/vendor/github.com/miekg/dns/msg.go
+++ b/vendor/github.com/miekg/dns/msg.go
@@ -9,6 +9,7 @@
package dns
//go:generate go run msg_generate.go
+//go:generate go run compress_generate.go
import (
crand "crypto/rand"
@@ -16,22 +17,9 @@ import (
"math/big"
"math/rand"
"strconv"
+ "sync"
)
-func init() {
- // Initialize default math/rand source using crypto/rand to provide better
- // security without the performance trade-off.
- buf := make([]byte, 8)
- _, err := crand.Read(buf)
- if err != nil {
- // Failed to read from cryptographic source, fallback to default initial
- // seed (1) by returning early
- return
- }
- seed := binary.BigEndian.Uint64(buf)
- rand.Seed(int64(seed))
-}
-
const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
var (
@@ -66,11 +54,45 @@ var (
// dns.Id = func() uint16 { return 3 }
var Id func() uint16 = id
+var (
+ idLock sync.Mutex
+ idRand *rand.Rand
+)
+
// id returns a 16 bits random number to be used as a
// message id. The random provided should be good enough.
func id() uint16 {
- id32 := rand.Uint32()
- return uint16(id32)
+ idLock.Lock()
+
+ if idRand == nil {
+ // This (partially) works around
+ // https://github.com/golang/go/issues/11833 by only
+ // seeding idRand upon the first call to id.
+
+ var seed int64
+ var buf [8]byte
+
+ if _, err := crand.Read(buf[:]); err == nil {
+ seed = int64(binary.LittleEndian.Uint64(buf[:]))
+ } else {
+ seed = rand.Int63()
+ }
+
+ idRand = rand.New(rand.NewSource(seed))
+ }
+
+ // The call to idRand.Uint32 must be within the
+ // mutex lock because *rand.Rand is not safe for
+ // concurrent use.
+ //
+ // There is no added performance overhead to calling
+ // idRand.Uint32 inside a mutex lock over just
+ // calling rand.Uint32 as the global math/rand rng
+ // is internally protected by a sync.Mutex.
+ id := uint16(idRand.Uint32())
+
+ idLock.Unlock()
+ return id
}
// MsgHdr is a manually-unpacked version of (id, bits).
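
The rewritten id above seeds a private math/rand source from crypto/rand on first use and serialises access with a mutex. The same idiom in isolation (names are illustrative; crand aliases crypto/rand as in the file above):

    var (
        rngMu sync.Mutex
        rng   *rand.Rand
    )

    func randomID() uint16 {
        rngMu.Lock()
        defer rngMu.Unlock()
        if rng == nil {
            var buf [8]byte
            seed := rand.Int63() // fallback if crypto/rand fails
            if _, err := crand.Read(buf[:]); err == nil {
                seed = int64(binary.LittleEndian.Uint64(buf[:]))
            }
            rng = rand.New(rand.NewSource(seed))
        }
        return uint16(rng.Uint32())
    }
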
@@ -241,7 +263,9 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
bsFresh = true
}
// Don't try to compress '.'
- if compress && roBs[begin:] != "." {
+ // We should only compress when compress is true, but we should also still pick
+ // up names that can be used for *future* compression(s).
+ if compression != nil && roBs[begin:] != "." {
if p, ok := compression[roBs[begin:]]; !ok {
// Only offsets smaller than this can be used.
if offset < maxCompressionOffset {
@@ -303,6 +327,7 @@ End:
// UnpackDomainName unpacks a domain name into a string.
func UnpackDomainName(msg []byte, off int) (string, int, error) {
s := make([]byte, 0, 64)
+ labels := 0
off1 := 0
lenmsg := len(msg)
ptr := 0 // number of pointers followed
@@ -345,6 +370,15 @@ Loop:
}
}
}
+ // never exceed the allowed label count length (63)
+ if labels >= 63 {
+ return "", lenmsg, &Error{err: "name exceeds 63 labels"}
+ }
+ labels += 1
+ // never exceed the allowed domain name length (255 octets)
+ if len(s) >= 255 {
+ return "", lenmsg, &Error{err: "name exceeded allowed 255 octets"}
+ }
s = append(s, '.')
off += c
case 0xC0:
@@ -364,6 +398,9 @@ Loop:
if ptr++; ptr > 10 {
return "", lenmsg, &Error{err: "too many compression pointers"}
}
+ // A pointer should advance and point forward; the check on the
+ // previous three lines at least guarantees that following
+ // pointers is loop-free.
off = (c^0xC0)<<8 | int(c1)
default:
// 0x80 and 0x40 are reserved
@@ -710,12 +747,10 @@ func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
// We need the uncompressed length here, because we first pack it and then compress it.
msg = buf
- compress := dns.Compress
- dns.Compress = false
- if packLen := dns.Len() + 1; len(msg) < packLen {
+ uncompressedLen := compressedLen(dns, false)
+ if packLen := uncompressedLen + 1; len(msg) < packLen {
msg = make([]byte, packLen)
}
- dns.Compress = compress
// Pack it in: header and then the pieces.
off := 0
@@ -868,16 +903,18 @@ func (dns *Msg) String() string {
// If dns.Compress is true compression it is taken into account. Len()
// is provided to be a faster way to get the size of the resulting packet,
// than packing it, measuring the size and discarding the buffer.
-func (dns *Msg) Len() int {
+func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
+
+// compressedLen returns the message length when in compressed wire format
+// when compress is true, otherwise the uncompressed length is returned.
+func compressedLen(dns *Msg, compress bool) int {
// We always return one more than needed.
l := 12 // Message header is always 12 bytes
- var compression map[string]int
- if dns.Compress {
- compression = make(map[string]int)
- }
+ compression := map[string]int{}
+
for i := 0; i < len(dns.Question); i++ {
l += dns.Question[i].len()
- if dns.Compress {
+ if compress {
compressionLenHelper(compression, dns.Question[i].Name)
}
}
@@ -886,7 +923,7 @@ func (dns *Msg) Len() int {
continue
}
l += dns.Answer[i].len()
- if dns.Compress {
+ if compress {
k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name)
if ok {
l += 1 - k
@@ -904,7 +941,7 @@ func (dns *Msg) Len() int {
continue
}
l += dns.Ns[i].len()
- if dns.Compress {
+ if compress {
k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name)
if ok {
l += 1 - k
@@ -922,7 +959,7 @@ func (dns *Msg) Len() int {
continue
}
l += dns.Extra[i].len()
- if dns.Compress {
+ if compress {
k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name)
if ok {
l += 1 - k
@@ -970,97 +1007,6 @@ func compressionLenSearch(c map[string]int, s string) (int, bool) {
return 0, false
}
-// TODO(miek): should add all types, because the all can be *used* for compression. Autogenerate from msg_generate and put in zmsg.go
-func compressionLenHelperType(c map[string]int, r RR) {
- switch x := r.(type) {
- case *NS:
- compressionLenHelper(c, x.Ns)
- case *MX:
- compressionLenHelper(c, x.Mx)
- case *CNAME:
- compressionLenHelper(c, x.Target)
- case *PTR:
- compressionLenHelper(c, x.Ptr)
- case *SOA:
- compressionLenHelper(c, x.Ns)
- compressionLenHelper(c, x.Mbox)
- case *MB:
- compressionLenHelper(c, x.Mb)
- case *MG:
- compressionLenHelper(c, x.Mg)
- case *MR:
- compressionLenHelper(c, x.Mr)
- case *MF:
- compressionLenHelper(c, x.Mf)
- case *MD:
- compressionLenHelper(c, x.Md)
- case *RT:
- compressionLenHelper(c, x.Host)
- case *RP:
- compressionLenHelper(c, x.Mbox)
- compressionLenHelper(c, x.Txt)
- case *MINFO:
- compressionLenHelper(c, x.Rmail)
- compressionLenHelper(c, x.Email)
- case *AFSDB:
- compressionLenHelper(c, x.Hostname)
- case *SRV:
- compressionLenHelper(c, x.Target)
- case *NAPTR:
- compressionLenHelper(c, x.Replacement)
- case *RRSIG:
- compressionLenHelper(c, x.SignerName)
- case *NSEC:
- compressionLenHelper(c, x.NextDomain)
- // HIP?
- }
-}
-
-// Only search on compressing these types.
-func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
- switch x := r.(type) {
- case *NS:
- return compressionLenSearch(c, x.Ns)
- case *MX:
- return compressionLenSearch(c, x.Mx)
- case *CNAME:
- return compressionLenSearch(c, x.Target)
- case *DNAME:
- return compressionLenSearch(c, x.Target)
- case *PTR:
- return compressionLenSearch(c, x.Ptr)
- case *SOA:
- k, ok := compressionLenSearch(c, x.Ns)
- k1, ok1 := compressionLenSearch(c, x.Mbox)
- if !ok && !ok1 {
- return 0, false
- }
- return k + k1, true
- case *MB:
- return compressionLenSearch(c, x.Mb)
- case *MG:
- return compressionLenSearch(c, x.Mg)
- case *MR:
- return compressionLenSearch(c, x.Mr)
- case *MF:
- return compressionLenSearch(c, x.Mf)
- case *MD:
- return compressionLenSearch(c, x.Md)
- case *RT:
- return compressionLenSearch(c, x.Host)
- case *MINFO:
- k, ok := compressionLenSearch(c, x.Rmail)
- k1, ok1 := compressionLenSearch(c, x.Email)
- if !ok && !ok1 {
- return 0, false
- }
- return k + k1, true
- case *AFSDB:
- return compressionLenSearch(c, x.Hostname)
- }
- return 0, false
-}
-
// Copy returns a new RR which is a deep-copy of r.
func Copy(r RR) RR { r1 := r.copy(); return r1 }
diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go
index 6f10f3e65..51ce7f8b1 100644
--- a/vendor/github.com/miekg/dns/nsecx.go
+++ b/vendor/github.com/miekg/dns/nsecx.go
@@ -3,7 +3,6 @@ package dns
import (
"crypto/sha1"
"hash"
- "io"
"strings"
)
@@ -36,15 +35,15 @@ func HashName(label string, ha uint8, iter uint16, salt string) string {
}
// k = 0
- name = append(name, wire...)
- io.WriteString(s, string(name))
+ s.Write(name)
+ s.Write(wire)
nsec3 := s.Sum(nil)
// k > 0
for k := uint16(0); k < iter; k++ {
s.Reset()
- nsec3 = append(nsec3, wire...)
- io.WriteString(s, string(nsec3))
- nsec3 = s.Sum(nil)
+ s.Write(nsec3)
+ s.Write(wire)
+ nsec3 = s.Sum(nsec3[:0])
}
return toBase32(nsec3)
}
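
The loop above reuses the output slice via Sum(nsec3[:0]), so no new digest buffer is allocated per iteration. The idiom generically (name, salt and iterations are placeholders for the wire-format owner name, the salt bytes and the NSEC3 iteration count):

    h := sha1.New()
    h.Write(name)
    h.Write(salt)
    digest := h.Sum(nil)
    for k := uint16(0); k < iterations; k++ {
        h.Reset()
        h.Write(digest)
        h.Write(salt)
        digest = h.Sum(digest[:0]) // reuse the backing array
    }
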
diff --git a/vendor/github.com/miekg/dns/parse_test.go b/vendor/github.com/miekg/dns/parse_test.go
index dc18b59ce..c727c1301 100644
--- a/vendor/github.com/miekg/dns/parse_test.go
+++ b/vendor/github.com/miekg/dns/parse_test.go
@@ -836,11 +836,7 @@ func TestSRVPacking(t *testing.T) {
if err != nil {
continue
}
- port := 8484
- tmp, err := strconv.Atoi(p)
- if err == nil {
- port = tmp
- }
+ port, _ := strconv.ParseUint(p, 10, 16)
rr := &SRV{
Hdr: RR_Header{Name: "somename.",
diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go
index 099dac948..f6e7a47a6 100644
--- a/vendor/github.com/miekg/dns/reverse.go
+++ b/vendor/github.com/miekg/dns/reverse.go
@@ -6,10 +6,10 @@ var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
-// Map of opcodes strings.
+// StringToOpcode is a map of opcodes to strings.
var StringToOpcode = reverseInt(OpcodeToString)
-// Map of rcodes strings.
+// StringToRcode is a map of rcodes to strings.
var StringToRcode = reverseInt(RcodeToString)
// Reverse a map
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
index d34597ba3..de0db32f8 100644
--- a/vendor/github.com/miekg/dns/scan.go
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -819,8 +819,8 @@ func classToInt(token string) (uint16, bool) {
if len(token) < offset+1 {
return 0, false
}
- class, ok := strconv.Atoi(token[offset:])
- if ok != nil || class > maxUint16 {
+ class, err := strconv.ParseUint(token[offset:], 10, 16)
+ if err != nil {
return 0, false
}
return uint16(class), true
@@ -832,8 +832,8 @@ func typeToInt(token string) (uint16, bool) {
if len(token) < offset+1 {
return 0, false
}
- typ, ok := strconv.Atoi(token[offset:])
- if ok != nil || typ > maxUint16 {
+ typ, err := strconv.ParseUint(token[offset:], 10, 16)
+ if err != nil {
return 0, false
}
return uint16(typ), true
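
Across these parsers, ParseUint's bitSize argument makes the old manual maxUint16 comparison redundant, since out-of-range input already fails. For example:

    v, err := strconv.ParseUint("70000", 10, 16)
    // err is a *strconv.NumError wrapping strconv.ErrRange, because 70000
    // does not fit in 16 bits; v is clamped to 65535.
    _ = v
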
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
index 8d6a1bf24..f4c0d3767 100644
--- a/vendor/github.com/miekg/dns/scan_rr.go
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -447,7 +447,7 @@ func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad MX Pref", l}, ""
}
@@ -476,7 +476,7 @@ func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil {
return nil, &ParseError{f, "bad RT Preference", l}, ""
}
@@ -506,7 +506,7 @@ func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad AFSDB Subtype", l}, ""
}
@@ -551,7 +551,7 @@ func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad KX Pref", l}, ""
}
@@ -665,7 +665,7 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.err {
return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
}
- if j, e := strconv.Atoi(l.token); e != nil {
+ if j, e := strconv.ParseUint(l.token, 10, 32); e != nil {
if i == 0 {
// Serial should be a number
return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
@@ -705,21 +705,21 @@ func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad SRV Priority", l}, ""
}
rr.Priority = uint16(i)
<-c // zBlank
l = <-c // zString
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad SRV Weight", l}, ""
}
rr.Weight = uint16(i)
<-c // zBlank
l = <-c // zString
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad SRV Port", l}, ""
}
@@ -749,14 +749,14 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad NAPTR Order", l}, ""
}
rr.Order = uint16(i)
<-c // zBlank
l = <-c // zString
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad NAPTR Preference", l}, ""
}
@@ -885,7 +885,7 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return nil, &ParseError{f, "bad LOC Latitude", l}, ""
}
@@ -897,7 +897,7 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
goto East
}
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return nil, &ParseError{f, "bad LOC Latitude minutes", l}, ""
}
@@ -923,7 +923,7 @@ East:
// East
<-c // zBlank
l = <-c
- if i, e := strconv.Atoi(l.token); e != nil || l.err {
+ if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
return nil, &ParseError{f, "bad LOC Longitude", l}, ""
} else {
rr.Longitude = 1000 * 60 * 60 * uint32(i)
@@ -934,7 +934,7 @@ East:
if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
goto Altitude
}
- if i, e := strconv.Atoi(l.token); e != nil || l.err {
+ if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
return nil, &ParseError{f, "bad LOC Longitude minutes", l}, ""
} else {
rr.Longitude += 1000 * 60 * uint32(i)
@@ -1016,7 +1016,7 @@ func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, l.comment
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, ""
}
@@ -1077,14 +1077,14 @@ func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
}
if v, ok := StringToCertType[l.token]; ok {
rr.Type = v
- } else if i, e := strconv.Atoi(l.token); e != nil {
+ } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil {
return nil, &ParseError{f, "bad CERT Type", l}, ""
} else {
rr.Type = uint16(i)
}
<-c // zBlank
l = <-c // zString
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad CERT KeyTag", l}, ""
}
@@ -1093,7 +1093,7 @@ func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
l = <-c // zString
if v, ok := StringToAlgorithm[l.token]; ok {
rr.Algorithm = v
- } else if i, e := strconv.Atoi(l.token); e != nil {
+ } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
return nil, &ParseError{f, "bad CERT Algorithm", l}, ""
} else {
rr.Algorithm = uint8(i)
@@ -1148,21 +1148,21 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
}
<-c // zBlank
l = <-c
- i, err := strconv.Atoi(l.token)
+ i, err := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err {
return nil, &ParseError{f, "bad RRSIG Algorithm", l}, ""
}
rr.Algorithm = uint8(i)
<-c // zBlank
l = <-c
- i, err = strconv.Atoi(l.token)
+ i, err = strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err {
return nil, &ParseError{f, "bad RRSIG Labels", l}, ""
}
rr.Labels = uint8(i)
<-c // zBlank
l = <-c
- i, err = strconv.Atoi(l.token)
+ i, err = strconv.ParseUint(l.token, 10, 32)
if err != nil || l.err {
return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, ""
}
@@ -1193,7 +1193,7 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
}
<-c // zBlank
l = <-c
- i, err = strconv.Atoi(l.token)
+ i, err = strconv.ParseUint(l.token, 10, 16)
if err != nil || l.err {
return nil, &ParseError{f, "bad RRSIG KeyTag", l}, ""
}
@@ -1274,21 +1274,21 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, l.comment
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad NSEC3 Hash", l}, ""
}
rr.Hash = uint8(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad NSEC3 Flags", l}, ""
}
rr.Flags = uint8(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad NSEC3 Iterations", l}, ""
}
@@ -1342,21 +1342,21 @@ func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, ""
}
rr.Hash = uint8(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, ""
}
rr.Flags = uint8(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, ""
}
@@ -1440,14 +1440,14 @@ func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad SSHFP Algorithm", l}, ""
}
rr.Algorithm = uint8(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad SSHFP Type", l}, ""
}
@@ -1469,21 +1469,21 @@ func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, str
if l.length == 0 {
return rr, nil, l.comment
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad " + typ + " Flags", l}, ""
}
rr.Flags = uint16(i)
<-c // zBlank
l = <-c // zString
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad " + typ + " Protocol", l}, ""
}
rr.Protocol = uint8(i)
<-c // zBlank
l = <-c // zString
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
}
@@ -1525,21 +1525,21 @@ func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, l.comment
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad RKEY Flags", l}, ""
}
rr.Flags = uint16(i)
<-c // zBlank
l = <-c // zString
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad RKEY Protocol", l}, ""
}
rr.Protocol = uint8(i)
<-c // zBlank
l = <-c // zString
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad RKEY Algorithm", l}, ""
}
@@ -1610,14 +1610,14 @@ func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string)
if l.length == 0 {
return rr, nil, l.comment
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, ""
}
rr.KeyTag = uint16(i)
<-c // zBlank
l = <-c
- if i, e := strconv.Atoi(l.token); e != nil {
+ if i, e = strconv.ParseUint(l.token, 10, 8); e != nil {
i, ok := StringToAlgorithm[l.tokenUpper]
if !ok || l.err {
return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
@@ -1628,7 +1628,7 @@ func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string)
}
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad " + typ + " DigestType", l}, ""
}
@@ -1669,14 +1669,14 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, l.comment
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad TA KeyTag", l}, ""
}
rr.KeyTag = uint16(i)
<-c // zBlank
l = <-c
- if i, e := strconv.Atoi(l.token); e != nil {
+ if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
i, ok := StringToAlgorithm[l.tokenUpper]
if !ok || l.err {
return nil, &ParseError{f, "bad TA Algorithm", l}, ""
@@ -1687,7 +1687,7 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
}
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad TA DigestType", l}, ""
}
@@ -1707,21 +1707,21 @@ func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, l.comment
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad TLSA Usage", l}, ""
}
rr.Usage = uint8(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad TLSA Selector", l}, ""
}
rr.Selector = uint8(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad TLSA MatchingType", l}, ""
}
@@ -1742,21 +1742,21 @@ func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, l.comment
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad SMIMEA Usage", l}, ""
}
rr.Usage = uint8(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad SMIMEA Selector", l}, ""
}
rr.Selector = uint8(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, ""
}
@@ -1842,14 +1842,14 @@ func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad URI Priority", l}, ""
}
rr.Priority = uint16(i)
<-c // zBlank
l = <-c
- i, e = strconv.Atoi(l.token)
+ i, e = strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad URI Weight", l}, ""
}
@@ -1888,7 +1888,7 @@ func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad NID Preference", l}, ""
}
@@ -1911,7 +1911,7 @@ func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad L32 Preference", l}, ""
}
@@ -1933,7 +1933,7 @@ func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad LP Preference", l}, ""
}
@@ -1966,7 +1966,7 @@ func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad L64 Preference", l}, ""
}
@@ -1988,7 +1988,7 @@ func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return nil, &ParseError{f, "bad UID Uid", l}, ""
}
@@ -2003,7 +2003,7 @@ func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 32)
if e != nil || l.err {
return nil, &ParseError{f, "bad GID Gid", l}, ""
}
@@ -2033,7 +2033,7 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, ""
}
- i, e := strconv.Atoi(l.token)
+ i, e := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err {
return nil, &ParseError{f, "bad PX Preference", l}, ""
}
@@ -2079,7 +2079,7 @@ func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if l.length == 0 {
return rr, nil, l.comment
}
- i, err := strconv.Atoi(l.token)
+ i, err := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err {
return nil, &ParseError{f, "bad CAA Flag", l}, ""
}
diff --git a/vendor/github.com/miekg/dns/server_test.go b/vendor/github.com/miekg/dns/server_test.go
index 098be2ebe..f17a2f90f 100644
--- a/vendor/github.com/miekg/dns/server_test.go
+++ b/vendor/github.com/miekg/dns/server_test.go
@@ -20,7 +20,7 @@ func HelloServer(w ResponseWriter, req *Msg) {
w.WriteMsg(m)
}
-func HelloServerBadId(w ResponseWriter, req *Msg) {
+func HelloServerBadID(w ResponseWriter, req *Msg) {
m := new(Msg)
m.SetReply(req)
m.Id++
@@ -548,7 +548,7 @@ func TestHandlerCloseTCP(t *testing.T) {
return
}
time.Sleep(time.Second / 10)
- tries += 1
+ tries++
goto exchange
}
}()
diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go
index 2dce06af8..f31e9e684 100644
--- a/vendor/github.com/miekg/dns/sig0.go
+++ b/vendor/github.com/miekg/dns/sig0.go
@@ -60,16 +60,15 @@ func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
}
rr.Signature = toBase64(signature)
- sig := string(signature)
- buf = append(buf, sig...)
+ buf = append(buf, signature...)
if len(buf) > int(^uint16(0)) {
return nil, ErrBuf
}
// Adjust sig data length
rdoff := len(mbuf) + 1 + 2 + 2 + 4
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
- rdlen += uint16(len(sig))
+ rdlen += uint16(len(signature))
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
// Adjust additional count
adc := binary.BigEndian.Uint16(buf[10:])
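
In sig0.go the signature is now appended as raw bytes rather than via an intermediate string, after which the RDLENGTH already serialized into the buffer is bumped by the signature length. Isolated, that read-modify-write on a big-endian length field looks roughly like this (buffer layout and offsets are invented for the sketch):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // A buffer with a 16-bit big-endian length field at offset 2,
        // currently recording 5 bytes of payload.
        buf := []byte{0xAA, 0xBB, 0x00, 0x05, 1, 2, 3, 4, 5}
        sig := []byte{9, 9, 9} // extra bytes to account for

        buf = append(buf, sig...) // no intermediate string conversion needed

        // Read the old length, add the appended bytes, write it back in place.
        rdoff := 2
        rdlen := binary.BigEndian.Uint16(buf[rdoff:])
        rdlen += uint16(len(sig))
        binary.BigEndian.PutUint16(buf[rdoff:], rdlen)

        fmt.Printf("new length field: %d, total buffer: %d bytes\n", rdlen, len(buf))
    }
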
diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go
index 3a4bb5700..4e7ded4b3 100644
--- a/vendor/github.com/miekg/dns/smimea.go
+++ b/vendor/github.com/miekg/dns/smimea.go
@@ -33,15 +33,15 @@ func (r *SMIMEA) Verify(cert *x509.Certificate) error {
return ErrSig // ErrSig, really?
}
-// SIMEAName returns the ownername of a SMIMEA resource record as per the
+// SMIMEAName returns the ownername of a SMIMEA resource record as per the
// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3
-func SMIMEAName(email_address string, domain_name string) (string, error) {
+func SMIMEAName(email, domain string) (string, error) {
hasher := sha256.New()
- hasher.Write([]byte(email_address))
+ hasher.Write([]byte(email))
// RFC Section 3: "The local-part is hashed using the SHA2-256
// algorithm with the hash truncated to 28 octets and
// represented in its hexadecimal representation to become the
// left-most label in the prepared domain name"
- return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain_name, nil
+ return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
}
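
Beyond the lint-driven renames, the hunk documents how SMIMEA owner names are formed: SHA-256 over the mailbox local-part, truncated to 28 octets, hex-encoded, then joined with `_smimecert.` and the domain. A self-contained sketch that mirrors that computation (the mailbox is invented):

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
    )

    // smimeaOwner mirrors dns.SMIMEAName: the left-most label is the hex form
    // of the first 28 octets of SHA-256(local-part), per draft-ietf-dane-smime.
    func smimeaOwner(localPart, domain string) string {
        sum := sha256.Sum256([]byte(localPart))
        return hex.EncodeToString(sum[:28]) + "._smimecert." + domain
    }

    func main() {
        // Invented mailbox alice@example.com -> owner name for its SMIMEA record.
        fmt.Println(smimeaOwner("alice", "example.com"))
    }
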
diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go
index 78365e1c5..24013096b 100644
--- a/vendor/github.com/miekg/dns/tsig.go
+++ b/vendor/github.com/miekg/dns/tsig.go
@@ -9,7 +9,6 @@ import (
"encoding/binary"
"encoding/hex"
"hash"
- "io"
"strconv"
"strings"
"time"
@@ -124,7 +123,7 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
default:
return nil, "", ErrKeyAlg
}
- io.WriteString(h, string(buf))
+ h.Write(buf)
t.MAC = hex.EncodeToString(h.Sum(nil))
t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
index c8b3191e5..c17251cb6 100644
--- a/vendor/github.com/miekg/dns/types.go
+++ b/vendor/github.com/miekg/dns/types.go
@@ -144,7 +144,7 @@ const (
OpcodeUpdate = 5
)
-// Headers is the wire format for the DNS packet header.
+// Header is the wire format for the DNS packet header.
type Header struct {
Id uint16
Bits uint16
diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go
index bf80da329..dd1310942 100644
--- a/vendor/github.com/miekg/dns/types_generate.go
+++ b/vendor/github.com/miekg/dns/types_generate.go
@@ -197,7 +197,7 @@ func main() {
case st.Tag(i) == "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
- o("l += 1 // %s\n")
+ o("l++ // %s\n")
case types.Uint16:
o("l += 2 // %s\n")
case types.Uint32:
diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
index c79c6c883..af111b9a8 100644
--- a/vendor/github.com/miekg/dns/udp.go
+++ b/vendor/github.com/miekg/dns/udp.go
@@ -1,10 +1,9 @@
-// +build !windows,!plan9
+// +build !windows
package dns
import (
"net"
- "syscall"
)
// SessionUDP holds the remote address and the associated
@@ -17,29 +16,6 @@ type SessionUDP struct {
// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
-// setUDPSocketOptions sets the UDP socket options.
-// This function is implemented on a per platform basis. See udp_*.go for more details
-func setUDPSocketOptions(conn *net.UDPConn) error {
- sa, err := getUDPSocketName(conn)
- if err != nil {
- return err
- }
- switch sa.(type) {
- case *syscall.SockaddrInet6:
- v6only, err := getUDPSocketOptions6Only(conn)
- if err != nil {
- return err
- }
- setUDPSocketOptions6(conn)
- if !v6only {
- setUDPSocketOptions4(conn)
- }
- case *syscall.SockaddrInet4:
- setUDPSocketOptions4(conn)
- }
- return nil
-}
-
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go
index 142a80073..033df4239 100644
--- a/vendor/github.com/miekg/dns/udp_linux.go
+++ b/vendor/github.com/miekg/dns/udp_linux.go
@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,!appengine
package dns
@@ -15,6 +15,29 @@ import (
"syscall"
)
+// setUDPSocketOptions sets the UDP socket options.
+// This function is implemented on a per platform basis. See udp_*.go for more details
+func setUDPSocketOptions(conn *net.UDPConn) error {
+ sa, err := getUDPSocketName(conn)
+ if err != nil {
+ return err
+ }
+ switch sa.(type) {
+ case *syscall.SockaddrInet6:
+ v6only, err := getUDPSocketOptions6Only(conn)
+ if err != nil {
+ return err
+ }
+ setUDPSocketOptions6(conn)
+ if !v6only {
+ setUDPSocketOptions4(conn)
+ }
+ case *syscall.SockaddrInet4:
+ setUDPSocketOptions4(conn)
+ }
+ return nil
+}
+
// setUDPSocketOptions4 prepares the v4 socket for sessions.
func setUDPSocketOptions4(conn *net.UDPConn) error {
file, err := conn.File()
diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go
index d40732441..488a282b2 100644
--- a/vendor/github.com/miekg/dns/udp_other.go
+++ b/vendor/github.com/miekg/dns/udp_other.go
@@ -1,17 +1,15 @@
-// +build !linux,!plan9
+// +build !linux appengine
package dns
import (
"net"
- "syscall"
)
// These do nothing. See udp_linux.go for an example of how to implement this.
// We tried to adhire to some kind of naming scheme.
-
+func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
func setUDPSocketOptions4(conn *net.UDPConn) error { return nil }
func setUDPSocketOptions6(conn *net.UDPConn) error { return nil }
func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil }
-func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil }
diff --git a/vendor/github.com/miekg/dns/udp_plan9.go b/vendor/github.com/miekg/dns/udp_plan9.go
deleted file mode 100644
index b794deeba..000000000
--- a/vendor/github.com/miekg/dns/udp_plan9.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package dns
-
-import (
- "net"
-)
-
-func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
-
-// SessionUDP holds the remote address and the associated
-// out-of-band data.
-type SessionUDP struct {
- raddr *net.UDPAddr
- context []byte
-}
-
-// RemoteAddr returns the remote network address.
-func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
-
-// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
-// net.UDPAddr.
-func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
- oob := make([]byte, 40)
- n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
- if err != nil {
- return n, nil, err
- }
- return n, &SessionUDP{raddr, oob[:oobn]}, err
-}
-
-// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
-func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
- n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
- return n, err
-}
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
index 2ce4b3300..51e532ac2 100644
--- a/vendor/github.com/miekg/dns/udp_windows.go
+++ b/vendor/github.com/miekg/dns/udp_windows.go
@@ -8,6 +8,8 @@ type SessionUDP struct {
raddr *net.UDPAddr
}
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
@@ -25,10 +27,3 @@ func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, e
return n, err
}
-func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
-
-// setUDPSocketOptions sets the UDP socket options.
-// This function is implemented on a per platform basis. See udp_*.go for more details
-func setUDPSocketOptions(conn *net.UDPConn) error {
- return nil
-}
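
The udp_*.go reshuffle above is the usual complementary build-constraint pattern: the real setUDPSocketOptions moves behind `linux,!appengine`, a no-op stub sits behind the complement `!linux appengine`, and the plan9-only file disappears now that udp.go builds under plain `!windows`. A stripped-down illustration of such a constraint pair, using placeholder file and function names that are not part of this patch:

    // file: feature_linux.go (placeholder name)
    // +build linux,!appengine

    package demo

    // setFeature carries the platform-specific work on ordinary Linux builds.
    func setFeature() error {
        // real setsockopt-style calls would go here
        return nil
    }

    // file: feature_other.go (placeholder name)
    // +build !linux appengine

    package demo

    // setFeature compiles everywhere the Linux file is excluded and does nothing.
    func setFeature() error { return nil }

Exactly one of the two files is selected on any given platform, so the package always has a single definition of the function.
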
diff --git a/vendor/github.com/miekg/dns/update_test.go b/vendor/github.com/miekg/dns/update_test.go
index 56602dfe9..12760a1ee 100644
--- a/vendor/github.com/miekg/dns/update_test.go
+++ b/vendor/github.com/miekg/dns/update_test.go
@@ -92,28 +92,28 @@ func TestPreReqAndRemovals(t *testing.T) {
m.Id = 1234
// Use a full set of RRs each time, so we are sure the rdata is stripped.
- rr_name1, _ := NewRR("name_used. 3600 IN A 127.0.0.1")
- rr_name2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1")
- rr_remove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1")
- rr_remove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1")
- rr_remove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1")
- rr_insert, _ := NewRR("insert. 3600 IN A 127.0.0.1")
- rr_rrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1")
- rr_rrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1")
- rr_rrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1")
+ rrName1, _ := NewRR("name_used. 3600 IN A 127.0.0.1")
+ rrName2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1")
+ rrRemove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1")
+ rrRemove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1")
+ rrRemove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1")
+ rrInsert, _ := NewRR("insert. 3600 IN A 127.0.0.1")
+ rrRrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1")
+ rrRrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1")
+ rrRrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1")
// Handle the prereqs.
- m.NameUsed([]RR{rr_name1})
- m.NameNotUsed([]RR{rr_name2})
- m.RRsetUsed([]RR{rr_rrset1})
- m.Used([]RR{rr_rrset2})
- m.RRsetNotUsed([]RR{rr_rrset3})
+ m.NameUsed([]RR{rrName1})
+ m.NameNotUsed([]RR{rrName2})
+ m.RRsetUsed([]RR{rrRrset1})
+ m.Used([]RR{rrRrset2})
+ m.RRsetNotUsed([]RR{rrRrset3})
// and now the updates.
- m.RemoveName([]RR{rr_remove1})
- m.RemoveRRset([]RR{rr_remove2})
- m.Remove([]RR{rr_remove3})
- m.Insert([]RR{rr_insert})
+ m.RemoveName([]RR{rrRemove1})
+ m.RemoveRRset([]RR{rrRemove2})
+ m.Remove([]RR{rrRemove3})
+ m.Insert([]RR{rrInsert})
// This test function isn't a Example function because we print these RR with tabs at the
// end and the Example function trim these, thus they never match.
diff --git a/vendor/github.com/miekg/dns/zcompress.go b/vendor/github.com/miekg/dns/zcompress.go
new file mode 100644
index 000000000..86a31a916
--- /dev/null
+++ b/vendor/github.com/miekg/dns/zcompress.go
@@ -0,0 +1,119 @@
+// *** DO NOT MODIFY ***
+// AUTOGENERATED BY go generate from compress_generate.go
+
+package dns
+
+func compressionLenHelperType(c map[string]int, r RR) {
+ switch x := r.(type) {
+ case *KX:
+ compressionLenHelper(c, x.Exchanger)
+ case *MX:
+ compressionLenHelper(c, x.Mx)
+ case *NSEC:
+ compressionLenHelper(c, x.NextDomain)
+ case *DNAME:
+ compressionLenHelper(c, x.Target)
+ case *HIP:
+ for i := range x.RendezvousServers {
+ compressionLenHelper(c, x.RendezvousServers[i])
+ }
+ case *CNAME:
+ compressionLenHelper(c, x.Target)
+ case *MR:
+ compressionLenHelper(c, x.Mr)
+ case *PX:
+ compressionLenHelper(c, x.Map822)
+ compressionLenHelper(c, x.Mapx400)
+ case *SIG:
+ compressionLenHelper(c, x.SignerName)
+ case *SRV:
+ compressionLenHelper(c, x.Target)
+ case *TALINK:
+ compressionLenHelper(c, x.PreviousName)
+ compressionLenHelper(c, x.NextName)
+ case *LP:
+ compressionLenHelper(c, x.Fqdn)
+ case *NAPTR:
+ compressionLenHelper(c, x.Replacement)
+ case *NS:
+ compressionLenHelper(c, x.Ns)
+ case *RP:
+ compressionLenHelper(c, x.Mbox)
+ compressionLenHelper(c, x.Txt)
+ case *RRSIG:
+ compressionLenHelper(c, x.SignerName)
+ case *TKEY:
+ compressionLenHelper(c, x.Algorithm)
+ case *TSIG:
+ compressionLenHelper(c, x.Algorithm)
+ case *AFSDB:
+ compressionLenHelper(c, x.Hostname)
+ case *MF:
+ compressionLenHelper(c, x.Mf)
+ case *RT:
+ compressionLenHelper(c, x.Host)
+ case *MINFO:
+ compressionLenHelper(c, x.Rmail)
+ compressionLenHelper(c, x.Email)
+ case *PTR:
+ compressionLenHelper(c, x.Ptr)
+ case *SOA:
+ compressionLenHelper(c, x.Ns)
+ compressionLenHelper(c, x.Mbox)
+ case *MD:
+ compressionLenHelper(c, x.Md)
+ case *NSAPPTR:
+ compressionLenHelper(c, x.Ptr)
+ case *MG:
+ compressionLenHelper(c, x.Mg)
+ case *MB:
+ compressionLenHelper(c, x.Mb)
+ }
+}
+
+func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
+ switch x := r.(type) {
+ case *MF:
+ k1, ok1 := compressionLenSearch(c, x.Mf)
+ return k1, ok1
+ case *MG:
+ k1, ok1 := compressionLenSearch(c, x.Mg)
+ return k1, ok1
+ case *MINFO:
+ k1, ok1 := compressionLenSearch(c, x.Rmail)
+ k2, ok2 := compressionLenSearch(c, x.Email)
+ return k1 + k2, ok1 && ok2
+ case *MR:
+ k1, ok1 := compressionLenSearch(c, x.Mr)
+ return k1, ok1
+ case *PTR:
+ k1, ok1 := compressionLenSearch(c, x.Ptr)
+ return k1, ok1
+ case *AFSDB:
+ k1, ok1 := compressionLenSearch(c, x.Hostname)
+ return k1, ok1
+ case *CNAME:
+ k1, ok1 := compressionLenSearch(c, x.Target)
+ return k1, ok1
+ case *MD:
+ k1, ok1 := compressionLenSearch(c, x.Md)
+ return k1, ok1
+ case *RT:
+ k1, ok1 := compressionLenSearch(c, x.Host)
+ return k1, ok1
+ case *SOA:
+ k1, ok1 := compressionLenSearch(c, x.Ns)
+ k2, ok2 := compressionLenSearch(c, x.Mbox)
+ return k1 + k2, ok1 && ok2
+ case *MB:
+ k1, ok1 := compressionLenSearch(c, x.Mb)
+ return k1, ok1
+ case *MX:
+ k1, ok1 := compressionLenSearch(c, x.Mx)
+ return k1, ok1
+ case *NS:
+ k1, ok1 := compressionLenSearch(c, x.Ns)
+ return k1, ok1
+ }
+ return 0, false
+}
diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go
index 3c052773e..311b8243b 100644
--- a/vendor/github.com/miekg/dns/ztypes.go
+++ b/vendor/github.com/miekg/dns/ztypes.go
@@ -254,7 +254,7 @@ func (rr *ANY) len() int {
}
func (rr *CAA) len() int {
l := rr.Hdr.len()
- l += 1 // Flag
+ l++ // Flag
l += len(rr.Tag) + 1
l += len(rr.Value)
return l
@@ -263,7 +263,7 @@ func (rr *CERT) len() int {
l := rr.Hdr.len()
l += 2 // Type
l += 2 // KeyTag
- l += 1 // Algorithm
+ l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
return l
}
@@ -285,16 +285,16 @@ func (rr *DNAME) len() int {
func (rr *DNSKEY) len() int {
l := rr.Hdr.len()
l += 2 // Flags
- l += 1 // Protocol
- l += 1 // Algorithm
+ l++ // Protocol
+ l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
func (rr *DS) len() int {
l := rr.Hdr.len()
l += 2 // KeyTag
- l += 1 // Algorithm
- l += 1 // DigestType
+ l++ // Algorithm
+ l++ // DigestType
l += len(rr.Digest)/2 + 1
return l
}
@@ -333,8 +333,8 @@ func (rr *HINFO) len() int {
}
func (rr *HIP) len() int {
l := rr.Hdr.len()
- l += 1 // HitLength
- l += 1 // PublicKeyAlgorithm
+ l++ // HitLength
+ l++ // PublicKeyAlgorithm
l += 2 // PublicKeyLength
l += len(rr.Hit)/2 + 1
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
@@ -363,10 +363,10 @@ func (rr *L64) len() int {
}
func (rr *LOC) len() int {
l := rr.Hdr.len()
- l += 1 // Version
- l += 1 // Size
- l += 1 // HorizPre
- l += 1 // VertPre
+ l++ // Version
+ l++ // Size
+ l++ // HorizPre
+ l++ // VertPre
l += 4 // Latitude
l += 4 // Longitude
l += 4 // Altitude
@@ -455,10 +455,10 @@ func (rr *NSAPPTR) len() int {
}
func (rr *NSEC3PARAM) len() int {
l := rr.Hdr.len()
- l += 1 // Hash
- l += 1 // Flags
+ l++ // Hash
+ l++ // Flags
l += 2 // Iterations
- l += 1 // SaltLength
+ l++ // SaltLength
l += len(rr.Salt)/2 + 1
return l
}
@@ -487,8 +487,8 @@ func (rr *RFC3597) len() int {
func (rr *RKEY) len() int {
l := rr.Hdr.len()
l += 2 // Flags
- l += 1 // Protocol
- l += 1 // Algorithm
+ l++ // Protocol
+ l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
@@ -501,8 +501,8 @@ func (rr *RP) len() int {
func (rr *RRSIG) len() int {
l := rr.Hdr.len()
l += 2 // TypeCovered
- l += 1 // Algorithm
- l += 1 // Labels
+ l++ // Algorithm
+ l++ // Labels
l += 4 // OrigTtl
l += 4 // Expiration
l += 4 // Inception
@@ -519,9 +519,9 @@ func (rr *RT) len() int {
}
func (rr *SMIMEA) len() int {
l := rr.Hdr.len()
- l += 1 // Usage
- l += 1 // Selector
- l += 1 // MatchingType
+ l++ // Usage
+ l++ // Selector
+ l++ // MatchingType
l += len(rr.Certificate)/2 + 1
return l
}
@@ -553,16 +553,16 @@ func (rr *SRV) len() int {
}
func (rr *SSHFP) len() int {
l := rr.Hdr.len()
- l += 1 // Algorithm
- l += 1 // Type
+ l++ // Algorithm
+ l++ // Type
l += len(rr.FingerPrint)/2 + 1
return l
}
func (rr *TA) len() int {
l := rr.Hdr.len()
l += 2 // KeyTag
- l += 1 // Algorithm
- l += 1 // DigestType
+ l++ // Algorithm
+ l++ // DigestType
l += len(rr.Digest)/2 + 1
return l
}
@@ -587,9 +587,9 @@ func (rr *TKEY) len() int {
}
func (rr *TLSA) len() int {
l := rr.Hdr.len()
- l += 1 // Usage
- l += 1 // Selector
- l += 1 // MatchingType
+ l++ // Usage
+ l++ // Selector
+ l++ // MatchingType
l += len(rr.Certificate)/2 + 1
return l
}
diff --git a/vendor/github.com/prometheus/client_model/AUTHORS.md b/vendor/github.com/prometheus/client_model/AUTHORS.md
deleted file mode 100644
index e8b3efa6a..000000000
--- a/vendor/github.com/prometheus/client_model/AUTHORS.md
+++ /dev/null
@@ -1,13 +0,0 @@
-The Prometheus project was started by Matt T. Proud (emeritus) and
-Julius Volz in 2012.
-
-Maintainers of this repository:
-
-* Björn Rabenstein <beorn@soundcloud.com>
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Björn Rabenstein <beorn@soundcloud.com>
-* Matt T. Proud <matt.proud@gmail.com>
-* Tobias Schmidt <ts@soundcloud.com>
diff --git a/vendor/github.com/prometheus/client_model/CONTRIBUTING.md b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md
index 573d58741..40503edbf 100644
--- a/vendor/github.com/prometheus/client_model/CONTRIBUTING.md
+++ b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md
@@ -2,16 +2,16 @@
Prometheus uses GitHub to manage reviews of pull requests.
-* If you have a trivial fix or improvement, go ahead and create a pull
- request, addressing (with `@...`) one or more of the maintainers
- (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+ addressing (with `@...`) the maintainer of this repository (see
+ [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
This will avoid unnecessary work and surely give you and us a good deal
of inspiration.
-* Relevant coding style guidelines for the Go parts are the [Go Code Review
+* Relevant coding style guidelines are the [Go Code Review
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
Practices for Production
diff --git a/vendor/github.com/prometheus/client_model/MAINTAINERS.md b/vendor/github.com/prometheus/client_model/MAINTAINERS.md
new file mode 100644
index 000000000..3ede55fe1
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/MAINTAINERS.md
@@ -0,0 +1 @@
+* Björn Rabenstein <beorn@soundcloud.com>
diff --git a/vendor/github.com/prometheus/common/AUTHORS.md b/vendor/github.com/prometheus/common/AUTHORS.md
deleted file mode 100644
index c63f4d395..000000000
--- a/vendor/github.com/prometheus/common/AUTHORS.md
+++ /dev/null
@@ -1,11 +0,0 @@
-Maintainers of this repository:
-
-* Fabian Reinartz <fabian@soundcloud.com>
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Björn Rabenstein <beorn@soundcloud.com>
-* Fabian Reinartz <fabian@soundcloud.com>
-* Julius Volz <julius.volz@gmail.com>
-* Miguel Molina <hi@mvader.me>
diff --git a/vendor/github.com/prometheus/common/CONTRIBUTING.md b/vendor/github.com/prometheus/common/CONTRIBUTING.md
index 5705f0fbe..40503edbf 100644
--- a/vendor/github.com/prometheus/common/CONTRIBUTING.md
+++ b/vendor/github.com/prometheus/common/CONTRIBUTING.md
@@ -2,9 +2,9 @@
Prometheus uses GitHub to manage reviews of pull requests.
-* If you have a trivial fix or improvement, go ahead and create a pull
- request, addressing (with `@...`) one or more of the maintainers
- (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+ addressing (with `@...`) the maintainer of this repository (see
+ [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
diff --git a/vendor/github.com/prometheus/common/MAINTAINERS.md b/vendor/github.com/prometheus/common/MAINTAINERS.md
new file mode 100644
index 000000000..1b3152161
--- /dev/null
+++ b/vendor/github.com/prometheus/common/MAINTAINERS.md
@@ -0,0 +1 @@
+* Fabian Reinartz <fabian.reinartz@coreos.com>
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index 7728abaee..c9ed3ffd8 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -129,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
- if s.Value.Equal(o.Value) {
- return false
- }
- return true
+ return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
diff --git a/vendor/github.com/prometheus/common/model/value_test.go b/vendor/github.com/prometheus/common/model/value_test.go
index 8d2b69ea1..b97dcf84c 100644
--- a/vendor/github.com/prometheus/common/model/value_test.go
+++ b/vendor/github.com/prometheus/common/model/value_test.go
@@ -21,7 +21,7 @@ import (
"testing"
)
-func TestEqual(t *testing.T) {
+func TestEqualValues(t *testing.T) {
tests := map[string]struct {
in1, in2 SampleValue
want bool
@@ -76,6 +76,57 @@ func TestEqual(t *testing.T) {
}
}
+func TestEqualSamples(t *testing.T) {
+ testSample := &Sample{}
+
+ tests := map[string]struct {
+ in1, in2 *Sample
+ want bool
+ }{
+ "equal pointers": {
+ in1: testSample,
+ in2: testSample,
+ want: true,
+ },
+ "different metrics": {
+ in1: &Sample{Metric: Metric{"foo": "bar"}},
+ in2: &Sample{Metric: Metric{"foo": "biz"}},
+ want: false,
+ },
+ "different timestamp": {
+ in1: &Sample{Timestamp: 0},
+ in2: &Sample{Timestamp: 1},
+ want: false,
+ },
+ "different value": {
+ in1: &Sample{Value: 0},
+ in2: &Sample{Value: 1},
+ want: false,
+ },
+ "equal samples": {
+ in1: &Sample{
+ Metric: Metric{"foo": "bar"},
+ Timestamp: 0,
+ Value: 1,
+ },
+ in2: &Sample{
+ Metric: Metric{"foo": "bar"},
+ Timestamp: 0,
+ Value: 1,
+ },
+ want: true,
+ },
+ }
+
+ for name, test := range tests {
+ got := test.in1.Equal(test.in2)
+ if got != test.want {
+ t.Errorf("Comparing %s, %v and %v: got %t, want %t", name, test.in1, test.in2, got, test.want)
+ }
+ }
+
+}
+
func TestSamplePairJSON(t *testing.T) {
input := []struct {
plain string
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
deleted file mode 100644
index d55863560..000000000
--- a/vendor/github.com/prometheus/procfs/AUTHORS.md
+++ /dev/null
@@ -1,21 +0,0 @@
-The Prometheus project was started by Matt T. Proud (emeritus) and
-Julius Volz in 2012.
-
-Maintainers of this repository:
-
-* Tobias Schmidt <ts@soundcloud.com>
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Armen Baghumian <abaghumian@noggin.com.au>
-* Bjoern Rabenstein <beorn@soundcloud.com>
-* David Cournapeau <cournape@gmail.com>
-* Ji-Hoon, Seol <jihoon.seol@gmail.com>
-* Jonas Große Sundrup <cherti@letopolis.de>
-* Julius Volz <julius.volz@gmail.com>
-* Matt Layher <mdlayher@gmail.com>
-* Matthias Rampke <mr@soundcloud.com>
-* Nicky Gerritsen <nicky@streamone.nl>
-* Rémi Audebert <contact@halfr.net>
-* Tobias Schmidt <tobidt@gmail.com>
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
index 5705f0fbe..40503edbf 100644
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -2,9 +2,9 @@
Prometheus uses GitHub to manage reviews of pull requests.
-* If you have a trivial fix or improvement, go ahead and create a pull
- request, addressing (with `@...`) one or more of the maintainers
- (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+ addressing (with `@...`) the maintainer of this repository (see
+ [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
new file mode 100644
index 000000000..35993c41c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -0,0 +1 @@
+* Tobias Schmidt <tobidt@gmail.com>
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 6e7ee6b8b..209549471 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -8,3 +8,4 @@ backwards-incompatible ways without warnings. Use it at your own risk.
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 000000000..680a9842a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,95 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data is comprised of an array of free fragments of each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+ Node string
+ Zone string
+ Sizes []float64
+}
+
+// NewBuddyInfo reads the buddyinfo statistics.
+func NewBuddyInfo() ([]BuddyInfo, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewBuddyInfo()
+}
+
+// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
+ file, err := os.Open(fs.Path("buddyinfo"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+ var (
+ buddyInfo = []BuddyInfo{}
+ scanner = bufio.NewScanner(r)
+ bucketCount = -1
+ )
+
+ for scanner.Scan() {
+ var err error
+ line := scanner.Text()
+ parts := strings.Fields(string(line))
+
+ if len(parts) < 4 {
+ return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+ }
+
+ node := strings.TrimRight(parts[1], ",")
+ zone := strings.TrimRight(parts[3], ",")
+ arraySize := len(parts[4:])
+
+ if bucketCount == -1 {
+ bucketCount = arraySize
+ } else {
+ if bucketCount != arraySize {
+ return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+ }
+ }
+
+ sizes := make([]float64, arraySize)
+ for i := 0; i < arraySize; i++ {
+ sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+ }
+ }
+
+ buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+ }
+
+ return buddyInfo, scanner.Err()
+}
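
Per the struct comment in the new buddyinfo.go, Sizes[n] counts free blocks of 2^n pages for the zone, so the parsed data folds naturally into an approximate free-memory figure. A rough sketch of that, assuming a 4 KiB page size and a stand-in struct shaped like procfs.BuddyInfo, with counts taken from the valid fixture below:

    package main

    import "fmt"

    // zoneBuddy is a stand-in for procfs.BuddyInfo: Sizes[n] is the number of
    // free blocks of 2^n contiguous pages in the zone.
    type zoneBuddy struct {
        Node  string
        Zone  string
        Sizes []float64
    }

    // freeBytes sums count * 2^n pages * pageSize over every order n.
    func freeBytes(b zoneBuddy, pageSize float64) float64 {
        total := 0.0
        for n, count := range b.Sizes {
            total += count * float64(uint64(1)<<uint(n)) * pageSize
        }
        return total
    }

    func main() {
        // Counts lifted from the fixtures/buddyinfo/valid sample; 4 KiB pages assumed.
        z := zoneBuddy{Node: "0", Zone: "DMA32",
            Sizes: []float64{759, 572, 791, 475, 194, 45, 12, 0, 0, 0, 0}}
        fmt.Printf("zone %s: ~%.1f MiB free\n", z.Zone, freeBytes(z, 4096)/(1<<20))
    }
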
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo_test.go b/vendor/github.com/prometheus/procfs/buddyinfo_test.go
new file mode 100644
index 000000000..bcf9355ca
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo_test.go
@@ -0,0 +1,64 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestBuddyInfo(t *testing.T) {
+ buddyInfo, err := FS("fixtures/buddyinfo/valid").NewBuddyInfo()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, got := "DMA", buddyInfo[0].Zone; want != got {
+ t.Errorf("want Node 0, Zone %s, got %s", want, got)
+ }
+
+ if want, got := "Normal", buddyInfo[2].Zone; want != got {
+ t.Errorf("want Node 0, Zone %s, got %s", want, got)
+ }
+
+ if want, got := 4381.0, buddyInfo[2].Sizes[0]; want != got {
+ t.Errorf("want Node 0, Zone Normal %f, got %f", want, got)
+ }
+
+ if want, got := 572.0, buddyInfo[1].Sizes[1]; want != got {
+ t.Errorf("want Node 0, Zone DMA32 %f, got %f", want, got)
+ }
+}
+
+func TestBuddyInfoShort(t *testing.T) {
+ _, err := FS("fixtures/buddyinfo/short").NewBuddyInfo()
+ if err == nil {
+ t.Errorf("expected error, but none occurred")
+ }
+
+ if want, got := "invalid number of fields when parsing buddyinfo", err.Error(); want != got {
+ t.Errorf("wrong error returned, wanted %q, got %q", want, got)
+ }
+}
+
+func TestBuddyInfoSizeMismatch(t *testing.T) {
+ _, err := FS("fixtures/buddyinfo/sizemismatch").NewBuddyInfo()
+ if err == nil {
+ t.Errorf("expected error, but none occurred")
+ }
+
+ if want, got := "mismatch in number of buddyinfo buckets", err.Error(); !strings.HasPrefix(got, want) {
+ t.Errorf("wrong error returned, wanted prefix %q, got %q", want, got)
+ }
+}
diff --git a/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/short/buddyinfo b/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/short/buddyinfo
new file mode 100644
index 000000000..40e71ca35
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/short/buddyinfo
@@ -0,0 +1,3 @@
+Node 0, zone
+Node 0, zone
+Node 0, zone
diff --git a/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/sizemismatch/buddyinfo b/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/sizemismatch/buddyinfo
new file mode 100644
index 000000000..945636182
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/sizemismatch/buddyinfo
@@ -0,0 +1,3 @@
+Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
+Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0
+Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0
diff --git a/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/valid/buddyinfo b/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/valid/buddyinfo
new file mode 100644
index 000000000..f90594a81
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/buddyinfo/valid/buddyinfo
@@ -0,0 +1,3 @@
+Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
+Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
+Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
diff --git a/vendor/github.com/prometheus/procfs/fixtures/fs/xfs/stat b/vendor/github.com/prometheus/procfs/fixtures/fs/xfs/stat
new file mode 100644
index 000000000..f7ca7f940
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/fs/xfs/stat
@@ -0,0 +1,23 @@
+extent_alloc 92447 97589 92448 93751
+abt 0 0 0 0
+blk_map 1767055 188820 184891 92447 92448 2140766 0
+bmbt 0 0 0 0
+dir 185039 92447 92444 136422
+trans 706 944304 0
+ig 185045 58807 0 126238 0 33637 22
+log 2883 113448 9 17360 739
+push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
+xstrat 92447 0
+rw 107739 94045
+attr 4 0 0 0
+icluster 8677 7849 135802
+vnodes 92601 0 0 0 92444 92444 92444 0
+buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
+abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
+abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
+bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
+fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+qm 0 0 0 0 0 0 0 0
+xpc 399724544 92823103 86219234
+debug 0
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 49aaab050..17546756b 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -4,6 +4,8 @@ import (
"fmt"
"os"
"path"
+
+ "github.com/prometheus/procfs/xfs"
)
// FS represents the pseudo-filesystem proc, which provides an interface to
@@ -31,3 +33,14 @@ func NewFS(mountPoint string) (FS, error) {
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+ f, err := os.Open(fs.Path("fs/xfs/stat"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return xfs.ParseStats(f)
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_test.go b/vendor/github.com/prometheus/procfs/fs_test.go
index 91f1c6c97..e492cde55 100644
--- a/vendor/github.com/prometheus/procfs/fs_test.go
+++ b/vendor/github.com/prometheus/procfs/fs_test.go
@@ -11,3 +11,16 @@ func TestNewFS(t *testing.T) {
t.Error("want NewFS to fail if mount point is not a directory")
}
}
+
+func TestFSXFSStats(t *testing.T) {
+ stats, err := FS("fixtures").XFSStats()
+ if err != nil {
+ t.Fatalf("failed to parse XFS stats: %v", err)
+ }
+
+ // Very lightweight test just to sanity check the path used
+ // to open XFS stats. Heavier tests in package xfs.
+ if want, got := uint32(92447), stats.ExtentAllocation.ExtentsAllocated; want != got {
+ t.Errorf("unexpected extents allocated:\nwant: %d\nhave: %d", want, got)
+ }
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs_test.go b/vendor/github.com/prometheus/procfs/ipvs_test.go
index c836c23ac..796ee5b88 100644
--- a/vendor/github.com/prometheus/procfs/ipvs_test.go
+++ b/vendor/github.com/prometheus/procfs/ipvs_test.go
@@ -14,7 +14,7 @@ var (
OutgoingBytes: 0,
}
expectedIPVSBackendStatuses = []IPVSBackendStatus{
- IPVSBackendStatus{
+ {
LocalAddress: net.ParseIP("192.168.0.22"),
LocalPort: 3306,
RemoteAddress: net.ParseIP("192.168.82.22"),
@@ -24,7 +24,7 @@ var (
ActiveConn: 248,
InactConn: 2,
},
- IPVSBackendStatus{
+ {
LocalAddress: net.ParseIP("192.168.0.22"),
LocalPort: 3306,
RemoteAddress: net.ParseIP("192.168.83.24"),
@@ -34,7 +34,7 @@ var (
ActiveConn: 248,
InactConn: 2,
},
- IPVSBackendStatus{
+ {
LocalAddress: net.ParseIP("192.168.0.22"),
LocalPort: 3306,
RemoteAddress: net.ParseIP("192.168.83.21"),
@@ -44,7 +44,7 @@ var (
ActiveConn: 248,
InactConn: 1,
},
- IPVSBackendStatus{
+ {
LocalAddress: net.ParseIP("192.168.0.57"),
LocalPort: 3306,
RemoteAddress: net.ParseIP("192.168.84.22"),
@@ -54,7 +54,7 @@ var (
ActiveConn: 0,
InactConn: 0,
},
- IPVSBackendStatus{
+ {
LocalAddress: net.ParseIP("192.168.0.57"),
LocalPort: 3306,
RemoteAddress: net.ParseIP("192.168.82.21"),
@@ -64,7 +64,7 @@ var (
ActiveConn: 1499,
InactConn: 0,
},
- IPVSBackendStatus{
+ {
LocalAddress: net.ParseIP("192.168.0.57"),
LocalPort: 3306,
RemoteAddress: net.ParseIP("192.168.50.21"),
@@ -74,7 +74,7 @@ var (
ActiveConn: 1498,
InactConn: 0,
},
- IPVSBackendStatus{
+ {
LocalAddress: net.ParseIP("192.168.0.55"),
LocalPort: 3306,
RemoteAddress: net.ParseIP("192.168.50.26"),
@@ -84,7 +84,7 @@ var (
ActiveConn: 0,
InactConn: 0,
},
- IPVSBackendStatus{
+ {
LocalAddress: net.ParseIP("192.168.0.55"),
LocalPort: 3306,
RemoteAddress: net.ParseIP("192.168.49.32"),
diff --git a/vendor/github.com/prometheus/procfs/mdstat_test.go b/vendor/github.com/prometheus/procfs/mdstat_test.go
index ca5fe4d1b..fa463c2fb 100644
--- a/vendor/github.com/prometheus/procfs/mdstat_test.go
+++ b/vendor/github.com/prometheus/procfs/mdstat_test.go
@@ -11,13 +11,13 @@ func TestMDStat(t *testing.T) {
}
refs := map[string]MDStat{
- "md3": MDStat{"md3", "active", 8, 8, 5853468288, 5853468288},
- "md127": MDStat{"md127", "active", 2, 2, 312319552, 312319552},
- "md0": MDStat{"md0", "active", 2, 2, 248896, 248896},
- "md4": MDStat{"md4", "inactive", 2, 2, 4883648, 4883648},
- "md6": MDStat{"md6", "active", 1, 2, 195310144, 16775552},
- "md8": MDStat{"md8", "active", 2, 2, 195310144, 16775552},
- "md7": MDStat{"md7", "active", 3, 4, 7813735424, 7813735424},
+ "md3": {"md3", "active", 8, 8, 5853468288, 5853468288},
+ "md127": {"md127", "active", 2, 2, 312319552, 312319552},
+ "md0": {"md0", "active", 2, 2, 248896, 248896},
+ "md4": {"md4", "inactive", 2, 2, 4883648, 4883648},
+ "md6": {"md6", "active", 1, 2, 195310144, 16775552},
+ "md8": {"md8", "active", 2, 2, 195310144, 16775552},
+ "md7": {"md7", "active", 3, 4, 7813735424, 7813735424},
}
if want, have := len(refs), len(mdStates); want != have {
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 47ab0a744..fe8f1f6a2 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -123,7 +123,7 @@ type NFSEventsStats struct {
VFSFlush uint64
// Number of times fsync() has been called on directories and files.
VFSFsync uint64
- // Number of times locking has been attemped on a file.
+ // Number of times locking has been attempted on a file.
VFSLock uint64
// Number of times files have been closed and released.
VFSFileRelease uint64
@@ -356,7 +356,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
// When encountering "per-operation statistics", we must break this
- // loop and parse them seperately to ensure we can terminate parsing
+ // loop and parse them separately to ensure we can terminate parsing
// before reaching another device entry; hence why this 'if' statement
// is not just another switch case
if ss[0] == fieldPerOpStats {
diff --git a/vendor/github.com/prometheus/procfs/mountstats_test.go b/vendor/github.com/prometheus/procfs/mountstats_test.go
index e65707939..75fd4a0f1 100644
--- a/vendor/github.com/prometheus/procfs/mountstats_test.go
+++ b/vendor/github.com/prometheus/procfs/mountstats_test.go
@@ -12,7 +12,6 @@ func TestMountStats(t *testing.T) {
tests := []struct {
name string
s string
- fs bool
mounts []*Mount
invalid bool
}{
@@ -113,7 +112,6 @@ func TestMountStats(t *testing.T) {
},
{
name: "fixtures OK",
- fs: true,
mounts: []*Mount{
{
Device: "rootfs",
@@ -201,10 +199,9 @@ func TestMountStats(t *testing.T) {
if tt.s != "" {
mounts, err = parseMountStats(strings.NewReader(tt.s))
- }
- if tt.fs {
- proc, err := FS("fixtures").NewProc(26231)
- if err != nil {
+ } else {
+ proc, e := FS("fixtures").NewProc(26231)
+ if e != nil {
t.Fatalf("failed to create proc: %v", err)
}
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
new file mode 100644
index 000000000..d1285fa6c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -0,0 +1,361 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "log"
+ "strconv"
+ "strings"
+)
+
+// ParseStats parses a Stats from an input io.Reader, using the format
+// found in /proc/fs/xfs/stat.
+func ParseStats(r io.Reader) (*Stats, error) {
+ const (
+ // Fields parsed into stats structures.
+ fieldExtentAlloc = "extent_alloc"
+ fieldAbt = "abt"
+ fieldBlkMap = "blk_map"
+ fieldBmbt = "bmbt"
+ fieldDir = "dir"
+ fieldTrans = "trans"
+ fieldIg = "ig"
+ fieldLog = "log"
+ fieldRw = "rw"
+ fieldAttr = "attr"
+ fieldIcluster = "icluster"
+ fieldVnodes = "vnodes"
+ fieldBuf = "buf"
+ fieldXpc = "xpc"
+
+ // Unimplemented at this time due to lack of documentation.
+ fieldPushAil = "push_ail"
+ fieldXstrat = "xstrat"
+ fieldAbtb2 = "abtb2"
+ fieldAbtc2 = "abtc2"
+ fieldBmbt2 = "bmbt2"
+ fieldIbt2 = "ibt2"
+ fieldFibt2 = "fibt2"
+ fieldQm = "qm"
+ fieldDebug = "debug"
+ )
+
+ var xfss Stats
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Expect at least a string label and a single integer value, ex:
+ // - abt 0
+ // - rw 1 2
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) < 2 {
+ continue
+ }
+ label := ss[0]
+
+ // Extended precision counters are uint64 values.
+ if label == fieldXpc {
+ us, err := parseUint64s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
+ if err != nil {
+ return nil, err
+ }
+
+ continue
+ }
+
+ // All other counters are uint32 values.
+ us, err := parseUint32s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ switch label {
+ case fieldExtentAlloc:
+ xfss.ExtentAllocation, err = extentAllocationStats(us)
+ case fieldAbt:
+ xfss.AllocationBTree, err = btreeStats(us)
+ case fieldBlkMap:
+ xfss.BlockMapping, err = blockMappingStats(us)
+ case fieldBmbt:
+ xfss.BlockMapBTree, err = btreeStats(us)
+ case fieldDir:
+ xfss.DirectoryOperation, err = directoryOperationStats(us)
+ case fieldTrans:
+ xfss.Transaction, err = transactionStats(us)
+ case fieldIg:
+ xfss.InodeOperation, err = inodeOperationStats(us)
+ case fieldLog:
+ xfss.LogOperation, err = logOperationStats(us)
+ case fieldRw:
+ xfss.ReadWrite, err = readWriteStats(us)
+ case fieldAttr:
+ xfss.AttributeOperation, err = attributeOperationStats(us)
+ case fieldIcluster:
+ xfss.InodeClustering, err = inodeClusteringStats(us)
+ case fieldVnodes:
+ xfss.Vnode, err = vnodeStats(us)
+ case fieldBuf:
+ xfss.Buffer, err = bufferStats(us)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &xfss, s.Err()
+}
+
+// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
+func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
+ if l := len(us); l != 4 {
+ return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
+ }
+
+ return ExtentAllocationStats{
+ ExtentsAllocated: us[0],
+ BlocksAllocated: us[1],
+ ExtentsFreed: us[2],
+ BlocksFreed: us[3],
+ }, nil
+}
+
+// btreeStats builds a BTreeStats from a slice of uint32s.
+func btreeStats(us []uint32) (BTreeStats, error) {
+ if l := len(us); l != 4 {
+ return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
+ }
+
+ return BTreeStats{
+ Lookups: us[0],
+ Compares: us[1],
+ RecordsInserted: us[2],
+ RecordsDeleted: us[3],
+ }, nil
+}
+
+// blockMappingStats builds a BlockMappingStats from a slice of uint32s.
+func blockMappingStats(us []uint32) (BlockMappingStats, error) {
+ if l := len(us); l != 7 {
+ return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
+ }
+
+ return BlockMappingStats{
+ Reads: us[0],
+ Writes: us[1],
+ Unmaps: us[2],
+ ExtentListInsertions: us[3],
+ ExtentListDeletions: us[4],
+ ExtentListLookups: us[5],
+ ExtentListCompares: us[6],
+ }, nil
+}
+
+// directoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
+func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
+ if l := len(us); l != 4 {
+ return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
+ }
+
+ return DirectoryOperationStats{
+ Lookups: us[0],
+ Creates: us[1],
+ Removes: us[2],
+ Getdents: us[3],
+ }, nil
+}
+
+// transactionStats builds a TransactionStats from a slice of uint32s.
+func transactionStats(us []uint32) (TransactionStats, error) {
+ if l := len(us); l != 3 {
+ return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
+ }
+
+ return TransactionStats{
+ Sync: us[0],
+ Async: us[1],
+ Empty: us[2],
+ }, nil
+}
+
+// inodeOperationStats builds an InodeOperationStats from a slice of uint32s.
+func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
+ if l := len(us); l != 7 {
+ return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
+ }
+
+ return InodeOperationStats{
+ Attempts: us[0],
+ Found: us[1],
+ Recycle: us[2],
+ Missed: us[3],
+ Duplicate: us[4],
+ Reclaims: us[5],
+ AttributeChange: us[6],
+ }, nil
+}
+
+// logOperationStats builds a LogOperationStats from a slice of uint32s.
+func logOperationStats(us []uint32) (LogOperationStats, error) {
+ if l := len(us); l != 5 {
+ return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
+ }
+
+ return LogOperationStats{
+ Writes: us[0],
+ Blocks: us[1],
+ NoInternalBuffers: us[2],
+ Force: us[3],
+ ForceSleep: us[4],
+ }, nil
+}
+
+// readWriteStats builds a ReadWriteStats from a slice of uint32s.
+func readWriteStats(us []uint32) (ReadWriteStats, error) {
+ if l := len(us); l != 2 {
+ return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
+ }
+
+ return ReadWriteStats{
+ Read: us[0],
+ Write: us[1],
+ }, nil
+}
+
+// attributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
+ if l := len(us); l != 4 {
+ return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
+ }
+
+ return AttributeOperationStats{
+ Get: us[0],
+ Set: us[1],
+ Remove: us[2],
+ List: us[3],
+ }, nil
+}
+
+// inodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
+func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
+ if l := len(us); l != 3 {
+ return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
+ }
+
+ return InodeClusteringStats{
+ Iflush: us[0],
+ Flush: us[1],
+ FlushInode: us[2],
+ }, nil
+}
+
+// vnodeStats builds a VnodeStats from a slice of uint32s.
+func vnodeStats(us []uint32) (VnodeStats, error) {
+ // The attribute "Free" appears to not be available on older XFS
+ // stats versions. Therefore, 7 or 8 elements may appear in
+ // this slice.
+ l := len(us)
+ log.Println(l)
+ if l != 7 && l != 8 {
+ return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
+ }
+
+ s := VnodeStats{
+ Active: us[0],
+ Allocate: us[1],
+ Get: us[2],
+ Hold: us[3],
+ Release: us[4],
+ Reclaim: us[5],
+ Remove: us[6],
+ }
+
+ // Skip adding free, unless it is present. The zero value will
+ // be used in place of an actual count.
+ if l == 7 {
+ return s, nil
+ }
+
+ s.Free = us[7]
+ return s, nil
+}
+
+// bufferStats builds a BufferStats from a slice of uint32s.
+func bufferStats(us []uint32) (BufferStats, error) {
+ if l := len(us); l != 9 {
+ return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
+ }
+
+ return BufferStats{
+ Get: us[0],
+ Create: us[1],
+ GetLocked: us[2],
+ GetLockedWaited: us[3],
+ BusyLocked: us[4],
+ MissLocked: us[5],
+ PageRetries: us[6],
+ PageFound: us[7],
+ GetRead: us[8],
+ }, nil
+}
+
+// extendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
+func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
+ if l := len(us); l != 3 {
+ return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
+ }
+
+ return ExtendedPrecisionStats{
+ FlushBytes: us[0],
+ WriteBytes: us[1],
+ ReadBytes: us[2],
+ }, nil
+}
+
+// parseUint32s parses a slice of strings into a slice of uint32s.
+func parseUint32s(ss []string) ([]uint32, error) {
+ us := make([]uint32, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, uint32(u))
+ }
+
+ return us, nil
+}
+
+// parseUint64s parses a slice of strings into a slice of uint64s.
+func parseUint64s(ss []string) ([]uint64, error) {
+ us := make([]uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, u)
+ }
+
+ return us, nil
+}
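
For orientation, here is a minimal usage sketch of the new `xfs.ParseStats` entry point added above. It is not part of the vendored change; it simply feeds the parser two counter lines in the `/proc/fs/xfs/stat` layout its doc comment describes, using the import path shown in the test file below.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/xfs"
)

func main() {
	// Two lines in the same label-plus-integers layout as /proc/fs/xfs/stat.
	input := "extent_alloc 1 2 3 4\nrw 10 20\n"

	stats, err := xfs.ParseStats(strings.NewReader(input))
	if err != nil {
		panic(err)
	}

	// "extent_alloc" populates ExtentAllocation, "rw" populates ReadWrite.
	fmt.Println(stats.ExtentAllocation.BlocksAllocated) // 2
	fmt.Println(stats.ReadWrite.Write)                  // 20
}
```
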
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse_test.go b/vendor/github.com/prometheus/procfs/xfs/parse_test.go
new file mode 100644
index 000000000..11ddb7425
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/parse_test.go
@@ -0,0 +1,446 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xfs_test
+
+import (
+ "log"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/procfs"
+ "github.com/prometheus/procfs/xfs"
+)
+
+func TestParseStats(t *testing.T) {
+ tests := []struct {
+ name string
+ s string
+ fs bool
+ stats *xfs.Stats
+ invalid bool
+ }{
+ {
+ name: "empty file OK",
+ },
+ {
+ name: "short or empty lines and unknown labels ignored",
+ s: "one\n\ntwo 1 2 3\n",
+ stats: &xfs.Stats{},
+ },
+ {
+ name: "bad uint32",
+ s: "extent_alloc XXX",
+ invalid: true,
+ },
+ {
+ name: "bad uint64",
+ s: "xpc XXX",
+ invalid: true,
+ },
+ {
+ name: "extent_alloc bad",
+ s: "extent_alloc 1",
+ invalid: true,
+ },
+ {
+ name: "extent_alloc OK",
+ s: "extent_alloc 1 2 3 4",
+ stats: &xfs.Stats{
+ ExtentAllocation: xfs.ExtentAllocationStats{
+ ExtentsAllocated: 1,
+ BlocksAllocated: 2,
+ ExtentsFreed: 3,
+ BlocksFreed: 4,
+ },
+ },
+ },
+ {
+ name: "abt bad",
+ s: "abt 1",
+ invalid: true,
+ },
+ {
+ name: "abt OK",
+ s: "abt 1 2 3 4",
+ stats: &xfs.Stats{
+ AllocationBTree: xfs.BTreeStats{
+ Lookups: 1,
+ Compares: 2,
+ RecordsInserted: 3,
+ RecordsDeleted: 4,
+ },
+ },
+ },
+ {
+ name: "blk_map bad",
+ s: "blk_map 1",
+ invalid: true,
+ },
+ {
+ name: "blk_map OK",
+ s: "blk_map 1 2 3 4 5 6 7",
+ stats: &xfs.Stats{
+ BlockMapping: xfs.BlockMappingStats{
+ Reads: 1,
+ Writes: 2,
+ Unmaps: 3,
+ ExtentListInsertions: 4,
+ ExtentListDeletions: 5,
+ ExtentListLookups: 6,
+ ExtentListCompares: 7,
+ },
+ },
+ },
+ {
+ name: "bmbt bad",
+ s: "bmbt 1",
+ invalid: true,
+ },
+ {
+ name: "bmbt OK",
+ s: "bmbt 1 2 3 4",
+ stats: &xfs.Stats{
+ BlockMapBTree: xfs.BTreeStats{
+ Lookups: 1,
+ Compares: 2,
+ RecordsInserted: 3,
+ RecordsDeleted: 4,
+ },
+ },
+ },
+ {
+ name: "dir bad",
+ s: "dir 1",
+ invalid: true,
+ },
+ {
+ name: "dir OK",
+ s: "dir 1 2 3 4",
+ stats: &xfs.Stats{
+ DirectoryOperation: xfs.DirectoryOperationStats{
+ Lookups: 1,
+ Creates: 2,
+ Removes: 3,
+ Getdents: 4,
+ },
+ },
+ },
+ {
+ name: "trans bad",
+ s: "trans 1",
+ invalid: true,
+ },
+ {
+ name: "trans OK",
+ s: "trans 1 2 3",
+ stats: &xfs.Stats{
+ Transaction: xfs.TransactionStats{
+ Sync: 1,
+ Async: 2,
+ Empty: 3,
+ },
+ },
+ },
+ {
+ name: "ig bad",
+ s: "ig 1",
+ invalid: true,
+ },
+ {
+ name: "ig OK",
+ s: "ig 1 2 3 4 5 6 7",
+ stats: &xfs.Stats{
+ InodeOperation: xfs.InodeOperationStats{
+ Attempts: 1,
+ Found: 2,
+ Recycle: 3,
+ Missed: 4,
+ Duplicate: 5,
+ Reclaims: 6,
+ AttributeChange: 7,
+ },
+ },
+ },
+ {
+ name: "log bad",
+ s: "log 1",
+ invalid: true,
+ },
+ {
+ name: "log OK",
+ s: "log 1 2 3 4 5",
+ stats: &xfs.Stats{
+ LogOperation: xfs.LogOperationStats{
+ Writes: 1,
+ Blocks: 2,
+ NoInternalBuffers: 3,
+ Force: 4,
+ ForceSleep: 5,
+ },
+ },
+ },
+ {
+ name: "rw bad",
+ s: "rw 1",
+ invalid: true,
+ },
+ {
+ name: "rw OK",
+ s: "rw 1 2",
+ stats: &xfs.Stats{
+ ReadWrite: xfs.ReadWriteStats{
+ Read: 1,
+ Write: 2,
+ },
+ },
+ },
+ {
+ name: "attr bad",
+ s: "attr 1",
+ invalid: true,
+ },
+ {
+ name: "attr OK",
+ s: "attr 1 2 3 4",
+ stats: &xfs.Stats{
+ AttributeOperation: xfs.AttributeOperationStats{
+ Get: 1,
+ Set: 2,
+ Remove: 3,
+ List: 4,
+ },
+ },
+ },
+ {
+ name: "icluster bad",
+ s: "icluster 1",
+ invalid: true,
+ },
+ {
+ name: "icluster OK",
+ s: "icluster 1 2 3",
+ stats: &xfs.Stats{
+ InodeClustering: xfs.InodeClusteringStats{
+ Iflush: 1,
+ Flush: 2,
+ FlushInode: 3,
+ },
+ },
+ },
+ {
+ name: "vnodes bad",
+ s: "vnodes 1",
+ invalid: true,
+ },
+ {
+ name: "vnodes (missing free) OK",
+ s: "vnodes 1 2 3 4 5 6 7",
+ stats: &xfs.Stats{
+ Vnode: xfs.VnodeStats{
+ Active: 1,
+ Allocate: 2,
+ Get: 3,
+ Hold: 4,
+ Release: 5,
+ Reclaim: 6,
+ Remove: 7,
+ },
+ },
+ },
+ {
+ name: "vnodes (with free) OK",
+ s: "vnodes 1 2 3 4 5 6 7 8",
+ stats: &xfs.Stats{
+ Vnode: xfs.VnodeStats{
+ Active: 1,
+ Allocate: 2,
+ Get: 3,
+ Hold: 4,
+ Release: 5,
+ Reclaim: 6,
+ Remove: 7,
+ Free: 8,
+ },
+ },
+ },
+ {
+ name: "buf bad",
+ s: "buf 1",
+ invalid: true,
+ },
+ {
+ name: "buf OK",
+ s: "buf 1 2 3 4 5 6 7 8 9",
+ stats: &xfs.Stats{
+ Buffer: xfs.BufferStats{
+ Get: 1,
+ Create: 2,
+ GetLocked: 3,
+ GetLockedWaited: 4,
+ BusyLocked: 5,
+ MissLocked: 6,
+ PageRetries: 7,
+ PageFound: 8,
+ GetRead: 9,
+ },
+ },
+ },
+ {
+ name: "xpc bad",
+ s: "xpc 1",
+ invalid: true,
+ },
+ {
+ name: "xpc OK",
+ s: "xpc 1 2 3",
+ stats: &xfs.Stats{
+ ExtendedPrecision: xfs.ExtendedPrecisionStats{
+ FlushBytes: 1,
+ WriteBytes: 2,
+ ReadBytes: 3,
+ },
+ },
+ },
+ {
+ name: "fixtures OK",
+ fs: true,
+ stats: &xfs.Stats{
+ ExtentAllocation: xfs.ExtentAllocationStats{
+ ExtentsAllocated: 92447,
+ BlocksAllocated: 97589,
+ ExtentsFreed: 92448,
+ BlocksFreed: 93751,
+ },
+ AllocationBTree: xfs.BTreeStats{
+ Lookups: 0,
+ Compares: 0,
+ RecordsInserted: 0,
+ RecordsDeleted: 0,
+ },
+ BlockMapping: xfs.BlockMappingStats{
+ Reads: 1767055,
+ Writes: 188820,
+ Unmaps: 184891,
+ ExtentListInsertions: 92447,
+ ExtentListDeletions: 92448,
+ ExtentListLookups: 2140766,
+ ExtentListCompares: 0,
+ },
+ BlockMapBTree: xfs.BTreeStats{
+ Lookups: 0,
+ Compares: 0,
+ RecordsInserted: 0,
+ RecordsDeleted: 0,
+ },
+ DirectoryOperation: xfs.DirectoryOperationStats{
+ Lookups: 185039,
+ Creates: 92447,
+ Removes: 92444,
+ Getdents: 136422,
+ },
+ Transaction: xfs.TransactionStats{
+ Sync: 706,
+ Async: 944304,
+ Empty: 0,
+ },
+ InodeOperation: xfs.InodeOperationStats{
+ Attempts: 185045,
+ Found: 58807,
+ Recycle: 0,
+ Missed: 126238,
+ Duplicate: 0,
+ Reclaims: 33637,
+ AttributeChange: 22,
+ },
+ LogOperation: xfs.LogOperationStats{
+ Writes: 2883,
+ Blocks: 113448,
+ NoInternalBuffers: 9,
+ Force: 17360,
+ ForceSleep: 739,
+ },
+ ReadWrite: xfs.ReadWriteStats{
+ Read: 107739,
+ Write: 94045,
+ },
+ AttributeOperation: xfs.AttributeOperationStats{
+ Get: 4,
+ Set: 0,
+ Remove: 0,
+ List: 0,
+ },
+ InodeClustering: xfs.InodeClusteringStats{
+ Iflush: 8677,
+ Flush: 7849,
+ FlushInode: 135802,
+ },
+ Vnode: xfs.VnodeStats{
+ Active: 92601,
+ Allocate: 0,
+ Get: 0,
+ Hold: 0,
+ Release: 92444,
+ Reclaim: 92444,
+ Remove: 92444,
+ Free: 0,
+ },
+ Buffer: xfs.BufferStats{
+ Get: 2666287,
+ Create: 7122,
+ GetLocked: 2659202,
+ GetLockedWaited: 3599,
+ BusyLocked: 2,
+ MissLocked: 7085,
+ PageRetries: 0,
+ PageFound: 10297,
+ GetRead: 7085,
+ },
+ ExtendedPrecision: xfs.ExtendedPrecisionStats{
+ FlushBytes: 399724544,
+ WriteBytes: 92823103,
+ ReadBytes: 86219234,
+ },
+ },
+ },
+ }
+
+ for i, tt := range tests {
+ t.Logf("[%02d] test %q", i, tt.name)
+
+ var (
+ stats *xfs.Stats
+ err error
+ )
+
+ if tt.s != "" {
+ stats, err = xfs.ParseStats(strings.NewReader(tt.s))
+ }
+ if tt.fs {
+ stats, err = procfs.FS("../fixtures").XFSStats()
+ }
+
+ if tt.invalid && err == nil {
+ t.Error("expected an error, but none occurred")
+ }
+ if !tt.invalid && err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+
+ if want, have := tt.stats, stats; !reflect.DeepEqual(want, have) {
+ log.Printf("stats: %#v", have)
+ t.Errorf("unexpected XFS stats:\nwant:\n%v\nhave:\n%v", want, have)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go
new file mode 100644
index 000000000..ed77d907a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go
@@ -0,0 +1,158 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package xfs provides access to statistics exposed by the XFS filesystem.
+package xfs
+
+// Stats contains XFS filesystem runtime statistics, parsed from
+// /proc/fs/xfs/stat.
+//
+// The names and meanings of each statistic were taken from
+// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
+// kernel source. Most counters are uint32s (same data types used in
+// xfs_stats.h), but some of the "extended precision stats" are uint64s.
+type Stats struct {
+ ExtentAllocation ExtentAllocationStats
+ AllocationBTree BTreeStats
+ BlockMapping BlockMappingStats
+ BlockMapBTree BTreeStats
+ DirectoryOperation DirectoryOperationStats
+ Transaction TransactionStats
+ InodeOperation InodeOperationStats
+ LogOperation LogOperationStats
+ ReadWrite ReadWriteStats
+ AttributeOperation AttributeOperationStats
+ InodeClustering InodeClusteringStats
+ Vnode VnodeStats
+ Buffer BufferStats
+ ExtendedPrecision ExtendedPrecisionStats
+}
+
+// ExtentAllocationStats contains statistics regarding XFS extent allocations.
+type ExtentAllocationStats struct {
+ ExtentsAllocated uint32
+ BlocksAllocated uint32
+ ExtentsFreed uint32
+ BlocksFreed uint32
+}
+
+// BTreeStats contains statistics regarding an XFS internal B-tree.
+type BTreeStats struct {
+ Lookups uint32
+ Compares uint32
+ RecordsInserted uint32
+ RecordsDeleted uint32
+}
+
+// BlockMappingStats contains statistics regarding XFS block maps.
+type BlockMappingStats struct {
+ Reads uint32
+ Writes uint32
+ Unmaps uint32
+ ExtentListInsertions uint32
+ ExtentListDeletions uint32
+ ExtentListLookups uint32
+ ExtentListCompares uint32
+}
+
+// DirectoryOperationStats contains statistics regarding XFS directory entries.
+type DirectoryOperationStats struct {
+ Lookups uint32
+ Creates uint32
+ Removes uint32
+ Getdents uint32
+}
+
+// TransactionStats contains statistics regarding XFS metadata transactions.
+type TransactionStats struct {
+ Sync uint32
+ Async uint32
+ Empty uint32
+}
+
+// InodeOperationStats contains statistics regarding XFS inode operations.
+type InodeOperationStats struct {
+ Attempts uint32
+ Found uint32
+ Recycle uint32
+ Missed uint32
+ Duplicate uint32
+ Reclaims uint32
+ AttributeChange uint32
+}
+
+// LogOperationStats contains statistics regarding the XFS log buffer.
+type LogOperationStats struct {
+ Writes uint32
+ Blocks uint32
+ NoInternalBuffers uint32
+ Force uint32
+ ForceSleep uint32
+}
+
+// ReadWriteStats contains statistics regarding the number of read and write
+// system calls for XFS filesystems.
+type ReadWriteStats struct {
+ Read uint32
+ Write uint32
+}
+
+// AttributeOperationStats contains statistics regarding manipulation of
+// XFS extended file attributes.
+type AttributeOperationStats struct {
+ Get uint32
+ Set uint32
+ Remove uint32
+ List uint32
+}
+
+// InodeClusteringStats contains statistics regarding XFS inode clustering
+// operations.
+type InodeClusteringStats struct {
+ Iflush uint32
+ Flush uint32
+ FlushInode uint32
+}
+
+// VnodeStats contains statistics regarding XFS vnode operations.
+type VnodeStats struct {
+ Active uint32
+ Allocate uint32
+ Get uint32
+ Hold uint32
+ Release uint32
+ Reclaim uint32
+ Remove uint32
+ Free uint32
+}
+
+// BufferStats contains statistics regarding XFS read/write I/O buffers.
+type BufferStats struct {
+ Get uint32
+ Create uint32
+ GetLocked uint32
+ GetLockedWaited uint32
+ BusyLocked uint32
+ MissLocked uint32
+ PageRetries uint32
+ PageFound uint32
+ GetRead uint32
+}
+
+// ExtendedPrecisionStats contains high precision counters used to track the
+// total number of bytes read, written, or flushed, during XFS operations.
+type ExtendedPrecisionStats struct {
+ FlushBytes uint64
+ WriteBytes uint64
+ ReadBytes uint64
+}
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
index bd72adf68..42d1f57d3 100644
--- a/vendor/github.com/spf13/cobra/.travis.yml
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -5,8 +5,9 @@ matrix:
- go: 1.4.3
env: NOVET=true # No bundled vet.
- go: 1.5.4
- - go: 1.6.3
- - go: 1.7
+ - go: 1.6.4
+ - go: 1.7.5
+ - go: 1.8
- go: tip
allow_failures:
- go: tip
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 2efda5920..2de984012 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -8,6 +8,7 @@ Many of the most widely used Go projects are built using Cobra including:
* [Hugo](http://gohugo.io)
* [rkt](https://github.com/coreos/rkt)
* [etcd](https://github.com/coreos/etcd)
+* [Docker](https://github.com/docker/docker)
* [Docker (distribution)](https://github.com/docker/distribution)
* [OpenShift](https://www.openshift.com/)
* [Delve](https://github.com/derekparker/delve)
@@ -755,7 +756,7 @@ providing a way to handle the errors in one location. The current list of functi
* PersistentPostRunE
If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage`
-and `SilenceErrors` to `false` on the command. A child command respects these flags if they are set on the parent
+and `SilenceErrors` to `true` on the command. A child command respects these flags if they are set on the parent
command.
**Example Usage using RunE:**
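
As a brief sketch of what the corrected sentence describes (this snippet is not taken from the README itself), setting both fields to `true` leaves error reporting entirely to the caller:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:           "app",
		SilenceUsage:  true, // suppress the usage text normally printed on error
		SilenceErrors: true, // suppress cobra's own error printing
		RunE: func(cmd *cobra.Command, args []string) error {
			return errors.New("something went wrong")
		},
	}

	// Errors now surface only here, in one place.
	if err := cmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, "app:", err)
		os.Exit(1)
	}
}
```
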
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 3ee1a0a9d..ae3930dfc 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -57,6 +57,9 @@ type Command struct {
Deprecated string
// Is this command hidden and should NOT show up in the list of available commands?
Hidden bool
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands
+ Annotations map[string]string
// Full set of flags
flags *flag.FlagSet
// Set of flags children of this command will inherit
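
A small illustrative sketch of the new `Annotations` field (the `group` key below is hypothetical, not something cobra defines):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "migrate",
		// Hypothetical key/value an application might use to group commands
		// when rendering its own help or documentation.
		Annotations: map[string]string{"group": "database"},
		Run:         func(cmd *cobra.Command, args []string) {},
	}

	fmt.Println(cmd.Annotations["group"]) // database
}
```
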
@@ -152,12 +155,12 @@ func (c *Command) SetUsageTemplate(s string) {
}
// SetFlagErrorFunc sets a function to generate an error when flag parsing
-// fails
+// fails.
func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
c.flagErrorFunc = f
}
-// SetHelpFunc sets help function. Can be defined by Application
+// SetHelpFunc sets help function. Can be defined by Application.
func (c *Command) SetHelpFunc(f func(*Command, []string)) {
c.helpFunc = f
}
@@ -184,7 +187,7 @@ func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string
}
}
-// OutOrStdout returns output to stdout
+// OutOrStdout returns output to stdout.
func (c *Command) OutOrStdout() io.Writer {
return c.getOut(os.Stdout)
}
@@ -342,19 +345,19 @@ Aliases:
{{end}}{{if .HasExample}}
Examples:
-{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}}
+{{ .Example }}{{end}}{{if .HasAvailableSubCommands}}
-Available Commands:{{range .Commands}}{{if .IsAvailableCommand}}
- {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}}
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags:
-{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}}
+{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasAvailableInheritedFlags}}
Global Flags:
{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}}
-Additional help topics:{{range .Commands}}{{if .IsHelpCommand}}
- {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }}
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`
@@ -381,20 +384,18 @@ func (c *Command) resetChildrensParents() {
}
}
-// Test if the named flag is a boolean flag.
-func isBooleanFlag(name string, f *flag.FlagSet) bool {
+func hasNoOptDefVal(name string, f *flag.FlagSet) bool {
flag := f.Lookup(name)
if flag == nil {
return false
}
- return flag.Value.Type() == "bool"
+ return len(flag.NoOptDefVal) > 0
}
-// Test if the named flag is a boolean flag.
-func isBooleanShortFlag(name string, f *flag.FlagSet) bool {
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
result := false
- f.VisitAll(func(f *flag.Flag) {
- if f.Shorthand == name && f.Value.Type() == "bool" {
+ fs.VisitAll(func(flag *flag.Flag) {
+ if flag.Shorthand == name && len(flag.NoOptDefVal) > 0 {
result = true
}
})
@@ -420,8 +421,8 @@ func stripFlags(args []string, c *Command) []string {
inQuote = true
case strings.HasPrefix(y, "--") && !strings.Contains(y, "="):
// TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
- inFlag = !isBooleanFlag(y[2:], c.Flags())
- case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()):
+ inFlag = !hasNoOptDefVal(y[2:], c.Flags())
+ case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !shortHasNoOptDefVal(y[1:], c.Flags()):
inFlag = true
case inFlag:
inFlag = false
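
The rewrite above generalizes the old boolean-only check: `stripFlags` now treats any flag whose pflag definition carries a `NoOptDefVal` (bool flags get one automatically) as a flag that may appear without a value. A hedged pflag sketch, independent of cobra:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Bool("verbose", false, "verbose output") // bool flags get NoOptDefVal "true" automatically
	fs.String("level", "info", "log level")

	// Giving a string flag a NoOptDefVal means "--level" alone reads as "--level=debug".
	fs.Lookup("level").NoOptDefVal = "debug"

	if err := fs.Parse([]string{"--verbose", "--level"}); err != nil {
		panic(err)
	}
	v, _ := fs.GetBool("verbose")
	l, _ := fs.GetString("level")
	fmt.Println(v, l) // true debug
}
```
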
@@ -455,7 +456,7 @@ func argsMinusFirstX(args []string, x string) []string {
return args
}
-// Find finds the target command given the args and command tree
+// Find the target command given the args and command tree
// Meant to be run on the highest node. Only searches down.
func (c *Command) Find(args []string) (*Command, []string, error) {
if c == nil {
@@ -695,7 +696,6 @@ func (c *Command) Execute() error {
// ExecuteC executes the command.
func (c *Command) ExecuteC() (cmd *Command, err error) {
-
// Regardless of what command execute is called on, run on Root only
if c.HasParent() {
return c.Root().ExecuteC()
@@ -780,7 +780,7 @@ func (c *Command) initHelpCmd() {
Run: func(c *Command, args []string) {
cmd, _, e := c.Root().Find(args)
if cmd == nil || e != nil {
- c.Printf("Unknown help topic %#q.", args)
+ c.Printf("Unknown help topic %#q\n", args)
c.Root().Usage()
} else {
cmd.Help()
@@ -969,7 +969,8 @@ func (c *Command) Name() string {
if i >= 0 {
name = name[:i]
}
- return name
+ c.name = name
+ return c.name
}
// HasAlias determines if a given string is an alias of the command.
@@ -1020,11 +1021,12 @@ func (c *Command) IsAvailableCommand() bool {
return false
}
-// IsHelpCommand determines if a command is a 'help' command; a help command is
-// determined by the fact that it is NOT runnable/hidden/deprecated, and has no
-// sub commands that are runnable/hidden/deprecated.
-func (c *Command) IsHelpCommand() bool {
-
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
// if a command is runnable, deprecated, or hidden it is not a 'help' command
if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
return false
@@ -1032,7 +1034,7 @@ func (c *Command) IsHelpCommand() bool {
// if any non-help sub commands are found, the command is not a 'help' command
for _, sub := range c.commands {
- if !sub.IsHelpCommand() {
+ if !sub.IsAdditionalHelpTopicCommand() {
return false
}
}
@@ -1045,10 +1047,9 @@ func (c *Command) IsHelpCommand() bool {
// that need to be shown in the usage/help default template under 'additional help
// topics'.
func (c *Command) HasHelpSubCommands() bool {
-
// return true on the first found available 'help' sub command
for _, sub := range c.commands {
- if sub.IsHelpCommand() {
+ if sub.IsAdditionalHelpTopicCommand() {
return true
}
}
@@ -1060,7 +1061,6 @@ func (c *Command) HasHelpSubCommands() bool {
// HasAvailableSubCommands determines if a command has available sub commands that
// need to be shown in the usage/help default template under 'available commands'.
func (c *Command) HasAvailableSubCommands() bool {
-
// return true on the first found available (non deprecated/help/hidden)
// sub command
for _, sub := range c.commands {
diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go
index fd7107c42..b9266c367 100644
--- a/vendor/github.com/spf13/cobra/doc/man_docs.go
+++ b/vendor/github.com/spf13/cobra/doc/man_docs.go
@@ -49,7 +49,7 @@ func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error {
header = &GenManHeader{}
}
for _, c := range cmd.Commands() {
- if !c.IsAvailableCommand() || c.IsHelpCommand() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
if err := GenManTreeFromOpts(c, opts); err != nil {
@@ -216,7 +216,7 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte {
children := cmd.Commands()
sort.Sort(byName(children))
for _, c := range children {
- if !c.IsAvailableCommand() || c.IsHelpCommand() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section)
diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go
index fa1363180..8d159c1d7 100644
--- a/vendor/github.com/spf13/cobra/doc/md_docs.go
+++ b/vendor/github.com/spf13/cobra/doc/md_docs.go
@@ -119,7 +119,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string)
sort.Sort(byName(children))
for _, child := range children {
- if !child.IsAvailableCommand() || child.IsHelpCommand() {
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
continue
}
cname := name + " " + child.Name()
@@ -149,7 +149,7 @@ func GenMarkdownTree(cmd *cobra.Command, dir string) error {
func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
for _, c := range cmd.Commands() {
- if !c.IsAvailableCommand() || c.IsHelpCommand() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go
index a7d2765a9..8d3dbecec 100644
--- a/vendor/github.com/spf13/cobra/doc/util.go
+++ b/vendor/github.com/spf13/cobra/doc/util.go
@@ -27,7 +27,7 @@ func hasSeeAlso(cmd *cobra.Command) bool {
return true
}
for _, c := range cmd.Commands() {
- if !c.IsAvailableCommand() || c.IsHelpCommand() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
return true
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
index 75474d299..ac8db89eb 100644
--- a/vendor/github.com/spf13/cobra/doc/yaml_docs.go
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
@@ -57,7 +57,7 @@ func GenYamlTree(cmd *cobra.Command, dir string) error {
// GenYamlTreeCustom creates yaml structured ref files
func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
for _, c := range cmd.Commands() {
- if !c.IsAvailableCommand() || c.IsHelpCommand() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
@@ -117,7 +117,7 @@ func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) str
children := cmd.Commands()
sort.Sort(byName(children))
for _, child := range children {
- if !child.IsAvailableCommand() || child.IsHelpCommand() {
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
continue
}
result = append(result, child.Name()+" - "+child.Short)
diff --git a/vendor/github.com/tylerb/graceful/README.md b/vendor/github.com/tylerb/graceful/README.md
index 328c3acf8..c641b6e78 100644
--- a/vendor/github.com/tylerb/graceful/README.md
+++ b/vendor/github.com/tylerb/graceful/README.md
@@ -3,6 +3,11 @@ graceful [![GoDoc](https://godoc.org/github.com/tylerb/graceful?status.png)](htt
Graceful is a Go 1.3+ package enabling graceful shutdown of http.Handler servers.
+## Using Go 1.8?
+
+If you are using Go 1.8, you may not need to use this library! Consider using `http.Server`'s built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown)
+method for graceful shutdowns.
+
## Installation
To install, simply execute:
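
The README addition above points Go 1.8 users at the standard library's built-in graceful shutdown; a minimal standard-library-only sketch of that approach (not part of the graceful package):

```go
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080", Handler: http.DefaultServeMux}

	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	// Wait for an interrupt, then give in-flight requests time to finish.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	<-stop

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Println("shutdown:", err)
	}
}
```
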
diff --git a/vendor/github.com/tylerb/graceful/graceful.go b/vendor/github.com/tylerb/graceful/graceful.go
index d6a9ca068..ebf0aeb7d 100644
--- a/vendor/github.com/tylerb/graceful/graceful.go
+++ b/vendor/github.com/tylerb/graceful/graceful.go
@@ -366,6 +366,7 @@ func (srv *Server) manageConnections(add, idle, active, remove chan net.Conn, sh
select {
case conn := <-add:
srv.connections[conn] = struct{}{}
+ srv.idleConnections[conn] = struct{}{} // Newly-added connections are considered idle until they become active.
case conn := <-idle:
srv.idleConnections[conn] = struct{}{}
case conn := <-active:
diff --git a/vendor/github.com/xenolf/lego/README.md b/vendor/github.com/xenolf/lego/README.md
index 9be562944..852f0a833 100644
--- a/vendor/github.com/xenolf/lego/README.md
+++ b/vendor/github.com/xenolf/lego/README.md
@@ -23,7 +23,11 @@ To build lego inside a Docker container, just run
```
docker build -t lego .
```
-
+##### From the package manager
+- [ArchLinux (AUR)](https://aur.archlinux.org/packages/lego-git):
+```
+yaourt -S lego-git
+```
#### Features
- Register with CA
diff --git a/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/xenolf/lego/acme/client.go
index e824f5080..ba56e796c 100644
--- a/vendor/github.com/xenolf/lego/acme/client.go
+++ b/vendor/github.com/xenolf/lego/acme/client.go
@@ -535,6 +535,7 @@ func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[s
links := parseLinks(hdr["Link"])
if links["next"] == "" {
logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain)
+ errc <- domainError{Domain: domain, Error: errors.New("Server did not provide next link to proceed")}
return
}
@@ -560,12 +561,20 @@ func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[s
}
}
+ logAuthz(challenges)
+
close(resc)
close(errc)
return challenges, failures
}
+func logAuthz(authz []authorizationResource) {
+ for _, auth := range authz {
+ logf("[INFO][%s] AuthURL: %s", auth.Domain, auth.AuthURL)
+ }
+}
+
func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey, mustStaple bool) (CertificateResource, error) {
if len(authz) == 0 {
return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!")
diff --git a/vendor/github.com/xenolf/lego/acme/client_test.go b/vendor/github.com/xenolf/lego/acme/client_test.go
index e309554f3..b18334c8a 100644
--- a/vendor/github.com/xenolf/lego/acme/client_test.go
+++ b/vendor/github.com/xenolf/lego/acme/client_test.go
@@ -10,6 +10,7 @@ import (
"net/http/httptest"
"strings"
"testing"
+ "time"
)
func TestNewClient(t *testing.T) {
@@ -118,6 +119,39 @@ func TestClientOptPort(t *testing.T) {
}
}
+func TestNotHoldingLockWhileMakingHTTPRequests(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(250 * time.Millisecond)
+ w.Header().Add("Replay-Nonce", "12345")
+ w.Header().Add("Retry-After", "0")
+ writeJSONResponse(w, &challenge{Type: "http-01", Status: "Valid", URI: "http://example.com/", Token: "token"})
+ }))
+ defer ts.Close()
+
+ privKey, _ := rsa.GenerateKey(rand.Reader, 512)
+ j := &jws{privKey: privKey, directoryURL: ts.URL}
+ ch := make(chan bool)
+ resultCh := make(chan bool)
+ go func() {
+ j.Nonce()
+ ch <- true
+ }()
+ go func() {
+ j.Nonce()
+ ch <- true
+ }()
+ go func() {
+ <-ch
+ <-ch
+ resultCh <- true
+ }()
+ select {
+ case <-resultCh:
+ case <-time.After(400 * time.Millisecond):
+ t.Fatal("JWS is probably holding a lock while making HTTP request")
+ }
+}
+
func TestValidate(t *testing.T) {
var statuses []string
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -168,6 +202,43 @@ func TestValidate(t *testing.T) {
}
}
+func TestGetChallenges(t *testing.T) {
+ var ts *httptest.Server
+ ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ case "GET", "HEAD":
+ w.Header().Add("Replay-Nonce", "12345")
+ w.Header().Add("Retry-After", "0")
+ writeJSONResponse(w, directory{NewAuthzURL: ts.URL, NewCertURL: ts.URL, NewRegURL: ts.URL, RevokeCertURL: ts.URL})
+ case "POST":
+ writeJSONResponse(w, authorization{})
+ }
+ }))
+ defer ts.Close()
+
+ keyBits := 512 // small value keeps test fast
+ keyType := RSA2048
+ key, err := rsa.GenerateKey(rand.Reader, keyBits)
+ if err != nil {
+ t.Fatal("Could not generate test key:", err)
+ }
+ user := mockUser{
+ email: "test@test.com",
+ regres: &RegistrationResource{NewAuthzURL: ts.URL},
+ privatekey: key,
+ }
+
+ client, err := NewClient(ts.URL, user, keyType)
+ if err != nil {
+ t.Fatalf("Could not create client: %v", err)
+ }
+
+ _, failures := client.getChallenges([]string{"example.com"})
+ if failures["example.com"] == nil {
+ t.Fatal("Expecting \"Server did not provide next link to proceed\" error, got nil")
+ }
+}
+
// writeJSONResponse marshals the body as JSON and writes it to the response.
func writeJSONResponse(w http.ResponseWriter, body interface{}) {
bs, err := json.Marshal(body)
diff --git a/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/xenolf/lego/acme/error.go
index 6d7013cf1..e4bc934c2 100644
--- a/vendor/github.com/xenolf/lego/acme/error.go
+++ b/vendor/github.com/xenolf/lego/acme/error.go
@@ -8,7 +8,10 @@ import (
"strings"
)
-const tosAgreementError = "Must agree to subscriber agreement before any further actions"
+const (
+ tosAgreementError = "Must agree to subscriber agreement before any further actions"
+ invalidNonceError = "JWS has invalid anti-replay nonce"
+)
// RemoteError is the base type for all errors specific to the ACME protocol.
type RemoteError struct {
@@ -28,6 +31,12 @@ type TOSError struct {
RemoteError
}
+// NonceError represents the error which is returned if the
+// nonce sent by the client was not accepted by the server.
+type NonceError struct {
+ RemoteError
+}
+
type domainError struct {
Domain string
Error error
@@ -73,6 +82,10 @@ func handleHTTPError(resp *http.Response) error {
return TOSError{errorDetail}
}
+ if errorDetail.StatusCode == http.StatusBadRequest && strings.HasPrefix(errorDetail.Detail, invalidNonceError) {
+ return NonceError{errorDetail}
+ }
+
return errorDetail
}
diff --git a/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/xenolf/lego/acme/http.go
index 180db786d..dc958046a 100644
--- a/vendor/github.com/xenolf/lego/acme/http.go
+++ b/vendor/github.com/xenolf/lego/acme/http.go
@@ -31,14 +31,14 @@ const (
func httpHead(url string) (resp *http.Response, err error) {
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to head %q: %v", url, err)
}
req.Header.Set("User-Agent", userAgent())
resp, err = HTTPClient.Do(req)
if err != nil {
- return resp, err
+ return resp, fmt.Errorf("failed to do head %q: %v", url, err)
}
resp.Body.Close()
return resp, err
@@ -49,7 +49,7 @@ func httpHead(url string) (resp *http.Response, err error) {
func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
req, err := http.NewRequest("POST", url, body)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to post %q: %v", url, err)
}
req.Header.Set("Content-Type", bodyType)
req.Header.Set("User-Agent", userAgent())
@@ -62,7 +62,7 @@ func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response,
func httpGet(url string) (resp *http.Response, err error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to get %q: %v", url, err)
}
req.Header.Set("User-Agent", userAgent())
@@ -74,7 +74,7 @@ func httpGet(url string) (resp *http.Response, err error) {
func getJSON(uri string, respBody interface{}) (http.Header, error) {
resp, err := httpGet(uri)
if err != nil {
- return nil, fmt.Errorf("failed to get %q: %v", uri, err)
+ return nil, fmt.Errorf("failed to get json %q: %v", uri, err)
}
defer resp.Body.Close()
diff --git a/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/xenolf/lego/acme/jws.go
index 2a1fc244d..1b4d29d53 100644
--- a/vendor/github.com/xenolf/lego/acme/jws.go
+++ b/vendor/github.com/xenolf/lego/acme/jws.go
@@ -16,8 +16,7 @@ import (
type jws struct {
directoryURL string
privKey crypto.PrivateKey
- nonces []string
- sync.Mutex
+ nonces nonceManager
}
func keyAsJWK(key interface{}) *jose.JsonWebKey {
@@ -38,19 +37,31 @@ func keyAsJWK(key interface{}) *jose.JsonWebKey {
func (j *jws) post(url string, content []byte) (*http.Response, error) {
signedContent, err := j.signContent(content)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("Failed to sign content -> %s", err.Error())
}
resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize())))
- if err != nil {
- return nil, err
+
+ // Even in case of an error, the response should still contain a nonce.
+ nonce, nonceErr := getNonceFromResponse(resp)
+ if nonceErr == nil {
+ j.nonces.Push(nonce)
}
- j.Lock()
- defer j.Unlock()
- j.getNonceFromResponse(resp)
+ if err != nil {
+ switch err.(type) {
+ case NonceError:
+ // In case of a nonce error - retry once
+ resp, err = httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize())))
+ if err != nil {
+ return nil, fmt.Errorf("Failed to HTTP POST to %s -> %s", url, err.Error())
+ }
+ default:
+ return nil, fmt.Errorf("Failed to HTTP POST to %s -> %s", url, err.Error())
+ }
+ }
- return resp, err
+ return resp, nil
}
func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) {
@@ -69,49 +80,63 @@ func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) {
signer, err := jose.NewSigner(alg, j.privKey)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("Failed to create jose signer -> %s", err.Error())
}
signer.SetNonceSource(j)
signed, err := signer.Sign(content)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("Failed to sign content -> %s", err.Error())
}
return signed, nil
}
-func (j *jws) getNonceFromResponse(resp *http.Response) error {
- nonce := resp.Header.Get("Replay-Nonce")
- if nonce == "" {
- return fmt.Errorf("Server did not respond with a proper nonce header.")
+func (j *jws) Nonce() (string, error) {
+ if nonce, ok := j.nonces.Pop(); ok {
+ return nonce, nil
}
- j.nonces = append(j.nonces, nonce)
- return nil
+ return getNonce(j.directoryURL)
}
-func (j *jws) getNonce() error {
- resp, err := httpHead(j.directoryURL)
- if err != nil {
- return err
+type nonceManager struct {
+ nonces []string
+ sync.Mutex
+}
+
+func (n *nonceManager) Pop() (string, bool) {
+ n.Lock()
+ defer n.Unlock()
+
+ if len(n.nonces) == 0 {
+ return "", false
}
- return j.getNonceFromResponse(resp)
+ nonce := n.nonces[len(n.nonces)-1]
+ n.nonces = n.nonces[:len(n.nonces)-1]
+ return nonce, true
}
-func (j *jws) Nonce() (string, error) {
- j.Lock()
- defer j.Unlock()
- nonce := ""
- if len(j.nonces) == 0 {
- err := j.getNonce()
- if err != nil {
- return nonce, err
- }
+func (n *nonceManager) Push(nonce string) {
+ n.Lock()
+ defer n.Unlock()
+ n.nonces = append(n.nonces, nonce)
+}
+
+func getNonce(url string) (string, error) {
+ resp, err := httpHead(url)
+ if err != nil {
+ return "", fmt.Errorf("Failed to get nonce from HTTP HEAD -> %s", err.Error())
}
- if len(j.nonces) == 0 {
- return "", fmt.Errorf("Can't get nonce")
+
+ return getNonceFromResponse(resp)
+}
+
+func getNonceFromResponse(resp *http.Response) (string, error) {
+ nonce := resp.Header.Get("Replay-Nonce")
+ if nonce == "" {
+ return "", fmt.Errorf("Server did not respond with a proper nonce header.")
}
- nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1]
+
return nonce, nil
}
diff --git a/vendor/github.com/xenolf/lego/providers/http/memcached/memcached.go b/vendor/github.com/xenolf/lego/providers/http/memcached/memcached.go
index 9c5f6c0b4..9ac8b811d 100644
--- a/vendor/github.com/xenolf/lego/providers/http/memcached/memcached.go
+++ b/vendor/github.com/xenolf/lego/providers/http/memcached/memcached.go
@@ -1,4 +1,5 @@
-// Package webroot implements a HTTP provider for solving the HTTP-01 challenge using web server's root path.
+// Package memcached implements a HTTP provider for solving the HTTP-01 challenge using memcached
+// in combination with a webserver.
package memcached
import (