summaryrefslogtreecommitdiffstats
path: root/vendor/github.com
diff options
context:
space:
mode:
authorChristopher Speller <crspeller@gmail.com>2017-02-02 09:32:00 -0500
committerHarrison Healey <harrisonmhealey@gmail.com>2017-02-02 09:32:00 -0500
commit701d1ab638b23c24877fc41824add66232446676 (patch)
treeec120c88d38ac9d38d9eabdd3270b52bb6ac9d96 /vendor/github.com
parentca3211bc04f6dea34e8168217182637d1419f998 (diff)
downloadchat-701d1ab638b23c24877fc41824add66232446676.tar.gz
chat-701d1ab638b23c24877fc41824add66232446676.tar.bz2
chat-701d1ab638b23c24877fc41824add66232446676.zip
Updating server dependancies (#5249)
Diffstat (limited to 'vendor/github.com')
-rw-r--r--vendor/github.com/NYTimes/gziphandler/gzip.go14
-rw-r--r--vendor/github.com/disintegration/imaging/effects.go30
-rw-r--r--vendor/github.com/disintegration/imaging/effects_test.go14
-rw-r--r--vendor/github.com/disintegration/imaging/histogram.go43
-rw-r--r--vendor/github.com/disintegration/imaging/histogram_test.go42
-rw-r--r--vendor/github.com/disintegration/imaging/resize.go38
-rw-r--r--vendor/github.com/disintegration/imaging/resize_test.go18
-rw-r--r--vendor/github.com/go-ldap/ldap/.travis.yml9
-rw-r--r--vendor/github.com/go-ldap/ldap/LICENSE43
-rw-r--r--vendor/github.com/go-ldap/ldap/control.go12
-rw-r--r--vendor/github.com/go-ldap/ldap/control_test.go39
-rw-r--r--vendor/github.com/go-ldap/ldap/dn.go96
-rw-r--r--vendor/github.com/go-ldap/ldap/dn_test.go139
-rw-r--r--vendor/github.com/go-ldap/ldap/ldap.go55
-rw-r--r--vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md21
-rw-r--r--vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md9
-rw-r--r--vendor/github.com/go-sql-driver/mysql/.travis.yml8
-rw-r--r--vendor/github.com/go-sql-driver/mysql/AUTHORS21
-rw-r--r--vendor/github.com/go-sql-driver/mysql/CHANGELOG.md47
-rw-r--r--vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md17
-rw-r--r--vendor/github.com/go-sql-driver/mysql/README.md143
-rw-r--r--vendor/github.com/go-sql-driver/mysql/benchmark_test.go40
-rw-r--r--vendor/github.com/go-sql-driver/mysql/buffer.go55
-rw-r--r--vendor/github.com/go-sql-driver/mysql/collations.go16
-rw-r--r--vendor/github.com/go-sql-driver/mysql/connection.go277
-rw-r--r--vendor/github.com/go-sql-driver/mysql/connection_test.go67
-rw-r--r--vendor/github.com/go-sql-driver/mysql/const.go37
-rw-r--r--vendor/github.com/go-sql-driver/mysql/driver.go117
-rw-r--r--vendor/github.com/go-sql-driver/mysql/driver_test.go940
-rw-r--r--vendor/github.com/go-sql-driver/mysql/dsn.go548
-rw-r--r--vendor/github.com/go-sql-driver/mysql/dsn_test.go231
-rw-r--r--vendor/github.com/go-sql-driver/mysql/errors.go23
-rw-r--r--vendor/github.com/go-sql-driver/mysql/infile.go56
-rw-r--r--vendor/github.com/go-sql-driver/mysql/packets.go449
-rw-r--r--vendor/github.com/go-sql-driver/mysql/packets_test.go282
-rw-r--r--vendor/github.com/go-sql-driver/mysql/rows.go50
-rw-r--r--vendor/github.com/go-sql-driver/mysql/statement.go45
-rw-r--r--vendor/github.com/go-sql-driver/mysql/utils.go574
-rw-r--r--vendor/github.com/go-sql-driver/mysql/utils_test.go143
-rw-r--r--vendor/github.com/goamz/goamz/s3/s3.go3
-rw-r--r--vendor/github.com/golang/freetype/truetype/glyph.go5
-rw-r--r--vendor/github.com/gorilla/handlers/recovery.go15
-rw-r--r--vendor/github.com/gorilla/mux/README.md44
-rw-r--r--vendor/github.com/gorilla/mux/doc.go5
-rw-r--r--vendor/github.com/gorilla/mux/mux_test.go10
-rw-r--r--vendor/github.com/gorilla/mux/regexp.go7
-rw-r--r--vendor/github.com/gorilla/websocket/compression.go26
-rw-r--r--vendor/github.com/gorilla/websocket/conn.go42
-rw-r--r--vendor/github.com/lib/pq/.travis.yml14
-rw-r--r--vendor/github.com/lib/pq/array.go45
-rw-r--r--vendor/github.com/lib/pq/array_test.go164
-rw-r--r--vendor/github.com/lib/pq/conn.go189
-rw-r--r--vendor/github.com/lib/pq/conn_go18.go92
-rw-r--r--vendor/github.com/lib/pq/copy.go27
-rw-r--r--vendor/github.com/lib/pq/encode.go8
-rw-r--r--vendor/github.com/lib/pq/encode_test.go52
-rw-r--r--vendor/github.com/lib/pq/go18_test.go99
-rw-r--r--vendor/github.com/lib/pq/ssl.go175
-rw-r--r--vendor/github.com/lib/pq/ssl_go1.7.go14
-rw-r--r--vendor/github.com/lib/pq/ssl_permissions.go16
-rw-r--r--vendor/github.com/lib/pq/ssl_renegotiation.go8
-rw-r--r--vendor/github.com/lib/pq/ssl_windows.go9
-rw-r--r--vendor/github.com/lib/pq/uuid.go23
-rw-r--r--vendor/github.com/lib/pq/uuid_test.go46
-rw-r--r--vendor/github.com/miekg/dns/.travis.yml8
-rw-r--r--vendor/github.com/miekg/dns/README.md5
-rw-r--r--vendor/github.com/miekg/dns/defaults.go4
-rw-r--r--vendor/github.com/miekg/dns/edns.go2
-rw-r--r--vendor/github.com/miekg/dns/msg.go29
-rw-r--r--vendor/github.com/miekg/dns/msg_generate.go4
-rw-r--r--vendor/github.com/miekg/dns/msg_helpers.go2
-rw-r--r--vendor/github.com/miekg/dns/parse_test.go19
-rw-r--r--vendor/github.com/miekg/dns/sanitize_test.go2
-rw-r--r--vendor/github.com/miekg/dns/scan_rr.go110
-rw-r--r--vendor/github.com/miekg/dns/server.go2
-rw-r--r--vendor/github.com/miekg/dns/server_test.go40
-rw-r--r--vendor/github.com/miekg/dns/types.go19
-rw-r--r--vendor/github.com/miekg/dns/udp_linux.go9
-rw-r--r--vendor/github.com/miekg/dns/zmsg.go34
-rw-r--r--vendor/github.com/minio/minio-go/.travis.yml6
-rw-r--r--vendor/github.com/minio/minio-go/README.md28
-rw-r--r--vendor/github.com/minio/minio-go/api-datatypes.go9
-rw-r--r--vendor/github.com/minio/minio-go/api-get-object.go4
-rw-r--r--vendor/github.com/minio/minio-go/api-get-policy.go4
-rw-r--r--vendor/github.com/minio/minio-go/api-list.go1
-rw-r--r--vendor/github.com/minio/minio-go/api-notification.go29
-rw-r--r--vendor/github.com/minio/minio-go/api-presigned.go11
-rw-r--r--vendor/github.com/minio/minio-go/api-put-bucket.go13
-rw-r--r--vendor/github.com/minio/minio-go/api-put-bucket_test.go17
-rw-r--r--vendor/github.com/minio/minio-go/api-put-object-common.go72
-rw-r--r--vendor/github.com/minio/minio-go/api-put-object-copy.go8
-rw-r--r--vendor/github.com/minio/minio-go/api-put-object-file.go121
-rw-r--r--vendor/github.com/minio/minio-go/api-put-object-multipart.go54
-rw-r--r--vendor/github.com/minio/minio-go/api-put-object-progress.go25
-rw-r--r--vendor/github.com/minio/minio-go/api-put-object-readat.go135
-rw-r--r--vendor/github.com/minio/minio-go/api-put-object.go46
-rw-r--r--vendor/github.com/minio/minio-go/api-remove.go11
-rw-r--r--vendor/github.com/minio/minio-go/api-s3-datatypes.go9
-rw-r--r--vendor/github.com/minio/minio-go/api-stat.go74
-rw-r--r--vendor/github.com/minio/minio-go/api.go113
-rw-r--r--vendor/github.com/minio/minio-go/api_functional_v2_test.go44
-rw-r--r--vendor/github.com/minio/minio-go/api_functional_v4_test.go210
-rw-r--r--vendor/github.com/minio/minio-go/api_unit_test.go92
-rw-r--r--vendor/github.com/minio/minio-go/bucket-cache.go19
-rw-r--r--vendor/github.com/minio/minio-go/bucket-cache_test.go12
-rw-r--r--vendor/github.com/minio/minio-go/bucket-notification.go2
-rw-r--r--vendor/github.com/minio/minio-go/constants.go6
-rw-r--r--vendor/github.com/minio/minio-go/docs/API.md347
-rw-r--r--vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go9
-rw-r--r--vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go (renamed from vendor/github.com/minio/minio-go/request-signature-v2.go)28
-rw-r--r--vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go (renamed from vendor/github.com/minio/minio-go/request-signature-v2_test.go)2
-rw-r--r--vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go (renamed from vendor/github.com/minio/minio-go/request-signature-v4.go)26
-rw-r--r--vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go70
-rw-r--r--vendor/github.com/minio/minio-go/pkg/s3signer/utils.go39
-rw-r--r--vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go66
-rw-r--r--vendor/github.com/minio/minio-go/pkg/s3utils/utils.go195
-rw-r--r--vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go284
-rw-r--r--vendor/github.com/minio/minio-go/post-policy.go18
-rw-r--r--vendor/github.com/minio/minio-go/retry-continous.go52
-rw-r--r--vendor/github.com/minio/minio-go/s3-endpoints.go3
-rw-r--r--vendor/github.com/minio/minio-go/test-utils_test.go10
-rw-r--r--vendor/github.com/minio/minio-go/utils.go226
-rw-r--r--vendor/github.com/minio/minio-go/utils_test.go280
-rw-r--r--vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go52
-rw-r--r--vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle_test.go55
-rw-r--r--vendor/github.com/prometheus/common/expfmt/decode.go47
-rw-r--r--vendor/github.com/prometheus/common/expfmt/decode_test.go68
-rw-r--r--vendor/github.com/prometheus/common/expfmt/expfmt.go7
-rw-r--r--vendor/github.com/prometheus/common/log/syslog_formatter.go11
-rw-r--r--vendor/github.com/prometheus/common/log/syslog_formatter_test.go52
-rw-r--r--vendor/github.com/prometheus/common/model/metric.go2
-rw-r--r--vendor/github.com/prometheus/common/route/route.go7
-rw-r--r--vendor/github.com/prometheus/common/route/route_test.go2
-rw-r--r--vendor/github.com/prometheus/procfs/.travis.yml4
-rw-r--r--vendor/github.com/prometheus/procfs/AUTHORS.md1
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26231/mountstats19
-rw-r--r--vendor/github.com/prometheus/procfs/mountstats.go552
-rw-r--r--vendor/github.com/prometheus/procfs/mountstats_test.go252
-rw-r--r--vendor/github.com/prometheus/procfs/proc.go12
-rw-r--r--vendor/github.com/spf13/cobra/README.md15
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.go8
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.md2
-rw-r--r--vendor/github.com/spf13/cobra/cobra.go3
-rw-r--r--vendor/github.com/spf13/cobra/command.go54
-rw-r--r--vendor/github.com/spf13/cobra/doc/man_docs.go2
-rw-r--r--vendor/github.com/spf13/cobra/doc/md_docs.md6
-rw-r--r--vendor/github.com/spf13/cobra/doc/util.go15
-rw-r--r--vendor/github.com/spf13/cobra/doc/yaml_docs.go165
-rw-r--r--vendor/github.com/spf13/cobra/doc/yaml_docs.md103
-rw-r--r--vendor/github.com/spf13/cobra/doc/yaml_docs_test.go88
-rw-r--r--vendor/github.com/spf13/pflag/.gitignore2
-rw-r--r--vendor/github.com/spf13/pflag/README.md6
-rw-r--r--vendor/github.com/spf13/pflag/bool_slice.go147
-rw-r--r--vendor/github.com/spf13/pflag/bool_slice_test.go215
-rw-r--r--vendor/github.com/spf13/pflag/flag.go146
-rw-r--r--vendor/github.com/spf13/pflag/flag_test.go66
-rw-r--r--vendor/github.com/spf13/pflag/golangflag.go3
-rw-r--r--vendor/github.com/spf13/pflag/ip.go2
-rw-r--r--vendor/github.com/spf13/pflag/ip_slice.go148
-rw-r--r--vendor/github.com/spf13/pflag/ip_slice_test.go222
-rw-r--r--vendor/github.com/spf13/pflag/ipnet.go2
-rw-r--r--vendor/github.com/spf13/pflag/string_array.go6
-rw-r--r--vendor/github.com/spf13/pflag/string_slice.go5
-rw-r--r--vendor/github.com/spf13/pflag/uint_slice.go126
-rw-r--r--vendor/github.com/spf13/pflag/uint_slice_test.go161
-rw-r--r--vendor/github.com/tylerb/graceful/graceful.go12
-rw-r--r--vendor/github.com/xenolf/lego/README.md2
-rw-r--r--vendor/github.com/xenolf/lego/acme/client.go149
-rw-r--r--vendor/github.com/xenolf/lego/acme/crypto.go2
-rw-r--r--vendor/github.com/xenolf/lego/acme/error.go15
-rw-r--r--vendor/github.com/xenolf/lego/acme/jws.go12
-rw-r--r--vendor/github.com/xenolf/lego/acme/messages.go23
-rw-r--r--vendor/github.com/xenolf/lego/cli_handlers.go8
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/azure/azure.go4
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go14
175 files changed, 9443 insertions, 2724 deletions
diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go
index dad0eb747..fbc99396d 100644
--- a/vendor/github.com/NYTimes/gziphandler/gzip.go
+++ b/vendor/github.com/NYTimes/gziphandler/gzip.go
@@ -1,8 +1,10 @@
package gziphandler
import (
+ "bufio"
"compress/gzip"
"fmt"
+ "net"
"net/http"
"strconv"
"strings"
@@ -131,6 +133,18 @@ func (w *GzipResponseWriter) Flush() {
}
}
+// Hijack implements http.Hijacker. If the underlying ResponseWriter is a
+// Hijacker, its Hijack method is returned. Otherwise an error is returned.
+func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if hj, ok := w.ResponseWriter.(http.Hijacker); ok {
+ return hj.Hijack()
+ }
+ return nil, nil, fmt.Errorf("http.Hijacker interface is not supported")
+}
+
+// verify Hijacker interface implementation
+var _ http.Hijacker = &GzipResponseWriter{}
+
// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in
// an error case it panics rather than returning an error.
func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
diff --git a/vendor/github.com/disintegration/imaging/effects.go b/vendor/github.com/disintegration/imaging/effects.go
index fe92e10a2..19d6e405e 100644
--- a/vendor/github.com/disintegration/imaging/effects.go
+++ b/vendor/github.com/disintegration/imaging/effects.go
@@ -67,15 +67,16 @@ func blurHorizontal(src *image.NRGBA, kernel []float64) *image.NRGBA {
for ix := start; ix <= end; ix++ {
weight := kernel[absint(x-ix)]
i := y*src.Stride + ix*4
- r += float64(src.Pix[i+0]) * weight
- g += float64(src.Pix[i+1]) * weight
- b += float64(src.Pix[i+2]) * weight
- a += float64(src.Pix[i+3]) * weight
+ wa := float64(src.Pix[i+3]) * weight
+ r += float64(src.Pix[i+0]) * wa
+ g += float64(src.Pix[i+1]) * wa
+ b += float64(src.Pix[i+2]) * wa
+ a += wa
}
- r = math.Min(math.Max(r/weightSum, 0.0), 255.0)
- g = math.Min(math.Max(g/weightSum, 0.0), 255.0)
- b = math.Min(math.Max(b/weightSum, 0.0), 255.0)
+ r = math.Min(math.Max(r/a, 0.0), 255.0)
+ g = math.Min(math.Max(g/a, 0.0), 255.0)
+ b = math.Min(math.Max(b/a, 0.0), 255.0)
a = math.Min(math.Max(a/weightSum, 0.0), 255.0)
j := y*dst.Stride + x*4
@@ -121,15 +122,16 @@ func blurVertical(src *image.NRGBA, kernel []float64) *image.NRGBA {
for iy := start; iy <= end; iy++ {
weight := kernel[absint(y-iy)]
i := iy*src.Stride + x*4
- r += float64(src.Pix[i+0]) * weight
- g += float64(src.Pix[i+1]) * weight
- b += float64(src.Pix[i+2]) * weight
- a += float64(src.Pix[i+3]) * weight
+ wa := float64(src.Pix[i+3]) * weight
+ r += float64(src.Pix[i+0]) * wa
+ g += float64(src.Pix[i+1]) * wa
+ b += float64(src.Pix[i+2]) * wa
+ a += wa
}
- r = math.Min(math.Max(r/weightSum, 0.0), 255.0)
- g = math.Min(math.Max(g/weightSum, 0.0), 255.0)
- b = math.Min(math.Max(b/weightSum, 0.0), 255.0)
+ r = math.Min(math.Max(r/a, 0.0), 255.0)
+ g = math.Min(math.Max(g/a, 0.0), 255.0)
+ b = math.Min(math.Max(b/a, 0.0), 255.0)
a = math.Min(math.Max(a/weightSum, 0.0), 255.0)
j := y*dst.Stride + x*4
diff --git a/vendor/github.com/disintegration/imaging/effects_test.go b/vendor/github.com/disintegration/imaging/effects_test.go
index a7e8cfffe..998ffe399 100644
--- a/vendor/github.com/disintegration/imaging/effects_test.go
+++ b/vendor/github.com/disintegration/imaging/effects_test.go
@@ -50,9 +50,9 @@ func TestBlur(t *testing.T) {
Rect: image.Rect(0, 0, 3, 3),
Stride: 3 * 4,
Pix: []uint8{
- 0x01, 0x02, 0x04, 0x04, 0x0a, 0x10, 0x18, 0x18, 0x01, 0x02, 0x04, 0x04,
- 0x09, 0x10, 0x18, 0x18, 0x3f, 0x69, 0x9e, 0x9e, 0x09, 0x10, 0x18, 0x18,
- 0x01, 0x02, 0x04, 0x04, 0x0a, 0x10, 0x18, 0x18, 0x01, 0x02, 0x04, 0x04,
+ 0x66, 0xaa, 0xff, 0x04, 0x66, 0xaa, 0xff, 0x18, 0x66, 0xaa, 0xff, 0x04,
+ 0x66, 0xaa, 0xff, 0x18, 0x66, 0xaa, 0xff, 0x9e, 0x66, 0xaa, 0xff, 0x18,
+ 0x66, 0xaa, 0xff, 0x04, 0x66, 0xaa, 0xff, 0x18, 0x66, 0xaa, 0xff, 0x04,
},
},
},
@@ -72,9 +72,9 @@ func TestBlur(t *testing.T) {
Rect: image.Rect(0, 0, 3, 3),
Stride: 3 * 4,
Pix: []uint8{
- 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c,
- 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c,
- 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c, 0x0b, 0x13, 0x1c, 0x1c,
+ 0x66, 0xaa, 0xff, 0x1c, 0x66, 0xaa, 0xff, 0x1c, 0x66, 0xaa, 0xff, 0x1c,
+ 0x66, 0xaa, 0xff, 0x1c, 0x66, 0xaa, 0xff, 0x1c, 0x66, 0xaa, 0xff, 0x1c,
+ 0x66, 0xaa, 0xff, 0x1c, 0x66, 0xaa, 0xff, 0x1c, 0x66, 0xaa, 0xff, 0x1c,
},
},
},
@@ -134,7 +134,7 @@ func TestSharpen(t *testing.T) {
Stride: 3 * 4,
Pix: []uint8{
0x66, 0x66, 0x66, 0x66, 0x64, 0x64, 0x64, 0x64, 0x66, 0x66, 0x66, 0x66,
- 0x64, 0x64, 0x64, 0x64, 0x7e, 0x7e, 0x7e, 0x7e, 0x64, 0x64, 0x64, 0x64,
+ 0x64, 0x64, 0x64, 0x64, 0x7d, 0x7d, 0x7d, 0x7e, 0x64, 0x64, 0x64, 0x64,
0x66, 0x66, 0x66, 0x66, 0x64, 0x64, 0x64, 0x64, 0x66, 0x66, 0x66, 0x66,
},
},
diff --git a/vendor/github.com/disintegration/imaging/histogram.go b/vendor/github.com/disintegration/imaging/histogram.go
new file mode 100644
index 000000000..aef333822
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/histogram.go
@@ -0,0 +1,43 @@
+package imaging
+
+import (
+ "image"
+)
+
+// Histogram returns a normalized histogram of an image.
+//
+// Resulting histogram is represented as an array of 256 floats, where
+// histogram[i] is a probability of a pixel being of a particular luminance i.
+func Histogram(img image.Image) [256]float64 {
+ src := toNRGBA(img)
+ width := src.Bounds().Max.X
+ height := src.Bounds().Max.Y
+
+ var histogram [256]float64
+ var total float64
+
+ if width == 0 || height == 0 {
+ return histogram
+ }
+
+ for y := 0; y < height; y++ {
+ for x := 0; x < width; x++ {
+ i := y*src.Stride + x*4
+
+ r := src.Pix[i+0]
+ g := src.Pix[i+1]
+ b := src.Pix[i+2]
+
+ var y float32 = 0.299*float32(r) + 0.587*float32(g) + 0.114*float32(b)
+
+ histogram[int(y+0.5)]++
+ total++
+ }
+ }
+
+ for i := 0; i < 256; i++ {
+ histogram[i] = histogram[i] / total
+ }
+
+ return histogram
+}
diff --git a/vendor/github.com/disintegration/imaging/histogram_test.go b/vendor/github.com/disintegration/imaging/histogram_test.go
new file mode 100644
index 000000000..0bcf82588
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/histogram_test.go
@@ -0,0 +1,42 @@
+package imaging
+
+import (
+ "image"
+ "image/color"
+ "testing"
+)
+
+func TestHistogram(t *testing.T) {
+ b := image.Rectangle{image.Point{0, 0}, image.Point{2, 2}}
+
+ i1 := image.NewRGBA(b)
+ i1.Set(0, 0, image.Black)
+ i1.Set(1, 0, image.White)
+ i1.Set(1, 1, image.White)
+ i1.Set(0, 1, color.Gray{123})
+
+ h := Histogram(i1)
+ if h[0] != 0.25 || h[123] != 0.25 || h[255] != 0.5 {
+ t.Errorf("Incorrect histogram for image i1")
+ }
+
+ i2 := image.NewRGBA(b)
+ i2.Set(0, 0, color.Gray{51})
+ i2.Set(0, 1, color.Gray{14})
+ i2.Set(1, 0, color.Gray{14})
+
+ h = Histogram(i2)
+ if h[14] != 0.5 || h[51] != 0.25 || h[0] != 0.25 {
+ t.Errorf("Incorrect histogram for image i2")
+ }
+
+ b = image.Rectangle{image.Point{0, 0}, image.Point{0, 0}}
+ i3 := image.NewRGBA(b)
+ h = Histogram(i3)
+ for _, val := range h {
+ if val != 0 {
+ t.Errorf("Histogram for an empty image should be a zero histogram.")
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/disintegration/imaging/resize.go b/vendor/github.com/disintegration/imaging/resize.go
index 3c792e904..b21eed544 100644
--- a/vendor/github.com/disintegration/imaging/resize.go
+++ b/vendor/github.com/disintegration/imaging/resize.go
@@ -128,20 +128,21 @@ func resizeHorizontal(src *image.NRGBA, width int, filter ResampleFilter) *image
parallel(dstH, func(partStart, partEnd int) {
for dstY := partStart; dstY < partEnd; dstY++ {
for dstX := 0; dstX < dstW; dstX++ {
- var c [4]int32
+ var c [4]int64
for _, iw := range weights[dstX].iwpairs {
i := dstY*src.Stride + iw.i*4
- c[0] += int32(src.Pix[i+0]) * iw.w
- c[1] += int32(src.Pix[i+1]) * iw.w
- c[2] += int32(src.Pix[i+2]) * iw.w
- c[3] += int32(src.Pix[i+3]) * iw.w
+ a := int64(src.Pix[i+3]) * int64(iw.w)
+ c[0] += int64(src.Pix[i+0]) * a
+ c[1] += int64(src.Pix[i+1]) * a
+ c[2] += int64(src.Pix[i+2]) * a
+ c[3] += a
}
j := dstY*dst.Stride + dstX*4
sum := weights[dstX].wsum
- dst.Pix[j+0] = clampint32(int32(float32(c[0])/float32(sum) + 0.5))
- dst.Pix[j+1] = clampint32(int32(float32(c[1])/float32(sum) + 0.5))
- dst.Pix[j+2] = clampint32(int32(float32(c[2])/float32(sum) + 0.5))
- dst.Pix[j+3] = clampint32(int32(float32(c[3])/float32(sum) + 0.5))
+ dst.Pix[j+0] = clampint32(int32(float64(c[0])/float64(c[3]) + 0.5))
+ dst.Pix[j+1] = clampint32(int32(float64(c[1])/float64(c[3]) + 0.5))
+ dst.Pix[j+2] = clampint32(int32(float64(c[2])/float64(c[3]) + 0.5))
+ dst.Pix[j+3] = clampint32(int32(float64(c[3])/float64(sum) + 0.5))
}
}
})
@@ -165,20 +166,21 @@ func resizeVertical(src *image.NRGBA, height int, filter ResampleFilter) *image.
for dstX := partStart; dstX < partEnd; dstX++ {
for dstY := 0; dstY < dstH; dstY++ {
- var c [4]int32
+ var c [4]int64
for _, iw := range weights[dstY].iwpairs {
i := iw.i*src.Stride + dstX*4
- c[0] += int32(src.Pix[i+0]) * iw.w
- c[1] += int32(src.Pix[i+1]) * iw.w
- c[2] += int32(src.Pix[i+2]) * iw.w
- c[3] += int32(src.Pix[i+3]) * iw.w
+ a := int64(src.Pix[i+3]) * int64(iw.w)
+ c[0] += int64(src.Pix[i+0]) * a
+ c[1] += int64(src.Pix[i+1]) * a
+ c[2] += int64(src.Pix[i+2]) * a
+ c[3] += a
}
j := dstY*dst.Stride + dstX*4
sum := weights[dstY].wsum
- dst.Pix[j+0] = clampint32(int32(float32(c[0])/float32(sum) + 0.5))
- dst.Pix[j+1] = clampint32(int32(float32(c[1])/float32(sum) + 0.5))
- dst.Pix[j+2] = clampint32(int32(float32(c[2])/float32(sum) + 0.5))
- dst.Pix[j+3] = clampint32(int32(float32(c[3])/float32(sum) + 0.5))
+ dst.Pix[j+0] = clampint32(int32(float64(c[0])/float64(c[3]) + 0.5))
+ dst.Pix[j+1] = clampint32(int32(float64(c[1])/float64(c[3]) + 0.5))
+ dst.Pix[j+2] = clampint32(int32(float64(c[2])/float64(c[3]) + 0.5))
+ dst.Pix[j+3] = clampint32(int32(float64(c[3])/float64(sum) + 0.5))
}
}
diff --git a/vendor/github.com/disintegration/imaging/resize_test.go b/vendor/github.com/disintegration/imaging/resize_test.go
index 08d7f2d85..927f92512 100644
--- a/vendor/github.com/disintegration/imaging/resize_test.go
+++ b/vendor/github.com/disintegration/imaging/resize_test.go
@@ -28,7 +28,7 @@ func TestResize(t *testing.T) {
&image.NRGBA{
Rect: image.Rect(0, 0, 1, 1),
Stride: 1 * 4,
- Pix: []uint8{0x40, 0x40, 0x40, 0xc0},
+ Pix: []uint8{0x55, 0x55, 0x55, 0xc0},
},
},
{
@@ -108,10 +108,10 @@ func TestResize(t *testing.T) {
Rect: image.Rect(0, 0, 4, 4),
Stride: 4 * 4,
Pix: []uint8{
- 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0xbf, 0x00, 0x00, 0xbf, 0xff, 0x00, 0x00, 0xff,
- 0x00, 0x40, 0x00, 0x40, 0x30, 0x30, 0x10, 0x70, 0x8f, 0x10, 0x30, 0xcf, 0xbf, 0x00, 0x40, 0xff,
- 0x00, 0xbf, 0x00, 0xbf, 0x10, 0x8f, 0x30, 0xcf, 0x30, 0x30, 0x8f, 0xef, 0x40, 0x00, 0xbf, 0xff,
- 0x00, 0xff, 0x00, 0xff, 0x00, 0xbf, 0x40, 0xff, 0x00, 0x40, 0xbf, 0xff, 0x00, 0x00, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x3f, 0xff, 0x00, 0x00, 0xc0, 0xff, 0x00, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0x3f, 0x6d, 0x6e, 0x24, 0x6f, 0xb1, 0x13, 0x3a, 0xd0, 0xc0, 0x00, 0x3f, 0xff,
+ 0x00, 0xff, 0x00, 0xc0, 0x13, 0xb2, 0x3a, 0xcf, 0x33, 0x32, 0x9a, 0xef, 0x3f, 0x00, 0xc0, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xc0, 0x3f, 0xff, 0x00, 0x3f, 0xc0, 0xff, 0x00, 0x00, 0xff, 0xff,
},
},
},
@@ -224,7 +224,7 @@ func TestFit(t *testing.T) {
&image.NRGBA{
Rect: image.Rect(0, 0, 1, 1),
Stride: 1 * 4,
- Pix: []uint8{0x40, 0x40, 0x40, 0xc0},
+ Pix: []uint8{0x55, 0x55, 0x55, 0xc0},
},
},
{
@@ -242,7 +242,7 @@ func TestFit(t *testing.T) {
&image.NRGBA{
Rect: image.Rect(0, 0, 1, 1),
Stride: 1 * 4,
- Pix: []uint8{0x40, 0x40, 0x40, 0xc0},
+ Pix: []uint8{0x55, 0x55, 0x55, 0xc0},
},
},
{
@@ -512,7 +512,7 @@ func TestThumbnail(t *testing.T) {
&image.NRGBA{
Rect: image.Rect(0, 0, 1, 1),
Stride: 1 * 4,
- Pix: []uint8{0x40, 0x40, 0x40, 0xc0},
+ Pix: []uint8{0x55, 0x55, 0x55, 0xc0},
},
},
{
@@ -534,7 +534,7 @@ func TestThumbnail(t *testing.T) {
&image.NRGBA{
Rect: image.Rect(0, 0, 1, 1),
Stride: 1 * 4,
- Pix: []uint8{0x40, 0x40, 0x40, 0xc0},
+ Pix: []uint8{0x55, 0x55, 0x55, 0xc0},
},
},
{
diff --git a/vendor/github.com/go-ldap/ldap/.travis.yml b/vendor/github.com/go-ldap/ldap/.travis.yml
index 7e2f641e7..e32a2aa75 100644
--- a/vendor/github.com/go-ldap/ldap/.travis.yml
+++ b/vendor/github.com/go-ldap/ldap/.travis.yml
@@ -1,15 +1,20 @@
language: go
env:
global:
- - VET_VERSIONS="1.5 1.6 tip"
- - LINT_VERSIONS="1.5 1.6 tip"
+ - VET_VERSIONS="1.6 1.7 tip"
+ - LINT_VERSIONS="1.6 1.7 tip"
go:
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
+ - 1.7
- tip
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
go_import_path: gopkg.in/ldap.v2
install:
- go get gopkg.in/asn1-ber.v1
diff --git a/vendor/github.com/go-ldap/ldap/LICENSE b/vendor/github.com/go-ldap/ldap/LICENSE
index 744875676..6c0ed4b38 100644
--- a/vendor/github.com/go-ldap/ldap/LICENSE
+++ b/vendor/github.com/go-ldap/ldap/LICENSE
@@ -1,27 +1,22 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
+The MIT License (MIT)
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
+Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
+Portions copyright (c) 2015-2016 go-ldap Authors
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-ldap/ldap/control.go b/vendor/github.com/go-ldap/ldap/control.go
index 5c62118d4..342f325ca 100644
--- a/vendor/github.com/go-ldap/ldap/control.go
+++ b/vendor/github.com/go-ldap/ldap/control.go
@@ -334,18 +334,18 @@ func DecodeControl(packet *ber.Packet) Control {
for _, child := range sequence.Children {
if child.Tag == 0 {
//Warning
- child := child.Children[0]
- packet := ber.DecodePacket(child.Data.Bytes())
+ warningPacket := child.Children[0]
+ packet := ber.DecodePacket(warningPacket.Data.Bytes())
val, ok := packet.Value.(int64)
if ok {
- if child.Tag == 0 {
+ if warningPacket.Tag == 0 {
//timeBeforeExpiration
c.Expire = val
- child.Value = c.Expire
- } else if child.Tag == 1 {
+ warningPacket.Value = c.Expire
+ } else if warningPacket.Tag == 1 {
//graceAuthNsRemaining
c.Grace = val
- child.Value = c.Grace
+ warningPacket.Value = c.Grace
}
}
} else if child.Tag == 1 {
diff --git a/vendor/github.com/go-ldap/ldap/control_test.go b/vendor/github.com/go-ldap/ldap/control_test.go
index 3fcdab0d7..11527463e 100644
--- a/vendor/github.com/go-ldap/ldap/control_test.go
+++ b/vendor/github.com/go-ldap/ldap/control_test.go
@@ -56,3 +56,42 @@ func runControlTest(t *testing.T, originalControl Control) {
t.Errorf("%sgot different type decoding from encoded bytes: %T vs %T", header, fromBytes, originalControl)
}
}
+
+func TestDescribeControlManageDsaIT(t *testing.T) {
+ runAddControlDescriptions(t, NewControlManageDsaIT(false), "Control Type (Manage DSA IT)")
+ runAddControlDescriptions(t, NewControlManageDsaIT(true), "Control Type (Manage DSA IT)", "Criticality")
+}
+
+func TestDescribeControlPaging(t *testing.T) {
+ runAddControlDescriptions(t, NewControlPaging(100), "Control Type (Paging)", "Control Value (Paging)")
+ runAddControlDescriptions(t, NewControlPaging(0), "Control Type (Paging)", "Control Value (Paging)")
+}
+
+func TestDescribeControlString(t *testing.T) {
+ runAddControlDescriptions(t, NewControlString("x", true, "y"), "Control Type ()", "Criticality", "Control Value")
+ runAddControlDescriptions(t, NewControlString("x", true, ""), "Control Type ()", "Criticality", "Control Value")
+ runAddControlDescriptions(t, NewControlString("x", false, "y"), "Control Type ()", "Control Value")
+ runAddControlDescriptions(t, NewControlString("x", false, ""), "Control Type ()", "Control Value")
+}
+
+func runAddControlDescriptions(t *testing.T, originalControl Control, childDescriptions ...string) {
+ header := ""
+ if callerpc, _, line, ok := runtime.Caller(1); ok {
+ if caller := runtime.FuncForPC(callerpc); caller != nil {
+ header = fmt.Sprintf("%s:%d: ", caller.Name(), line)
+ }
+ }
+
+ encodedControls := encodeControls([]Control{originalControl})
+ addControlDescriptions(encodedControls)
+ encodedPacket := encodedControls.Children[0]
+ if len(encodedPacket.Children) != len(childDescriptions) {
+ t.Errorf("%sinvalid number of children: %d != %d", header, len(encodedPacket.Children), len(childDescriptions))
+ }
+ for i, desc := range childDescriptions {
+ if encodedPacket.Children[i].Description != desc {
+ t.Errorf("%sdescription not as expected: %s != %s", header, encodedPacket.Children[i].Description, desc)
+ }
+ }
+
+}
diff --git a/vendor/github.com/go-ldap/ldap/dn.go b/vendor/github.com/go-ldap/ldap/dn.go
index cc70c894c..a8ece3142 100644
--- a/vendor/github.com/go-ldap/ldap/dn.go
+++ b/vendor/github.com/go-ldap/ldap/dn.go
@@ -83,9 +83,19 @@ func ParseDN(str string) (*DN, error) {
attribute := new(AttributeTypeAndValue)
escaping := false
+ unescapedTrailingSpaces := 0
+ stringFromBuffer := func() string {
+ s := buffer.String()
+ s = s[0 : len(s)-unescapedTrailingSpaces]
+ buffer.Reset()
+ unescapedTrailingSpaces = 0
+ return s
+ }
+
for i := 0; i < len(str); i++ {
char := str[i]
if escaping {
+ unescapedTrailingSpaces = 0
escaping = false
switch char {
case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
@@ -107,10 +117,10 @@ func ParseDN(str string) (*DN, error) {
buffer.WriteByte(dst[0])
i++
} else if char == '\\' {
+ unescapedTrailingSpaces = 0
escaping = true
} else if char == '=' {
- attribute.Type = buffer.String()
- buffer.Reset()
+ attribute.Type = stringFromBuffer()
// Special case: If the first character in the value is # the
// following data is BER encoded so we can just fast forward
// and decode.
@@ -133,7 +143,7 @@ func ParseDN(str string) (*DN, error) {
}
} else if char == ',' || char == '+' {
// We're done with this RDN or value, push it
- attribute.Value = buffer.String()
+ attribute.Value = stringFromBuffer()
rdn.Attributes = append(rdn.Attributes, attribute)
attribute = new(AttributeTypeAndValue)
if char == ',' {
@@ -141,8 +151,17 @@ func ParseDN(str string) (*DN, error) {
rdn = new(RelativeDN)
rdn.Attributes = make([]*AttributeTypeAndValue, 0)
}
- buffer.Reset()
+ } else if char == ' ' && buffer.Len() == 0 {
+ // ignore unescaped leading spaces
+ continue
} else {
+ if char == ' ' {
+ // Track unescaped spaces in case they are trailing and we need to remove them
+ unescapedTrailingSpaces++
+ } else {
+ // Reset if we see a non-space char
+ unescapedTrailingSpaces = 0
+ }
buffer.WriteByte(char)
}
}
@@ -150,9 +169,76 @@ func ParseDN(str string) (*DN, error) {
if len(attribute.Type) == 0 {
return nil, errors.New("DN ended with incomplete type, value pair")
}
- attribute.Value = buffer.String()
+ attribute.Value = stringFromBuffer()
rdn.Attributes = append(rdn.Attributes, attribute)
dn.RDNs = append(dn.RDNs, rdn)
}
return dn, nil
}
+
+// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Returns true if they have the same number of relative distinguished names
+// and corresponding relative distinguished names (by position) are the same.
+func (d *DN) Equal(other *DN) bool {
+ if len(d.RDNs) != len(other.RDNs) {
+ return false
+ }
+ for i := range d.RDNs {
+ if !d.RDNs[i].Equal(other.RDNs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
+// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
+// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
+func (d *DN) AncestorOf(other *DN) bool {
+ if len(d.RDNs) >= len(other.RDNs) {
+ return false
+ }
+ // Take the last `len(d.RDNs)` RDNs from the other DN to compare against
+ otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
+ for i := range d.RDNs {
+ if !d.RDNs[i].Equal(otherRDNs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
+// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
+// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
+// The order of attributes is not significant.
+// Case of attribute types is not significant.
+func (r *RelativeDN) Equal(other *RelativeDN) bool {
+ if len(r.Attributes) != len(other.Attributes) {
+ return false
+ }
+ return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
+}
+
+func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
+ for _, attr := range attrs {
+ found := false
+ for _, myattr := range r.Attributes {
+ if myattr.Equal(attr) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
+// Case of the attribute type is not significant
+func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
+ return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
+}
diff --git a/vendor/github.com/go-ldap/ldap/dn_test.go b/vendor/github.com/go-ldap/ldap/dn_test.go
index 39817c427..5055cc15b 100644
--- a/vendor/github.com/go-ldap/ldap/dn_test.go
+++ b/vendor/github.com/go-ldap/ldap/dn_test.go
@@ -31,6 +31,22 @@ func TestSuccessfulDNParsing(t *testing.T) {
&ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"DC", "net"}}}}},
"CN=Lu\\C4\\8Di\\C4\\87": ldap.DN{[]*ldap.RelativeDN{
&ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"CN", "Lučić"}}}}},
+ " CN = Lu\\C4\\8Di\\C4\\87 ": ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"CN", "Lučić"}}}}},
+ ` A = 1 , B = 2 `: ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"A", "1"}}},
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{"B", "2"}}}}},
+ ` A = 1 + B = 2 `: ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{
+ &ldap.AttributeTypeAndValue{"A", "1"},
+ &ldap.AttributeTypeAndValue{"B", "2"}}}}},
+ ` \ \ A\ \ = \ \ 1\ \ , \ \ B\ \ = \ \ 2\ \ `: ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{" A ", " 1 "}}},
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{&ldap.AttributeTypeAndValue{" B ", " 2 "}}}}},
+ ` \ \ A\ \ = \ \ 1\ \ + \ \ B\ \ = \ \ 2\ \ `: ldap.DN{[]*ldap.RelativeDN{
+ &ldap.RelativeDN{[]*ldap.AttributeTypeAndValue{
+ &ldap.AttributeTypeAndValue{" A ", " 1 "},
+ &ldap.AttributeTypeAndValue{" B ", " 2 "}}}}},
}
for test, answer := range testcases {
@@ -41,6 +57,13 @@ func TestSuccessfulDNParsing(t *testing.T) {
}
if !reflect.DeepEqual(dn, &answer) {
t.Errorf("Parsed DN %s is not equal to the expected structure", test)
+ t.Logf("Expected:")
+ for _, rdn := range answer.RDNs {
+ for _, attribs := range rdn.Attributes {
+ t.Logf("#%v\n", attribs)
+ }
+ }
+ t.Logf("Actual:")
for _, rdn := range dn.RDNs {
for _, attribs := range rdn.Attributes {
t.Logf("#%v\n", attribs)
@@ -68,3 +91,119 @@ func TestErrorDNParsing(t *testing.T) {
}
}
}
+
+func TestDNEqual(t *testing.T) {
+ testcases := []struct {
+ A string
+ B string
+ Equal bool
+ }{
+ // Exact match
+ {"", "", true},
+ {"o=A", "o=A", true},
+ {"o=A", "o=B", false},
+
+ {"o=A,o=B", "o=A,o=B", true},
+ {"o=A,o=B", "o=A,o=C", false},
+
+ {"o=A+o=B", "o=A+o=B", true},
+ {"o=A+o=B", "o=A+o=C", false},
+
+ // Case mismatch in type is ignored
+ {"o=A", "O=A", true},
+ {"o=A,o=B", "o=A,O=B", true},
+ {"o=A+o=B", "o=A+O=B", true},
+
+ // Case mismatch in value is significant
+ {"o=a", "O=A", false},
+ {"o=a,o=B", "o=A,O=B", false},
+ {"o=a+o=B", "o=A+O=B", false},
+
+ // Multi-valued RDN order mismatch is ignored
+ {"o=A+o=B", "O=B+o=A", true},
+ // Number of RDN attributes is significant
+ {"o=A+o=B", "O=B+o=A+O=B", false},
+
+ // Missing values are significant
+ {"o=A+o=B", "O=B+o=A+O=C", false}, // missing values matter
+ {"o=A+o=B+o=C", "O=B+o=A", false}, // missing values matter
+
+ // Whitespace tests
+ // Matching
+ {
+ "cn=John Doe, ou=People, dc=sun.com",
+ "cn=John Doe, ou=People, dc=sun.com",
+ true,
+ },
+ // Difference in leading/trailing chars is ignored
+ {
+ "cn=John Doe, ou=People, dc=sun.com",
+ "cn=John Doe,ou=People,dc=sun.com",
+ true,
+ },
+ // Difference in values is significant
+ {
+ "cn=John Doe, ou=People, dc=sun.com",
+ "cn=John Doe, ou=People, dc=sun.com",
+ false,
+ },
+ }
+
+ for i, tc := range testcases {
+ a, err := ldap.ParseDN(tc.A)
+ if err != nil {
+ t.Errorf("%d: %v", i, err)
+ continue
+ }
+ b, err := ldap.ParseDN(tc.B)
+ if err != nil {
+ t.Errorf("%d: %v", i, err)
+ continue
+ }
+ if expected, actual := tc.Equal, a.Equal(b); expected != actual {
+ t.Errorf("%d: when comparing '%s' and '%s' expected %v, got %v", i, tc.A, tc.B, expected, actual)
+ continue
+ }
+ if expected, actual := tc.Equal, b.Equal(a); expected != actual {
+ t.Errorf("%d: when comparing '%s' and '%s' expected %v, got %v", i, tc.A, tc.B, expected, actual)
+ continue
+ }
+ }
+}
+
+func TestDNAncestor(t *testing.T) {
+ testcases := []struct {
+ A string
+ B string
+ Ancestor bool
+ }{
+ // Exact match returns false
+ {"", "", false},
+ {"o=A", "o=A", false},
+ {"o=A,o=B", "o=A,o=B", false},
+ {"o=A+o=B", "o=A+o=B", false},
+
+ // Mismatch
+ {"ou=C,ou=B,o=A", "ou=E,ou=D,ou=B,o=A", false},
+
+ // Descendant
+ {"ou=C,ou=B,o=A", "ou=E,ou=C,ou=B,o=A", true},
+ }
+
+ for i, tc := range testcases {
+ a, err := ldap.ParseDN(tc.A)
+ if err != nil {
+ t.Errorf("%d: %v", i, err)
+ continue
+ }
+ b, err := ldap.ParseDN(tc.B)
+ if err != nil {
+ t.Errorf("%d: %v", i, err)
+ continue
+ }
+ if expected, actual := tc.Ancestor, a.AncestorOf(b); expected != actual {
+ t.Errorf("%d: when comparing '%s' and '%s' expected %v, got %v", i, tc.A, tc.B, expected, actual)
+ continue
+ }
+ }
+}
diff --git a/vendor/github.com/go-ldap/ldap/ldap.go b/vendor/github.com/go-ldap/ldap/ldap.go
index 90018be83..d27e639d0 100644
--- a/vendor/github.com/go-ldap/ldap/ldap.go
+++ b/vendor/github.com/go-ldap/ldap/ldap.go
@@ -153,16 +153,47 @@ func addLDAPDescriptions(packet *ber.Packet) (err error) {
func addControlDescriptions(packet *ber.Packet) {
packet.Description = "Controls"
for _, child := range packet.Children {
+ var value *ber.Packet
+ controlType := ""
child.Description = "Control"
- child.Children[0].Description = "Control Type (" + ControlTypeMap[child.Children[0].Value.(string)] + ")"
- value := child.Children[1]
- if len(child.Children) == 3 {
+ switch len(child.Children) {
+ case 0:
+ // at least one child is required for control type
+ continue
+
+ case 1:
+ // just type, no criticality or value
+ controlType = child.Children[0].Value.(string)
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+
+ case 2:
+ controlType = child.Children[0].Value.(string)
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
+ // Children[1] could be criticality or value (both are optional)
+ // duck-type on whether this is a boolean
+ if _, ok := child.Children[1].Value.(bool); ok {
+ child.Children[1].Description = "Criticality"
+ } else {
+ child.Children[1].Description = "Control Value"
+ value = child.Children[1]
+ }
+
+ case 3:
+ // criticality and value present
+ controlType = child.Children[0].Value.(string)
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
child.Children[1].Description = "Criticality"
+ child.Children[2].Description = "Control Value"
value = child.Children[2]
- }
- value.Description = "Control Value"
- switch child.Children[0].Value.(string) {
+ default:
+ // more than 3 children is invalid
+ continue
+ }
+ if value == nil {
+ continue
+ }
+ switch controlType {
case ControlTypePaging:
value.Description += " (Paging)"
if value.Value != nil {
@@ -188,18 +219,18 @@ func addControlDescriptions(packet *ber.Packet) {
for _, child := range sequence.Children {
if child.Tag == 0 {
//Warning
- child := child.Children[0]
- packet := ber.DecodePacket(child.Data.Bytes())
+ warningPacket := child.Children[0]
+ packet := ber.DecodePacket(warningPacket.Data.Bytes())
val, ok := packet.Value.(int64)
if ok {
- if child.Tag == 0 {
+ if warningPacket.Tag == 0 {
//timeBeforeExpiration
value.Description += " (TimeBeforeExpiration)"
- child.Value = val
- } else if child.Tag == 1 {
+ warningPacket.Value = val
+ } else if warningPacket.Tag == 1 {
//graceAuthNsRemaining
value.Description += " (GraceAuthNsRemaining)"
- child.Value = val
+ warningPacket.Value = val
}
}
} else if child.Tag == 1 {
diff --git a/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..d9771f1dd
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,21 @@
+### Issue description
+Tell us what should happen and what happens instead
+
+### Example code
+```go
+If possible, please enter some example code here to reproduce the issue.
+```
+
+### Error log
+```
+If you have an error log, please paste it here.
+```
+
+### Configuration
+*Driver version (or git SHA):*
+
+*Go version:* run `go version` in your console
+
+*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20
+
+*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10
diff --git a/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..6f5c7ebeb
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,9 @@
+### Description
+Please explain the changes you made here.
+
+### Checklist
+- [ ] Code compiles correctly
+- [ ] Created tests which fail without the change (if possible)
+- [ ] All tests passing
+- [ ] Extended the README / documentation, if necessary
+- [ ] Added myself / the copyright holder to the AUTHORS file
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml
index cc97c31e7..c1cc10aaf 100644
--- a/vendor/github.com/go-sql-driver/mysql/.travis.yml
+++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml
@@ -1,6 +1,12 @@
+sudo: false
language: go
go:
- - 1.1
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - 1.7
- tip
before_script:
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index f3c62e178..692c186fd 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -14,22 +14,43 @@
Aaron Hopkins <go-sql-driver at die.net>
Arne Hormann <arnehormann at gmail.com>
Carlos Nieto <jose.carlos at menteslibres.net>
+Chris Moos <chris at tech9computers.com>
+Daniel Nichter <nil at codenode.com>
+Daniël van Eeden <git at myname.nl>
DisposaBoy <disposaboy at dby.me>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
Hanno Braun <mail at hannobraun.com>
+Henri Yandell <flamefew at gmail.com>
+Hirotaka Yamamoto <ymmt2005 at gmail.com>
+INADA Naoki <songofacandy at gmail.com>
James Harr <james.harr at gmail.com>
Jian Zhen <zhenjl at gmail.com>
+Joshua Prunier <joshua.prunier at gmail.com>
+Julien Lefevre <julien.lefevr at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
+Kamil Dziedzic <kamil at klecza.pl>
+Kevin Malachowski <kevin at chowski.com>
+Lennart Rudolph <lrudolph at hmc.edu>
Leonardo YongUk Kim <dalinaum at gmail.com>
+Luca Looz <luca.looz92 at gmail.com>
Lucas Liu <extrafliu at gmail.com>
Luke Scott <luke at webconnex.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
+Olivier Mengué <dolmen at cpan.org>
+Paul Bonser <misterpib at gmail.com>
+Runrioter Wung <runrioter at gmail.com>
+Soroush Pour <me at soroushjp.com>
+Stan Putrya <root.vagner at gmail.com>
+Stanley Gunawan <gunawan.stanley at gmail.com>
+Xiangyu Hu <xiangyu.hu at outlook.com>
Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc>
+Zhenye Xie <xiezhenye at gmail.com>
# Organizations
Barracuda Networks, Inc.
Google Inc.
+Stripe Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
index feb53a1e8..6bcad7eaa 100644
--- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -1,3 +1,50 @@
+## Version 1.3 (2016-12-01)
+
+Changes:
+
+ - Go 1.1 is no longer supported
+ - Use decimals fields in MySQL to format time types (#249)
+ - Buffer optimizations (#269)
+ - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
+ - Documented DSN system var quoting rules (#502)
+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
+
+New Features:
+
+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
+ - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+ - Native password authentication plugin support (#494, #524)
+
+Bugfixes:
+
+ - Fixed handling of queries without columns and rows (#255)
+ - Fixed a panic when SetKeepAlive() failed (#298)
+ - Handle ERR packets while reading rows (#321)
+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
+ - Actually zero out bytes in handshake response (#378)
+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
+ - Fixed tests with MySQL 5.7.9+ (#380)
+ - QueryUnescape TLS config names (#397)
+ - Fixed "broken pipe" error by writing to closed socket (#390)
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
+ - Invalidate connection on error while discarding additional results (#513)
+ - Allow terminating packets of length 0 (#516)
+
+
## Version 1.2 (2014-06-03)
Changes:
diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
index f87c19824..8fe16bcb4 100644
--- a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
+++ b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
@@ -4,28 +4,11 @@
Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
-Please provide the following minimum information:
-* Your Go-MySQL-Driver version (or git SHA)
-* Your Go version (run `go version` in your console)
-* A detailed issue description
-* Error Log if present
-* If possible, a short example
-
-
## Contributing Code
By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
Don't forget to add yourself to the AUTHORS file.
-### Pull Requests Checklist
-
-Please check the following points before submitting your pull request:
-- [x] Code compiles correctly
-- [x] Created tests, if possible
-- [x] All tests pass
-- [x] Extended the README / documentation, if necessary
-- [x] Added yourself to the AUTHORS file
-
### Code Review
Everyone is invited to review and comment on pull requests.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index 6b3475b37..a16012f81 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -4,10 +4,6 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
-**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases)
-
-[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql)
-
---------------------------------------
* [Features](#features)
* [Requirements](#requirements)
@@ -30,7 +26,7 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
## Features
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
* Native Go implementation. No C-bindings, just pure Go
- * Connections over TCP/IPv4, TCP/IPv6 or Unix domain sockets
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
* Automatic handling of broken connections
* Automatic Connection Pooling *(by database/sql package)*
* Supports queries larger than 16MB
@@ -38,9 +34,10 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
* Intelligent `LONG DATA` handling in prepared statements
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
* Optional `time.Time` parsing
+ * Optional placeholder interpolation
## Requirements
- * Go 1.1 or higher
+ * Go 1.2 or higher
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
---------------------------------------
@@ -92,6 +89,8 @@ This has the same effect as an empty DSN string:
```
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
+
#### Password
Passwords can consist of any character. Escaping is **not** necessary.
@@ -122,6 +121,25 @@ Default: false
`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+##### `allowCleartextPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+##### `allowNativePasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+`allowNativePasswords=true` allows the usage of the mysql native password method.
+
##### `allowOldPasswords`
```
@@ -166,6 +184,33 @@ Default: false
`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+##### `columnsWithAlias`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`.
+
+*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
##### `loc`
@@ -177,8 +222,29 @@ Default: UTC
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+##### `maxAllowedPacket`
+```
+Type: decimal number
+Default: 0
+```
+
+Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from server.
+
+##### `multiStatements`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
##### `parseTime`
@@ -191,6 +257,15 @@ Default: false
`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
+##### `readTimeout`
+
+```
+Type: decimal number
+Default: 0
+```
+
+I/O read timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
##### `strict`
```
@@ -199,10 +274,11 @@ Valid Values: true, false
Default: false
```
-`strict=true` enables the strict mode in which MySQL warnings are treated as errors.
+`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.
-By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for an DSN example.
+A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.
+By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.
##### `timeout`
@@ -211,8 +287,7 @@ Type: decimal number
Default: OS default
```
-*Driver* side connection timeout. The value must be a string of decimal numbers, each with optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
-
+*Driver* side connection timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
##### `tls`
@@ -224,16 +299,33 @@ Default: false
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+##### `writeTimeout`
+
+```
+Type: decimal number
+Default: 0
+```
+
+I/O write timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
##### System Variables
-All other parameters are interpreted as system variables:
- * `autocommit`: `"SET autocommit=<value>"`
- * `time_zone`: `"SET time_zone=<value>"`
- * [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"`
- * `param`: `"SET <param>=<value>"`
+Any other parameters are interpreted as system variables:
+ * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
+ * `<enum_var>=<value>`: `SET <enum_var>=<value>`
+ * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
+
+Rules:
+* The values for string variables must be quoted with '
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+ (which implies values of string variables must be wrapped with `%27`)
+
+Examples:
+ * `autocommit=1`: `SET autocommit=1`
+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+ * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
-*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!*
#### Examples
```
@@ -248,9 +340,9 @@ root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
```
-Use the [strict mode](#strict) but ignore notes:
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
```
-user:password@/dbname?strict=true&sql_notes=false
+user:password@/dbname?sql_mode=TRADITIONAL
```
TCP via IPv6:
@@ -263,11 +355,16 @@ TCP on a remote host, e.g. Amazon RDS:
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
```
-Google Cloud SQL on App Engine:
+Google Cloud SQL on App Engine (First Generation MySQL Server):
```
user@cloudsql(project-id:instance-name)/dbname
```
+Google Cloud SQL on App Engine (Second Generation MySQL Server):
+```
+user@cloudsql(project-id:regionname:instance-name)/dbname
+```
+
TCP using default port (3306) on localhost:
```
user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
@@ -291,7 +388,7 @@ import "github.com/go-sql-driver/mysql"
Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
-To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::<name>` then.
+To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::<name>` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
@@ -334,9 +431,9 @@ Mozilla summarizes the license scope as follows:
That means:
- * You can **use** the **unchanged** source code both in private as also commercial
- * You **needn't publish** the source code of your library as long the files licensed under the MPL 2.0 are **unchanged**
- * You **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
+ * You can **use** the **unchanged** source code both in private and commercially
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
diff --git a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
index d72a4183f..7da833a2a 100644
--- a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
+++ b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
@@ -11,10 +11,13 @@ package mysql
import (
"bytes"
"database/sql"
+ "database/sql/driver"
+ "math"
"strings"
"sync"
"sync/atomic"
"testing"
+ "time"
)
type TB testing.B
@@ -45,7 +48,11 @@ func initDB(b *testing.B, queries ...string) *sql.DB {
db := tb.checkDB(sql.Open("mysql", dsn))
for _, query := range queries {
if _, err := db.Exec(query); err != nil {
- b.Fatalf("Error on %q: %v", query, err)
+ if w, ok := err.(MySQLWarnings); ok {
+ b.Logf("warning on %q: %v", query, w)
+ } else {
+ b.Fatalf("error on %q: %v", query, err)
+ }
}
}
return db
@@ -206,3 +213,34 @@ func BenchmarkRoundtripBin(b *testing.B) {
rows.Close()
}
}
+
+func BenchmarkInterpolation(b *testing.B) {
+ mc := &mysqlConn{
+ cfg: &Config{
+ InterpolateParams: true,
+ Loc: time.UTC,
+ },
+ maxAllowedPacket: maxPacketSize,
+ maxWriteSize: maxPacketSize - 1,
+ buf: newBuffer(nil),
+ }
+
+ args := []driver.Value{
+ int64(42424242),
+ float64(math.Pi),
+ false,
+ time.Unix(1423411542, 807015000),
+ []byte("bytes containing special chars ' \" \a \x00"),
+ "string containing special chars ' \" \a \x00",
+ }
+ q := "SELECT ?, ?, ?, ?, ?, ?"
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := mc.interpolateParams(q, args)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
index 794ac3099..2001feacd 100644
--- a/vendor/github.com/go-sql-driver/mysql/buffer.go
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -8,7 +8,11 @@
package mysql
-import "io"
+import (
+ "io"
+ "net"
+ "time"
+)
const defaultBufSize = 4096
@@ -18,25 +22,28 @@ const defaultBufSize = 4096
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
// Also highly optimized for this particular use case.
type buffer struct {
- buf []byte
- rd io.Reader
- idx int
- length int
+ buf []byte
+ nc net.Conn
+ idx int
+ length int
+ timeout time.Duration
}
-func newBuffer(rd io.Reader) buffer {
+func newBuffer(nc net.Conn) buffer {
var b [defaultBufSize]byte
return buffer{
buf: b[:],
- rd: rd,
+ nc: nc,
}
}
// fill reads into the buffer until at least _need_ bytes are in it
func (b *buffer) fill(need int) error {
+ n := b.length
+
// move existing data to the beginning
- if b.length > 0 && b.idx > 0 {
- copy(b.buf[0:b.length], b.buf[b.idx:])
+ if n > 0 && b.idx > 0 {
+ copy(b.buf[0:n], b.buf[b.idx:])
}
// grow buffer if necessary
@@ -52,19 +59,33 @@ func (b *buffer) fill(need int) error {
b.idx = 0
for {
- n, err := b.rd.Read(b.buf[b.length:])
- b.length += n
+ if b.timeout > 0 {
+ if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
+ return err
+ }
+ }
- if err == nil {
- if b.length < need {
+ nn, err := b.nc.Read(b.buf[n:])
+ n += nn
+
+ switch err {
+ case nil:
+ if n < need {
continue
}
+ b.length = n
return nil
+
+ case io.EOF:
+ if n >= need {
+ b.length = n
+ return nil
+ }
+ return io.ErrUnexpectedEOF
+
+ default:
+ return err
}
- if b.length >= need && err == io.EOF {
- return nil
- }
- return err
}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
index aabe0055d..82079cfb9 100644
--- a/vendor/github.com/go-sql-driver/mysql/collations.go
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -8,7 +8,7 @@
package mysql
-const defaultCollation byte = 33 // utf8_general_ci
+const defaultCollation = "utf8_general_ci"
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
@@ -234,3 +234,17 @@ var collations = map[string]byte{
"utf8mb4_unicode_520_ci": 246,
"utf8mb4_vietnamese_ci": 247,
}
+
+// A blacklist of collations which are unsafe to interpolate parameters.
+// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
+var unsafeCollations = map[string]bool{
+ "big5_chinese_ci": true,
+ "sjis_japanese_ci": true,
+ "gbk_chinese_ci": true,
+ "big5_bin": true,
+ "gb2312_bin": true,
+ "gbk_bin": true,
+ "sjis_bin": true,
+ "cp932_japanese_ci": true,
+ "cp932_bin": true,
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index 04607296e..d82c728f3 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -9,10 +9,9 @@
package mysql
import (
- "crypto/tls"
"database/sql/driver"
- "errors"
"net"
+ "strconv"
"strings"
"time"
)
@@ -22,34 +21,20 @@ type mysqlConn struct {
netConn net.Conn
affectedRows uint64
insertId uint64
- cfg *config
- maxPacketAllowed int
+ cfg *Config
+ maxAllowedPacket int
maxWriteSize int
+ writeTimeout time.Duration
flags clientFlag
+ status statusFlag
sequence uint8
parseTime bool
strict bool
}
-type config struct {
- user string
- passwd string
- net string
- addr string
- dbname string
- params map[string]string
- loc *time.Location
- tls *tls.Config
- timeout time.Duration
- collation uint8
- allowAllFiles bool
- allowOldPasswords bool
- clientFoundRows bool
-}
-
// Handles parameters set in DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
- for param, val := range mc.cfg.params {
+ for param, val := range mc.cfg.Params {
switch param {
// Charset
case "charset":
@@ -65,27 +50,6 @@ func (mc *mysqlConn) handleParams() (err error) {
return
}
- // time.Time parsing
- case "parseTime":
- var isBool bool
- mc.parseTime, isBool = readBool(val)
- if !isBool {
- return errors.New("Invalid Bool value: " + val)
- }
-
- // Strict mode
- case "strict":
- var isBool bool
- mc.strict, isBool = readBool(val)
- if !isBool {
- return errors.New("Invalid Bool value: " + val)
- }
-
- // Compression
- case "compress":
- err = errors.New("Compression not implemented yet")
- return
-
// System Vars
default:
err = mc.exec("SET " + param + "=" + val + "")
@@ -115,20 +79,29 @@ func (mc *mysqlConn) Close() (err error) {
// Makes Close idempotent
if mc.netConn != nil {
err = mc.writeCommandPacket(comQuit)
- if err == nil {
- err = mc.netConn.Close()
- } else {
- mc.netConn.Close()
- }
- mc.netConn = nil
}
- mc.cfg = nil
- mc.buf.rd = nil
+ mc.cleanup()
return
}
+// Closes the network connection and unsets internal variables. Do not call this
+// function after successful authentication, call Close instead. This function
+// is called before auth or on auth failure because MySQL will have already
+// closed the network connection.
+func (mc *mysqlConn) cleanup() {
+ // Makes cleanup idempotent
+ if mc.netConn != nil {
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+ mc.netConn = nil
+ }
+ mc.cfg = nil
+ mc.buf.nc = nil
+}
+
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
if mc.netConn == nil {
errLog.Print(ErrInvalidConn)
@@ -161,28 +134,156 @@ func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
return stmt, err
}
+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+ // Number of ? should be same to len(args)
+ if strings.Count(query, "?") != len(args) {
+ return "", driver.ErrSkip
+ }
+
+ buf := mc.buf.takeCompleteBuffer()
+ if buf == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return "", driver.ErrBadConn
+ }
+ buf = buf[:0]
+ argPos := 0
+
+ for i := 0; i < len(query); i++ {
+ q := strings.IndexByte(query[i:], '?')
+ if q == -1 {
+ buf = append(buf, query[i:]...)
+ break
+ }
+ buf = append(buf, query[i:i+q]...)
+ i += q
+
+ arg := args[argPos]
+ argPos++
+
+ if arg == nil {
+ buf = append(buf, "NULL"...)
+ continue
+ }
+
+ switch v := arg.(type) {
+ case int64:
+ buf = strconv.AppendInt(buf, v, 10)
+ case float64:
+ buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+ case bool:
+ if v {
+ buf = append(buf, '1')
+ } else {
+ buf = append(buf, '0')
+ }
+ case time.Time:
+ if v.IsZero() {
+ buf = append(buf, "'0000-00-00'"...)
+ } else {
+ v := v.In(mc.cfg.Loc)
+ v = v.Add(time.Nanosecond * 500) // To round under microsecond
+ year := v.Year()
+ year100 := year / 100
+ year1 := year % 100
+ month := v.Month()
+ day := v.Day()
+ hour := v.Hour()
+ minute := v.Minute()
+ second := v.Second()
+ micro := v.Nanosecond() / 1000
+
+ buf = append(buf, []byte{
+ '\'',
+ digits10[year100], digits01[year100],
+ digits10[year1], digits01[year1],
+ '-',
+ digits10[month], digits01[month],
+ '-',
+ digits10[day], digits01[day],
+ ' ',
+ digits10[hour], digits01[hour],
+ ':',
+ digits10[minute], digits01[minute],
+ ':',
+ digits10[second], digits01[second],
+ }...)
+
+ if micro != 0 {
+ micro10000 := micro / 10000
+ micro100 := micro / 100 % 100
+ micro1 := micro % 100
+ buf = append(buf, []byte{
+ '.',
+ digits10[micro10000], digits01[micro10000],
+ digits10[micro100], digits01[micro100],
+ digits10[micro1], digits01[micro1],
+ }...)
+ }
+ buf = append(buf, '\'')
+ }
+ case []byte:
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, "_binary'"...)
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ }
+ case string:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeStringBackslash(buf, v)
+ } else {
+ buf = escapeStringQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ default:
+ return "", driver.ErrSkip
+ }
+
+ if len(buf)+4 > mc.maxAllowedPacket {
+ return "", driver.ErrSkip
+ }
+ }
+ if argPos != len(args) {
+ return "", driver.ErrSkip
+ }
+ return string(buf), nil
+}
+
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
if mc.netConn == nil {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
- if len(args) == 0 { // no args, fastpath
- mc.affectedRows = 0
- mc.insertId = 0
-
- err := mc.exec(query)
- if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, err
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
}
- return nil, err
+ // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ args = nil
}
+ mc.affectedRows = 0
+ mc.insertId = 0
- // with args, must use prepared stmt
- return nil, driver.ErrSkip
-
+ err := mc.exec(query)
+ if err == nil {
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, err
+ }
+ return nil, err
}
// Internal function to execute commands
@@ -211,29 +312,38 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
- if len(args) == 0 { // no args, fastpath
- // Send command
- err := mc.writeCommandPacketStr(comQuery, query)
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try client-side prepare to reduce roundtrip
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ args = nil
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comQuery, query)
+ if err == nil {
+ // Read Result
+ var resLen int
+ resLen, err = mc.readResultSetHeaderPacket()
if err == nil {
- // Read Result
- var resLen int
- resLen, err = mc.readResultSetHeaderPacket()
- if err == nil {
- rows := new(textRows)
- rows.mc = mc
-
- if resLen > 0 {
- // Columns
- rows.columns, err = mc.readColumns(resLen)
- }
- return rows, err
+ rows := new(textRows)
+ rows.mc = mc
+
+ if resLen == 0 {
+ // no columns, no more data
+ return emptyRows{}, nil
}
+ // Columns
+ rows.columns, err = mc.readColumns(resLen)
+ return rows, err
}
- return nil, err
}
-
- // with args, must use prepared stmt
- return nil, driver.ErrSkip
+ return nil, err
}
// Gets the value of the given MySQL System Variable
@@ -249,6 +359,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
if err == nil {
rows := new(textRows)
rows.mc = mc
+ rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
if resLen > 0 {
// Columns
diff --git a/vendor/github.com/go-sql-driver/mysql/connection_test.go b/vendor/github.com/go-sql-driver/mysql/connection_test.go
new file mode 100644
index 000000000..65325f101
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection_test.go
@@ -0,0 +1,67 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "testing"
+)
+
+func TestInterpolateParams(t *testing.T) {
+ mc := &mysqlConn{
+ buf: newBuffer(nil),
+ maxAllowedPacket: maxPacketSize,
+ cfg: &Config{
+ InterpolateParams: true,
+ },
+ }
+
+ q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42), "gopher"})
+ if err != nil {
+ t.Errorf("Expected err=nil, got %#v", err)
+ return
+ }
+ expected := `SELECT 42+'gopher'`
+ if q != expected {
+ t.Errorf("Expected: %q\nGot: %q", expected, q)
+ }
+}
+
+func TestInterpolateParamsTooManyPlaceholders(t *testing.T) {
+ mc := &mysqlConn{
+ buf: newBuffer(nil),
+ maxAllowedPacket: maxPacketSize,
+ cfg: &Config{
+ InterpolateParams: true,
+ },
+ }
+
+ q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42)})
+ if err != driver.ErrSkip {
+ t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
+ }
+}
+
+// We don't support placeholders in string literals for now.
+// https://github.com/go-sql-driver/mysql/pull/490
+func TestInterpolateParamsPlaceholderInString(t *testing.T) {
+ mc := &mysqlConn{
+ buf: newBuffer(nil),
+ maxAllowedPacket: maxPacketSize,
+ cfg: &Config{
+ InterpolateParams: true,
+ },
+ }
+
+ q, err := mc.interpolateParams("SELECT 'abc?xyz',?", []driver.Value{int64(42)})
+ // When InterpolateParams support string literal, this should return `"SELECT 'abc?xyz', 42`
+ if err != driver.ErrSkip {
+ t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
index 379eabec1..88cfff3fd 100644
--- a/vendor/github.com/go-sql-driver/mysql/const.go
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -11,7 +11,7 @@ package mysql
const (
minProtocolVersion byte = 10
maxPacketSize = 1<<24 - 1
- timeFormat = "2006-01-02 15:04:05"
+ timeFormat = "2006-01-02 15:04:05.999999"
)
// MySQL constants documentation:
@@ -24,6 +24,7 @@ const (
iERR byte = 0xff
)
+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
type clientFlag uint32
const (
@@ -45,6 +46,13 @@ const (
clientSecureConn
clientMultiStatements
clientMultiResults
+ clientPSMultiResults
+ clientPluginAuth
+ clientConnectAttrs
+ clientPluginAuthLenEncClientData
+ clientCanHandleExpiredPasswords
+ clientSessionTrack
+ clientDeprecateEOF
)
const (
@@ -68,7 +76,7 @@ const (
comBinlogDump
comTableDump
comConnectOut
- comRegiserSlave
+ comRegisterSlave
comStmtPrepare
comStmtExecute
comStmtSendLongData
@@ -78,6 +86,7 @@ const (
comStmtFetch
)
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
const (
fieldTypeDecimal byte = iota
fieldTypeTiny
@@ -98,7 +107,8 @@ const (
fieldTypeBit
)
const (
- fieldTypeNewDecimal byte = iota + 0xf6
+ fieldTypeJSON byte = iota + 0xf5
+ fieldTypeNewDecimal
fieldTypeEnum
fieldTypeSet
fieldTypeTinyBLOB
@@ -130,3 +140,24 @@ const (
flagUnknown3
flagUnknown4
)
+
+// http://dev.mysql.com/doc/internals/en/status-flags.html
+type statusFlag uint16
+
+const (
+ statusInTrans statusFlag = 1 << iota
+ statusInAutocommit
+ statusReserved // Not in documentation
+ statusMoreResultsExists
+ statusNoGoodIndexUsed
+ statusNoIndexUsed
+ statusCursorExists
+ statusLastRowSent
+ statusDbDropped
+ statusNoBackslashEscapes
+ statusMetadataChanged
+ statusQueryWasSlow
+ statusPsOutParams
+ statusInTransReadonly
+ statusSessionStateChanged
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
index c0375fe18..0022d1f1e 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+// Package mysql provides a MySQL driver for Go's database/sql package
//
// The driver should be used via the database/sql package:
//
@@ -22,7 +22,7 @@ import (
"net"
)
-// This struct is exported to make the driver directly accessible.
+// MySQLDriver is exported to make the driver directly accessible.
// In general the driver is used via the database/sql package.
type MySQLDriver struct{}
@@ -50,20 +50,22 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
// New mysqlConn
mc := &mysqlConn{
- maxPacketAllowed: maxPacketSize,
+ maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
}
- mc.cfg, err = parseDSN(dsn)
+ mc.cfg, err = ParseDSN(dsn)
if err != nil {
return nil, err
}
+ mc.parseTime = mc.cfg.ParseTime
+ mc.strict = mc.cfg.Strict
// Connect to Server
- if dial, ok := dials[mc.cfg.net]; ok {
- mc.netConn, err = dial(mc.cfg.addr)
+ if dial, ok := dials[mc.cfg.Net]; ok {
+ mc.netConn, err = dial(mc.cfg.Addr)
} else {
- nd := net.Dialer{Timeout: mc.cfg.timeout}
- mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr)
+ nd := net.Dialer{Timeout: mc.cfg.Timeout}
+ mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
}
if err != nil {
return nil, err
@@ -72,55 +74,54 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
// Enable TCP Keepalives on TCP connections
if tc, ok := mc.netConn.(*net.TCPConn); ok {
if err := tc.SetKeepAlive(true); err != nil {
- mc.Close()
+ // Don't send COM_QUIT before handshake.
+ mc.netConn.Close()
+ mc.netConn = nil
return nil, err
}
}
mc.buf = newBuffer(mc.netConn)
+ // Set I/O timeouts
+ mc.buf.timeout = mc.cfg.ReadTimeout
+ mc.writeTimeout = mc.cfg.WriteTimeout
+
// Reading Handshake Initialization Packet
cipher, err := mc.readInitPacket()
if err != nil {
- mc.Close()
+ mc.cleanup()
return nil, err
}
// Send Client Authentication Packet
if err = mc.writeAuthPacket(cipher); err != nil {
- mc.Close()
+ mc.cleanup()
return nil, err
}
- // Read Result Packet
- err = mc.readResultOK()
- if err != nil {
- // Retry with old authentication method, if allowed
- if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword {
- if err = mc.writeOldAuthPacket(cipher); err != nil {
- mc.Close()
- return nil, err
- }
- if err = mc.readResultOK(); err != nil {
- mc.Close()
- return nil, err
- }
- } else {
+ // Handle response to auth packet, switch methods if possible
+ if err = handleAuthResult(mc, cipher); err != nil {
+ // Authentication failed and MySQL has already closed the connection
+ // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
+ // Do not send COM_QUIT, just cleanup and return the error.
+ mc.cleanup()
+ return nil, err
+ }
+
+ if mc.cfg.MaxAllowedPacket > 0 {
+ mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+ } else {
+ // Get max allowed packet size
+ maxap, err := mc.getSystemVar("max_allowed_packet")
+ if err != nil {
mc.Close()
return nil, err
}
-
- }
-
- // Get max allowed packet size
- maxap, err := mc.getSystemVar("max_allowed_packet")
- if err != nil {
- mc.Close()
- return nil, err
+ mc.maxAllowedPacket = stringToInt(maxap) - 1
}
- mc.maxPacketAllowed = stringToInt(maxap) - 1
- if mc.maxPacketAllowed < maxPacketSize {
- mc.maxWriteSize = mc.maxPacketAllowed
+ if mc.maxAllowedPacket < maxPacketSize {
+ mc.maxWriteSize = mc.maxAllowedPacket
}
// Handle DSN Params
@@ -133,6 +134,50 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
return mc, nil
}
+func handleAuthResult(mc *mysqlConn, oldCipher []byte) error {
+ // Read Result Packet
+ cipher, err := mc.readResultOK()
+ if err == nil {
+ return nil // auth successful
+ }
+
+ if mc.cfg == nil {
+ return err // auth failed and retry not possible
+ }
+
+ // Retry auth if configured to do so.
+ if mc.cfg.AllowOldPasswords && err == ErrOldPassword {
+ // Retry with old authentication method. Note: there are edge cases
+ // where this should work but doesn't; this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if cipher == nil {
+ cipher = oldCipher
+ }
+
+ if err = mc.writeOldAuthPacket(cipher); err != nil {
+ return err
+ }
+ _, err = mc.readResultOK()
+ } else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
+ // Retry with clear text password for
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ if err = mc.writeClearAuthPacket(); err != nil {
+ return err
+ }
+ _, err = mc.readResultOK()
+ } else if mc.cfg.AllowNativePasswords && err == ErrNativePassword {
+ if err = mc.writeNativeAuthPacket(cipher); err != nil {
+ return err
+ }
+ _, err = mc.readResultOK()
+ }
+ return err
+}
+
func init() {
sql.Register("mysql", &MySQLDriver{})
}
diff --git a/vendor/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/go-sql-driver/mysql/driver_test.go
index ef5b371cf..78e68f5d0 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver_test.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver_test.go
@@ -9,12 +9,14 @@
package mysql
import (
+ "bytes"
"crypto/tls"
"database/sql"
"database/sql/driver"
"fmt"
"io"
"io/ioutil"
+ "log"
"net"
"net/url"
"os"
@@ -74,23 +76,75 @@ type DBTest struct {
db *sql.DB
}
+func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
+ if !available {
+ t.Skipf("MySQL server not running on %s", netAddr)
+ }
+
+ dsn += "&multiStatements=true"
+ var db *sql.DB
+ if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
+ db, err = sql.Open("mysql", dsn)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ defer db.Close()
+ }
+
+ dbt := &DBTest{t, db}
+ for _, test := range tests {
+ test(dbt)
+ dbt.db.Exec("DROP TABLE IF EXISTS test")
+ }
+}
+
func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
if !available {
- t.Skipf("MySQL-Server not running on %s", netAddr)
+ t.Skipf("MySQL server not running on %s", netAddr)
}
db, err := sql.Open("mysql", dsn)
if err != nil {
- t.Fatalf("Error connecting: %s", err.Error())
+ t.Fatalf("error connecting: %s", err.Error())
}
defer db.Close()
db.Exec("DROP TABLE IF EXISTS test")
+ dsn2 := dsn + "&interpolateParams=true"
+ var db2 *sql.DB
+ if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation {
+ db2, err = sql.Open("mysql", dsn2)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ defer db2.Close()
+ }
+
+ dsn3 := dsn + "&multiStatements=true"
+ var db3 *sql.DB
+ if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation {
+ db3, err = sql.Open("mysql", dsn3)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ defer db3.Close()
+ }
+
dbt := &DBTest{t, db}
+ dbt2 := &DBTest{t, db2}
+ dbt3 := &DBTest{t, db3}
for _, test := range tests {
test(dbt)
dbt.db.Exec("DROP TABLE IF EXISTS test")
+ if db2 != nil {
+ test(dbt2)
+ dbt2.db.Exec("DROP TABLE IF EXISTS test")
+ }
+ if db3 != nil {
+ test(dbt3)
+ dbt3.db.Exec("DROP TABLE IF EXISTS test")
+ }
}
}
@@ -98,13 +152,13 @@ func (dbt *DBTest) fail(method, query string, err error) {
if len(query) > 300 {
query = "[query too large to print]"
}
- dbt.Fatalf("Error on %s %s: %s", method, query, err.Error())
+ dbt.Fatalf("error on %s %s: %s", method, query, err.Error())
}
func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) {
res, err := dbt.db.Exec(query, args...)
if err != nil {
- dbt.fail("Exec", query, err)
+ dbt.fail("exec", query, err)
}
return res
}
@@ -112,11 +166,22 @@ func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result)
func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) {
rows, err := dbt.db.Query(query, args...)
if err != nil {
- dbt.fail("Query", query, err)
+ dbt.fail("query", query, err)
}
return rows
}
+func TestEmptyQuery(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ // just a comment, no query
+ rows := dbt.mustQuery("--")
+ // will hang before #255
+ if rows.Next() {
+ dbt.Errorf("next on rows must be false")
+ }
+ })
+}
+
func TestCRUD(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
// Create Table
@@ -136,7 +201,7 @@ func TestCRUD(t *testing.T) {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
}
if count != 1 {
- dbt.Fatalf("Expected 1 affected row, got %d", count)
+ dbt.Fatalf("expected 1 affected row, got %d", count)
}
id, err := res.LastInsertId()
@@ -144,7 +209,7 @@ func TestCRUD(t *testing.T) {
dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error())
}
if id != 0 {
- dbt.Fatalf("Expected InsertID 0, got %d", id)
+ dbt.Fatalf("expected InsertId 0, got %d", id)
}
// Read
@@ -169,7 +234,7 @@ func TestCRUD(t *testing.T) {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
}
if count != 1 {
- dbt.Fatalf("Expected 1 affected row, got %d", count)
+ dbt.Fatalf("expected 1 affected row, got %d", count)
}
// Check Update
@@ -194,7 +259,7 @@ func TestCRUD(t *testing.T) {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
}
if count != 1 {
- dbt.Fatalf("Expected 1 affected row, got %d", count)
+ dbt.Fatalf("expected 1 affected row, got %d", count)
}
// Check for unexpected rows
@@ -204,8 +269,52 @@ func TestCRUD(t *testing.T) {
dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
}
if count != 0 {
- dbt.Fatalf("Expected 0 affected row, got %d", count)
+ dbt.Fatalf("expected 0 affected row, got %d", count)
+ }
+ })
+}
+
+func TestMultiQuery(t *testing.T) {
+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+ // Create Table
+ dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ")
+
+ // Create Data
+ res := dbt.mustExec("INSERT INTO test VALUES (1, 1)")
+ count, err := res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 1 {
+ dbt.Fatalf("expected 1 affected row, got %d", count)
+ }
+
+ // Update
+ res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;")
+ count, err = res.RowsAffected()
+ if err != nil {
+ dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
+ }
+ if count != 1 {
+ dbt.Fatalf("expected 1 affected row, got %d", count)
+ }
+
+ // Read
+ var out int
+ rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;")
+ if rows.Next() {
+ rows.Scan(&out)
+ if 5 != out {
+ dbt.Errorf("5 != %d", out)
+ }
+
+ if rows.Next() {
+ dbt.Error("unexpected data")
+ }
+ } else {
+ dbt.Error("no data")
}
+
})
}
@@ -256,7 +365,7 @@ func TestInt(t *testing.T) {
})
}
-func TestFloat(t *testing.T) {
+func TestFloat32(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
types := [2]string{"FLOAT", "DOUBLE"}
in := float32(42.23)
@@ -279,6 +388,52 @@ func TestFloat(t *testing.T) {
})
}
+func TestFloat64(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ types := [2]string{"FLOAT", "DOUBLE"}
+ var expected float64 = 42.23
+ var out float64
+ var rows *sql.Rows
+ for _, v := range types {
+ dbt.mustExec("CREATE TABLE test (value " + v + ")")
+ dbt.mustExec("INSERT INTO test VALUES (42.23)")
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if expected != out {
+ dbt.Errorf("%s: %g != %g", v, expected, out)
+ }
+ } else {
+ dbt.Errorf("%s: no data", v)
+ }
+ dbt.mustExec("DROP TABLE IF EXISTS test")
+ }
+ })
+}
+
+func TestFloat64Placeholder(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ types := [2]string{"FLOAT", "DOUBLE"}
+ var expected float64 = 42.23
+ var out float64
+ var rows *sql.Rows
+ for _, v := range types {
+ dbt.mustExec("CREATE TABLE test (id int, value " + v + ")")
+ dbt.mustExec("INSERT INTO test VALUES (1, 42.23)")
+ rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1)
+ if rows.Next() {
+ rows.Scan(&out)
+ if expected != out {
+ dbt.Errorf("%s: %g != %g", v, expected, out)
+ }
+ } else {
+ dbt.Errorf("%s: no data", v)
+ }
+ dbt.mustExec("DROP TABLE IF EXISTS test")
+ }
+ })
+}
+
func TestString(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"}
@@ -327,97 +482,281 @@ func TestString(t *testing.T) {
})
}
-func TestDateTime(t *testing.T) {
- type testmode struct {
- selectSuffix string
- args []interface{}
+type timeTests struct {
+ dbtype string
+ tlayout string
+ tests []timeTest
+}
+
+type timeTest struct {
+ s string // leading "!": do not use t as value in queries
+ t time.Time
+}
+
+type timeMode byte
+
+func (t timeMode) String() string {
+ switch t {
+ case binaryString:
+ return "binary:string"
+ case binaryTime:
+ return "binary:time.Time"
+ case textString:
+ return "text:string"
+ }
+ panic("unsupported timeMode")
+}
+
+func (t timeMode) Binary() bool {
+ switch t {
+ case binaryString, binaryTime:
+ return true
}
- type timetest struct {
- in interface{}
- sOut string
- tOut time.Time
- tIsZero bool
+ return false
+}
+
+const (
+ binaryString timeMode = iota
+ binaryTime
+ textString
+)
+
+func (t timeTest) genQuery(dbtype string, mode timeMode) string {
+ var inner string
+ if mode.Binary() {
+ inner = "?"
+ } else {
+ inner = `"%s"`
}
- type tester func(dbt *DBTest, rows *sql.Rows,
- test *timetest, sqltype, resulttype, mode string)
- type setup struct {
- vartype string
- dsnSuffix string
- test tester
+ return `SELECT cast(` + inner + ` as ` + dbtype + `)`
+}
+
+func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
+ var rows *sql.Rows
+ query := t.genQuery(dbtype, mode)
+ switch mode {
+ case binaryString:
+ rows = dbt.mustQuery(query, t.s)
+ case binaryTime:
+ rows = dbt.mustQuery(query, t.t)
+ case textString:
+ query = fmt.Sprintf(query, t.s)
+ rows = dbt.mustQuery(query)
+ default:
+ panic("unsupported mode")
}
- var (
- modes = map[string]*testmode{
- "text": &testmode{},
- "binary": &testmode{" WHERE 1 = ?", []interface{}{1}},
- }
- timetests = map[string][]*timetest{
- "DATE": {
- {sDate, sDate, tDate, false},
- {sDate0, sDate0, tDate0, true},
- {tDate, sDate, tDate, false},
- {tDate0, sDate0, tDate0, true},
- },
- "DATETIME": {
- {sDateTime, sDateTime, tDateTime, false},
- {sDateTime0, sDateTime0, tDate0, true},
- {tDateTime, sDateTime, tDateTime, false},
- {tDate0, sDateTime0, tDate0, true},
- },
- }
- setups = []*setup{
- {"string", "&parseTime=false", func(
- dbt *DBTest, rows *sql.Rows, test *timetest, sqltype, resulttype, mode string) {
- var sOut string
- if err := rows.Scan(&sOut); err != nil {
- dbt.Errorf("%s (%s %s): %s", sqltype, resulttype, mode, err.Error())
- } else if test.sOut != sOut {
- dbt.Errorf("%s (%s %s): %s != %s", sqltype, resulttype, mode, test.sOut, sOut)
- }
- }},
- {"time.Time", "&parseTime=true", func(
- dbt *DBTest, rows *sql.Rows, test *timetest, sqltype, resulttype, mode string) {
- var tOut time.Time
- if err := rows.Scan(&tOut); err != nil {
- dbt.Errorf("%s (%s %s): %s", sqltype, resulttype, mode, err.Error())
- } else if test.tOut != tOut || test.tIsZero != tOut.IsZero() {
- dbt.Errorf("%s (%s %s): %s [%t] != %s [%t]", sqltype, resulttype, mode, test.tOut, test.tIsZero, tOut, tOut.IsZero())
- }
- }},
+ defer rows.Close()
+ var err error
+ if !rows.Next() {
+ err = rows.Err()
+ if err == nil {
+ err = fmt.Errorf("no data")
}
- )
+ dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
+ return
+ }
+ var dst interface{}
+ err = rows.Scan(&dst)
+ if err != nil {
+ dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
+ return
+ }
+ switch val := dst.(type) {
+ case []uint8:
+ str := string(val)
+ if str == t.s {
+ return
+ }
+ if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s {
+ // a fix mainly for TravisCI:
+ // accept full microsecond resolution in result for DATETIME columns
+ // where the binary protocol was used
+ return
+ }
+ dbt.Errorf("%s [%s] to string: expected %q, got %q",
+ dbtype, mode,
+ t.s, str,
+ )
+ case time.Time:
+ if val == t.t {
+ return
+ }
+ dbt.Errorf("%s [%s] to string: expected %q, got %q",
+ dbtype, mode,
+ t.s, val.Format(tlayout),
+ )
+ default:
+ fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t})
+ dbt.Errorf("%s [%s]: unhandled type %T (is '%v')",
+ dbtype, mode,
+ val, val,
+ )
+ }
+}
- var s *setup
- testTime := func(dbt *DBTest) {
- var rows *sql.Rows
- for sqltype, tests := range timetests {
- dbt.mustExec("CREATE TABLE test (value " + sqltype + ")")
- for _, test := range tests {
- for mode, q := range modes {
- dbt.mustExec("TRUNCATE test")
- dbt.mustExec("INSERT INTO test VALUES (?)", test.in)
- rows = dbt.mustQuery("SELECT value FROM test"+q.selectSuffix, q.args...)
- if rows.Next() {
- s.test(dbt, rows, test, sqltype, s.vartype, mode)
- } else {
- if err := rows.Err(); err != nil {
- dbt.Errorf("%s (%s %s): %s",
- sqltype, s.vartype, mode, err.Error())
- } else {
- dbt.Errorf("%s (%s %s): no data",
- sqltype, s.vartype, mode)
- }
+func TestDateTime(t *testing.T) {
+ afterTime := func(t time.Time, d string) time.Time {
+ dur, err := time.ParseDuration(d)
+ if err != nil {
+ panic(err)
+ }
+ return t.Add(dur)
+ }
+ // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests
+ format := "2006-01-02 15:04:05.999999"
+ t0 := time.Time{}
+ tstr0 := "0000-00-00 00:00:00.000000"
+ testcases := []timeTests{
+ {"DATE", format[:10], []timeTest{
+ {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)},
+ {t: t0, s: tstr0[:10]},
+ }},
+ {"DATETIME", format[:19], []timeTest{
+ {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
+ {t: t0, s: tstr0[:19]},
+ }},
+ {"DATETIME(0)", format[:21], []timeTest{
+ {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
+ {t: t0, s: tstr0[:19]},
+ }},
+ {"DATETIME(1)", format[:21], []timeTest{
+ {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)},
+ {t: t0, s: tstr0[:21]},
+ }},
+ {"DATETIME(6)", format, []timeTest{
+ {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)},
+ {t: t0, s: tstr0},
+ }},
+ {"TIME", format[11:19], []timeTest{
+ {t: afterTime(t0, "12345s")},
+ {s: "!-12:34:56"},
+ {s: "!-838:59:59"},
+ {s: "!838:59:59"},
+ {t: t0, s: tstr0[11:19]},
+ }},
+ {"TIME(0)", format[11:19], []timeTest{
+ {t: afterTime(t0, "12345s")},
+ {s: "!-12:34:56"},
+ {s: "!-838:59:59"},
+ {s: "!838:59:59"},
+ {t: t0, s: tstr0[11:19]},
+ }},
+ {"TIME(1)", format[11:21], []timeTest{
+ {t: afterTime(t0, "12345600ms")},
+ {s: "!-12:34:56.7"},
+ {s: "!-838:59:58.9"},
+ {s: "!838:59:58.9"},
+ {t: t0, s: tstr0[11:21]},
+ }},
+ {"TIME(6)", format[11:], []timeTest{
+ {t: afterTime(t0, "1234567890123000ns")},
+ {s: "!-12:34:56.789012"},
+ {s: "!-838:59:58.999999"},
+ {s: "!838:59:58.999999"},
+ {t: t0, s: tstr0[11:]},
+ }},
+ }
+ dsns := []string{
+ dsn + "&parseTime=true",
+ dsn + "&parseTime=false",
+ }
+ for _, testdsn := range dsns {
+ runTests(t, testdsn, func(dbt *DBTest) {
+ microsecsSupported := false
+ zeroDateSupported := false
+ var rows *sql.Rows
+ var err error
+ rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`)
+ if err == nil {
+ rows.Scan(&microsecsSupported)
+ rows.Close()
+ }
+ rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`)
+ if err == nil {
+ rows.Scan(&zeroDateSupported)
+ rows.Close()
+ }
+ for _, setups := range testcases {
+ if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" {
+ // skip fractional second tests if unsupported by server
+ continue
+ }
+ for _, setup := range setups.tests {
+ allowBinTime := true
+ if setup.s == "" {
+ // fill time string whereever Go can reliable produce it
+ setup.s = setup.t.Format(setups.tlayout)
+ } else if setup.s[0] == '!' {
+ // skip tests using setup.t as source in queries
+ allowBinTime = false
+ // fix setup.s - remove the "!"
+ setup.s = setup.s[1:]
+ }
+ if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] {
+ // skip disallowed 0000-00-00 date
+ continue
+ }
+ setup.run(dbt, setups.dbtype, setups.tlayout, textString)
+ setup.run(dbt, setups.dbtype, setups.tlayout, binaryString)
+ if allowBinTime {
+ setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime)
}
}
}
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
+ })
}
+}
- timeDsn := dsn + "&sql_mode=ALLOW_INVALID_DATES"
- for _, v := range setups {
- s = v
- runTests(t, timeDsn+s.dsnSuffix, testTime)
- }
+func TestTimestampMicros(t *testing.T) {
+ format := "2006-01-02 15:04:05.999999"
+ f0 := format[:19]
+ f1 := format[:21]
+ f6 := format[:26]
+ runTests(t, dsn, func(dbt *DBTest) {
+ // check if microseconds are supported.
+ // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width
+ // and not precision.
+ // Se last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html
+ microsecsSupported := false
+ if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil {
+ rows.Scan(&microsecsSupported)
+ rows.Close()
+ }
+ if !microsecsSupported {
+ // skip test
+ return
+ }
+ _, err := dbt.db.Exec(`
+ CREATE TABLE test (
+ value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `',
+ value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `',
+ value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `'
+ )`,
+ )
+ if err != nil {
+ dbt.Error(err)
+ }
+ defer dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6)
+ var res0, res1, res6 string
+ rows := dbt.mustQuery("SELECT * FROM test")
+ if !rows.Next() {
+ dbt.Errorf("test contained no selectable values")
+ }
+ err = rows.Scan(&res0, &res1, &res6)
+ if err != nil {
+ dbt.Error(err)
+ }
+ if res0 != f0 {
+ dbt.Errorf("expected %q, got %q", f0, res0)
+ }
+ if res1 != f1 {
+ dbt.Errorf("expected %q, got %q", f1, res1)
+ }
+ if res6 != f6 {
+ dbt.Errorf("expected %q, got %q", f6, res6)
+ }
+ })
}
func TestNULL(t *testing.T) {
@@ -441,14 +780,14 @@ func TestNULL(t *testing.T) {
dbt.Fatal(err)
}
if nb.Valid {
- dbt.Error("Valid NullBool which should be invalid")
+ dbt.Error("valid NullBool which should be invalid")
}
// Valid
if err = nonNullStmt.QueryRow().Scan(&nb); err != nil {
dbt.Fatal(err)
}
if !nb.Valid {
- dbt.Error("Invalid NullBool which should be valid")
+ dbt.Error("invalid NullBool which should be valid")
} else if nb.Bool != true {
dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool)
}
@@ -460,16 +799,16 @@ func TestNULL(t *testing.T) {
dbt.Fatal(err)
}
if nf.Valid {
- dbt.Error("Valid NullFloat64 which should be invalid")
+ dbt.Error("valid NullFloat64 which should be invalid")
}
// Valid
if err = nonNullStmt.QueryRow().Scan(&nf); err != nil {
dbt.Fatal(err)
}
if !nf.Valid {
- dbt.Error("Invalid NullFloat64 which should be valid")
+ dbt.Error("invalid NullFloat64 which should be valid")
} else if nf.Float64 != float64(1) {
- dbt.Errorf("Unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64)
+ dbt.Errorf("unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64)
}
// NullInt64
@@ -479,16 +818,16 @@ func TestNULL(t *testing.T) {
dbt.Fatal(err)
}
if ni.Valid {
- dbt.Error("Valid NullInt64 which should be invalid")
+ dbt.Error("valid NullInt64 which should be invalid")
}
// Valid
if err = nonNullStmt.QueryRow().Scan(&ni); err != nil {
dbt.Fatal(err)
}
if !ni.Valid {
- dbt.Error("Invalid NullInt64 which should be valid")
+ dbt.Error("invalid NullInt64 which should be valid")
} else if ni.Int64 != int64(1) {
- dbt.Errorf("Unexpected NullInt64 value: %d (should be 1)", ni.Int64)
+ dbt.Errorf("unexpected NullInt64 value: %d (should be 1)", ni.Int64)
}
// NullString
@@ -498,16 +837,16 @@ func TestNULL(t *testing.T) {
dbt.Fatal(err)
}
if ns.Valid {
- dbt.Error("Valid NullString which should be invalid")
+ dbt.Error("valid NullString which should be invalid")
}
// Valid
if err = nonNullStmt.QueryRow().Scan(&ns); err != nil {
dbt.Fatal(err)
}
if !ns.Valid {
- dbt.Error("Invalid NullString which should be valid")
+ dbt.Error("invalid NullString which should be valid")
} else if ns.String != `1` {
- dbt.Error("Unexpected NullString value:" + ns.String + " (should be `1`)")
+ dbt.Error("unexpected NullString value:" + ns.String + " (should be `1`)")
}
// nil-bytes
@@ -517,14 +856,14 @@ func TestNULL(t *testing.T) {
dbt.Fatal(err)
}
if b != nil {
- dbt.Error("Non-nil []byte wich should be nil")
+ dbt.Error("non-nil []byte wich should be nil")
}
// Read non-nil
if err = nonNullStmt.QueryRow().Scan(&b); err != nil {
dbt.Fatal(err)
}
if b == nil {
- dbt.Error("Nil []byte wich should be non-nil")
+ dbt.Error("nil []byte wich should be non-nil")
}
// Insert nil
b = nil
@@ -533,7 +872,7 @@ func TestNULL(t *testing.T) {
dbt.Fatal(err)
}
if !success {
- dbt.Error("Inserting []byte(nil) as NULL failed")
+ dbt.Error("inserting []byte(nil) as NULL failed")
}
// Check input==output with input==nil
b = nil
@@ -541,7 +880,7 @@ func TestNULL(t *testing.T) {
dbt.Fatal(err)
}
if b != nil {
- dbt.Error("Non-nil echo from nil input")
+ dbt.Error("non-nil echo from nil input")
}
// Check input==output with input!=nil
b = []byte("")
@@ -570,6 +909,49 @@ func TestNULL(t *testing.T) {
})
}
+func TestUint64(t *testing.T) {
+ const (
+ u0 = uint64(0)
+ uall = ^u0
+ uhigh = uall >> 1
+ utop = ^uhigh
+ s0 = int64(0)
+ sall = ^s0
+ shigh = int64(uhigh)
+ stop = ^shigh
+ )
+ runTests(t, dsn, func(dbt *DBTest) {
+ stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ defer stmt.Close()
+ row := stmt.QueryRow(
+ u0, uhigh, utop, uall,
+ s0, shigh, stop, sall,
+ )
+
+ var ua, ub, uc, ud uint64
+ var sa, sb, sc, sd int64
+
+ err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ switch {
+ case ua != u0,
+ ub != uhigh,
+ uc != utop,
+ ud != uall,
+ sa != s0,
+ sb != shigh,
+ sc != stop,
+ sd != sall:
+ dbt.Fatal("unexpected result value")
+ }
+ })
+}
+
func TestLongData(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
var maxAllowedPacketSize int
@@ -660,7 +1042,7 @@ func TestLoadData(t *testing.T) {
dbt.Fatalf("%d != %d", i, id)
}
if values[i-1] != value {
- dbt.Fatalf("%s != %s", values[i-1], value)
+ dbt.Fatalf("%q != %q", values[i-1], value)
}
}
err = rows.Err()
@@ -669,7 +1051,7 @@ func TestLoadData(t *testing.T) {
}
if i != 4 {
- dbt.Fatalf("Rows count mismatch. Got %d, want 4", i)
+ dbt.Fatalf("rows count mismatch. Got %d, want 4", i)
}
}
file, err := ioutil.TempFile("", "gotest")
@@ -685,13 +1067,13 @@ func TestLoadData(t *testing.T) {
// Local File
RegisterLocalFile(file.Name())
- dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE '%q' INTO TABLE test", file.Name()))
+ dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
verifyLoadDataResult()
// negative test
_, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test")
if err == nil {
- dbt.Fatal("Load non-existent file didn't fail")
- } else if err.Error() != "Local File 'doesnotexist' is not registered. Use the DSN parameter 'allowAllFiles=true' to allow all files" {
+ dbt.Fatal("load non-existent file didn't fail")
+ } else if err.Error() != "local file 'doesnotexist' is not registered" {
dbt.Fatal(err.Error())
}
@@ -711,7 +1093,7 @@ func TestLoadData(t *testing.T) {
// negative test
_, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test")
if err == nil {
- dbt.Fatal("Load non-existent Reader didn't fail")
+ dbt.Fatal("load non-existent Reader didn't fail")
} else if err.Error() != "Reader 'doesnotexist' is not registered" {
dbt.Fatal(err.Error())
}
@@ -765,7 +1147,18 @@ func TestFoundRows(t *testing.T) {
func TestStrict(t *testing.T) {
// ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors
- relaxedDsn := dsn + "&sql_mode=ALLOW_INVALID_DATES"
+ relaxedDsn := dsn + "&sql_mode='ALLOW_INVALID_DATES,NO_AUTO_CREATE_USER'"
+ // make sure the MySQL version is recent enough with a separate connection
+ // before running the test
+ conn, err := MySQLDriver{}.Open(relaxedDsn)
+ if conn != nil {
+ conn.Close()
+ }
+ if me, ok := err.(*MySQLError); ok && me.Number == 1231 {
+ // Error 1231: Variable 'sql_mode' can't be set to the value of 'ALLOW_INVALID_DATES'
+ // => skip test, MySQL server version is too old
+ return
+ }
runTests(t, relaxedDsn, func(dbt *DBTest) {
dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))")
@@ -780,7 +1173,7 @@ func TestStrict(t *testing.T) {
var checkWarnings = func(err error, mode string, idx int) {
if err == nil {
- dbt.Errorf("Expected STRICT error on query [%s] %s", mode, queries[idx].in)
+ dbt.Errorf("expected STRICT error on query [%s] %s", mode, queries[idx].in)
}
if warnings, ok := err.(MySQLWarnings); ok {
@@ -789,18 +1182,18 @@ func TestStrict(t *testing.T) {
codes[i] = warnings[i].Code
}
if len(codes) != len(queries[idx].codes) {
- dbt.Errorf("Unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes)
+ dbt.Errorf("unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes)
}
for i := range warnings {
if codes[i] != queries[idx].codes[i] {
- dbt.Errorf("Unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes)
+ dbt.Errorf("unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes)
return
}
}
} else {
- dbt.Errorf("Unexpected error on query [%s] %s: %s", mode, queries[idx].in, err.Error())
+ dbt.Errorf("unexpected error on query [%s] %s: %s", mode, queries[idx].in, err.Error())
}
}
@@ -816,7 +1209,7 @@ func TestStrict(t *testing.T) {
for i := range queries {
stmt, err = dbt.db.Prepare(queries[i].in)
if err != nil {
- dbt.Errorf("Error on preparing query %s: %s", queries[i].in, err.Error())
+ dbt.Errorf("error on preparing query %s: %s", queries[i].in, err.Error())
}
_, err = stmt.Exec()
@@ -824,7 +1217,7 @@ func TestStrict(t *testing.T) {
err = stmt.Close()
if err != nil {
- dbt.Errorf("Error on closing stmt for query %s: %s", queries[i].in, err.Error())
+ dbt.Errorf("error on closing stmt for query %s: %s", queries[i].in, err.Error())
}
}
})
@@ -834,9 +1227,9 @@ func TestTLS(t *testing.T) {
tlsTest := func(dbt *DBTest) {
if err := dbt.db.Ping(); err != nil {
if err == ErrNoTLS {
- dbt.Skip("Server does not support TLS")
+ dbt.Skip("server does not support TLS")
} else {
- dbt.Fatalf("Error on Ping: %s", err.Error())
+ dbt.Fatalf("error on Ping: %s", err.Error())
}
}
@@ -849,7 +1242,7 @@ func TestTLS(t *testing.T) {
}
if value == nil {
- dbt.Fatal("No Cipher")
+ dbt.Fatal("no Cipher")
}
}
}
@@ -866,42 +1259,42 @@ func TestTLS(t *testing.T) {
func TestReuseClosedConnection(t *testing.T) {
// this test does not use sql.database, it uses the driver directly
if !available {
- t.Skipf("MySQL-Server not running on %s", netAddr)
+ t.Skipf("MySQL server not running on %s", netAddr)
}
md := &MySQLDriver{}
conn, err := md.Open(dsn)
if err != nil {
- t.Fatalf("Error connecting: %s", err.Error())
+ t.Fatalf("error connecting: %s", err.Error())
}
stmt, err := conn.Prepare("DO 1")
if err != nil {
- t.Fatalf("Error preparing statement: %s", err.Error())
+ t.Fatalf("error preparing statement: %s", err.Error())
}
_, err = stmt.Exec(nil)
if err != nil {
- t.Fatalf("Error executing statement: %s", err.Error())
+ t.Fatalf("error executing statement: %s", err.Error())
}
err = conn.Close()
if err != nil {
- t.Fatalf("Error closing connection: %s", err.Error())
+ t.Fatalf("error closing connection: %s", err.Error())
}
defer func() {
if err := recover(); err != nil {
- t.Errorf("Panic after reusing a closed connection: %v", err)
+ t.Errorf("panic after reusing a closed connection: %v", err)
}
}()
_, err = stmt.Exec(nil)
if err != nil && err != driver.ErrBadConn {
- t.Errorf("Unexpected error '%s', expected '%s'",
+ t.Errorf("unexpected error '%s', expected '%s'",
err.Error(), driver.ErrBadConn.Error())
}
}
func TestCharset(t *testing.T) {
if !available {
- t.Skipf("MySQL-Server not running on %s", netAddr)
+ t.Skipf("MySQL server not running on %s", netAddr)
}
mustSetCharset := func(charsetParam, expected string) {
@@ -910,14 +1303,14 @@ func TestCharset(t *testing.T) {
defer rows.Close()
if !rows.Next() {
- dbt.Fatalf("Error getting connection charset: %s", rows.Err())
+ dbt.Fatalf("error getting connection charset: %s", rows.Err())
}
var got string
rows.Scan(&got)
if got != expected {
- dbt.Fatalf("Expected connection charset %s but got %s", expected, got)
+ dbt.Fatalf("expected connection charset %s but got %s", expected, got)
}
})
}
@@ -939,14 +1332,14 @@ func TestFailingCharset(t *testing.T) {
_, err := dbt.db.Exec("SELECT 1")
if err == nil {
dbt.db.Close()
- t.Fatalf("Connection must not succeed without a valid charset")
+ t.Fatalf("connection must not succeed without a valid charset")
}
})
}
func TestCollation(t *testing.T) {
if !available {
- t.Skipf("MySQL-Server not running on %s", netAddr)
+ t.Skipf("MySQL server not running on %s", netAddr)
}
defaultCollation := "utf8_general_ci"
@@ -956,7 +1349,7 @@ func TestCollation(t *testing.T) {
"latin1_general_ci",
"binary",
"utf8_unicode_ci",
- "utf8mb4_general_ci",
+ "cp1257_bin",
}
for _, collation := range testCollations {
@@ -976,12 +1369,36 @@ func TestCollation(t *testing.T) {
}
if got != expected {
- dbt.Fatalf("Expected connection collation %s but got %s", expected, got)
+ dbt.Fatalf("expected connection collation %s but got %s", expected, got)
}
})
}
}
+func TestColumnsWithAlias(t *testing.T) {
+ runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) {
+ rows := dbt.mustQuery("SELECT 1 AS A")
+ defer rows.Close()
+ cols, _ := rows.Columns()
+ if len(cols) != 1 {
+ t.Fatalf("expected 1 column, got %d", len(cols))
+ }
+ if cols[0] != "A" {
+ t.Fatalf("expected column name \"A\", got \"%s\"", cols[0])
+ }
+ rows.Close()
+
+ rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A")
+ cols, _ = rows.Columns()
+ if len(cols) != 1 {
+ t.Fatalf("expected 1 column, got %d", len(cols))
+ }
+ if cols[0] != "A.one" {
+ t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0])
+ }
+ })
+}
+
func TestRawBytesResultExceedsBuffer(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
// defaultBufSize from buffer.go
@@ -1011,26 +1428,26 @@ func TestTimezoneConversion(t *testing.T) {
// Insert local time into database (should be converted)
usCentral, _ := time.LoadLocation("US/Central")
- now := time.Now().In(usCentral)
- dbt.mustExec("INSERT INTO test VALUE (?)", now)
+ reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral)
+ dbt.mustExec("INSERT INTO test VALUE (?)", reftime)
// Retrieve time from DB
rows := dbt.mustQuery("SELECT ts FROM test")
if !rows.Next() {
- dbt.Fatal("Didn't get any rows out")
+ dbt.Fatal("did not get any rows out")
}
- var nowDB time.Time
- err := rows.Scan(&nowDB)
+ var dbTime time.Time
+ err := rows.Scan(&dbTime)
if err != nil {
dbt.Fatal("Err", err)
}
// Check that dates match
- if now.Unix() != nowDB.Unix() {
- dbt.Errorf("Times don't match.\n")
- dbt.Errorf(" Now(%v)=%v\n", usCentral, now)
- dbt.Errorf(" Now(UTC)=%v\n", nowDB)
+ if reftime.Unix() != dbTime.Unix() {
+ dbt.Errorf("times do not match.\n")
+ dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime)
+ dbt.Errorf(" Now(UTC)=%v\n", dbTime)
}
}
@@ -1039,42 +1456,6 @@ func TestTimezoneConversion(t *testing.T) {
}
}
-// This tests for https://github.com/go-sql-driver/mysql/pull/139
-//
-// An extra (invisible) nil byte was being added to the beginning of positive
-// time strings.
-func TestTimeSign(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- var sTimes = []struct {
- value string
- fieldType string
- }{
- {"12:34:56", "TIME"},
- {"-12:34:56", "TIME"},
- // As described in http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html
- // they *should* work, but only in 5.6+.
- // { "12:34:56.789", "TIME(3)" },
- // { "-12:34:56.789", "TIME(3)" },
- }
-
- for _, sTime := range sTimes {
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- dbt.mustExec("CREATE TABLE test (id INT, time_field " + sTime.fieldType + ")")
- dbt.mustExec("INSERT INTO test (id, time_field) VALUES(1, '" + sTime.value + "')")
- rows := dbt.mustQuery("SELECT time_field FROM test WHERE id = ?", 1)
- if rows.Next() {
- var oTime string
- rows.Scan(&oTime)
- if oTime != sTime.value {
- dbt.Errorf(`time values differ: got %q, expected %q.`, oTime, sTime.value)
- }
- } else {
- dbt.Error("expecting at least one row.")
- }
- }
- })
-}
-
// Special cases
func TestRowsClose(t *testing.T) {
@@ -1090,7 +1471,7 @@ func TestRowsClose(t *testing.T) {
}
if rows.Next() {
- dbt.Fatal("Unexpected row after rows.Close()")
+ dbt.Fatal("unexpected row after rows.Close()")
}
err = rows.Err()
@@ -1122,7 +1503,7 @@ func TestCloseStmtBeforeRows(t *testing.T) {
}
if !rows.Next() {
- dbt.Fatal("Getting row failed")
+ dbt.Fatal("getting row failed")
} else {
err = rows.Err()
if err != nil {
@@ -1132,7 +1513,7 @@ func TestCloseStmtBeforeRows(t *testing.T) {
var out bool
err = rows.Scan(&out)
if err != nil {
- dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
}
if out != true {
dbt.Errorf("true != %t", out)
@@ -1168,7 +1549,7 @@ func TestStmtMultiRows(t *testing.T) {
// 1
if !rows1.Next() {
- dbt.Fatal("1st rows1.Next failed")
+ dbt.Fatal("first rows1.Next failed")
} else {
err = rows1.Err()
if err != nil {
@@ -1177,7 +1558,7 @@ func TestStmtMultiRows(t *testing.T) {
err = rows1.Scan(&out)
if err != nil {
- dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
}
if out != true {
dbt.Errorf("true != %t", out)
@@ -1185,7 +1566,7 @@ func TestStmtMultiRows(t *testing.T) {
}
if !rows2.Next() {
- dbt.Fatal("1st rows2.Next failed")
+ dbt.Fatal("first rows2.Next failed")
} else {
err = rows2.Err()
if err != nil {
@@ -1194,7 +1575,7 @@ func TestStmtMultiRows(t *testing.T) {
err = rows2.Scan(&out)
if err != nil {
- dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
}
if out != true {
dbt.Errorf("true != %t", out)
@@ -1203,7 +1584,7 @@ func TestStmtMultiRows(t *testing.T) {
// 2
if !rows1.Next() {
- dbt.Fatal("2nd rows1.Next failed")
+ dbt.Fatal("second rows1.Next failed")
} else {
err = rows1.Err()
if err != nil {
@@ -1212,14 +1593,14 @@ func TestStmtMultiRows(t *testing.T) {
err = rows1.Scan(&out)
if err != nil {
- dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
}
if out != false {
dbt.Errorf("false != %t", out)
}
if rows1.Next() {
- dbt.Fatal("Unexpected row on rows1")
+ dbt.Fatal("unexpected row on rows1")
}
err = rows1.Close()
if err != nil {
@@ -1228,7 +1609,7 @@ func TestStmtMultiRows(t *testing.T) {
}
if !rows2.Next() {
- dbt.Fatal("2nd rows2.Next failed")
+ dbt.Fatal("second rows2.Next failed")
} else {
err = rows2.Err()
if err != nil {
@@ -1237,14 +1618,14 @@ func TestStmtMultiRows(t *testing.T) {
err = rows2.Scan(&out)
if err != nil {
- dbt.Fatalf("Error on rows.Scan(): %s", err.Error())
+ dbt.Fatalf("error on rows.Scan(): %s", err.Error())
}
if out != false {
dbt.Errorf("false != %t", out)
}
if rows2.Next() {
- dbt.Fatal("Unexpected row on rows2")
+ dbt.Fatal("unexpected row on rows2")
}
err = rows2.Close()
if err != nil {
@@ -1289,7 +1670,7 @@ func TestConcurrent(t *testing.T) {
if err != nil {
dbt.Fatalf("%s", err.Error())
}
- dbt.Logf("Testing up to %d concurrent connections \r\n", max)
+ dbt.Logf("testing up to %d concurrent connections \r\n", max)
var remaining, succeeded int32 = int32(max), 0
@@ -1313,7 +1694,7 @@ func TestConcurrent(t *testing.T) {
if err != nil {
if err.Error() != "Error 1040: Too many connections" {
- fatalf("Error on Conn %d: %s", id, err.Error())
+ fatalf("error on conn %d: %s", id, err.Error())
}
return
}
@@ -1321,13 +1702,13 @@ func TestConcurrent(t *testing.T) {
// keep the connection busy until all connections are open
for remaining > 0 {
if _, err = tx.Exec("DO 1"); err != nil {
- fatalf("Error on Conn %d: %s", id, err.Error())
+ fatalf("error on conn %d: %s", id, err.Error())
return
}
}
if err = tx.Commit(); err != nil {
- fatalf("Error on Conn %d: %s", id, err.Error())
+ fatalf("error on conn %d: %s", id, err.Error())
return
}
@@ -1343,14 +1724,14 @@ func TestConcurrent(t *testing.T) {
dbt.Fatal(fatalError)
}
- dbt.Logf("Reached %d concurrent connections\r\n", succeeded)
+ dbt.Logf("reached %d concurrent connections\r\n", succeeded)
})
}
// Tests custom dial functions
func TestCustomDial(t *testing.T) {
if !available {
- t.Skipf("MySQL-Server not running on %s", netAddr)
+ t.Skipf("MySQL server not running on %s", netAddr)
}
// our custom dial function which justs wraps net.Dial here
@@ -1360,11 +1741,164 @@ func TestCustomDial(t *testing.T) {
db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname))
if err != nil {
- t.Fatalf("Error connecting: %s", err.Error())
+ t.Fatalf("error connecting: %s", err.Error())
}
defer db.Close()
if _, err = db.Exec("DO 1"); err != nil {
- t.Fatalf("Connection failed: %s", err.Error())
+ t.Fatalf("connection failed: %s", err.Error())
+ }
+}
+
+func TestSQLInjection(t *testing.T) {
+ createTest := func(arg string) func(dbt *DBTest) {
+ return func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ dbt.mustExec("INSERT INTO test VALUES (?)", 1)
+
+ var v int
+ // NULL can't be equal to anything, the idea here is to inject query so it returns row
+ // This test verifies that escapeQuotes and escapeBackslash are working properly
+ err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v)
+ if err == sql.ErrNoRows {
+ return // success, sql injection failed
+ } else if err == nil {
+ dbt.Errorf("sql injection successful with arg: %s", arg)
+ } else {
+ dbt.Errorf("error running query with arg: %s; err: %s", arg, err.Error())
+ }
+ }
+ }
+
+ dsns := []string{
+ dsn,
+ dsn + "&sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'",
+ }
+ for _, testdsn := range dsns {
+ runTests(t, testdsn, createTest("1 OR 1=1"))
+ runTests(t, testdsn, createTest("' OR '1'='1"))
+ }
+}
+
+// Test if inserted data is correctly retrieved after being escaped
+func TestInsertRetrieveEscapedData(t *testing.T) {
+ testData := func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v VARCHAR(255))")
+
+ // All sequences that are escaped by escapeQuotes and escapeBackslash
+ v := "foo \x00\n\r\x1a\"'\\"
+ dbt.mustExec("INSERT INTO test VALUES (?)", v)
+
+ var out string
+ err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out)
+ if err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+
+ if out != v {
+ dbt.Errorf("%q != %q", out, v)
+ }
+ }
+
+ dsns := []string{
+ dsn,
+ dsn + "&sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'",
}
+ for _, testdsn := range dsns {
+ runTests(t, testdsn, testData)
+ }
+}
+
+func TestUnixSocketAuthFail(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ // Save the current logger so we can restore it.
+ oldLogger := errLog
+
+ // Set a new logger so we can capture its output.
+ buffer := bytes.NewBuffer(make([]byte, 0, 64))
+ newLogger := log.New(buffer, "prefix: ", 0)
+ SetLogger(newLogger)
+
+ // Restore the logger.
+ defer SetLogger(oldLogger)
+
+ // Make a new DSN that uses the MySQL socket file and a bad password, which
+ // we can make by simply appending any character to the real password.
+ badPass := pass + "x"
+ socket := ""
+ if prot == "unix" {
+ socket = addr
+ } else {
+ // Get socket file from MySQL.
+ err := dbt.db.QueryRow("SELECT @@socket").Scan(&socket)
+ if err != nil {
+ t.Fatalf("error on SELECT @@socket: %s", err.Error())
+ }
+ }
+ t.Logf("socket: %s", socket)
+ badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s&strict=true", user, badPass, socket, dbname)
+ db, err := sql.Open("mysql", badDSN)
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ defer db.Close()
+
+ // Connect to MySQL for real. This will cause an auth failure.
+ err = db.Ping()
+ if err == nil {
+ t.Error("expected Ping() to return an error")
+ }
+
+ // The driver should not log anything.
+ if actual := buffer.String(); actual != "" {
+ t.Errorf("expected no output, got %q", actual)
+ }
+ })
+}
+
+// See Issue #422
+func TestInterruptBySignal(t *testing.T) {
+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec(`
+ DROP PROCEDURE IF EXISTS test_signal;
+ CREATE PROCEDURE test_signal(ret INT)
+ BEGIN
+ SELECT ret;
+ SIGNAL SQLSTATE
+ '45001'
+ SET
+ MESSAGE_TEXT = "an error",
+ MYSQL_ERRNO = 45001;
+ END
+ `)
+ defer dbt.mustExec("DROP PROCEDURE test_signal")
+
+ var val int
+
+ // text protocol
+ rows, err := dbt.db.Query("CALL test_signal(42)")
+ if err != nil {
+ dbt.Fatalf("error on text query: %s", err.Error())
+ }
+ for rows.Next() {
+ if err := rows.Scan(&val); err != nil {
+ dbt.Error(err)
+ } else if val != 42 {
+ dbt.Errorf("expected val to be 42")
+ }
+ }
+
+ // binary protocol
+ rows, err = dbt.db.Query("CALL test_signal(?)", 42)
+ if err != nil {
+ dbt.Fatalf("error on binary query: %s", err.Error())
+ }
+ for rows.Next() {
+ if err := rows.Scan(&val); err != nil {
+ dbt.Error(err)
+ } else if val != 42 {
+ dbt.Errorf("expected val to be 42")
+ }
+ }
+ })
}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
new file mode 100644
index 000000000..ac00dcedd
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -0,0 +1,548 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
+)
+
+// Config is a configuration parsed from a DSN string
+type Config struct {
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network type
+ Addr string // Network address (requires Net)
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ TLSConfig string // TLS configuration name
+ tls *tls.Config // TLS configuration
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+
+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowNativePasswords bool // Allows the native password authentication method
+ AllowOldPasswords bool // Allows the old insecure password method
+ ClientFoundRows bool // Return number of matching rows instead of rows changed
+ ColumnsWithAlias bool // Prepend table alias to column names
+ InterpolateParams bool // Interpolate placeholders into query string
+ MultiStatements bool // Allow multiple statements in one query
+ ParseTime bool // Parse time values to time.Time
+ Strict bool // Return warnings as errors
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
+func (cfg *Config) FormatDSN() string {
+ var buf bytes.Buffer
+
+ // [username[:password]@]
+ if len(cfg.User) > 0 {
+ buf.WriteString(cfg.User)
+ if len(cfg.Passwd) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(cfg.Passwd)
+ }
+ buf.WriteByte('@')
+ }
+
+ // [protocol[(address)]]
+ if len(cfg.Net) > 0 {
+ buf.WriteString(cfg.Net)
+ if len(cfg.Addr) > 0 {
+ buf.WriteByte('(')
+ buf.WriteString(cfg.Addr)
+ buf.WriteByte(')')
+ }
+ }
+
+ // /dbname
+ buf.WriteByte('/')
+ buf.WriteString(cfg.DBName)
+
+ // [?param1=value1&...&paramN=valueN]
+ hasParam := false
+
+ if cfg.AllowAllFiles {
+ hasParam = true
+ buf.WriteString("?allowAllFiles=true")
+ }
+
+ if cfg.AllowCleartextPasswords {
+ if hasParam {
+ buf.WriteString("&allowCleartextPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowCleartextPasswords=true")
+ }
+ }
+
+ if cfg.AllowNativePasswords {
+ if hasParam {
+ buf.WriteString("&allowNativePasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowNativePasswords=true")
+ }
+ }
+
+ if cfg.AllowOldPasswords {
+ if hasParam {
+ buf.WriteString("&allowOldPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowOldPasswords=true")
+ }
+ }
+
+ if cfg.ClientFoundRows {
+ if hasParam {
+ buf.WriteString("&clientFoundRows=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?clientFoundRows=true")
+ }
+ }
+
+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if hasParam {
+ buf.WriteString("&collation=")
+ } else {
+ hasParam = true
+ buf.WriteString("?collation=")
+ }
+ buf.WriteString(col)
+ }
+
+ if cfg.ColumnsWithAlias {
+ if hasParam {
+ buf.WriteString("&columnsWithAlias=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?columnsWithAlias=true")
+ }
+ }
+
+ if cfg.InterpolateParams {
+ if hasParam {
+ buf.WriteString("&interpolateParams=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?interpolateParams=true")
+ }
+ }
+
+ if cfg.Loc != time.UTC && cfg.Loc != nil {
+ if hasParam {
+ buf.WriteString("&loc=")
+ } else {
+ hasParam = true
+ buf.WriteString("?loc=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.Loc.String()))
+ }
+
+ if cfg.MultiStatements {
+ if hasParam {
+ buf.WriteString("&multiStatements=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?multiStatements=true")
+ }
+ }
+
+ if cfg.ParseTime {
+ if hasParam {
+ buf.WriteString("&parseTime=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?parseTime=true")
+ }
+ }
+
+ if cfg.ReadTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&readTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?readTimeout=")
+ }
+ buf.WriteString(cfg.ReadTimeout.String())
+ }
+
+ if cfg.Strict {
+ if hasParam {
+ buf.WriteString("&strict=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?strict=true")
+ }
+ }
+
+ if cfg.Timeout > 0 {
+ if hasParam {
+ buf.WriteString("&timeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?timeout=")
+ }
+ buf.WriteString(cfg.Timeout.String())
+ }
+
+ if len(cfg.TLSConfig) > 0 {
+ if hasParam {
+ buf.WriteString("&tls=")
+ } else {
+ hasParam = true
+ buf.WriteString("?tls=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.TLSConfig))
+ }
+
+ if cfg.WriteTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&writeTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?writeTimeout=")
+ }
+ buf.WriteString(cfg.WriteTimeout.String())
+ }
+
+ if cfg.MaxAllowedPacket > 0 {
+ if hasParam {
+ buf.WriteString("&maxAllowedPacket=")
+ } else {
+ hasParam = true
+ buf.WriteString("?maxAllowedPacket=")
+ }
+ buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
+
+ }
+
+ // other params
+ if cfg.Params != nil {
+ for param, value := range cfg.Params {
+ if hasParam {
+ buf.WriteByte('&')
+ } else {
+ hasParam = true
+ buf.WriteByte('?')
+ }
+
+ buf.WriteString(param)
+ buf.WriteByte('=')
+ buf.WriteString(url.QueryEscape(value))
+ }
+ }
+
+ return buf.String()
+}
+
+// ParseDSN parses the DSN string to a Config
+func ParseDSN(dsn string) (cfg *Config, err error) {
+ // New config with some default values
+ cfg = &Config{
+ Loc: time.UTC,
+ Collation: defaultCollation,
+ }
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...&paramN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' {
+ if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
+ return
+ }
+ break
+ }
+ }
+ cfg.DBName = dsn[i+1 : j]
+
+ break
+ }
+ }
+
+ if !foundSlash && len(dsn) > 0 {
+ return nil, errInvalidDSNNoSlash
+ }
+
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return nil, errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+
+ }
+
+ return
+}
+
+// parseDSNParams parses the DSN "query string"
+// Values must be url.QueryEscape'ed
+func parseDSNParams(cfg *Config, params string) (err error) {
+ for _, v := range strings.Split(params, "&") {
+ param := strings.SplitN(v, "=", 2)
+ if len(param) != 2 {
+ continue
+ }
+
+ // cfg params
+ switch value := param[1]; param[0] {
+
+ // Disable INFILE whitelist / enable all files
+ case "allowAllFiles":
+ var isBool bool
+ cfg.AllowAllFiles, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use cleartext authentication mode (MySQL 5.5.10+)
+ case "allowCleartextPasswords":
+ var isBool bool
+ cfg.AllowCleartextPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use native password authentication
+ case "allowNativePasswords":
+ var isBool bool
+ cfg.AllowNativePasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use old authentication mode (pre MySQL 4.1)
+ case "allowOldPasswords":
+ var isBool bool
+ cfg.AllowOldPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Switch "rowsAffected" mode
+ case "clientFoundRows":
+ var isBool bool
+ cfg.ClientFoundRows, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Collation
+ case "collation":
+ cfg.Collation = value
+ break
+
+ case "columnsWithAlias":
+ var isBool bool
+ cfg.ColumnsWithAlias, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Compression
+ case "compress":
+ return errors.New("compression not implemented yet")
+
+ // Enable client side placeholder substitution
+ case "interpolateParams":
+ var isBool bool
+ cfg.InterpolateParams, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Time Location
+ case "loc":
+ if value, err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ cfg.Loc, err = time.LoadLocation(value)
+ if err != nil {
+ return
+ }
+
+ // multiple statements in one query
+ case "multiStatements":
+ var isBool bool
+ cfg.MultiStatements, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // time.Time parsing
+ case "parseTime":
+ var isBool bool
+ cfg.ParseTime, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // I/O read Timeout
+ case "readTimeout":
+ cfg.ReadTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // Strict mode
+ case "strict":
+ var isBool bool
+ cfg.Strict, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Dial Timeout
+ case "timeout":
+ cfg.Timeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // TLS-Encryption
+ case "tls":
+ boolValue, isBool := readBool(value)
+ if isBool {
+ if boolValue {
+ cfg.TLSConfig = "true"
+ cfg.tls = &tls.Config{}
+ } else {
+ cfg.TLSConfig = "false"
+ }
+ } else if vl := strings.ToLower(value); vl == "skip-verify" {
+ cfg.TLSConfig = vl
+ cfg.tls = &tls.Config{InsecureSkipVerify: true}
+ } else {
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for TLS config name: %v", err)
+ }
+
+ if tlsConfig, ok := tlsConfigRegister[name]; ok {
+ if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ tlsConfig.ServerName = host
+ }
+ }
+
+ cfg.TLSConfig = name
+ cfg.tls = tlsConfig
+ } else {
+ return errors.New("invalid value / unknown config name: " + name)
+ }
+ }
+
+ // I/O write Timeout
+ case "writeTimeout":
+ cfg.WriteTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+ case "maxAllowedPacket":
+ cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+ if err != nil {
+ return
+ }
+ default:
+ // lazy init
+ if cfg.Params == nil {
+ cfg.Params = make(map[string]string)
+ }
+
+ if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn_test.go b/vendor/github.com/go-sql-driver/mysql/dsn_test.go
new file mode 100644
index 000000000..0693192ad
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/dsn_test.go
@@ -0,0 +1,231 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/url"
+ "reflect"
+ "testing"
+ "time"
+)
+
+var testDSNs = []struct {
+ in string
+ out *Config
+}{{
+ "username:password@protocol(address)/dbname?param=value",
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC},
+}, {
+ "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true},
+}, {
+ "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true, MultiStatements: true},
+}, {
+ "user@unix(/path/to/socket)/dbname?charset=utf8",
+ &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC},
+}, {
+ "user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "true"},
+}, {
+ "user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "skip-verify"},
+}, {
+ "user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216",
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216},
+}, {
+ "user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
+ &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.Local},
+}, {
+ "/dbname",
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC},
+}, {
+ "@/",
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
+}, {
+ "/",
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
+}, {
+ "",
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
+}, {
+ "user:p@/ssword@/",
+ &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
+}, {
+ "unix/?arg=%2Fsome%2Fpath.ext",
+ &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8_general_ci", Loc: time.UTC},
+}}
+
+func TestDSNParser(t *testing.T) {
+ for i, tst := range testDSNs {
+ cfg, err := ParseDSN(tst.in)
+ if err != nil {
+ t.Error(err.Error())
+ }
+
+ // pointer not static
+ cfg.tls = nil
+
+ if !reflect.DeepEqual(cfg, tst.out) {
+ t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
+ }
+ }
+}
+
+func TestDSNParserInvalid(t *testing.T) {
+ var invalidDSNs = []string{
+ "@net(addr/", // no closing brace
+ "@tcp(/", // no closing brace
+ "tcp(/", // no closing brace
+ "(/", // no closing brace
+ "net(addr)//", // unescaped
+ "User:pass@tcp(1.2.3.4:3306)", // no trailing slash
+ //"/dbname?arg=/some/unescaped/path",
+ }
+
+ for i, tst := range invalidDSNs {
+ if _, err := ParseDSN(tst); err == nil {
+ t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
+ }
+ }
+}
+
+func TestDSNReformat(t *testing.T) {
+ for i, tst := range testDSNs {
+ dsn1 := tst.in
+ cfg1, err := ParseDSN(dsn1)
+ if err != nil {
+ t.Error(err.Error())
+ continue
+ }
+ cfg1.tls = nil // pointer not static
+ res1 := fmt.Sprintf("%+v", cfg1)
+
+ dsn2 := cfg1.FormatDSN()
+ cfg2, err := ParseDSN(dsn2)
+ if err != nil {
+ t.Error(err.Error())
+ continue
+ }
+ cfg2.tls = nil // pointer not static
+ res2 := fmt.Sprintf("%+v", cfg2)
+
+ if res1 != res2 {
+ t.Errorf("%d. %q does not match %q", i, res2, res1)
+ }
+ }
+}
+
+func TestDSNWithCustomTLS(t *testing.T) {
+ baseDSN := "User:password@tcp(localhost:5555)/dbname?tls="
+ tlsCfg := tls.Config{}
+
+ RegisterTLSConfig("utils_test", &tlsCfg)
+
+ // Custom TLS is missing
+ tst := baseDSN + "invalid_tls"
+ cfg, err := ParseDSN(tst)
+ if err == nil {
+ t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg)
+ }
+
+ tst = baseDSN + "utils_test"
+
+ // Custom TLS with a server name
+ name := "foohost"
+ tlsCfg.ServerName = name
+ cfg, err = ParseDSN(tst)
+
+ if err != nil {
+ t.Error(err.Error())
+ } else if cfg.tls.ServerName != name {
+ t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst)
+ }
+
+ // Custom TLS without a server name
+ name = "localhost"
+ tlsCfg.ServerName = ""
+ cfg, err = ParseDSN(tst)
+
+ if err != nil {
+ t.Error(err.Error())
+ } else if cfg.tls.ServerName != name {
+ t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
+ }
+
+ DeregisterTLSConfig("utils_test")
+}
+
+func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
+ const configKey = "&%!:"
+ dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey)
+ name := "foohost"
+ tlsCfg := tls.Config{ServerName: name}
+
+ RegisterTLSConfig(configKey, &tlsCfg)
+
+ cfg, err := ParseDSN(dsn)
+
+ if err != nil {
+ t.Error(err.Error())
+ } else if cfg.tls.ServerName != name {
+ t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn)
+ }
+}
+
+func TestDSNUnsafeCollation(t *testing.T) {
+ _, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
+ if err != errInvalidDSNUnsafeCollation {
+ t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err)
+ }
+
+ _, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false")
+ if err != nil {
+ t.Errorf("expected %v, got %v", nil, err)
+ }
+
+ _, err = ParseDSN("/dbname?collation=gbk_chinese_ci")
+ if err != nil {
+ t.Errorf("expected %v, got %v", nil, err)
+ }
+
+ _, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true")
+ if err != nil {
+ t.Errorf("expected %v, got %v", nil, err)
+ }
+
+ _, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true")
+ if err != nil {
+ t.Errorf("expected %v, got %v", nil, err)
+ }
+
+ _, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
+ if err != nil {
+ t.Errorf("expected %v, got %v", nil, err)
+ }
+
+ _, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true")
+ if err != nil {
+ t.Errorf("expected %v, got %v", nil, err)
+ }
+}
+
+func BenchmarkParseDSN(b *testing.B) {
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ for _, tst := range testDSNs {
+ if _, err := ParseDSN(tst.in); err != nil {
+ b.Error(err.Error())
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
index 97d7b3996..857854e14 100644
--- a/vendor/github.com/go-sql-driver/mysql/errors.go
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -19,18 +19,21 @@ import (
// Various errors the driver might return. Can change between driver versions.
var (
- ErrInvalidConn = errors.New("Invalid Connection")
- ErrMalformPkt = errors.New("Malformed Packet")
- ErrNoTLS = errors.New("TLS encryption requested but server does not support TLS")
- ErrOldPassword = errors.New("This server only supports the insecure old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
- ErrOldProtocol = errors.New("MySQL-Server does not support required Protocol 41+")
- ErrPktSync = errors.New("Commands out of sync. You can't run this command now")
- ErrPktSyncMul = errors.New("Commands out of sync. Did you run multiple statements at once?")
- ErrPktTooLarge = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.")
- ErrBusyBuffer = errors.New("Busy buffer")
+ ErrInvalidConn = errors.New("invalid connection")
+ ErrMalformPkt = errors.New("malformed packet")
+ ErrNoTLS = errors.New("TLS requested but server does not support TLS")
+ ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
+ ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
+ ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
+ ErrPktSync = errors.New("commands out of sync. You can't run this command now")
+ ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
+ ErrBusyBuffer = errors.New("busy buffer")
)
-var errLog Logger = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile)
+var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
// Logger is used to log critical error messages.
type Logger interface {
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
index 121a04c71..547357cfa 100644
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -13,11 +13,14 @@ import (
"io"
"os"
"strings"
+ "sync"
)
var (
- fileRegister map[string]bool
- readerRegister map[string]func() io.Reader
+ fileRegister map[string]bool
+ fileRegisterLock sync.RWMutex
+ readerRegister map[string]func() io.Reader
+ readerRegisterLock sync.RWMutex
)
// RegisterLocalFile adds the given file to the file whitelist,
@@ -32,17 +35,21 @@ var (
// ...
//
func RegisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
// lazy map init
if fileRegister == nil {
fileRegister = make(map[string]bool)
}
fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegisterLock.Unlock()
}
// DeregisterLocalFile removes the given filepath from the whitelist.
func DeregisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
delete(fileRegister, strings.Trim(filePath, `"`))
+ fileRegisterLock.Unlock()
}
// RegisterReaderHandler registers a handler function which is used
@@ -61,18 +68,22 @@ func DeregisterLocalFile(filePath string) {
// ...
//
func RegisterReaderHandler(name string, handler func() io.Reader) {
+ readerRegisterLock.Lock()
// lazy map init
if readerRegister == nil {
readerRegister = make(map[string]func() io.Reader)
}
readerRegister[name] = handler
+ readerRegisterLock.Unlock()
}
// DeregisterReaderHandler removes the ReaderHandler function with
// the given name from the registry.
func DeregisterReaderHandler(name string) {
+ readerRegisterLock.Lock()
delete(readerRegister, name)
+ readerRegisterLock.Unlock()
}
func deferredClose(err *error, closer io.Closer) {
@@ -85,14 +96,22 @@ func deferredClose(err *error, closer io.Closer) {
func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
var rdr io.Reader
var data []byte
+ packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
+ if mc.maxWriteSize < packetSize {
+ packetSize = mc.maxWriteSize
+ }
+
+ if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
+ // The server might return an an absolute path. See issue #355.
+ name = name[idx+8:]
+
+ readerRegisterLock.RLock()
+ handler, inMap := readerRegister[name]
+ readerRegisterLock.RUnlock()
- if strings.HasPrefix(name, "Reader::") { // io.Reader
- name = name[8:]
- if handler, inMap := readerRegister[name]; inMap {
+ if inMap {
rdr = handler()
if rdr != nil {
- data = make([]byte, 4+mc.maxWriteSize)
-
if cl, ok := rdr.(io.Closer); ok {
defer deferredClose(&err, cl)
}
@@ -104,7 +123,10 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
}
} else { // File
name = strings.Trim(name, `"`)
- if mc.cfg.allowAllFiles || fileRegister[name] {
+ fileRegisterLock.RLock()
+ fr := fileRegister[name]
+ fileRegisterLock.RUnlock()
+ if mc.cfg.AllowAllFiles || fr {
var file *os.File
var fi os.FileInfo
@@ -114,22 +136,19 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
// get file size
if fi, err = file.Stat(); err == nil {
rdr = file
- if fileSize := int(fi.Size()); fileSize <= mc.maxWriteSize {
- data = make([]byte, 4+fileSize)
- } else if fileSize <= mc.maxPacketAllowed {
- data = make([]byte, 4+mc.maxWriteSize)
- } else {
- err = fmt.Errorf("Local File '%s' too large: Size: %d, Max: %d", name, fileSize, mc.maxPacketAllowed)
+ if fileSize := int(fi.Size()); fileSize < packetSize {
+ packetSize = fileSize
}
}
}
} else {
- err = fmt.Errorf("Local File '%s' is not registered. Use the DSN parameter 'allowAllFiles=true' to allow all files", name)
+ err = fmt.Errorf("local file '%s' is not registered", name)
}
}
// send content packets
if err == nil {
+ data := make([]byte, 4+packetSize)
var n int
for err == nil {
n, err = rdr.Read(data[4:])
@@ -154,9 +173,10 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
// read OK packet
if err == nil {
- return mc.readResultOK()
- } else {
- mc.readPacket()
+ _, err = mc.readResultOK()
+ return err
}
+
+ mc.readPacket()
return err
}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index 618098146..aafe9793e 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -13,6 +13,7 @@ import (
"crypto/tls"
"database/sql/driver"
"encoding/binary"
+ "errors"
"fmt"
"io"
"math"
@@ -24,9 +25,9 @@ import (
// Read packet to buffer 'data'
func (mc *mysqlConn) readPacket() ([]byte, error) {
- var payload []byte
+ var prevData []byte
for {
- // Read packet header
+ // read packet header
data, err := mc.buf.readNext(4)
if err != nil {
errLog.Print(err)
@@ -34,26 +35,32 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
return nil, driver.ErrBadConn
}
- // Packet Length [24 bit]
+ // packet length [24 bit]
pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
- if pktLen < 1 {
- errLog.Print(ErrMalformPkt)
- mc.Close()
- return nil, driver.ErrBadConn
- }
-
- // Check Packet Sync [8 bit]
+ // check packet sync [8 bit]
if data[3] != mc.sequence {
if data[3] > mc.sequence {
return nil, ErrPktSyncMul
- } else {
- return nil, ErrPktSync
}
+ return nil, ErrPktSync
}
mc.sequence++
- // Read packet body [pktLen bytes]
+ // packets with length 0 terminate a previous packet which is a
+ // multiple of (2^24)−1 bytes long
+ if pktLen == 0 {
+ // there was no previous packet
+ if prevData == nil {
+ errLog.Print(ErrMalformPkt)
+ mc.Close()
+ return nil, driver.ErrBadConn
+ }
+
+ return prevData, nil
+ }
+
+ // read packet body [pktLen bytes]
data, err = mc.buf.readNext(pktLen)
if err != nil {
errLog.Print(err)
@@ -61,18 +68,17 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
return nil, driver.ErrBadConn
}
- isLastPacket := (pktLen < maxPacketSize)
+ // return data if this was the last packet
+ if pktLen < maxPacketSize {
+ // zero allocations for non-split packets
+ if prevData == nil {
+ return data, nil
+ }
- // Zero allocations for non-splitting packets
- if isLastPacket && payload == nil {
- return data, nil
+ return append(prevData, data...), nil
}
- payload = append(payload, data...)
-
- if isLastPacket {
- return payload, nil
- }
+ prevData = append(prevData, data...)
}
}
@@ -80,7 +86,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
func (mc *mysqlConn) writePacket(data []byte) error {
pktLen := len(data) - 4
- if pktLen > mc.maxPacketAllowed {
+ if pktLen > mc.maxAllowedPacket {
return ErrPktTooLarge
}
@@ -100,6 +106,12 @@ func (mc *mysqlConn) writePacket(data []byte) error {
data[3] = mc.sequence
// Write packet
+ if mc.writeTimeout > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+ return err
+ }
+ }
+
n, err := mc.netConn.Write(data[:4+size])
if err == nil && n == 4+size {
mc.sequence++
@@ -140,7 +152,7 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
// protocol version [1 byte]
if data[0] < minProtocolVersion {
return nil, fmt.Errorf(
- "Unsupported MySQL Protocol Version %d. Protocol Version %d or higher is required",
+ "unsupported protocol version %d. Version %d or higher is required",
data[0],
minProtocolVersion,
)
@@ -196,7 +208,11 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
// return
//}
//return ErrMalformPkt
- return cipher, nil
+
+ // make a memory safe copy of the cipher slice
+ var b [20]byte
+ copy(b[:], cipher)
+ return b[:], nil
}
// make a memory safe copy of the cipher slice
@@ -214,9 +230,11 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
clientLongPassword |
clientTransactions |
clientLocalFiles |
+ clientPluginAuth |
+ clientMultiResults |
mc.flags&clientLongFlag
- if mc.cfg.clientFoundRows {
+ if mc.cfg.ClientFoundRows {
clientFlags |= clientFoundRows
}
@@ -225,13 +243,17 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
clientFlags |= clientSSL
}
+ if mc.cfg.MultiStatements {
+ clientFlags |= clientMultiStatements
+ }
+
// User Password
- scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.passwd))
+ scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
- pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.user) + 1 + 1 + len(scrambleBuff)
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1
// To specify a db name
- if n := len(mc.cfg.dbname); n > 0 {
+ if n := len(mc.cfg.DBName); n > 0 {
clientFlags |= clientConnectWithDB
pktLen += n + 1
}
@@ -257,7 +279,14 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
data[11] = 0x00
// Charset [1 byte]
- data[12] = mc.cfg.collation
+ var found bool
+ data[12], found = collations[mc.cfg.Collation]
+ if !found {
+ // Note possibility for false negatives:
+ // could be triggered although the collation is valid if the
+ // collations map does not contain entries the server supports.
+ return errors.New("unknown collation")
+ }
// SSL Connection Request Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
@@ -273,15 +302,18 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
return err
}
mc.netConn = tlsConn
- mc.buf.rd = tlsConn
+ mc.buf.nc = tlsConn
}
// Filler [23 bytes] (all 0x00)
- pos := 13 + 23
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
// User [null terminated string]
- if len(mc.cfg.user) > 0 {
- pos += copy(data[pos:], mc.cfg.user)
+ if len(mc.cfg.User) > 0 {
+ pos += copy(data[pos:], mc.cfg.User)
}
data[pos] = 0x00
pos++
@@ -291,11 +323,16 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
pos += 1 + copy(data[pos+1:], scrambleBuff)
// Databasename [null terminated string]
- if len(mc.cfg.dbname) > 0 {
- pos += copy(data[pos:], mc.cfg.dbname)
+ if len(mc.cfg.DBName) > 0 {
+ pos += copy(data[pos:], mc.cfg.DBName)
data[pos] = 0x00
+ pos++
}
+ // Assume native client during response
+ pos += copy(data[pos:], "mysql_native_password")
+ data[pos] = 0x00
+
// Send Auth packet
return mc.writePacket(data)
}
@@ -304,9 +341,9 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
// User password
- scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.passwd))
+ scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.Passwd))
- // Calculate the packet lenght and add a tailing 0
+ // Calculate the packet length and add a tailing 0
pktLen := len(scrambleBuff) + 1
data := mc.buf.takeSmallBuffer(4 + pktLen)
if data == nil {
@@ -322,6 +359,45 @@ func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
return mc.writePacket(data)
}
+// Client clear text authentication packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeClearAuthPacket() error {
+ // Calculate the packet length and add a tailing 0
+ pktLen := len(mc.cfg.Passwd) + 1
+ data := mc.buf.takeSmallBuffer(4 + pktLen)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // Add the clear password [null terminated string]
+ copy(data[4:], mc.cfg.Passwd)
+ data[4+pktLen-1] = 0x00
+
+ return mc.writePacket(data)
+}
+
+// Native password authentication method
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
+ scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
+
+ // Calculate the packet length and add a tailing 0
+ pktLen := len(scrambleBuff)
+ data := mc.buf.takeSmallBuffer(4 + pktLen)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // Add the scramble
+ copy(data[4:], scrambleBuff)
+
+ return mc.writePacket(data)
+}
+
/******************************************************************************
* Command Packets *
******************************************************************************/
@@ -395,24 +471,43 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
******************************************************************************/
// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() error {
+func (mc *mysqlConn) readResultOK() ([]byte, error) {
data, err := mc.readPacket()
if err == nil {
// packet indicator
switch data[0] {
case iOK:
- return mc.handleOkPacket(data)
+ return nil, mc.handleOkPacket(data)
case iEOF:
- // someone is using old_passwords
- return ErrOldPassword
+ if len(data) > 1 {
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ plugin := string(data[1:pluginEndIndex])
+ cipher := data[pluginEndIndex+1 : len(data)-1]
+
+ if plugin == "mysql_old_password" {
+ // using old_passwords
+ return cipher, ErrOldPassword
+ } else if plugin == "mysql_clear_password" {
+ // using clear text password
+ return cipher, ErrCleartextPassword
+ } else if plugin == "mysql_native_password" {
+ // using mysql default authentication method
+ return cipher, ErrNativePassword
+ } else {
+ return cipher, ErrUnknownPlugin
+ }
+ } else {
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, ErrOldPassword
+ }
default: // Error otherwise
- return mc.handleErrorPacket(data)
+ return nil, mc.handleErrorPacket(data)
}
}
- return err
+ return nil, err
}
// Result Set Header Packet
@@ -470,6 +565,10 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error {
}
}
+func readStatus(b []byte) statusFlag {
+ return statusFlag(b[0]) | statusFlag(b[1])<<8
+}
+
// Ok Packet
// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
func (mc *mysqlConn) handleOkPacket(data []byte) error {
@@ -484,17 +583,21 @@ func (mc *mysqlConn) handleOkPacket(data []byte) error {
mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
// server_status [2 bytes]
+ mc.status = readStatus(data[1+n+m : 1+n+m+2])
+ if err := mc.discardResults(); err != nil {
+ return err
+ }
// warning count [2 bytes]
if !mc.strict {
return nil
- } else {
- pos := 1 + n + m + 2
- if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 {
- return mc.getWarnings()
- }
- return nil
}
+
+ pos := 1 + n + m + 2
+ if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 {
+ return mc.getWarnings()
+ }
+ return nil
}
// Read Packets as Field Packets until EOF-Packet or an Error appears
@@ -513,7 +616,7 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
if i == count {
return columns, nil
}
- return nil, fmt.Errorf("ColumnsCount mismatch n:%d len:%d", count, len(columns))
+ return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
}
// Catalog
@@ -530,11 +633,20 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
pos += n
// Table [len coded string]
- n, err = skipLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
+ if mc.cfg.ColumnsWithAlias {
+ tableName, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ columns[i].tableName = string(tableName)
+ } else {
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
}
- pos += n
// Original table [len coded string]
n, err = skipLengthEncodedString(data[pos:])
@@ -557,20 +669,21 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
return nil, err
}
- // Filler [1 byte]
- // Charset [16 bit uint]
- // Length [32 bit uint]
+ // Filler [uint8]
+ // Charset [charset, collation uint8]
+ // Length [uint32]
pos += n + 1 + 2 + 4
- // Field type [byte]
+ // Field type [uint8]
columns[i].fieldType = data[pos]
pos++
- // Flags [16 bit uint]
+ // Flags [uint16]
columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
- //pos += 2
+ pos += 2
- // Decimals [8 bit uint]
+ // Decimals [uint8]
+ columns[i].decimals = data[pos]
//pos++
// Default value [len coded binary]
@@ -592,7 +705,21 @@ func (rows *textRows) readRow(dest []driver.Value) error {
// EOF Packet
if data[0] == iEOF && len(data) == 5 {
- return io.EOF
+ // server_status [2 bytes]
+ rows.mc.status = readStatus(data[3:])
+ err = rows.mc.discardResults()
+ if err == nil {
+ err = io.EOF
+ } else {
+ // connection unusable
+ rows.mc.Close()
+ }
+ rows.mc = nil
+ return err
+ }
+ if data[0] == iERR {
+ rows.mc = nil
+ return mc.handleErrorPacket(data)
}
// RowSet Packet
@@ -614,7 +741,7 @@ func (rows *textRows) readRow(dest []driver.Value) error {
fieldTypeDate, fieldTypeNewDate:
dest[i], err = parseDateTime(
string(dest[i].([]byte)),
- mc.cfg.loc,
+ mc.cfg.Loc,
)
if err == nil {
continue
@@ -639,12 +766,19 @@ func (rows *textRows) readRow(dest []driver.Value) error {
func (mc *mysqlConn) readUntilEOF() error {
for {
data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
- // No Err and no EOF Packet
- if err == nil && data[0] != iEOF {
- continue
+ switch data[0] {
+ case iERR:
+ return mc.handleErrorPacket(data)
+ case iEOF:
+ if len(data) == 5 {
+ mc.status = readStatus(data[3:])
+ }
+ return nil
}
- return err // Err or EOF
}
}
@@ -676,20 +810,20 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
// Warning count [16 bit uint]
if !stmt.mc.strict {
return columnCount, nil
- } else {
- // Check for warnings count > 0, only available in MySQL > 4.1
- if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 {
- return columnCount, stmt.mc.getWarnings()
- }
- return columnCount, nil
}
+
+ // Check for warnings count > 0, only available in MySQL > 4.1
+ if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 {
+ return columnCount, stmt.mc.getWarnings()
+ }
+ return columnCount, nil
}
return 0, err
}
// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
- maxLen := stmt.mc.maxPacketAllowed - 1
+ maxLen := stmt.mc.maxAllowedPacket - 1
pktLen := maxLen
// After the header (bytes 0-3) follows before the data:
@@ -744,7 +878,7 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if len(args) != stmt.paramCount {
return fmt.Errorf(
- "Arguments count mismatch (Got: %d Has: %d)",
+ "argument count mismatch (got: %d; has: %d)",
len(args),
stmt.paramCount,
)
@@ -880,7 +1014,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
paramTypes[i+i] = fieldTypeString
paramTypes[i+i+1] = 0x00
- if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@@ -902,7 +1036,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
paramTypes[i+i] = fieldTypeString
paramTypes[i+i+1] = 0x00
- if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@@ -921,7 +1055,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if v.IsZero() {
val = []byte("0000-00-00")
} else {
- val = []byte(v.In(mc.cfg.loc).Format(timeFormat))
+ val = []byte(v.In(mc.cfg.Loc).Format(timeFormat))
}
paramValues = appendLengthEncodedInteger(paramValues,
@@ -930,7 +1064,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
paramValues = append(paramValues, val...)
default:
- return fmt.Errorf("Can't convert type: %T", arg)
+ return fmt.Errorf("can not convert type: %T", arg)
}
}
@@ -948,6 +1082,28 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
return mc.writePacket(data)
}
+func (mc *mysqlConn) discardResults() error {
+ for mc.status&statusMoreResultsExists != 0 {
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ } else {
+ mc.status &^= statusMoreResultsExists
+ }
+ }
+ return nil
+}
+
// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
func (rows *binaryRows) readRow(dest []driver.Value) error {
data, err := rows.mc.readPacket()
@@ -959,8 +1115,18 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
if data[0] != iOK {
// EOF Packet
if data[0] == iEOF && len(data) == 5 {
- return io.EOF
+ rows.mc.status = readStatus(data[3:])
+ err = rows.mc.discardResults()
+ if err == nil {
+ err = io.EOF
+ } else {
+ // connection unusable
+ rows.mc.Close()
+ }
+ rows.mc = nil
+ return err
}
+ rows.mc = nil
// Error otherwise
return rows.mc.handleErrorPacket(data)
@@ -1027,7 +1193,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeFloat:
- dest[i] = float64(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ dest[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])))
pos += 4
continue
@@ -1040,7 +1206,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry:
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
var isNull bool
var n int
dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
@@ -1055,88 +1221,53 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
}
return err
- // Date YYYY-MM-DD
- case fieldTypeDate, fieldTypeNewDate:
+ case
+ fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
+ fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
+ fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
+
num, isNull, n := readLengthEncodedInteger(data[pos:])
pos += n
- if isNull {
+ switch {
+ case isNull:
dest[i] = nil
continue
- }
-
- if rows.mc.parseTime {
- dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.loc)
- } else {
- dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], false)
- }
-
- if err == nil {
- pos += int(num)
- continue
- } else {
- return err
- }
-
- // Time [-][H]HH:MM:SS[.fractal]
- case fieldTypeTime:
- num, isNull, n := readLengthEncodedInteger(data[pos:])
- pos += n
-
- if num == 0 {
- if isNull {
- dest[i] = nil
- continue
- } else {
- dest[i] = []byte("00:00:00")
- continue
+ case rows.columns[i].fieldType == fieldTypeTime:
+ // database/sql does not support an equivalent to TIME, return a string
+ var dstlen uint8
+ switch decimals := rows.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 8
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 8 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.columns[i].decimals,
+ )
}
- }
-
- var sign string
- if data[pos] == 1 {
- sign = "-"
- }
-
- switch num {
- case 8:
- dest[i] = []byte(fmt.Sprintf(
- sign+"%02d:%02d:%02d",
- uint16(data[pos+1])*24+uint16(data[pos+5]),
- data[pos+6],
- data[pos+7],
- ))
- pos += 8
- continue
- case 12:
- dest[i] = []byte(fmt.Sprintf(
- sign+"%02d:%02d:%02d.%06d",
- uint16(data[pos+1])*24+uint16(data[pos+5]),
- data[pos+6],
- data[pos+7],
- binary.LittleEndian.Uint32(data[pos+8:pos+12]),
- ))
- pos += 12
- continue
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true)
+ case rows.mc.parseTime:
+ dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
default:
- return fmt.Errorf("Invalid TIME-packet length %d", num)
- }
-
- // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
- case fieldTypeTimestamp, fieldTypeDateTime:
- num, isNull, n := readLengthEncodedInteger(data[pos:])
-
- pos += n
-
- if isNull {
- dest[i] = nil
- continue
- }
-
- if rows.mc.parseTime {
- dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.loc)
- } else {
- dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], true)
+ var dstlen uint8
+ if rows.columns[i].fieldType == fieldTypeDate {
+ dstlen = 10
+ } else {
+ switch decimals := rows.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 19
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 19 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.columns[i].decimals,
+ )
+ }
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false)
}
if err == nil {
@@ -1148,7 +1279,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// Please report if this happens!
default:
- return fmt.Errorf("Unknown FieldType %d", rows.columns[i].fieldType)
+ return fmt.Errorf("unknown field type %d", rows.columns[i].fieldType)
}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets_test.go b/vendor/github.com/go-sql-driver/mysql/packets_test.go
new file mode 100644
index 000000000..98404586a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/packets_test.go
@@ -0,0 +1,282 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "errors"
+ "net"
+ "testing"
+ "time"
+)
+
+var (
+ errConnClosed = errors.New("connection is closed")
+ errConnTooManyReads = errors.New("too many reads")
+ errConnTooManyWrites = errors.New("too many writes")
+)
+
+// struct to mock a net.Conn for testing purposes
+type mockConn struct {
+ laddr net.Addr
+ raddr net.Addr
+ data []byte
+ closed bool
+ read int
+ written int
+ reads int
+ writes int
+ maxReads int
+ maxWrites int
+}
+
+func (m *mockConn) Read(b []byte) (n int, err error) {
+ if m.closed {
+ return 0, errConnClosed
+ }
+
+ m.reads++
+ if m.maxReads > 0 && m.reads > m.maxReads {
+ return 0, errConnTooManyReads
+ }
+
+ n = copy(b, m.data)
+ m.read += n
+ m.data = m.data[n:]
+ return
+}
+func (m *mockConn) Write(b []byte) (n int, err error) {
+ if m.closed {
+ return 0, errConnClosed
+ }
+
+ m.writes++
+ if m.maxWrites > 0 && m.writes > m.maxWrites {
+ return 0, errConnTooManyWrites
+ }
+
+ n = len(b)
+ m.written += n
+ return
+}
+func (m *mockConn) Close() error {
+ m.closed = true
+ return nil
+}
+func (m *mockConn) LocalAddr() net.Addr {
+ return m.laddr
+}
+func (m *mockConn) RemoteAddr() net.Addr {
+ return m.raddr
+}
+func (m *mockConn) SetDeadline(t time.Time) error {
+ return nil
+}
+func (m *mockConn) SetReadDeadline(t time.Time) error {
+ return nil
+}
+func (m *mockConn) SetWriteDeadline(t time.Time) error {
+ return nil
+}
+
+// make sure mockConn implements the net.Conn interface
+var _ net.Conn = new(mockConn)
+
+func TestReadPacketSingleByte(t *testing.T) {
+ conn := new(mockConn)
+ mc := &mysqlConn{
+ buf: newBuffer(conn),
+ }
+
+ conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
+ conn.maxReads = 1
+ packet, err := mc.readPacket()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(packet) != 1 {
+ t.Fatalf("unexpected packet lenght: expected %d, got %d", 1, len(packet))
+ }
+ if packet[0] != 0xff {
+ t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0])
+ }
+}
+
+func TestReadPacketWrongSequenceID(t *testing.T) {
+ conn := new(mockConn)
+ mc := &mysqlConn{
+ buf: newBuffer(conn),
+ }
+
+ // too low sequence id
+ conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
+ conn.maxReads = 1
+ mc.sequence = 1
+ _, err := mc.readPacket()
+ if err != ErrPktSync {
+ t.Errorf("expected ErrPktSync, got %v", err)
+ }
+
+ // reset
+ conn.reads = 0
+ mc.sequence = 0
+ mc.buf = newBuffer(conn)
+
+ // too high sequence id
+ conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff}
+ _, err = mc.readPacket()
+ if err != ErrPktSyncMul {
+ t.Errorf("expected ErrPktSyncMul, got %v", err)
+ }
+}
+
+func TestReadPacketSplit(t *testing.T) {
+ conn := new(mockConn)
+ mc := &mysqlConn{
+ buf: newBuffer(conn),
+ }
+
+ data := make([]byte, maxPacketSize*2+4*3)
+ const pkt2ofs = maxPacketSize + 4
+ const pkt3ofs = 2 * (maxPacketSize + 4)
+
+ // case 1: payload has length maxPacketSize
+ data = data[:pkt2ofs+4]
+
+ // 1st packet has maxPacketSize length and sequence id 0
+ // ff ff ff 00 ...
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+
+ // mark the payload start and end of 1st packet so that we can check if the
+ // content was correctly appended
+ data[4] = 0x11
+ data[maxPacketSize+3] = 0x22
+
+ // 2nd packet has payload length 0 and sequence id 1
+ // 00 00 00 01
+ data[pkt2ofs+3] = 0x01
+
+ conn.data = data
+ conn.maxReads = 3
+ packet, err := mc.readPacket()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(packet) != maxPacketSize {
+ t.Fatalf("unexpected packet lenght: expected %d, got %d", maxPacketSize, len(packet))
+ }
+ if packet[0] != 0x11 {
+ t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
+ }
+ if packet[maxPacketSize-1] != 0x22 {
+ t.Fatalf("unexpected payload end: expected %x, got %x", 0x22, packet[maxPacketSize-1])
+ }
+
+ // case 2: payload has length which is a multiple of maxPacketSize
+ data = data[:cap(data)]
+
+ // 2nd packet now has maxPacketSize length
+ data[pkt2ofs] = 0xff
+ data[pkt2ofs+1] = 0xff
+ data[pkt2ofs+2] = 0xff
+
+ // mark the payload start and end of the 2nd packet
+ data[pkt2ofs+4] = 0x33
+ data[pkt2ofs+maxPacketSize+3] = 0x44
+
+ // 3rd packet has payload length 0 and sequence id 2
+ // 00 00 00 02
+ data[pkt3ofs+3] = 0x02
+
+ conn.data = data
+ conn.reads = 0
+ conn.maxReads = 5
+ mc.sequence = 0
+ packet, err = mc.readPacket()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(packet) != 2*maxPacketSize {
+ t.Fatalf("unexpected packet lenght: expected %d, got %d", 2*maxPacketSize, len(packet))
+ }
+ if packet[0] != 0x11 {
+ t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
+ }
+ if packet[2*maxPacketSize-1] != 0x44 {
+ t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[2*maxPacketSize-1])
+ }
+
+ // case 3: payload has a length larger maxPacketSize, which is not an exact
+ // multiple of it
+ data = data[:pkt2ofs+4+42]
+ data[pkt2ofs] = 0x2a
+ data[pkt2ofs+1] = 0x00
+ data[pkt2ofs+2] = 0x00
+ data[pkt2ofs+4+41] = 0x44
+
+ conn.data = data
+ conn.reads = 0
+ conn.maxReads = 4
+ mc.sequence = 0
+ packet, err = mc.readPacket()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(packet) != maxPacketSize+42 {
+ t.Fatalf("unexpected packet lenght: expected %d, got %d", maxPacketSize+42, len(packet))
+ }
+ if packet[0] != 0x11 {
+ t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
+ }
+ if packet[maxPacketSize+41] != 0x44 {
+ t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[maxPacketSize+41])
+ }
+}
+
+func TestReadPacketFail(t *testing.T) {
+ conn := new(mockConn)
+ mc := &mysqlConn{
+ buf: newBuffer(conn),
+ }
+
+ // illegal empty (stand-alone) packet
+ conn.data = []byte{0x00, 0x00, 0x00, 0x00}
+ conn.maxReads = 1
+ _, err := mc.readPacket()
+ if err != driver.ErrBadConn {
+ t.Errorf("expected ErrBadConn, got %v", err)
+ }
+
+ // reset
+ conn.reads = 0
+ mc.sequence = 0
+ mc.buf = newBuffer(conn)
+
+ // fail to read header
+ conn.closed = true
+ _, err = mc.readPacket()
+ if err != driver.ErrBadConn {
+ t.Errorf("expected ErrBadConn, got %v", err)
+ }
+
+ // reset
+ conn.closed = false
+ conn.reads = 0
+ mc.sequence = 0
+ mc.buf = newBuffer(conn)
+
+ // fail to read body
+ conn.maxReads = 1
+ _, err = mc.readPacket()
+ if err != driver.ErrBadConn {
+ t.Errorf("expected ErrBadConn, got %v", err)
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
index df4ef06cb..c08255eee 100644
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -14,9 +14,11 @@ import (
)
type mysqlField struct {
- fieldType byte
- flags fieldFlag
+ tableName string
name string
+ flags fieldFlag
+ fieldType byte
+ decimals byte
}
type mysqlRows struct {
@@ -32,10 +34,22 @@ type textRows struct {
mysqlRows
}
+type emptyRows struct{}
+
func (rows *mysqlRows) Columns() []string {
columns := make([]string, len(rows.columns))
- for i := range columns {
- columns[i] = rows.columns[i].name
+ if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+ for i := range columns {
+ if tableName := rows.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.columns[i].name
+ } else {
+ columns[i] = rows.columns[i].name
+ }
+ }
+ } else {
+ for i := range columns {
+ columns[i] = rows.columns[i].name
+ }
}
return columns
}
@@ -51,6 +65,12 @@ func (rows *mysqlRows) Close() error {
// Remove unread packets from stream
err := mc.readUntilEOF()
+ if err == nil {
+ if err = mc.discardResults(); err != nil {
+ return err
+ }
+ }
+
rows.mc = nil
return err
}
@@ -62,10 +82,7 @@ func (rows *binaryRows) Next(dest []driver.Value) error {
}
// Fetch next row from stream
- if err := rows.readRow(dest); err != io.EOF {
- return err
- }
- rows.mc = nil
+ return rows.readRow(dest)
}
return io.EOF
}
@@ -77,10 +94,19 @@ func (rows *textRows) Next(dest []driver.Value) error {
}
// Fetch next row from stream
- if err := rows.readRow(dest); err != io.EOF {
- return err
- }
- rows.mc = nil
+ return rows.readRow(dest)
}
return io.EOF
}
+
+func (rows emptyRows) Columns() []string {
+ return nil
+}
+
+func (rows emptyRows) Close() error {
+ return nil
+}
+
+func (rows emptyRows) Next(dest []driver.Value) error {
+ return io.EOF
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index 142ef5416..7f9b04585 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -10,6 +10,9 @@ package mysql
import (
"database/sql/driver"
+ "fmt"
+ "reflect"
+ "strconv"
)
type mysqlStmt struct {
@@ -21,7 +24,10 @@ type mysqlStmt struct {
func (stmt *mysqlStmt) Close() error {
if stmt.mc == nil || stmt.mc.netConn == nil {
- errLog.Print(ErrInvalidConn)
+ // driver.Stmt.Close can be called more than once, thus this function
+ // has to be idempotent.
+ // See also Issue #450 and golang/go#16019.
+ //errLog.Print(ErrInvalidConn)
return driver.ErrBadConn
}
@@ -34,6 +40,10 @@ func (stmt *mysqlStmt) NumInput() int {
return stmt.paramCount
}
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+ return converter{}
+}
+
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
if stmt.mc.netConn == nil {
errLog.Print(ErrInvalidConn)
@@ -94,9 +104,9 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
}
rows := new(binaryRows)
- rows.mc = mc
if resLen > 0 {
+ rows.mc = mc
// Columns
// If not cached, read them and cache them
if stmt.columns == nil {
@@ -110,3 +120,34 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
return rows, err
}
+
+type converter struct{}
+
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+ if driver.IsValue(v) {
+ return v, nil
+ }
+
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Ptr:
+ // indirect pointers
+ if rv.IsNil() {
+ return nil, nil
+ }
+ return c.ConvertValue(rv.Elem().Interface())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(rv.Uint()), nil
+ case reflect.Uint64:
+ u64 := rv.Uint()
+ if u64 >= 1<<63 {
+ return strconv.FormatUint(u64, 10), nil
+ }
+ return int64(u64), nil
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
index b6f200389..d523b7ffd 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -13,26 +13,16 @@ import (
"crypto/tls"
"database/sql/driver"
"encoding/binary"
- "errors"
"fmt"
"io"
- "net/url"
"strings"
"time"
)
var (
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
-
- errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?")
- errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)")
- errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name")
)
-func init() {
- tlsConfigRegister = make(map[string]*tls.Config)
-}
-
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
// Use the key as a value in the DSN where tls=value.
//
@@ -58,7 +48,11 @@ func init() {
//
func RegisterTLSConfig(key string, config *tls.Config) error {
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
- return fmt.Errorf("Key '%s' is reserved", key)
+ return fmt.Errorf("key '%s' is reserved", key)
+ }
+
+ if tlsConfigRegister == nil {
+ tlsConfigRegister = make(map[string]*tls.Config)
}
tlsConfigRegister[key] = config
@@ -67,202 +61,9 @@ func RegisterTLSConfig(key string, config *tls.Config) error {
// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
- delete(tlsConfigRegister, key)
-}
-
-// parseDSN parses the DSN string to a config
-func parseDSN(dsn string) (cfg *config, err error) {
- // New config with some default values
- cfg = &config{
- loc: time.UTC,
- collation: defaultCollation,
- }
-
- // TODO: use strings.IndexByte when we can depend on Go 1.2
-
- // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
- // Find the last '/' (since the password or the net addr might contain a '/')
- foundSlash := false
- for i := len(dsn) - 1; i >= 0; i-- {
- if dsn[i] == '/' {
- foundSlash = true
- var j, k int
-
- // left part is empty if i <= 0
- if i > 0 {
- // [username[:password]@][protocol[(address)]]
- // Find the last '@' in dsn[:i]
- for j = i; j >= 0; j-- {
- if dsn[j] == '@' {
- // username[:password]
- // Find the first ':' in dsn[:j]
- for k = 0; k < j; k++ {
- if dsn[k] == ':' {
- cfg.passwd = dsn[k+1 : j]
- break
- }
- }
- cfg.user = dsn[:k]
-
- break
- }
- }
-
- // [protocol[(address)]]
- // Find the first '(' in dsn[j+1:i]
- for k = j + 1; k < i; k++ {
- if dsn[k] == '(' {
- // dsn[i-1] must be == ')' if an address is specified
- if dsn[i-1] != ')' {
- if strings.ContainsRune(dsn[k+1:i], ')') {
- return nil, errInvalidDSNUnescaped
- }
- return nil, errInvalidDSNAddr
- }
- cfg.addr = dsn[k+1 : i-1]
- break
- }
- }
- cfg.net = dsn[j+1 : k]
- }
-
- // dbname[?param1=value1&...&paramN=valueN]
- // Find the first '?' in dsn[i+1:]
- for j = i + 1; j < len(dsn); j++ {
- if dsn[j] == '?' {
- if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
- return
- }
- break
- }
- }
- cfg.dbname = dsn[i+1 : j]
-
- break
- }
- }
-
- if !foundSlash && len(dsn) > 0 {
- return nil, errInvalidDSNNoSlash
- }
-
- // Set default network if empty
- if cfg.net == "" {
- cfg.net = "tcp"
- }
-
- // Set default address if empty
- if cfg.addr == "" {
- switch cfg.net {
- case "tcp":
- cfg.addr = "127.0.0.1:3306"
- case "unix":
- cfg.addr = "/tmp/mysql.sock"
- default:
- return nil, errors.New("Default addr for network '" + cfg.net + "' unknown")
- }
-
- }
-
- return
-}
-
-// parseDSNParams parses the DSN "query string"
-// Values must be url.QueryEscape'ed
-func parseDSNParams(cfg *config, params string) (err error) {
- for _, v := range strings.Split(params, "&") {
- param := strings.SplitN(v, "=", 2)
- if len(param) != 2 {
- continue
- }
-
- // cfg params
- switch value := param[1]; param[0] {
-
- // Disable INFILE whitelist / enable all files
- case "allowAllFiles":
- var isBool bool
- cfg.allowAllFiles, isBool = readBool(value)
- if !isBool {
- return fmt.Errorf("Invalid Bool value: %s", value)
- }
-
- // Use old authentication mode (pre MySQL 4.1)
- case "allowOldPasswords":
- var isBool bool
- cfg.allowOldPasswords, isBool = readBool(value)
- if !isBool {
- return fmt.Errorf("Invalid Bool value: %s", value)
- }
-
- // Switch "rowsAffected" mode
- case "clientFoundRows":
- var isBool bool
- cfg.clientFoundRows, isBool = readBool(value)
- if !isBool {
- return fmt.Errorf("Invalid Bool value: %s", value)
- }
-
- // Collation
- case "collation":
- collation, ok := collations[value]
- if !ok {
- // Note possibility for false negatives:
- // could be triggered although the collation is valid if the
- // collations map does not contain entries the server supports.
- err = errors.New("unknown collation")
- return
- }
- cfg.collation = collation
- break
-
- // Time Location
- case "loc":
- if value, err = url.QueryUnescape(value); err != nil {
- return
- }
- cfg.loc, err = time.LoadLocation(value)
- if err != nil {
- return
- }
-
- // Dial Timeout
- case "timeout":
- cfg.timeout, err = time.ParseDuration(value)
- if err != nil {
- return
- }
-
- // TLS-Encryption
- case "tls":
- boolValue, isBool := readBool(value)
- if isBool {
- if boolValue {
- cfg.tls = &tls.Config{}
- }
- } else {
- if strings.ToLower(value) == "skip-verify" {
- cfg.tls = &tls.Config{InsecureSkipVerify: true}
- } else if tlsConfig, ok := tlsConfigRegister[value]; ok {
- cfg.tls = tlsConfig
- } else {
- return fmt.Errorf("Invalid value / unknown config name: %s", value)
- }
- }
-
- default:
- // lazy init
- if cfg.params == nil {
- cfg.params = make(map[string]string)
- }
-
- if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil {
- return
- }
- }
+ if tlsConfigRegister != nil {
+ delete(tlsConfigRegister, key)
}
-
- return
}
// Returns the bool value of the input.
@@ -451,19 +252,15 @@ func (nt NullTime) Value() (driver.Value, error) {
}
func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
+ base := "0000-00-00 00:00:00.0000000"
switch len(str) {
- case 10: // YYYY-MM-DD
- if str == "0000-00-00" {
+ case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+ if str == base[:len(str)] {
return
}
- t, err = time.Parse(timeFormat[:10], str)
- case 19: // YYYY-MM-DD HH:MM:SS
- if str == "0000-00-00 00:00:00" {
- return
- }
- t, err = time.Parse(timeFormat, str)
+ t, err = time.Parse(timeFormat[:len(str)], str)
default:
- err = fmt.Errorf("Invalid Time-String: %s", str)
+ err = fmt.Errorf("invalid time string: %s", str)
return
}
@@ -512,87 +309,151 @@ func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Va
loc,
), nil
}
- return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num)
+ return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
}
// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
// if the DATE or DATETIME has the zero value.
// It must never be changed.
// The current behavior depends on database/sql copying the result.
-var zeroDateTime = []byte("0000-00-00 00:00:00")
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
+
+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
-func formatBinaryDateTime(src []byte, withTime bool) (driver.Value, error) {
+func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
+ // length expects the deterministic length of the zero value,
+ // negative time and 100+ hours are automatically added if needed
if len(src) == 0 {
- if withTime {
- return zeroDateTime, nil
+ if justTime {
+ return zeroDateTime[11 : 11+length], nil
}
- return zeroDateTime[:10], nil
- }
- var dst []byte
- if withTime {
- if len(src) == 11 {
- dst = []byte("0000-00-00 00:00:00.000000")
+ return zeroDateTime[:length], nil
+ }
+ var dst []byte // return value
+ var pt, p1, p2, p3 byte // current digit pair
+ var zOffs byte // offset of value in zeroDateTime
+ if justTime {
+ switch length {
+ case
+ 8, // time (can be up to 10 when negative and 100+ hours)
+ 10, 11, 12, 13, 14, 15: // time with fractional seconds
+ default:
+ return nil, fmt.Errorf("illegal TIME length %d", length)
+ }
+ switch len(src) {
+ case 8, 12:
+ default:
+ return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+ }
+ // +2 to enable negative time and 100+ hours
+ dst = make([]byte, 0, length+2)
+ if src[0] == 1 {
+ dst = append(dst, '-')
+ }
+ if src[1] != 0 {
+ hour := uint16(src[1])*24 + uint16(src[5])
+ pt = byte(hour / 100)
+ p1 = byte(hour - 100*uint16(pt))
+ dst = append(dst, digits01[pt])
} else {
- dst = []byte("0000-00-00 00:00:00")
+ p1 = src[5]
}
+ zOffs = 11
+ src = src[6:]
} else {
- dst = []byte("0000-00-00")
- }
- switch len(src) {
- case 11:
- microsecs := binary.LittleEndian.Uint32(src[7:11])
- tmp32 := microsecs / 10
- dst[25] += byte(microsecs - 10*tmp32)
- tmp32, microsecs = tmp32/10, tmp32
- dst[24] += byte(microsecs - 10*tmp32)
- tmp32, microsecs = tmp32/10, tmp32
- dst[23] += byte(microsecs - 10*tmp32)
- tmp32, microsecs = tmp32/10, tmp32
- dst[22] += byte(microsecs - 10*tmp32)
- tmp32, microsecs = tmp32/10, tmp32
- dst[21] += byte(microsecs - 10*tmp32)
- dst[20] += byte(microsecs / 10)
- fallthrough
- case 7:
- second := src[6]
- tmp := second / 10
- dst[18] += second - 10*tmp
- dst[17] += tmp
- minute := src[5]
- tmp = minute / 10
- dst[15] += minute - 10*tmp
- dst[14] += tmp
- hour := src[4]
- tmp = hour / 10
- dst[12] += hour - 10*tmp
- dst[11] += tmp
- fallthrough
- case 4:
- day := src[3]
- tmp := day / 10
- dst[9] += day - 10*tmp
- dst[8] += tmp
- month := src[2]
- tmp = month / 10
- dst[6] += month - 10*tmp
- dst[5] += tmp
+ switch length {
+ case 10, 19, 21, 22, 23, 24, 25, 26:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s length %d", t, length)
+ }
+ switch len(src) {
+ case 4, 7, 11:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+ }
+ dst = make([]byte, 0, length)
+ // start with the date
year := binary.LittleEndian.Uint16(src[:2])
- tmp16 := year / 10
- dst[3] += byte(year - 10*tmp16)
- tmp16, year = tmp16/10, tmp16
- dst[2] += byte(year - 10*tmp16)
- tmp16, year = tmp16/10, tmp16
- dst[1] += byte(year - 10*tmp16)
- dst[0] += byte(tmp16)
+ pt = byte(year / 100)
+ p1 = byte(year - 100*uint16(pt))
+ p2, p3 = src[2], src[3]
+ dst = append(dst,
+ digits10[pt], digits01[pt],
+ digits10[p1], digits01[p1], '-',
+ digits10[p2], digits01[p2], '-',
+ digits10[p3], digits01[p3],
+ )
+ if length == 10 {
+ return dst, nil
+ }
+ if len(src) == 4 {
+ return append(dst, zeroDateTime[10:length]...), nil
+ }
+ dst = append(dst, ' ')
+ p1 = src[4] // hour
+ src = src[5:]
+ }
+ // p1 is 2-digit hour, src is after hour
+ p2, p3 = src[0], src[1]
+ dst = append(dst,
+ digits10[p1], digits01[p1], ':',
+ digits10[p2], digits01[p2], ':',
+ digits10[p3], digits01[p3],
+ )
+ if length <= byte(len(dst)) {
return dst, nil
}
- var t string
- if withTime {
- t = "DATETIME"
- } else {
- t = "DATE"
+ src = src[2:]
+ if len(src) == 0 {
+ return append(dst, zeroDateTime[19:zOffs+length]...), nil
+ }
+ microsecs := binary.LittleEndian.Uint32(src[:4])
+ p1 = byte(microsecs / 10000)
+ microsecs -= 10000 * uint32(p1)
+ p2 = byte(microsecs / 100)
+ microsecs -= 100 * uint32(p2)
+ p3 = byte(microsecs)
+ switch decimals := zOffs + length - 20; decimals {
+ default:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3], digits01[p3],
+ ), nil
+ case 1:
+ return append(dst, '.',
+ digits10[p1],
+ ), nil
+ case 2:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ ), nil
+ case 3:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2],
+ ), nil
+ case 4:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ ), nil
+ case 5:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3],
+ ), nil
}
- return nil, fmt.Errorf("invalid %s-packet length %d", t, len(src))
}
/******************************************************************************
@@ -683,6 +544,10 @@ func skipLengthEncodedString(b []byte) (int, error) {
// returns the number read, whether the value is NULL and the number of bytes read
func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+ // See issue #349
+ if len(b) == 0 {
+ return 0, true, 1
+ }
switch b[0] {
// 251: NULL
@@ -724,3 +589,152 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte {
return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
}
+
+// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, it reallocates a new buffer.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+ newSize := len(buf) + appendSize
+ if cap(buf) < newSize {
+ // Grow buffer exponentially
+ newBuf := make([]byte, len(buf)*2+appendSize)
+ copy(newBuf, buf)
+ buf = newBuf
+ }
+ return buf[:newSize]
+}
+
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+func escapeBytesBackslash(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+func escapeStringBackslash(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+// This escapes the contents of a string by doubling up any apostrophes that
+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+// effect on the server.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+func escapeBytesQuotes(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+func escapeStringQuotes(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/go-sql-driver/mysql/utils_test.go
index 301d81a62..0d6c6684f 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils_test.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils_test.go
@@ -16,76 +16,6 @@ import (
"time"
)
-var testDSNs = []struct {
- in string
- out string
- loc *time.Location
-}{
- {"username:password@protocol(address)/dbname?param=value", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
- {"user@unix(/path/to/socket)/dbname?charset=utf8", "&{user:user passwd: net:unix addr:/path/to/socket dbname:dbname params:map[charset:utf8] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
- {"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
- {"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8mb4,utf8] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
- {"user:password@/dbname?loc=UTC&timeout=30s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci", "&{user:user passwd:password net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls:<nil> timeout:30000000000 collation:224 allowAllFiles:true allowOldPasswords:true clientFoundRows:true}", time.UTC},
- {"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", "&{user:user passwd:p@ss(word) net:tcp addr:[de:ad:be:ef::ca:fe]:80 dbname:dbname params:map[] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.Local},
- {"/dbname", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
- {"@/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
- {"/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
- {"", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
- {"user:p@/ssword@/", "&{user:user passwd:p@/ssword net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
- {"unix/?arg=%2Fsome%2Fpath.ext", "&{user: passwd: net:unix addr:/tmp/mysql.sock dbname: params:map[arg:/some/path.ext] loc:%p tls:<nil> timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false clientFoundRows:false}", time.UTC},
-}
-
-func TestDSNParser(t *testing.T) {
- var cfg *config
- var err error
- var res string
-
- for i, tst := range testDSNs {
- cfg, err = parseDSN(tst.in)
- if err != nil {
- t.Error(err.Error())
- }
-
- // pointer not static
- cfg.tls = nil
-
- res = fmt.Sprintf("%+v", cfg)
- if res != fmt.Sprintf(tst.out, tst.loc) {
- t.Errorf("%d. parseDSN(%q) => %q, want %q", i, tst.in, res, fmt.Sprintf(tst.out, tst.loc))
- }
- }
-}
-
-func TestDSNParserInvalid(t *testing.T) {
- var invalidDSNs = []string{
- "@net(addr/", // no closing brace
- "@tcp(/", // no closing brace
- "tcp(/", // no closing brace
- "(/", // no closing brace
- "net(addr)//", // unescaped
- "user:pass@tcp(1.2.3.4:3306)", // no trailing slash
- //"/dbname?arg=/some/unescaped/path",
- }
-
- for i, tst := range invalidDSNs {
- if _, err := parseDSN(tst); err == nil {
- t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
- }
- }
-}
-
-func BenchmarkParseDSN(b *testing.B) {
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- for _, tst := range testDSNs {
- if _, err := parseDSN(tst.in); err != nil {
- b.Error(err.Error())
- }
- }
- }
-}
-
func TestScanNullTime(t *testing.T) {
var scanTests = []struct {
in interface{}
@@ -191,22 +121,77 @@ func TestFormatBinaryDateTime(t *testing.T) {
rawDate[5] = 46 // minutes
rawDate[6] = 23 // seconds
binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
- expect := func(expected string, length int, withTime bool) {
- actual, _ := formatBinaryDateTime(rawDate[:length], withTime)
+ expect := func(expected string, inlen, outlen uint8) {
+ actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false)
bytes, ok := actual.([]byte)
if !ok {
t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
}
if string(bytes) != expected {
t.Errorf(
- "expected %q, got %q for length %d, withTime %v",
- bytes, actual, length, withTime,
+ "expected %q, got %q for length in %d, out %d",
+ bytes, actual, inlen, outlen,
)
}
}
- expect("0000-00-00", 0, false)
- expect("0000-00-00 00:00:00", 0, true)
- expect("1978-12-30", 4, false)
- expect("1978-12-30 15:46:23", 7, true)
- expect("1978-12-30 15:46:23.987654", 11, true)
+ expect("0000-00-00", 0, 10)
+ expect("0000-00-00 00:00:00", 0, 19)
+ expect("1978-12-30", 4, 10)
+ expect("1978-12-30 15:46:23", 7, 19)
+ expect("1978-12-30 15:46:23.987654", 11, 26)
+}
+
+func TestEscapeBackslash(t *testing.T) {
+ expect := func(expected, value string) {
+ actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
+ if actual != expected {
+ t.Errorf(
+ "expected %s, got %s",
+ expected, actual,
+ )
+ }
+
+ actual = string(escapeStringBackslash([]byte{}, value))
+ if actual != expected {
+ t.Errorf(
+ "expected %s, got %s",
+ expected, actual,
+ )
+ }
+ }
+
+ expect("foo\\0bar", "foo\x00bar")
+ expect("foo\\nbar", "foo\nbar")
+ expect("foo\\rbar", "foo\rbar")
+ expect("foo\\Zbar", "foo\x1abar")
+ expect("foo\\\"bar", "foo\"bar")
+ expect("foo\\\\bar", "foo\\bar")
+ expect("foo\\'bar", "foo'bar")
+}
+
+func TestEscapeQuotes(t *testing.T) {
+ expect := func(expected, value string) {
+ actual := string(escapeBytesQuotes([]byte{}, []byte(value)))
+ if actual != expected {
+ t.Errorf(
+ "expected %s, got %s",
+ expected, actual,
+ )
+ }
+
+ actual = string(escapeStringQuotes([]byte{}, value))
+ if actual != expected {
+ t.Errorf(
+ "expected %s, got %s",
+ expected, actual,
+ )
+ }
+ }
+
+ expect("foo\x00bar", "foo\x00bar") // not affected
+ expect("foo\nbar", "foo\nbar") // not affected
+ expect("foo\rbar", "foo\rbar") // not affected
+ expect("foo\x1abar", "foo\x1abar") // not affected
+ expect("foo''bar", "foo'bar") // affected
+ expect("foo\"bar", "foo\"bar") // not affected
}
diff --git a/vendor/github.com/goamz/goamz/s3/s3.go b/vendor/github.com/goamz/goamz/s3/s3.go
index c659aa6ba..f27479cb4 100644
--- a/vendor/github.com/goamz/goamz/s3/s3.go
+++ b/vendor/github.com/goamz/goamz/s3/s3.go
@@ -984,6 +984,9 @@ func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
if v, ok := req.headers["Content-Length"]; ok {
hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
delete(req.headers, "Content-Length")
+ if hreq.ContentLength == 0 {
+ req.payload = nil
+ }
}
if req.payload != nil {
hreq.Body = ioutil.NopCloser(req.payload)
diff --git a/vendor/github.com/golang/freetype/truetype/glyph.go b/vendor/github.com/golang/freetype/truetype/glyph.go
index c2935a58e..6157ad83e 100644
--- a/vendor/github.com/golang/freetype/truetype/glyph.go
+++ b/vendor/github.com/golang/freetype/truetype/glyph.go
@@ -209,6 +209,7 @@ func (g *GlyphBuf) load(recursion uint32, i Index, useMyMetrics bool) (err error
g.addPhantomsAndScale(len(g.Points), len(g.Points), true, true)
copy(g.phantomPoints[:], g.Points[len(g.Points)-4:])
g.Points = g.Points[:len(g.Points)-4]
+ // TODO: also trim g.InFontUnits and g.Unhinted?
return nil
}
@@ -282,6 +283,10 @@ func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) {
program = glyf[offset : offset+instrLen]
offset += instrLen
+ if ne == 0 {
+ return program
+ }
+
np0 := len(g.Points)
np1 := np0 + int(g.Ends[len(g.Ends)-1])
diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go
index 65b7de58a..b1be9dc83 100644
--- a/vendor/github.com/gorilla/handlers/recovery.go
+++ b/vendor/github.com/gorilla/handlers/recovery.go
@@ -6,9 +6,14 @@ import (
"runtime/debug"
)
+// RecoveryHandlerLogger is an interface used by the recovering handler to print logs.
+type RecoveryHandlerLogger interface {
+ Println(...interface{})
+}
+
type recoveryHandler struct {
handler http.Handler
- logger *log.Logger
+ logger RecoveryHandlerLogger
printStack bool
}
@@ -46,7 +51,7 @@ func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler {
// RecoveryLogger is a functional option to override
// the default logger
-func RecoveryLogger(logger *log.Logger) RecoveryOption {
+func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption {
return func(h http.Handler) {
r := h.(*recoveryHandler)
r.logger = logger
@@ -73,11 +78,11 @@ func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
h.handler.ServeHTTP(w, req)
}
-func (h recoveryHandler) log(message interface{}) {
+func (h recoveryHandler) log(v ...interface{}) {
if h.logger != nil {
- h.logger.Println(message)
+ h.logger.Println(v...)
} else {
- log.Println(message)
+ log.Println(v...)
}
if h.printStack {
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
index fa79a6bc3..94d396ca4 100644
--- a/vendor/github.com/gorilla/mux/README.md
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -23,6 +23,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
* [Install](#install)
* [Examples](#examples)
* [Matching Routes](#matching-routes)
+* [Listing Routes](#listing-routes)
* [Static Files](#static-files)
* [Registered URLs](#registered-urls)
* [Full Example](#full-example)
@@ -65,8 +66,11 @@ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
```go
-vars := mux.Vars(request)
-category := vars["category"]
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
```
And this is all you need to know about the basic usage. More advanced options are explained below.
@@ -164,6 +168,42 @@ s.HandleFunc("/{key}/", ProductHandler)
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```
+### Listing Routes
+
+Routes on a mux can be listed using the Router.Walk method—useful for generating documentation:
+
+```go
+package main
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ return
+}
+
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+ r.HandleFunc("/products", handler)
+ r.HandleFunc("/articles", handler)
+ r.HandleFunc("/articles/{id}", handler)
+ r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+ t, err := route.GetPathTemplate()
+ if err != nil {
+ return err
+ }
+ fmt.Println(t)
+ return nil
+ })
+ http.Handle("/", r)
+}
+```
+
### Static Files
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
index e9573dd8a..00daf4a72 100644
--- a/vendor/github.com/gorilla/mux/doc.go
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -57,6 +57,11 @@ calling mux.Vars():
vars := mux.Vars(request)
category := vars["category"]
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
And this is all you need to know about the basic usage. More advanced options
are explained below.
diff --git a/vendor/github.com/gorilla/mux/mux_test.go b/vendor/github.com/gorilla/mux/mux_test.go
index b4b049efc..405aca6de 100644
--- a/vendor/github.com/gorilla/mux/mux_test.go
+++ b/vendor/github.com/gorilla/mux/mux_test.go
@@ -1389,6 +1389,16 @@ func TestSubrouterErrorHandling(t *testing.T) {
}
}
+// See: https://github.com/gorilla/mux/issues/200
+func TestPanicOnCapturingGroups(t *testing.T) {
+ defer func() {
+ if recover() == nil {
t.Errorf("(Test that capturing groups now fail fast) Expected panic, however test completed successfully.\n")
+ }
+ }()
+ NewRouter().NewRoute().Path("/{type:(promo|special)}/{promoId}.json")
+}
+
// ----------------------------------------------------------------------------
// Helpers
// ----------------------------------------------------------------------------
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
index fd8fe3956..0189ad346 100644
--- a/vendor/github.com/gorilla/mux/regexp.go
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -109,6 +109,13 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
if errCompile != nil {
return nil, errCompile
}
+
+ // Check for capturing groups which used to work in older versions
+ if reg.NumSubexp() != len(idxs)/2 {
+ panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+ "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+ }
+
// Done!
return &routeRegexp{
template: template,
diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go
index e2ac7617b..72c166b2a 100644
--- a/vendor/github.com/gorilla/websocket/compression.go
+++ b/vendor/github.com/gorilla/websocket/compression.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -9,6 +9,11 @@ import (
"errors"
"io"
"strings"
+ "sync"
+)
+
+var (
+ flateWriterPool = sync.Pool{}
)
func decompressNoContextTakeover(r io.Reader) io.Reader {
@@ -17,13 +22,20 @@ func decompressNoContextTakeover(r io.Reader) io.Reader {
"\x00\x00\xff\xff" +
// Add final block to squelch unexpected EOF error from flate reader.
"\x01\x00\x00\xff\xff"
-
return flate.NewReader(io.MultiReader(r, strings.NewReader(tail)))
}
func compressNoContextTakeover(w io.WriteCloser) (io.WriteCloser, error) {
tw := &truncWriter{w: w}
- fw, err := flate.NewWriter(tw, 3)
+ i := flateWriterPool.Get()
+ var fw *flate.Writer
+ var err error
+ if i == nil {
+ fw, err = flate.NewWriter(tw, 3)
+ } else {
+ fw = i.(*flate.Writer)
+ fw.Reset(tw)
+ }
return &flateWrapper{fw: fw, tw: tw}, err
}
@@ -69,11 +81,19 @@ type flateWrapper struct {
}
func (w *flateWrapper) Write(p []byte) (int, error) {
+ if w.fw == nil {
+ return 0, errWriteClosed
+ }
return w.fw.Write(p)
}
func (w *flateWrapper) Close() error {
+ if w.fw == nil {
+ return errWriteClosed
+ }
err1 := w.fw.Flush()
+ flateWriterPool.Put(w.fw)
+ w.fw = nil
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
}
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
index b7a97bae9..ce7f0a615 100644
--- a/vendor/github.com/gorilla/websocket/conn.go
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -218,6 +218,7 @@ func isValidReceivedCloseCode(code int) bool {
return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
}
+// The Conn type represents a WebSocket connection.
type Conn struct {
conn net.Conn
isServer bool
@@ -406,12 +407,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er
return err
}
-// NextWriter returns a writer for the next message to send. The writer's Close
-// method flushes the complete message to the network.
-//
-// There can be at most one open writer on a connection. NextWriter closes the
-// previous writer if the application has not already done so.
-func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+func (c *Conn) prepWrite(messageType int) error {
// Close previous writer if not already closed by the application. It's
// probably better to return an error in this situation, but we cannot
// change this without breaking existing applications.
@@ -421,13 +417,22 @@ func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
}
if !isControl(messageType) && !isData(messageType) {
- return nil, errBadWriteOpCode
+ return errBadWriteOpCode
}
c.writeErrMu.Lock()
err := c.writeErr
c.writeErrMu.Unlock()
- if err != nil {
+ return err
+}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+ if err := c.prepWrite(messageType); err != nil {
return nil, err
}
@@ -652,16 +657,23 @@ func (w *messageWriter) Close() error {
// WriteMessage is a helper method for getting a writer using NextWriter,
// writing the message and closing the writer.
func (c *Conn) WriteMessage(messageType int, data []byte) error {
- w, err := c.NextWriter(messageType)
- if err != nil {
- return err
- }
- if mw, ok := w.(*messageWriter); ok && c.isServer {
- // Optimize write as a single frame.
+
+ if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
+
+ // Fast path with no allocations and single frame.
+
+ if err := c.prepWrite(messageType); err != nil {
+ return err
+ }
+ mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize}
n := copy(c.writeBuf[mw.pos:], data)
mw.pos += n
data = data[n:]
- err = mw.flushFrame(true, data)
+ return mw.flushFrame(true, data)
+ }
+
+ w, err := c.NextWriter(messageType)
+ if err != nil {
return err
}
if _, err = w.Write(data); err != nil {
diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml
index d7b60f899..1a4656c53 100644
--- a/vendor/github.com/lib/pq/.travis.yml
+++ b/vendor/github.com/lib/pq/.travis.yml
@@ -1,10 +1,11 @@
language: go
go:
- - 1.5
- - 1.6
- - 1.7
- - tip
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - master
sudo: true
@@ -15,6 +16,7 @@ env:
- PQSSLCERTTEST_PATH=$PWD/certs
- PGHOST=127.0.0.1
matrix:
+ - PGVERSION=9.6
- PGVERSION=9.5
- PGVERSION=9.4
- PGVERSION=9.3
@@ -39,5 +41,5 @@ script:
- >
goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
- go vet ./...
- - PQTEST_BINARY_PARAMETERS=no go test -v ./...
- - PQTEST_BINARY_PARAMETERS=yes go test -v ./...
+ - PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
+ - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...
diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go
index 27eb07a9e..e7b2145d6 100644
--- a/vendor/github.com/lib/pq/array.go
+++ b/vendor/github.com/lib/pq/array.go
@@ -70,6 +70,9 @@ func (a *BoolArray) Scan(src interface{}) error {
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
+ case nil:
+ *a = nil
+ return nil
}
return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
@@ -80,7 +83,7 @@ func (a *BoolArray) scanBytes(src []byte) error {
if err != nil {
return err
}
- if len(elems) == 0 {
+ if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(BoolArray, len(elems))
@@ -141,6 +144,9 @@ func (a *ByteaArray) Scan(src interface{}) error {
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
+ case nil:
+ *a = nil
+ return nil
}
return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
@@ -151,7 +157,7 @@ func (a *ByteaArray) scanBytes(src []byte) error {
if err != nil {
return err
}
- if len(elems) == 0 {
+ if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(ByteaArray, len(elems))
@@ -210,6 +216,9 @@ func (a *Float64Array) Scan(src interface{}) error {
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
+ case nil:
+ *a = nil
+ return nil
}
return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
@@ -220,7 +229,7 @@ func (a *Float64Array) scanBytes(src []byte) error {
if err != nil {
return err
}
- if len(elems) == 0 {
+ if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Float64Array, len(elems))
@@ -320,6 +329,11 @@ func (a GenericArray) Scan(src interface{}) error {
return a.scanBytes(src, dv)
case string:
return a.scanBytes([]byte(src), dv)
+ case nil:
+ if dv.Kind() == reflect.Slice {
+ dv.Set(reflect.Zero(dv.Type()))
+ return nil
+ }
}
return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
@@ -386,7 +400,13 @@ func (a GenericArray) Value() (driver.Value, error) {
rv := reflect.ValueOf(a.A)
- if k := rv.Kind(); k != reflect.Array && k != reflect.Slice {
+ switch rv.Kind() {
+ case reflect.Slice:
+ if rv.IsNil() {
+ return nil, nil
+ }
+ case reflect.Array:
+ default:
return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
}
@@ -412,6 +432,9 @@ func (a *Int64Array) Scan(src interface{}) error {
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
+ case nil:
+ *a = nil
+ return nil
}
return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
@@ -422,7 +445,7 @@ func (a *Int64Array) scanBytes(src []byte) error {
if err != nil {
return err
}
- if len(elems) == 0 {
+ if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Int64Array, len(elems))
@@ -470,6 +493,9 @@ func (a *StringArray) Scan(src interface{}) error {
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
+ case nil:
+ *a = nil
+ return nil
}
return fmt.Errorf("pq: cannot convert %T to StringArray", src)
@@ -480,7 +506,7 @@ func (a *StringArray) scanBytes(src []byte) error {
if err != nil {
return err
}
- if len(elems) == 0 {
+ if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(StringArray, len(elems))
@@ -639,6 +665,9 @@ Element:
for i < len(src) {
switch src[i] {
case '{':
+ if depth == len(dims) {
+ break Element
+ }
depth++
dims[depth-1] = 0
i++
@@ -680,11 +709,11 @@ Element:
}
for i < len(src) {
- if bytes.HasPrefix(src[i:], del) {
+ if bytes.HasPrefix(src[i:], del) && depth > 0 {
dims[depth-1]++
i += len(del)
goto Element
- } else if src[i] == '}' {
+ } else if src[i] == '}' && depth > 0 {
dims[depth-1]++
depth--
i++
diff --git a/vendor/github.com/lib/pq/array_test.go b/vendor/github.com/lib/pq/array_test.go
index 96402fd4a..10b843184 100644
--- a/vendor/github.com/lib/pq/array_test.go
+++ b/vendor/github.com/lib/pq/array_test.go
@@ -70,6 +70,10 @@ func TestParseArrayError(t *testing.T) {
{`{,}`, "unexpected ',' at offset 1"},
{`{,x}`, "unexpected ',' at offset 1"},
{`{x,}`, "unexpected '}' at offset 3"},
+ {`{x,{`, "unexpected '{' at offset 3"},
+ {`{x},`, "unexpected ',' at offset 3"},
+ {`{x}}`, "unexpected '}' at offset 3"},
+ {`{{x}`, "expected '}' at offset 4"},
{`{""x}`, "unexpected 'x' at offset 3"},
{`{{a},{b,c}}`, "multidimensional arrays must have elements with matching dimensions"},
} {
@@ -171,6 +175,30 @@ func TestBoolArrayScanUnsupported(t *testing.T) {
}
}
+func TestBoolArrayScanEmpty(t *testing.T) {
+ var arr BoolArray
+ err := arr.Scan(`{}`)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr == nil || len(arr) != 0 {
+ t.Errorf("Expected empty, got %#v", arr)
+ }
+}
+
+func TestBoolArrayScanNil(t *testing.T) {
+ arr := BoolArray{true, true, true}
+ err := arr.Scan(nil)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr != nil {
+ t.Errorf("Expected nil, got %+v", arr)
+ }
+}
+
var BoolArrayStringTests = []struct {
str string
arr BoolArray
@@ -300,6 +328,30 @@ func TestByteaArrayScanUnsupported(t *testing.T) {
}
}
+func TestByteaArrayScanEmpty(t *testing.T) {
+ var arr ByteaArray
+ err := arr.Scan(`{}`)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr == nil || len(arr) != 0 {
+ t.Errorf("Expected empty, got %#v", arr)
+ }
+}
+
+func TestByteaArrayScanNil(t *testing.T) {
+ arr := ByteaArray{{2}, {6}, {0, 0}}
+ err := arr.Scan(nil)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr != nil {
+ t.Errorf("Expected nil, got %+v", arr)
+ }
+}
+
var ByteaArrayStringTests = []struct {
str string
arr ByteaArray
@@ -430,6 +482,30 @@ func TestFloat64ArrayScanUnsupported(t *testing.T) {
}
}
+func TestFloat64ArrayScanEmpty(t *testing.T) {
+ var arr Float64Array
+ err := arr.Scan(`{}`)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr == nil || len(arr) != 0 {
+ t.Errorf("Expected empty, got %#v", arr)
+ }
+}
+
+func TestFloat64ArrayScanNil(t *testing.T) {
+ arr := Float64Array{5, 5, 5}
+ err := arr.Scan(nil)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr != nil {
+ t.Errorf("Expected nil, got %+v", arr)
+ }
+}
+
var Float64ArrayStringTests = []struct {
str string
arr Float64Array
@@ -560,6 +636,30 @@ func TestInt64ArrayScanUnsupported(t *testing.T) {
}
}
+func TestInt64ArrayScanEmpty(t *testing.T) {
+ var arr Int64Array
+ err := arr.Scan(`{}`)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr == nil || len(arr) != 0 {
+ t.Errorf("Expected empty, got %#v", arr)
+ }
+}
+
+func TestInt64ArrayScanNil(t *testing.T) {
+ arr := Int64Array{5, 5, 5}
+ err := arr.Scan(nil)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr != nil {
+ t.Errorf("Expected nil, got %+v", arr)
+ }
+}
+
var Int64ArrayStringTests = []struct {
str string
arr Int64Array
@@ -689,6 +789,30 @@ func TestStringArrayScanUnsupported(t *testing.T) {
}
}
+func TestStringArrayScanEmpty(t *testing.T) {
+ var arr StringArray
+ err := arr.Scan(`{}`)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr == nil || len(arr) != 0 {
+ t.Errorf("Expected empty, got %#v", arr)
+ }
+}
+
+func TestStringArrayScanNil(t *testing.T) {
+ arr := StringArray{"x", "x", "x"}
+ err := arr.Scan(nil)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if arr != nil {
+ t.Errorf("Expected nil, got %+v", arr)
+ }
+}
+
var StringArrayStringTests = []struct {
str string
arr StringArray
@@ -811,6 +935,7 @@ func BenchmarkStringArrayValue(b *testing.B) {
func TestGenericArrayScanUnsupported(t *testing.T) {
var s string
var ss []string
+ var nsa [1]sql.NullString
for _, tt := range []struct {
src, dest interface{}
@@ -820,6 +945,7 @@ func TestGenericArrayScanUnsupported(t *testing.T) {
{nil, true, "destination bool is not a pointer to array or slice"},
{nil, &s, "destination *string is not a pointer to array or slice"},
{nil, ss, "destination []string is not a pointer to array or slice"},
+ {nil, &nsa, "<nil> to [1]sql.NullString"},
{true, &ss, "bool to []string"},
{`{{x}}`, &ss, "multidimensional ARRAY[1][1] is not implemented"},
{`{{x},{x}}`, &ss, "multidimensional ARRAY[2][1] is not implemented"},
@@ -862,6 +988,28 @@ func TestGenericArrayScanScannerArrayString(t *testing.T) {
}
}
+func TestGenericArrayScanScannerSliceEmpty(t *testing.T) {
+ var nss []sql.NullString
+
+ if err := (GenericArray{&nss}).Scan(`{}`); err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if nss == nil || len(nss) != 0 {
+ t.Errorf("Expected empty, got %#v", nss)
+ }
+}
+
+func TestGenericArrayScanScannerSliceNil(t *testing.T) {
+ nss := []sql.NullString{{String: ``, Valid: true}, {}}
+
+ if err := (GenericArray{&nss}).Scan(nil); err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if nss != nil {
+ t.Errorf("Expected nil, got %+v", nss)
+ }
+}
+
func TestGenericArrayScanScannerSliceBytes(t *testing.T) {
src, expected, nss := []byte(`{NULL,abc,"\""}`),
[]sql.NullString{{}, {String: `abc`, Valid: true}, {String: `"`, Valid: true}},
@@ -977,6 +1125,22 @@ func TestGenericArrayValue(t *testing.T) {
t.Errorf("Expected nil, got %q", result)
}
+ for _, tt := range []interface{}{
+ []bool(nil),
+ [][]int(nil),
+ []*int(nil),
+ []sql.NullString(nil),
+ } {
+ result, err := GenericArray{tt}.Value()
+
+ if err != nil {
+ t.Fatalf("Expected no error for %#v, got %v", tt, err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil for %#v, got %q", tt, result)
+ }
+ }
+
Tilde := func(v driver.Value) FuncArrayValuer {
return FuncArrayValuer{
func() string { return "~" },
diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go
index ca88dc8c6..3c8f77cb6 100644
--- a/vendor/github.com/lib/pq/conn.go
+++ b/vendor/github.com/lib/pq/conn.go
@@ -3,15 +3,12 @@ package pq
import (
"bufio"
"crypto/md5"
- "crypto/tls"
- "crypto/x509"
"database/sql"
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"io"
- "io/ioutil"
"net"
"os"
"os/user"
@@ -101,6 +98,15 @@ type conn struct {
namei int
scratch [512]byte
txnStatus transactionStatus
+ txnClosed chan<- struct{}
+
+ // Save connection arguments to use during CancelRequest.
+ dialer Dialer
+ opts values
+
+ // Cancellation key data for use with CancelRequest messages.
+ processID int
+ secretKey int
parameterStatus parameterStatus
@@ -310,7 +316,10 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
}
}
- cn := &conn{}
+ cn := &conn{
+ opts: o,
+ dialer: d,
+ }
err = cn.handleDriverSettings(o)
if err != nil {
return nil, err
@@ -532,7 +541,15 @@ func (cn *conn) Begin() (_ driver.Tx, err error) {
return cn, nil
}
+func (cn *conn) closeTxn() {
+ if cn.txnClosed != nil {
+ close(cn.txnClosed)
+ cn.txnClosed = nil
+ }
+}
+
func (cn *conn) Commit() (err error) {
+ defer cn.closeTxn()
if cn.bad {
return driver.ErrBadConn
}
@@ -568,6 +585,7 @@ func (cn *conn) Commit() (err error) {
}
func (cn *conn) Rollback() (err error) {
+ defer cn.closeTxn()
if cn.bad {
return driver.ErrBadConn
}
@@ -718,6 +736,8 @@ func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, c
case oid.T_int4:
fallthrough
case oid.T_int2:
+ fallthrough
+ case oid.T_uuid:
colFmts[i] = formatBinary
allText = false
@@ -797,7 +817,11 @@ func (cn *conn) Close() (err error) {
}
// Implement the "Queryer" interface
-func (cn *conn) Query(query string, args []driver.Value) (_ driver.Rows, err error) {
+func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ return cn.query(query, args)
+}
+
+func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) {
if cn.bad {
return nil, driver.ErrBadConn
}
@@ -1000,42 +1024,12 @@ func (cn *conn) recv1() (t byte, r *readBuf) {
}
func (cn *conn) ssl(o values) {
- verifyCaOnly := false
- tlsConf := tls.Config{}
- switch mode := o.Get("sslmode"); mode {
- // "require" is the default.
- case "", "require":
- // We must skip TLS's own verification since it requires full
- // verification since Go 1.3.
- tlsConf.InsecureSkipVerify = true
-
- // From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
- // Note: For backwards compatibility with earlier versions of PostgreSQL, if a
- // root CA file exists, the behavior of sslmode=require will be the same as
- // that of verify-ca, meaning the server certificate is validated against the
- // CA. Relying on this behavior is discouraged, and applications that need
- // certificate validation should always use verify-ca or verify-full.
- if _, err := os.Stat(o.Get("sslrootcert")); err == nil {
- verifyCaOnly = true
- } else {
- o.Set("sslrootcert", "")
- }
- case "verify-ca":
- // We must skip TLS's own verification since it requires full
- // verification since Go 1.3.
- tlsConf.InsecureSkipVerify = true
- verifyCaOnly = true
- case "verify-full":
- tlsConf.ServerName = o.Get("host")
- case "disable":
+ upgrade := ssl(o)
+ if upgrade == nil {
+ // Nothing to do
return
- default:
- errorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
}
- cn.setupSSLClientCertificates(&tlsConf, o)
- cn.setupSSLCA(&tlsConf, o)
-
w := cn.writeBuf(0)
w.int32(80877103)
cn.sendStartupPacket(w)
@@ -1050,114 +1044,7 @@ func (cn *conn) ssl(o values) {
panic(ErrSSLNotSupported)
}
- client := tls.Client(cn.c, &tlsConf)
- if verifyCaOnly {
- cn.verifyCA(client, &tlsConf)
- }
- cn.c = client
-}
-
-// verifyCA carries out a TLS handshake to the server and verifies the
-// presented certificate against the effective CA, i.e. the one specified in
-// sslrootcert or the system CA if sslrootcert was not specified.
-func (cn *conn) verifyCA(client *tls.Conn, tlsConf *tls.Config) {
- err := client.Handshake()
- if err != nil {
- panic(err)
- }
- certs := client.ConnectionState().PeerCertificates
- opts := x509.VerifyOptions{
- DNSName: client.ConnectionState().ServerName,
- Intermediates: x509.NewCertPool(),
- Roots: tlsConf.RootCAs,
- }
- for i, cert := range certs {
- if i == 0 {
- continue
- }
- opts.Intermediates.AddCert(cert)
- }
- _, err = certs[0].Verify(opts)
- if err != nil {
- panic(err)
- }
-}
-
-// This function sets up SSL client certificates based on either the "sslkey"
-// and "sslcert" settings (possibly set via the environment variables PGSSLKEY
-// and PGSSLCERT, respectively), or if they aren't set, from the .postgresql
-// directory in the user's home directory. If the file paths are set
-// explicitly, the files must exist. The key file must also not be
-// world-readable, or this function will panic with
-// ErrSSLKeyHasWorldPermissions.
-func (cn *conn) setupSSLClientCertificates(tlsConf *tls.Config, o values) {
- var missingOk bool
-
- sslkey := o.Get("sslkey")
- sslcert := o.Get("sslcert")
- if sslkey != "" && sslcert != "" {
- // If the user has set an sslkey and sslcert, they *must* exist.
- missingOk = false
- } else {
- // Automatically load certificates from ~/.postgresql.
- user, err := user.Current()
- if err != nil {
- // user.Current() might fail when cross-compiling. We have to
- // ignore the error and continue without client certificates, since
- // we wouldn't know where to load them from.
- return
- }
-
- sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
- sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
- missingOk = true
- }
-
- // Check that both files exist, and report the error or stop, depending on
- // which behaviour we want. Note that we don't do any more extensive
- // checks than this (such as checking that the paths aren't directories);
- // LoadX509KeyPair() will take care of the rest.
- keyfinfo, err := os.Stat(sslkey)
- if err != nil && missingOk {
- return
- } else if err != nil {
- panic(err)
- }
- _, err = os.Stat(sslcert)
- if err != nil && missingOk {
- return
- } else if err != nil {
- panic(err)
- }
-
- // If we got this far, the key file must also have the correct permissions
- kmode := keyfinfo.Mode()
- if kmode != kmode&0600 {
- panic(ErrSSLKeyHasWorldPermissions)
- }
-
- cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
- if err != nil {
- panic(err)
- }
- tlsConf.Certificates = []tls.Certificate{cert}
-}
-
-// Sets up RootCAs in the TLS configuration if sslrootcert is set.
-func (cn *conn) setupSSLCA(tlsConf *tls.Config, o values) {
- if sslrootcert := o.Get("sslrootcert"); sslrootcert != "" {
- tlsConf.RootCAs = x509.NewCertPool()
-
- cert, err := ioutil.ReadFile(sslrootcert)
- if err != nil {
- panic(err)
- }
-
- ok := tlsConf.RootCAs.AppendCertsFromPEM(cert)
- if !ok {
- errorf("couldn't parse pem in sslrootcert")
- }
- }
+ cn.c = upgrade(cn.c)
}
// isDriverSetting returns true iff a setting is purely for configuring the
@@ -1212,6 +1099,7 @@ func (cn *conn) startup(o values) {
t, r := cn.recv()
switch t {
case 'K':
+ cn.processBackendKeyData(r)
case 'S':
cn.processParameterStatus(r)
case 'R':
@@ -1439,6 +1327,7 @@ func (cn *conn) parseComplete(commandTag string) (driver.Result, string) {
type rows struct {
cn *conn
+ closed chan<- struct{}
colNames []string
colTyps []oid.Oid
colFmts []format
@@ -1447,6 +1336,9 @@ type rows struct {
}
func (rs *rows) Close() error {
+ if rs.closed != nil {
+ defer close(rs.closed)
+ }
// no need to look at cn.bad as Next() will
for {
err := rs.Next(nil)
@@ -1651,6 +1543,11 @@ func (cn *conn) readReadyForQuery() {
}
}
+func (c *conn) processBackendKeyData(r *readBuf) {
+ c.processID = r.int32()
+ c.secretKey = r.int32()
+}
+
func (cn *conn) readParseResponse() {
t, r := cn.recv1()
switch t {
diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go
new file mode 100644
index 000000000..0aca1d002
--- /dev/null
+++ b/vendor/github.com/lib/pq/conn_go18.go
@@ -0,0 +1,92 @@
+// +build go1.8
+
+package pq
+
+import (
+ "context"
+ "database/sql/driver"
+ "errors"
+)
+
+// Implement the "QueryerContext" interface
+func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ list := make([]driver.Value, len(args))
+ for i, nv := range args {
+ list[i] = nv.Value
+ }
+ var closed chan<- struct{}
+ if ctx.Done() != nil {
+ closed = watchCancel(ctx, cn.cancel)
+ }
+ r, err := cn.query(query, list)
+ if err != nil {
+ return nil, err
+ }
+ r.closed = closed
+ return r, nil
+}
+
+// Implement the "ExecerContext" interface
+func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ list := make([]driver.Value, len(args))
+ for i, nv := range args {
+ list[i] = nv.Value
+ }
+
+ if ctx.Done() != nil {
+ closed := watchCancel(ctx, cn.cancel)
+ defer close(closed)
+ }
+
+ return cn.Exec(query, list)
+}
+
+// Implement the "ConnBeginTx" interface
+func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if opts.Isolation != 0 {
+ return nil, errors.New("isolation levels not supported")
+ }
+ if opts.ReadOnly {
+ return nil, errors.New("read-only transactions not supported")
+ }
+ tx, err := cn.Begin()
+ if err != nil {
+ return nil, err
+ }
+ if ctx.Done() != nil {
+ cn.txnClosed = watchCancel(ctx, cn.cancel)
+ }
+ return tx, nil
+}
+
+func watchCancel(ctx context.Context, cancel func()) chan<- struct{} {
+ closed := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ cancel()
+ case <-closed:
+ }
+ }()
+ return closed
+}
+
+func (cn *conn) cancel() {
+ var err error
+ can := &conn{}
+ can.c, err = dial(cn.dialer, cn.opts)
+ if err != nil {
+ return
+ }
+ can.ssl(cn.opts)
+
+ defer can.errRecover(&err)
+
+ w := can.writeBuf(0)
+ w.int32(80877102) // cancel request code
+ w.int32(cn.processID)
+ w.int32(cn.secretKey)
+
+ can.sendStartupPacket(w)
+ _ = can.c.Close()
+}
diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go
index 86a7127e1..345c2398f 100644
--- a/vendor/github.com/lib/pq/copy.go
+++ b/vendor/github.com/lib/pq/copy.go
@@ -97,13 +97,13 @@ awaitCopyInResponse:
err = parseError(r)
case 'Z':
if err == nil {
- cn.bad = true
+ ci.setBad()
errorf("unexpected ReadyForQuery in response to COPY")
}
cn.processReadyForQuery(r)
return nil, err
default:
- cn.bad = true
+ ci.setBad()
errorf("unknown response for copy query: %q", t)
}
}
@@ -122,7 +122,7 @@ awaitCopyInResponse:
cn.processReadyForQuery(r)
return nil, err
default:
- cn.bad = true
+ ci.setBad()
errorf("unknown response for CopyFail: %q", t)
}
}
@@ -143,7 +143,7 @@ func (ci *copyin) resploop() {
var r readBuf
t, err := ci.cn.recvMessage(&r)
if err != nil {
- ci.cn.bad = true
+ ci.setBad()
ci.setError(err)
ci.done <- true
return
@@ -161,7 +161,7 @@ func (ci *copyin) resploop() {
err := parseError(&r)
ci.setError(err)
default:
- ci.cn.bad = true
+ ci.setBad()
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
ci.done <- true
return
@@ -169,6 +169,19 @@ func (ci *copyin) resploop() {
}
}
+func (ci *copyin) setBad() {
+ ci.Lock()
+ ci.cn.bad = true
+ ci.Unlock()
+}
+
+func (ci *copyin) isBad() bool {
+ ci.Lock()
+ b := ci.cn.bad
+ ci.Unlock()
+ return b
+}
+
func (ci *copyin) isErrorSet() bool {
ci.Lock()
isSet := (ci.err != nil)
@@ -206,7 +219,7 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
return nil, errCopyInClosed
}
- if ci.cn.bad {
+ if ci.isBad() {
return nil, driver.ErrBadConn
}
defer ci.cn.errRecover(&err)
@@ -244,7 +257,7 @@ func (ci *copyin) Close() (err error) {
}
ci.closed = true
- if ci.cn.bad {
+ if ci.isBad() {
return driver.ErrBadConn
}
defer ci.cn.errRecover(&err)
diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go
index 29e8f6ff7..88a322cda 100644
--- a/vendor/github.com/lib/pq/encode.go
+++ b/vendor/github.com/lib/pq/encode.go
@@ -76,6 +76,12 @@ func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) inter
return int64(int32(binary.BigEndian.Uint32(s)))
case oid.T_int2:
return int64(int16(binary.BigEndian.Uint16(s)))
+ case oid.T_uuid:
+ b, err := decodeUUIDBinary(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
default:
errorf("don't know how to decode binary parameter of type %d", uint32(typ))
@@ -471,7 +477,7 @@ func FormatTimestamp(t time.Time) []byte {
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
- b := []byte(t.Format(time.RFC3339Nano))
+ b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
_, offset := t.Zone()
offset = offset % 60
diff --git a/vendor/github.com/lib/pq/encode_test.go b/vendor/github.com/lib/pq/encode_test.go
index 1e89f7f6f..b1531ec29 100644
--- a/vendor/github.com/lib/pq/encode_test.go
+++ b/vendor/github.com/lib/pq/encode_test.go
@@ -141,22 +141,22 @@ var formatTimeTests = []struct {
time time.Time
expected string
}{
- {time.Time{}, "0001-01-01T00:00:00Z"},
- {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03T04:05:06.123456789Z"},
- {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03T04:05:06.123456789+02:00"},
- {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03T04:05:06.123456789-06:00"},
- {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03T04:05:06-07:30:09"},
-
- {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z"},
- {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00"},
- {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00"},
-
- {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03T04:05:06.123456789Z BC"},
- {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03T04:05:06.123456789+02:00 BC"},
- {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03T04:05:06.123456789-06:00 BC"},
-
- {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09"},
- {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03T04:05:06-07:30:09 BC"},
+ {time.Time{}, "0001-01-01 00:00:00Z"},
+ {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03 04:05:06.123456789Z"},
+ {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03 04:05:06.123456789+02:00"},
+ {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03 04:05:06.123456789-06:00"},
+ {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03 04:05:06-07:30:09"},
+
+ {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z"},
+ {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00"},
+ {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00"},
+
+ {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z BC"},
+ {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00 BC"},
+ {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00 BC"},
+
+ {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09"},
+ {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09 BC"},
}
func TestFormatTs(t *testing.T) {
@@ -168,6 +168,26 @@ func TestFormatTs(t *testing.T) {
}
}
+func TestFormatTsBackend(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ var str string
+ err := db.QueryRow("SELECT '2001-02-03T04:05:06.007-08:09:10'::time::text").Scan(&str)
+ if err == nil {
+ t.Fatalf("PostgreSQL is accepting an ISO timestamp input for time")
+ }
+
+ for i, tt := range formatTimeTests {
+ for _, typ := range []string{"date", "time", "timetz", "timestamp", "timestamptz"} {
+ err = db.QueryRow("SELECT $1::"+typ+"::text", tt.time).Scan(&str)
+ if err != nil {
+ t.Errorf("%d: incorrect time format for %v on the backend: %v", i, typ, err)
+ }
+ }
+ }
+}
+
func TestTimestampWithTimeZone(t *testing.T) {
db := openTestConn(t)
defer db.Close()
diff --git a/vendor/github.com/lib/pq/go18_test.go b/vendor/github.com/lib/pq/go18_test.go
index df3e496b5..15546d865 100644
--- a/vendor/github.com/lib/pq/go18_test.go
+++ b/vendor/github.com/lib/pq/go18_test.go
@@ -2,7 +2,12 @@
package pq
-import "testing"
+import (
+ "context"
+ "database/sql"
+ "testing"
+ "time"
+)
func TestMultipleSimpleQuery(t *testing.T) {
db := openTestConn(t)
@@ -66,3 +71,95 @@ func TestMultipleSimpleQuery(t *testing.T) {
t.Fatal("unexpected result set")
}
}
+
+func TestContextCancelExec(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // Delay execution for just a bit until db.ExecContext has begun.
+ go func() {
+ time.Sleep(time.Millisecond * 10)
+ cancel()
+ }()
+
+ // Not canceled until after the exec has started.
+ if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
+ t.Fatal("expected error")
+ } else if err.Error() != "pq: canceling statement due to user request" {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // Context is already canceled, so error should come before execution.
+ if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
+ t.Fatal("expected error")
+ } else if err.Error() != "context canceled" {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
+func TestContextCancelQuery(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // Delay execution for just a bit until db.QueryContext has begun.
+ go func() {
+ time.Sleep(time.Millisecond * 10)
+ cancel()
+ }()
+
+ // Not canceled until after the exec has started.
+ if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
+ t.Fatal("expected error")
+ } else if err.Error() != "pq: canceling statement due to user request" {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // Context is already canceled, so error should come before execution.
+ if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
+ t.Fatal("expected error")
+ } else if err.Error() != "context canceled" {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
+func TestContextCancelBegin(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ tx, err := db.BeginTx(ctx, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Delay execution for just a bit until tx.Exec has begun.
+ go func() {
+ time.Sleep(time.Millisecond * 10)
+ cancel()
+ }()
+
+ // Not canceled until after the exec has started.
+ if _, err := tx.Exec("select pg_sleep(1)"); err == nil {
+ t.Fatal("expected error")
+ } else if err.Error() != "pq: canceling statement due to user request" {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // Transaction is canceled, so expect an error.
+ if _, err := tx.Query("select pg_sleep(1)"); err == nil {
+ t.Fatal("expected error")
+ } else if err != sql.ErrTxDone {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // Context is canceled, so cannot begin a transaction.
+ if _, err := db.BeginTx(ctx, nil); err == nil {
+ t.Fatal("expected error")
+ } else if err.Error() != "context canceled" {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go
new file mode 100644
index 000000000..b282ebd92
--- /dev/null
+++ b/vendor/github.com/lib/pq/ssl.go
@@ -0,0 +1,175 @@
+package pq
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/user"
+ "path/filepath"
+)
+
+// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
+// related settings. The function is nil when no upgrade should take place.
+func ssl(o values) func(net.Conn) net.Conn {
+ verifyCaOnly := false
+ tlsConf := tls.Config{}
+ switch mode := o.Get("sslmode"); mode {
+ // "require" is the default.
+ case "", "require":
+ // We must skip TLS's own verification since it requires full
+ // verification since Go 1.3.
+ tlsConf.InsecureSkipVerify = true
+
+ // From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
+ // Note: For backwards compatibility with earlier versions of PostgreSQL, if a
+ // root CA file exists, the behavior of sslmode=require will be the same as
+ // that of verify-ca, meaning the server certificate is validated against the
+ // CA. Relying on this behavior is discouraged, and applications that need
+ // certificate validation should always use verify-ca or verify-full.
+ if _, err := os.Stat(o.Get("sslrootcert")); err == nil {
+ verifyCaOnly = true
+ } else {
+ o.Set("sslrootcert", "")
+ }
+ case "verify-ca":
+ // We must skip TLS's own verification since it requires full
+ // verification since Go 1.3.
+ tlsConf.InsecureSkipVerify = true
+ verifyCaOnly = true
+ case "verify-full":
+ tlsConf.ServerName = o.Get("host")
+ case "disable":
+ return nil
+ default:
+ errorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
+ }
+
+ sslClientCertificates(&tlsConf, o)
+ sslCertificateAuthority(&tlsConf, o)
+ sslRenegotiation(&tlsConf)
+
+ return func(conn net.Conn) net.Conn {
+ client := tls.Client(conn, &tlsConf)
+ if verifyCaOnly {
+ sslVerifyCertificateAuthority(client, &tlsConf)
+ }
+ return client
+ }
+}
+
+// sslClientCertificates adds the certificate specified in the "sslcert" and
+// "sslkey" settings, or if they aren't set, from the .postgresql directory
+// in the user's home directory. The configured files must exist and have
+// the correct permissions.
+func sslClientCertificates(tlsConf *tls.Config, o values) {
+ sslkey := o.Get("sslkey")
+ sslcert := o.Get("sslcert")
+
+ var cinfo, kinfo os.FileInfo
+ var err error
+
+ if sslcert != "" && sslkey != "" {
+ // Check that both files exist. Note that we don't do any more extensive
+ // checks than this (such as checking that the paths aren't directories);
+ // LoadX509KeyPair() will take care of the rest.
+ cinfo, err = os.Stat(sslcert)
+ if err != nil {
+ panic(err)
+ }
+
+ kinfo, err = os.Stat(sslkey)
+ if err != nil {
+ panic(err)
+ }
+ } else {
+ // Automatically find certificates from ~/.postgresql
+ sslcert, sslkey, cinfo, kinfo = sslHomeCertificates()
+
+ if cinfo == nil || kinfo == nil {
+ // No certificates to load
+ return
+ }
+ }
+
+ // The files must also have the correct permissions
+ sslCertificatePermissions(cinfo, kinfo)
+
+ cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
+ if err != nil {
+ panic(err)
+ }
+ tlsConf.Certificates = []tls.Certificate{cert}
+}
+
+// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
+func sslCertificateAuthority(tlsConf *tls.Config, o values) {
+ if sslrootcert := o.Get("sslrootcert"); sslrootcert != "" {
+ tlsConf.RootCAs = x509.NewCertPool()
+
+ cert, err := ioutil.ReadFile(sslrootcert)
+ if err != nil {
+ panic(err)
+ }
+
+ ok := tlsConf.RootCAs.AppendCertsFromPEM(cert)
+ if !ok {
+ errorf("couldn't parse pem in sslrootcert")
+ }
+ }
+}
+
+// sslHomeCertificates returns the path and stats of certificates in the current
+// user's home directory.
+func sslHomeCertificates() (cert, key string, cinfo, kinfo os.FileInfo) {
+ user, err := user.Current()
+
+ if err != nil {
+ // user.Current() might fail when cross-compiling. We have to ignore the
+ // error and continue without client certificates, since we wouldn't know
+ // from where to load them.
+ return
+ }
+
+ cert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
+ key = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
+
+ cinfo, err = os.Stat(cert)
+ if err != nil {
+ cinfo = nil
+ }
+
+ kinfo, err = os.Stat(key)
+ if err != nil {
+ kinfo = nil
+ }
+
+ return
+}
+
+// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
+// verifies the presented certificate against the CA, i.e. the one specified in
+// sslrootcert or the system CA if sslrootcert was not specified.
+func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) {
+ err := client.Handshake()
+ if err != nil {
+ panic(err)
+ }
+ certs := client.ConnectionState().PeerCertificates
+ opts := x509.VerifyOptions{
+ DNSName: client.ConnectionState().ServerName,
+ Intermediates: x509.NewCertPool(),
+ Roots: tlsConf.RootCAs,
+ }
+ for i, cert := range certs {
+ if i == 0 {
+ continue
+ }
+ opts.Intermediates.AddCert(cert)
+ }
+ _, err = certs[0].Verify(opts)
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/lib/pq/ssl_go1.7.go b/vendor/github.com/lib/pq/ssl_go1.7.go
new file mode 100644
index 000000000..d7ba43b32
--- /dev/null
+++ b/vendor/github.com/lib/pq/ssl_go1.7.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package pq
+
+import "crypto/tls"
+
+// Accept renegotiation requests initiated by the backend.
+//
+// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
+// the default configuration of older versions has it enabled. Redshift
+// also initiates renegotiations and cannot be reconfigured.
+func sslRenegotiation(conf *tls.Config) {
+ conf.Renegotiation = tls.RenegotiateFreelyAsClient
+}
diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go
new file mode 100644
index 000000000..33076a8da
--- /dev/null
+++ b/vendor/github.com/lib/pq/ssl_permissions.go
@@ -0,0 +1,16 @@
+// +build !windows
+
+package pq
+
+import "os"
+
+// sslCertificatePermissions checks the permissions on user-supplied certificate
+// files. The key file should have very little access.
+//
+// libpq does not check key file permissions on Windows.
+func sslCertificatePermissions(cert, key os.FileInfo) {
+ kmode := key.Mode()
+ if kmode != kmode&0600 {
+ panic(ErrSSLKeyHasWorldPermissions)
+ }
+}
diff --git a/vendor/github.com/lib/pq/ssl_renegotiation.go b/vendor/github.com/lib/pq/ssl_renegotiation.go
new file mode 100644
index 000000000..85ed5e437
--- /dev/null
+++ b/vendor/github.com/lib/pq/ssl_renegotiation.go
@@ -0,0 +1,8 @@
+// +build !go1.7
+
+package pq
+
+import "crypto/tls"
+
+// Renegotiation is not supported by crypto/tls until Go 1.7.
+func sslRenegotiation(*tls.Config) {}
diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go
new file mode 100644
index 000000000..529daed22
--- /dev/null
+++ b/vendor/github.com/lib/pq/ssl_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package pq
+
+import "os"
+
+// sslCertificatePermissions checks the permissions on user-supplied certificate
+// files. In libpq, this is a no-op on Windows.
+func sslCertificatePermissions(cert, key os.FileInfo) {}
diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go
new file mode 100644
index 000000000..9a1b9e074
--- /dev/null
+++ b/vendor/github.com/lib/pq/uuid.go
@@ -0,0 +1,23 @@
+package pq
+
+import (
+ "encoding/hex"
+ "fmt"
+)
+
+// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format.
+func decodeUUIDBinary(src []byte) ([]byte, error) {
+ if len(src) != 16 {
+ return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
+ }
+
+ dst := make([]byte, 36)
+ dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
+ hex.Encode(dst[0:], src[0:4])
+ hex.Encode(dst[9:], src[4:6])
+ hex.Encode(dst[14:], src[6:8])
+ hex.Encode(dst[19:], src[8:10])
+ hex.Encode(dst[24:], src[10:16])
+
+ return dst, nil
+}
diff --git a/vendor/github.com/lib/pq/uuid_test.go b/vendor/github.com/lib/pq/uuid_test.go
new file mode 100644
index 000000000..9df4a79b0
--- /dev/null
+++ b/vendor/github.com/lib/pq/uuid_test.go
@@ -0,0 +1,46 @@
+package pq
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestDecodeUUIDBinaryError(t *testing.T) {
+ t.Parallel()
+ _, err := decodeUUIDBinary([]byte{0x12, 0x34})
+
+ if err == nil {
+ t.Fatal("Expected error, got none")
+ }
+ if !strings.HasPrefix(err.Error(), "pq:") {
+ t.Errorf("Expected error to start with %q, got %q", "pq:", err.Error())
+ }
+ if !strings.Contains(err.Error(), "bad length: 2") {
+ t.Errorf("Expected error to contain length, got %q", err.Error())
+ }
+}
+
+func BenchmarkDecodeUUIDBinary(b *testing.B) {
+ x := []byte{0x03, 0xa3, 0x52, 0x2f, 0x89, 0x28, 0x49, 0x87, 0x84, 0xd6, 0x93, 0x7b, 0x36, 0xec, 0x27, 0x6f}
+
+ for i := 0; i < b.N; i++ {
+ decodeUUIDBinary(x)
+ }
+}
+
+func TestDecodeUUIDBackend(t *testing.T) {
+ db := openTestConn(t)
+ defer db.Close()
+
+ var s string = "a0ecc91d-a13f-4fe4-9fce-7e09777cc70a"
+ var scanned interface{}
+
+ err := db.QueryRow(`SELECT $1::uuid`, s).Scan(&scanned)
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if !reflect.DeepEqual(scanned, []byte(s)) {
+ t.Errorf("Expected []byte(%q), got %T(%q)", s, scanned, scanned)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml
index 1f056ab7c..11bc9158e 100644
--- a/vendor/github.com/miekg/dns/.travis.yml
+++ b/vendor/github.com/miekg/dns/.travis.yml
@@ -1,7 +1,13 @@
language: go
sudo: false
go:
- - 1.5
- 1.6
+ - 1.7
+
+before_install:
+ # don't use the miekg/dns when testing forks
+ - mkdir -p $GOPATH/src/github.com/miekg
+ - ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true
+
script:
- go test -race -v -bench=.
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
index 0e3356cb9..0c1f1b6a9 100644
--- a/vendor/github.com/miekg/dns/README.md
+++ b/vendor/github.com/miekg/dns/README.md
@@ -1,4 +1,5 @@
-[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) [![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns)
+[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
+[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns)
# Alternative (more granular) approach to a DNS library
@@ -12,7 +13,7 @@ can build servers and resolvers with it.
We try to keep the "master" branch as sane as possible and at the bleeding edge
of standards, avoiding breaking changes wherever reasonable. We support the last
-two versions of Go, currently: 1.5 and 1.6.
+two versions of Go, currently: 1.6 and 1.7.
# Goals
diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go
index cf456165f..3308ec838 100644
--- a/vendor/github.com/miekg/dns/defaults.go
+++ b/vendor/github.com/miekg/dns/defaults.go
@@ -102,11 +102,11 @@ func (dns *Msg) SetAxfr(z string) *Msg {
// SetTsig appends a TSIG RR to the message.
// This is only a skeleton TSIG RR that is added as the last RR in the
// additional section. The Tsig is calculated when the message is being send.
-func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg {
+func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
t := new(TSIG)
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
t.Algorithm = algo
- t.Fudge = 300
+ t.Fudge = fudge
t.TimeSigned = uint64(timesigned)
t.OrigId = dns.Id
dns.Extra = append(dns.Extra, t)
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
index fc0b46925..465b85f37 100644
--- a/vendor/github.com/miekg/dns/edns.go
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -197,7 +197,7 @@ func (e *EDNS0_NSID) String() string { return string(e.Nsid) }
// e := new(dns.EDNS0_SUBNET)
// e.Code = dns.EDNS0SUBNET
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
-// e.NetMask = 32 // 32 for IPV4, 128 for IPv6
+// e.SourceNetMask = 32 // 32 for IPV4, 128 for IPv6
// e.SourceScope = 0
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
index a9acd1e9f..0d8cc6fb3 100644
--- a/vendor/github.com/miekg/dns/msg.go
+++ b/vendor/github.com/miekg/dns/msg.go
@@ -203,12 +203,6 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
bs[j] = bs[j+2]
}
ls -= 2
- } else if bs[i] == 't' {
- bs[i] = '\t'
- } else if bs[i] == 'r' {
- bs[i] = '\r'
- } else if bs[i] == 'n' {
- bs[i] = '\n'
}
escapedDot = bs[i] == '.'
bsFresh = false
@@ -335,10 +329,6 @@ Loop:
fallthrough
case '"', '\\':
s = append(s, '\\', b)
- case '\t':
- s = append(s, '\\', 't')
- case '\r':
- s = append(s, '\\', 'r')
default:
if b < 32 || b >= 127 { // unprintable use \DDD
var buf [3]byte
@@ -431,12 +421,6 @@ func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
msg[offset] = dddToByte(bs[i:])
i += 2
- } else if bs[i] == 't' {
- msg[offset] = '\t'
- } else if bs[i] == 'r' {
- msg[offset] = '\r'
- } else if bs[i] == 'n' {
- msg[offset] = '\n'
} else {
msg[offset] = bs[i]
}
@@ -508,12 +492,6 @@ func unpackTxtString(msg []byte, offset int) (string, int, error) {
switch b {
case '"', '\\':
s = append(s, '\\', b)
- case '\t':
- s = append(s, `\t`...)
- case '\r':
- s = append(s, `\r`...)
- case '\n':
- s = append(s, `\n`...)
default:
if b < 32 || b > 127 { // unprintable
var buf [3]byte
@@ -781,9 +759,6 @@ func (dns *Msg) Unpack(msg []byte) (err error) {
if dh, off, err = unpackMsgHdr(msg, off); err != nil {
return err
}
- if off == len(msg) {
- return ErrTruncated
- }
dns.Id = dh.Id
dns.Response = (dh.Bits & _QR) != 0
@@ -797,6 +772,10 @@ func (dns *Msg) Unpack(msg []byte) (err error) {
dns.CheckingDisabled = (dh.Bits & _CD) != 0
dns.Rcode = int(dh.Bits & 0xF)
+ if off == len(msg) {
+ return ErrTruncated
+ }
+
// Optimistically use the count given to us in the header
dns.Question = make([]Question, 0, int(dh.Qdcount))
diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go
index 35786f22c..c29447a10 100644
--- a/vendor/github.com/miekg/dns/msg_generate.go
+++ b/vendor/github.com/miekg/dns/msg_generate.go
@@ -117,9 +117,9 @@ return off, err
switch {
case st.Tag(i) == `dns:"-"`: // ignored
case st.Tag(i) == `dns:"cdomain-name"`:
- fallthrough
- case st.Tag(i) == `dns:"domain-name"`:
o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n")
+ case st.Tag(i) == `dns:"domain-name"`:
+ o("off, err = PackDomainName(rr.%s, msg, off, compression, false)\n")
case st.Tag(i) == `dns:"a"`:
o("off, err = packDataA(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"aaaa"`:
diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go
index e7a9500cc..494c05377 100644
--- a/vendor/github.com/miekg/dns/msg_helpers.go
+++ b/vendor/github.com/miekg/dns/msg_helpers.go
@@ -263,8 +263,6 @@ func unpackString(msg []byte, off int) (string, int, error) {
switch b {
case '"', '\\':
s = append(s, '\\', b)
- case '\t', '\r', '\n':
- s = append(s, b)
default:
if b < 32 || b > 127 { // unprintable
var buf [3]byte
diff --git a/vendor/github.com/miekg/dns/parse_test.go b/vendor/github.com/miekg/dns/parse_test.go
index ca467a227..dc18b59ce 100644
--- a/vendor/github.com/miekg/dns/parse_test.go
+++ b/vendor/github.com/miekg/dns/parse_test.go
@@ -86,7 +86,7 @@ func TestDomainName(t *testing.T) {
}
func TestDomainNameAndTXTEscapes(t *testing.T) {
- tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', '\t', '\r', '\n', 0, 255}
+ tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', 9, 13, 10, 0, 255}
for _, b := range tests {
rrbytes := []byte{
1, b, 0, // owner
@@ -127,8 +127,8 @@ func TestTXTEscapeParsing(t *testing.T) {
test := [][]string{
{`";"`, `";"`},
{`\;`, `";"`},
- {`"\t"`, `"\t"`},
- {`"\r"`, `"\r"`},
+ {`"\t"`, `"t"`},
+ {`"\r"`, `"r"`},
{`"\ "`, `" "`},
{`"\;"`, `";"`},
{`"\;\""`, `";\""`},
@@ -137,8 +137,9 @@ func TestTXTEscapeParsing(t *testing.T) {
{`"(a\)"`, `"(a)"`},
{`"(a)"`, `"(a)"`},
{`"\048"`, `"0"`},
- {`"\` + "\n" + `"`, `"\n"`},
- {`"\` + "\r" + `"`, `"\r"`},
+ {`"\` + "\t" + `"`, `"\009"`},
+ {`"\` + "\n" + `"`, `"\010"`},
+ {`"\` + "\r" + `"`, `"\013"`},
{`"\` + "\x11" + `"`, `"\017"`},
{`"\'"`, `"'"`},
}
@@ -417,16 +418,16 @@ func TestQuotes(t *testing.T) {
tests := map[string]string{
`t.example.com. IN TXT "a bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a bc\"",
`t.example.com. IN TXT "a
- bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\n bc\"",
+ bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\010 bc\"",
`t.example.com. IN TXT ""`: "t.example.com.\t3600\tIN\tTXT\t\"\"",
`t.example.com. IN TXT "a"`: "t.example.com.\t3600\tIN\tTXT\t\"a\"",
`t.example.com. IN TXT "aa"`: "t.example.com.\t3600\tIN\tTXT\t\"aa\"",
`t.example.com. IN TXT "aaa" ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"",
`t.example.com. IN TXT "abc" "DEF"`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"",
`t.example.com. IN TXT "abc" ( "DEF" )`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"",
- `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa \"",
- `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"",
- `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"",
+ `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"",
+ `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"",
+ `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"",
`t.example.com. IN TXT aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"",
"cid.urn.arpa. NAPTR 100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.",
"cid.urn.arpa. NAPTR 100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.",
diff --git a/vendor/github.com/miekg/dns/sanitize_test.go b/vendor/github.com/miekg/dns/sanitize_test.go
index c108dc694..2ba3fe9a3 100644
--- a/vendor/github.com/miekg/dns/sanitize_test.go
+++ b/vendor/github.com/miekg/dns/sanitize_test.go
@@ -65,7 +65,7 @@ func TestNormalizedString(t *testing.T) {
tests := map[RR]string{
newRR(t, "mIEk.Nl. 3600 IN A 127.0.0.1"): "miek.nl.\tIN\tA\t127.0.0.1",
newRR(t, "m\\ iek.nL. 3600 IN A 127.0.0.1"): "m\\ iek.nl.\tIN\tA\t127.0.0.1",
- newRR(t, "m\\\tIeK.nl. 3600 in A 127.0.0.1"): "m\\tiek.nl.\tIN\tA\t127.0.0.1",
+ newRR(t, "m\\\tIeK.nl. 3600 in A 127.0.0.1"): "m\\009iek.nl.\tIN\tA\t127.0.0.1",
}
for tc, expected := range tests {
n := normalizedString(tc)
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
index 675fc80d8..8d6a1bf24 100644
--- a/vendor/github.com/miekg/dns/scan_rr.go
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -64,74 +64,63 @@ func endingToString(c chan lex, errstr, f string) (string, *ParseError, string)
return s, nil, l.comment
}
-// A remainder of the rdata with embedded spaces, return the parsed string slice (sans the spaces)
-// or an error
+// A remainder of the rdata with embedded spaces, split on unquoted whitespace
+// and return the parsed string slice or an error
func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) {
// Get the remaining data until we see a zNewline
- quote := false
l := <-c
- var s []string
if l.err {
- return s, &ParseError{f, errstr, l}, ""
- }
- switch l.value == zQuote {
- case true: // A number of quoted string
- s = make([]string, 0)
- empty := true
- for l.value != zNewline && l.value != zEOF {
- if l.err {
- return nil, &ParseError{f, errstr, l}, ""
- }
- switch l.value {
- case zString:
- empty = false
- if len(l.token) > 255 {
- // split up tokens that are larger than 255 into 255-chunks
- sx := []string{}
- p, i := 0, 255
- for {
- if i <= len(l.token) {
- sx = append(sx, l.token[p:i])
- } else {
- sx = append(sx, l.token[p:])
- break
-
- }
- p, i = p+255, i+255
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+
+ // Build the slice
+ s := make([]string, 0)
+ quote := false
+ empty := false
+ for l.value != zNewline && l.value != zEOF {
+ if l.err {
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+ switch l.value {
+ case zString:
+ empty = false
+ if len(l.token) > 255 {
+ // split up tokens that are larger than 255 into 255-chunks
+ sx := []string{}
+ p, i := 0, 255
+ for {
+ if i <= len(l.token) {
+ sx = append(sx, l.token[p:i])
+ } else {
+ sx = append(sx, l.token[p:])
+ break
+
}
- s = append(s, sx...)
- break
+ p, i = p+255, i+255
}
+ s = append(s, sx...)
+ break
+ }
- s = append(s, l.token)
- case zBlank:
- if quote {
- // zBlank can only be seen in between txt parts.
- return nil, &ParseError{f, errstr, l}, ""
- }
- case zQuote:
- if empty && quote {
- s = append(s, "")
- }
- quote = !quote
- empty = true
- default:
+ s = append(s, l.token)
+ case zBlank:
+ if quote {
+ // zBlank can only be seen in between txt parts.
return nil, &ParseError{f, errstr, l}, ""
}
- l = <-c
- }
- if quote {
- return nil, &ParseError{f, errstr, l}, ""
- }
- case false: // Unquoted text record
- s = make([]string, 1)
- for l.value != zNewline && l.value != zEOF {
- if l.err {
- return s, &ParseError{f, errstr, l}, ""
+ case zQuote:
+ if empty && quote {
+ s = append(s, "")
}
- s[0] += l.token
- l = <-c
+ quote = !quote
+ empty = true
+ default:
+ return nil, &ParseError{f, errstr, l}, ""
}
+ l = <-c
+ }
+ if quote {
+ return nil, &ParseError{f, errstr, l}, ""
}
return s, nil, l.comment
}
@@ -2027,9 +2016,12 @@ func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr.Hdr = h
s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f)
if e != nil {
- return nil, e, ""
+ return nil, e, c1
+ }
+ if ln := len(s); ln == 0 {
+ return rr, nil, c1
}
- rr.Uinfo = s[0] // silently discard anything above
+ rr.Uinfo = s[0] // silently discard anything after the first character-string
return rr, nil, c1
}
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
index 1d40ee56d..0ca6e008c 100644
--- a/vendor/github.com/miekg/dns/server.go
+++ b/vendor/github.com/miekg/dns/server.go
@@ -339,7 +339,7 @@ func (srv *Server) ListenAndServe() error {
network := "tcp"
if srv.Net == "tcp4-tls" {
network = "tcp4"
- } else if srv.Net == "tcp6" {
+ } else if srv.Net == "tcp6-tls" {
network = "tcp6"
}
diff --git a/vendor/github.com/miekg/dns/server_test.go b/vendor/github.com/miekg/dns/server_test.go
index 1b5cbc97e..098be2ebe 100644
--- a/vendor/github.com/miekg/dns/server_test.go
+++ b/vendor/github.com/miekg/dns/server_test.go
@@ -677,3 +677,43 @@ zDCJkckCgYEAndqM5KXGk5xYo+MAA1paZcbTUXwaWwjLU+XSRSSoyBEi5xMtfvUb
kFsxKCqxAnBVGEWAvVZAiiTOxleQFjz5RnL0BQp9Lg2cQe+dvuUmIAA=
-----END RSA PRIVATE KEY-----`)
)
+
+func testShutdownBindPort(t *testing.T, protocol string, port string) {
+ handler := NewServeMux()
+ handler.HandleFunc(".", func(w ResponseWriter, r *Msg) {})
+ startedCh := make(chan struct{})
+ s := &Server{
+ Addr: net.JoinHostPort("127.0.0.1", port),
+ Net: protocol,
+ Handler: handler,
+ NotifyStartedFunc: func() {
+ startedCh <- struct{}{}
+ },
+ }
+ go func() {
+ if err := s.ListenAndServe(); err != nil {
+ t.Log(err)
+ }
+ }()
+ <-startedCh
+ t.Logf("DNS server is started on: %s", s.Addr)
+ if err := s.Shutdown(); err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(100 * time.Millisecond)
+ go func() {
+ if err := s.ListenAndServe(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ <-startedCh
+ t.Logf("DNS server is started on: %s", s.Addr)
+}
+
+func TestShutdownBindPortUDP(t *testing.T) {
+ testShutdownBindPort(t, "udp", "1153")
+}
+
+func TestShutdownBindPortTCP(t *testing.T) {
+ testShutdownBindPort(t, "tcp", "1154")
+}
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
index f63a18b33..c8b3191e5 100644
--- a/vendor/github.com/miekg/dns/types.go
+++ b/vendor/github.com/miekg/dns/types.go
@@ -480,12 +480,6 @@ func appendDomainNameByte(s []byte, b byte) []byte {
func appendTXTStringByte(s []byte, b byte) []byte {
switch b {
- case '\t':
- return append(s, '\\', 't')
- case '\r':
- return append(s, '\\', 'r')
- case '\n':
- return append(s, '\\', 'n')
case '"', '\\':
return append(s, '\\', b)
}
@@ -525,17 +519,8 @@ func nextByte(b []byte, offset int) (byte, int) {
return dddToByte(b[offset+1:]), 4
}
}
- // not \ddd, maybe a control char
- switch b[offset+1] {
- case 't':
- return '\t', 2
- case 'r':
- return '\r', 2
- case 'n':
- return '\n', 2
- default:
- return b[offset+1], 2
- }
+ // not \ddd, just an RFC 1035 "quoted" character
+ return b[offset+1], 2
}
type SPF struct {
diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go
index c62d21881..142a80073 100644
--- a/vendor/github.com/miekg/dns/udp_linux.go
+++ b/vendor/github.com/miekg/dns/udp_linux.go
@@ -22,14 +22,17 @@ func setUDPSocketOptions4(conn *net.UDPConn) error {
return err
}
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
+ file.Close()
return err
}
// Calling File() above results in the connection becoming blocking, we must fix that.
// See https://github.com/miekg/dns/issues/279
err = syscall.SetNonblock(int(file.Fd()), true)
if err != nil {
+ file.Close()
return err
}
+ file.Close()
return nil
}
@@ -40,12 +43,15 @@ func setUDPSocketOptions6(conn *net.UDPConn) error {
return err
}
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
+ file.Close()
return err
}
err = syscall.SetNonblock(int(file.Fd()), true)
if err != nil {
+ file.Close()
return err
}
+ file.Close()
return nil
}
@@ -59,8 +65,10 @@ func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) {
// dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections
v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY)
if err != nil {
+ file.Close()
return false, err
}
+ file.Close()
return v6only == 1, nil
}
@@ -69,5 +77,6 @@ func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) {
if err != nil {
return nil, err
}
+ defer file.Close()
return syscall.Getsockname(int(file.Fd()))
}
diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
index c561370e7..94627e35e 100644
--- a/vendor/github.com/miekg/dns/zmsg.go
+++ b/vendor/github.com/miekg/dns/zmsg.go
@@ -221,7 +221,7 @@ func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress
return off, err
}
headerEnd := off
- off, err = PackDomainName(rr.Target, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Target, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -447,7 +447,7 @@ func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress boo
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.Exchanger, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Exchanger, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -539,7 +539,7 @@ func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress boo
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.Fqdn, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Fqdn, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -679,7 +679,7 @@ func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.Replacement, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Replacement, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -753,7 +753,7 @@ func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compres
return off, err
}
headerEnd := off
- off, err = PackDomainName(rr.Ptr, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Ptr, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -767,7 +767,7 @@ func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress b
return off, err
}
headerEnd := off
- off, err = PackDomainName(rr.NextDomain, msg, off, compression, compress)
+ off, err = PackDomainName(rr.NextDomain, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -905,11 +905,11 @@ func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress boo
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.Map822, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Map822, msg, off, compression, false)
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.Mapx400, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Mapx400, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -963,11 +963,11 @@ func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress boo
return off, err
}
headerEnd := off
- off, err = PackDomainName(rr.Mbox, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Mbox, msg, off, compression, false)
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.Txt, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Txt, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -1009,7 +1009,7 @@ func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.SignerName, msg, off, compression, compress)
+ off, err = PackDomainName(rr.SignerName, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -1073,7 +1073,7 @@ func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bo
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.SignerName, msg, off, compression, compress)
+ off, err = PackDomainName(rr.SignerName, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -1181,7 +1181,7 @@ func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bo
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.Target, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Target, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -1243,11 +1243,11 @@ func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress
return off, err
}
headerEnd := off
- off, err = PackDomainName(rr.PreviousName, msg, off, compression, compress)
+ off, err = PackDomainName(rr.PreviousName, msg, off, compression, false)
if err != nil {
return off, err
}
- off, err = PackDomainName(rr.NextName, msg, off, compression, compress)
+ off, err = PackDomainName(rr.NextName, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -1261,7 +1261,7 @@ func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress b
return off, err
}
headerEnd := off
- off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Algorithm, msg, off, compression, false)
if err != nil {
return off, err
}
@@ -1333,7 +1333,7 @@ func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress b
return off, err
}
headerEnd := off
- off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress)
+ off, err = PackDomainName(rr.Algorithm, msg, off, compression, false)
if err != nil {
return off, err
}
diff --git a/vendor/github.com/minio/minio-go/.travis.yml b/vendor/github.com/minio/minio-go/.travis.yml
index f61da45b6..0c353ba76 100644
--- a/vendor/github.com/minio/minio-go/.travis.yml
+++ b/vendor/github.com/minio/minio-go/.travis.yml
@@ -3,7 +3,6 @@ language: go
os:
- linux
-- osx
env:
- ARCH=x86_64
@@ -12,12 +11,9 @@ env:
go:
- 1.5.3
- 1.6
+- 1.7.4
script:
- diff -au <(gofmt -d .) <(printf "")
- go vet ./...
- go test -short -race -v ./...
-
-notifications:
- slack:
- secure: HrOX2k6F/sEl6Rr4m5vHOdJCIwV42be0kz1Jy/WSMvrl/fQ8YkldKviLeWh4aWt1kclsYhNQ4FqGML+RIZYsdOqej4fAw9Vi5pZkI1MzPJq0UjrtMqkqzvD90eDGQYCKwaXjEIN8cohwJeb6X0B0HKAd9sqJW5GH5SwnhH5WWP8=
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
index 16ed88685..f0d880b1e 100644
--- a/vendor/github.com/minio/minio-go/README.md
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -1,5 +1,5 @@
-# Minio Golang Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compatible object storage server.
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
**Supported cloud storage providers:**
@@ -14,22 +14,21 @@ The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compati
- Ceph Object Gateway
- Riak CS
-This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough of a simple file uploader. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
+This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
-This document assumes that you have a working [Golang setup](https://docs.minio.io/docs/how-to-install-golang).
+This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
## Download from Github
```sh
-$ go get -u github.com/minio/minio-go
+go get -u github.com/minio/minio-go
```
## Initialize Minio Client
-You need four items to connect to Minio object storage server.
-
+Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage.
| Parameter | Description|
@@ -68,7 +67,7 @@ func main() {
## Quick Start Example - File Uploader
-This example program connects to an object storage server, makes a bucket on the server and then uploads a file to the bucket.
+This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
@@ -132,11 +131,11 @@ func main() {
```sh
-$ go run file-uploader.go
+go run file-uploader.go
2016/08/13 17:03:28 Successfully created mymusic
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
-$ mc ls play/mymusic/
+mc ls play/mymusic/
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
```
@@ -191,6 +190,13 @@ The full API Reference is available here.
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
+### API Reference : Client custom settings
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
+
+
## Full Examples
#### Full Examples : Bucket Operations
@@ -238,7 +244,7 @@ The full API Reference is available here.
## Explore Further
* [Complete Documentation](https://docs.minio.io)
-* [Minio Golang Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Go Music Player App- Full Application Example ](https://docs.minio.io/docs/go-music-player-app)
## Contribute
diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go
index 0871b1cfb..ab2aa4af2 100644
--- a/vendor/github.com/minio/minio-go/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/api-datatypes.go
@@ -16,7 +16,10 @@
package minio
-import "time"
+import (
+ "net/http"
+ "time"
+)
// BucketInfo container for bucket metadata.
type BucketInfo struct {
@@ -38,6 +41,10 @@ type ObjectInfo struct {
Size int64 `json:"size"` // Size in bytes of the object.
ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
+ // Collection of additional metadata on the object.
+ // eg: x-amz-meta-*, content-encoding etc.
+ Metadata http.Header `json:"metadata"`
+
// Owner name.
Owner struct {
DisplayName string `json:"name"`
diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go
index 1f0dabb05..c9b4dcedd 100644
--- a/vendor/github.com/minio/minio-go/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/api-get-object.go
@@ -275,7 +275,7 @@ func (o *Object) setOffset(bytesRead int64) error {
return nil
}
-// Read reads up to len(p) bytes into p. It returns the number of
+// Read reads up to len(b) bytes into b. It returns the number of
// bytes read (0 <= n <= len(p)) and any error encountered. Returns
// io.EOF upon end of file.
func (o *Object) Read(b []byte) (n int, err error) {
@@ -343,7 +343,7 @@ func (o *Object) Read(b []byte) (n int, err error) {
return response.Size, err
}
-// Stat returns the ObjectInfo structure describing object.
+// Stat returns the ObjectInfo structure describing Object.
func (o *Object) Stat() (ObjectInfo, error) {
if o == nil {
return ObjectInfo{}, ErrInvalidArgument("Object is nil")
diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go
index 07b1fa483..da0a409cd 100644
--- a/vendor/github.com/minio/minio-go/api-get-policy.go
+++ b/vendor/github.com/minio/minio-go/api-get-policy.go
@@ -41,7 +41,7 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p
return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
}
-// GetBucketPolicy - get bucket policy rules at a given path.
+// ListBucketPolicies - list all policies for a given prefix and all its children.
func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
@@ -57,7 +57,7 @@ func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolic
return policy.GetPolicies(policyInfo.Statements, bucketName), nil
}
-// Request server for policy.
+// Request server for current bucket policy.
func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) {
// Get resources properly escaped and lined up before
// using them in http request.
diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go
index 795de6183..adfaa0a7a 100644
--- a/vendor/github.com/minio/minio-go/api-list.go
+++ b/vendor/github.com/minio/minio-go/api-list.go
@@ -482,6 +482,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
+ continue
}
}
select {
diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go
index 85e57805b..9c2a2ebd2 100644
--- a/vendor/github.com/minio/minio-go/api-notification.go
+++ b/vendor/github.com/minio/minio-go/api-notification.go
@@ -22,6 +22,9 @@ import (
"io"
"net/http"
"net/url"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// GetBucketNotification - get bucket notification at a given path.
@@ -135,7 +138,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
}
// Check ARN partition to verify if listening bucket is supported
- if isAmazonEndpoint(c.endpointURL) || isGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
notificationInfoCh <- NotificationInfo{
Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
}
@@ -143,7 +146,14 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
}
// Continously run and listen on bucket notification.
- for {
+ // Create a done channel to control 'ListObjects' go routine.
+ retryDoneCh := make(chan struct{}, 1)
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(retryDoneCh)
+
+ // Wait on the jitter retry loop.
+ for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
urlValues := make(url.Values)
urlValues.Set("prefix", prefix)
urlValues.Set("suffix", suffix)
@@ -155,10 +165,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
queryValues: urlValues,
})
if err != nil {
- notificationInfoCh <- NotificationInfo{
- Err: err,
- }
- return
+ continue
}
// Validate http response, upon error return quickly.
@@ -180,10 +187,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
for bio.Scan() {
var notificationInfo NotificationInfo
if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
- notificationInfoCh <- NotificationInfo{
- Err: err,
- }
- return
+ continue
}
// Send notifications on channel only if there are events received.
if len(notificationInfo.Records) > 0 {
@@ -200,12 +204,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
// and re-connect.
if err == io.ErrUnexpectedEOF {
resp.Body.Close()
- continue
}
- notificationInfoCh <- NotificationInfo{
- Err: err,
- }
- return
}
}
}(notificationInfoCh)
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
index 200f33e9b..f9d05ab9b 100644
--- a/vendor/github.com/minio/minio-go/api-presigned.go
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -20,6 +20,9 @@ import (
"errors"
"net/url"
"time"
+
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// supportedGetReqParams - supported request parameters for GET presigned request.
@@ -126,14 +129,14 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
// For Google endpoint set this value to be 'GoogleAccessId'.
- if isGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
p.formData["GoogleAccessId"] = c.accessKeyID
} else {
// For all other endpoints set this value to be 'AWSAccessKeyId'.
p.formData["AWSAccessKeyId"] = c.accessKeyID
}
// Sign the policy.
- p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey)
+ p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey)
return u, p.formData, nil
}
@@ -156,7 +159,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
}
// Add a credential policy.
- credential := getCredential(c.accessKeyID, location, t)
+ credential := s3signer.GetCredential(c.accessKeyID, location, t)
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-credential",
@@ -172,6 +175,6 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
p.formData["x-amz-algorithm"] = signV4Algorithm
p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
- p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
+ p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
return u, p.formData, nil
}
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
index 3c9f438ef..7c7e03f49 100644
--- a/vendor/github.com/minio/minio-go/api-put-bucket.go
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -26,8 +26,10 @@ import (
"io/ioutil"
"net/http"
"net/url"
+ "path"
"github.com/minio/minio-go/pkg/policy"
+ "github.com/minio/minio-go/pkg/s3signer"
)
/// Bucket operations
@@ -89,11 +91,8 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
// is the preferred method here. The final location of the
// 'bucket' is provided through XML LocationConstraint data with
// the request.
- targetURL, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
- }
- targetURL.Path = "/" + bucketName + "/"
+ targetURL := c.endpointURL
+ targetURL.Path = path.Join(bucketName, "") + "/"
// get a new HTTP request for the method.
req, err := http.NewRequest("PUT", targetURL.String(), nil)
@@ -133,9 +132,9 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
if c.signature.isV4() {
// Signature calculated for MakeBucket request should be for 'us-east-1',
// regardless of the bucket's location constraint.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
// Return signed request.
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket_test.go b/vendor/github.com/minio/minio-go/api-put-bucket_test.go
index a1899fbe2..ec33c8492 100644
--- a/vendor/github.com/minio/minio-go/api-put-bucket_test.go
+++ b/vendor/github.com/minio/minio-go/api-put-bucket_test.go
@@ -24,8 +24,10 @@ import (
"io"
"io/ioutil"
"net/http"
- "net/url"
+ "path"
"testing"
+
+ "github.com/minio/minio-go/pkg/s3signer"
)
// Tests validate http request formulated for creation of bucket.
@@ -33,14 +35,11 @@ func TestMakeBucketRequest(t *testing.T) {
// Generates expected http request for bucket creation.
// Used for asserting with the actual request generated.
createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
-
- targetURL, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
- }
- targetURL.Path = "/" + bucketName + "/"
+ targetURL := c.endpointURL
+ targetURL.Path = path.Join(bucketName, "") + "/"
// get a new HTTP request for the method.
+ var err error
req, err = http.NewRequest("PUT", targetURL.String(), nil)
if err != nil {
return nil, err
@@ -78,9 +77,9 @@ func TestMakeBucketRequest(t *testing.T) {
if c.signature.isV4() {
// Signature calculated for MakeBucket request should be for 'us-east-1',
// regardless of the bucket's location constraint.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
// Return signed request.
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
index 2eaef2e30..5f5f568e6 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-common.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -44,18 +44,17 @@ func isReadAt(reader io.Reader) (ok bool) {
}
// shouldUploadPart - verify if part should be uploaded.
-func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
+func shouldUploadPart(objPart objectPart, uploadReq uploadPartReq) bool {
// If part not found should upload the part.
- uploadedPart, found := objectParts[objPart.PartNumber]
- if !found {
+ if uploadReq.Part == nil {
return true
}
// if size mismatches should upload the part.
- if objPart.Size != uploadedPart.Size {
+ if objPart.Size != uploadReq.Part.Size {
return true
}
// if md5sum mismatches should upload the part.
- if objPart.ETag != uploadedPart.ETag {
+ if objPart.ETag != uploadReq.Part.ETag {
return true
}
return false
@@ -68,7 +67,7 @@ func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
// object storage it will have the following parameters as constants.
//
// maxPartsCount - 10000
-// minPartSize - 5MiB
+// minPartSize - 64MiB
// maxMultipartPutObjectSize - 5TiB
//
func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
@@ -167,37 +166,64 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte,
// getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id.
-func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
+func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
- return "", false, err
+ return "", err
}
if err := isValidObjectName(objectName); err != nil {
- return "", false, err
+ return "", err
}
- // Set content Type to default if empty string.
- if contentType == "" {
- contentType = "application/octet-stream"
+ // Initiate multipart upload for an object.
+ initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
+ if err != nil {
+ return "", err
}
+ return initMultipartUploadResult.UploadID, nil
+}
+
+// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session
+// or initiate a new multipart session if no current one found
+func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]objectPart, error) {
+ // A map of all uploaded parts.
+ var partsInfo map[int]objectPart
+ var err error
- // Find upload id for previous upload for an object.
- uploadID, err = c.findUploadID(bucketName, objectName)
+ uploadID, err := c.findUploadID(bucketName, objectName)
if err != nil {
- return "", false, err
+ return "", nil, err
}
+
if uploadID == "" {
- // Initiate multipart upload for an object.
- initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
+ // Initiates a new multipart request
+ uploadID, err = c.newUploadID(bucketName, objectName, metaData)
+ if err != nil {
+ return "", nil, err
+ }
+ } else {
+ // Fetch previously upload parts and maximum part size.
+ partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
- return "", false, err
+ // When the server returns NoSuchUpload even if its previouls acknowleged the existance of the upload id,
+ // initiate a new multipart upload
+ if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" {
+ uploadID, err = c.newUploadID(bucketName, objectName, metaData)
+ if err != nil {
+ return "", nil, err
+ }
+ } else {
+ return "", nil, err
+ }
}
- // Save the new upload id.
- uploadID = initMultipartUploadResult.UploadID
- // Indicate that this is a new upload id.
- isNew = true
}
- return uploadID, isNew, nil
+
+ // Allocate partsInfo if not done yet
+ if partsInfo == nil {
+ partsInfo = make(map[int]objectPart)
+ }
+
+ return uploadID, partsInfo, nil
}
// computeHash - Calculates hashes for an input read Seeker.
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
index c7cd46d08..56978d427 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-copy.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -16,7 +16,11 @@
package minio
-import "net/http"
+import (
+ "net/http"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
// CopyObject - copy a source object into a new object with the provided name in the provided bucket
func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
@@ -38,7 +42,7 @@ func (c Client) CopyObject(bucketName string, objectName string, objectSource st
}
// Set copy source.
- customHeaders.Set("x-amz-copy-source", urlEncodePath(objectSource))
+ customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource))
// Execute PUT on objectName.
resp, err := c.executeMethod("PUT", requestMetadata{
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go
index deaed0acd..aa554b321 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-file.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-file.go
@@ -28,6 +28,8 @@ import (
"os"
"path/filepath"
"sort"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// FPutObject - Create an object in a bucket, with contents from file at filePath.
@@ -62,6 +64,8 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
}
+ objMetadata := make(map[string][]string)
+
// Set contentType based on filepath extension if not given or default
// value of "binary/octet-stream" if the extension has no associated type.
if contentType == "" {
@@ -70,9 +74,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
}
+ objMetadata["Content-Type"] = []string{contentType}
+
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
- if isGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
@@ -82,11 +88,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// NOTE: S3 doesn't allow anonymous multipart requests.
- if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
@@ -97,15 +103,15 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
// Do not compute MD5 for anonymous requests to Amazon
// S3. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Small object upload is initiated for uploads for input data size smaller than 5MiB.
if fileSize < minPartSize && fileSize >= 0 {
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Upload all large objects as multipart.
- n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
@@ -116,7 +122,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
+ return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
return n, err
}
@@ -131,7 +137,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
// against MD5SUM of each individual parts. This function also
// effectively utilizes file system capabilities of reading from
// specific sections and not having to create temporary files.
-func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, contentType string, progress io.Reader) (int64, error) {
+func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -140,9 +146,8 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
return 0, err
}
- // Get upload id for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
+ uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
@@ -153,19 +158,6 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
- // A map of all uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // If this session is a continuation of a previous session fetch all
- // previously uploaded parts info.
- if !isNew {
- // Fetch previously upload parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
if err != nil {
@@ -178,14 +170,19 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
// Create a channel to communicate which part to upload.
// Buffer this to 10000, the maximum number of parts allowed by S3.
- uploadPartsCh := make(chan int, 10000)
+ uploadPartsCh := make(chan uploadPartReq, 10000)
// Just for readability.
lastPartNumber := totalPartsCount
// Send each part through the partUploadCh to be uploaded.
for p := 1; p <= totalPartsCount; p++ {
- uploadPartsCh <- p
+ part, ok := partsInfo[p]
+ if ok {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
+ } else {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+ }
}
close(uploadPartsCh)
@@ -193,7 +190,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
for w := 1; w <= 3; w++ {
go func() {
// Deal with each part as it comes through the channel.
- for partNumber := range uploadPartsCh {
+ for uploadReq := range uploadPartsCh {
// Add hash algorithms that need to be calculated by computeHash()
// In case of a non-v4 signature or https connection, sha256 is not needed.
hashAlgos := make(map[string]hash.Hash)
@@ -203,47 +200,50 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
hashAlgos["sha256"] = sha256.New()
}
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
+ readOffset := int64(uploadReq.PartNum-1) * partSize
+ missingPartSize := partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if uploadReq.PartNum == lastPartNumber {
+ readOffset = (fileSize - lastPartSize)
+ missingPartSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
+ var prtSize int64
+ var err error
+
+ prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
// Create the part to be uploaded.
verifyObjPart := objectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
- PartNumber: partNumber,
+ PartNumber: uploadReq.PartNum,
Size: partSize,
}
+
// If this is the last part do not give it the full part size.
- if partNumber == lastPartNumber {
+ if uploadReq.PartNum == lastPartNumber {
verifyObjPart.Size = lastPartSize
}
// Verify if part should be uploaded.
- if shouldUploadPart(verifyObjPart, partsInfo) {
- // If partNumber was not uploaded we calculate the missing
- // part offset and size. For all other part numbers we
- // calculate offset based on multiples of partSize.
- readOffset := int64(partNumber-1) * partSize
- missingPartSize := partSize
-
- // As a special case if partNumber is lastPartNumber, we
- // calculate the offset based on the last part size.
- if partNumber == lastPartNumber {
- readOffset = (fileSize - lastPartSize)
- missingPartSize = lastPartSize
- }
-
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
- var prtSize int64
- prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
- if err != nil {
- uploadedPartsCh <- uploadedPartRes{
- Error: err,
- }
- // Exit the goroutine.
- return
- }
-
+ if shouldUploadPart(verifyObjPart, uploadReq) {
// Proceed to upload the part.
var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Error: err,
@@ -252,12 +252,13 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
return
}
// Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
+ uploadReq.Part = &objPart
}
// Return through the channel the part size.
uploadedPartsCh <- uploadedPartRes{
Size: verifyObjPart.Size,
- PartNum: partNumber,
+ PartNum: uploadReq.PartNum,
+ Part: uploadReq.Part,
Error: nil,
}
}
@@ -271,8 +272,8 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
- part, ok := partsInfo[uploadRes.PartNum]
- if !ok {
+ part := uploadRes.Part
+ if part == nil {
return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the total uploaded size.
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
index cdd3f53c2..f74eae626 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -45,11 +45,11 @@ import (
// If we exhaust all the known types, code proceeds to use stream as
// is where each part is re-downloaded, checksummed and verified
// before upload.
-func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
if size > 0 && size > minPartSize {
// Verify if reader is *os.File, then use file system functionalities.
if isFile(reader) {
- return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)
+ return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
}
// Verify if reader is *minio.Object or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
@@ -58,17 +58,17 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
// and such a functionality is used in the subsequent code
// path.
if isObject(reader) || isReadAt(reader) {
- return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress)
+ return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
}
}
// For any other data size and reader type we do generic multipart
// approach by staging data in temporary files and uploading them.
- return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
}
-// putObjectStream uploads files bigger than 5MiB, and also supports
+// putObjectStream uploads files bigger than 64MiB, and also supports
// special case where size is unknown i.e '-1'.
-func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -83,27 +83,12 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
- // A map of all previously uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // getUploadID for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
+ uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
- // If This session is a continuation of a previous session fetch all
- // previously uploaded parts info and as a special case only fetch partsInfo
- // for only known upload size.
- if !isNew {
- // Fetch previously uploaded parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(size)
if err != nil {
@@ -139,12 +124,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// as we read from the source.
reader = newHook(tmpBuffer, progress)
+ part, ok := partsInfo[partNumber]
+
// Verify if part should be uploaded.
- if shouldUploadPart(objectPart{
+ if !ok || shouldUploadPart(objectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: partNumber,
Size: prtSize,
- }, partsInfo) {
+ }, uploadPartReq{PartNum: partNumber, Part: &part}) {
// Proceed to upload the part.
var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
@@ -212,7 +199,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
+func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
@@ -225,13 +212,18 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
urlValues := make(url.Values)
urlValues.Set("uploads", "")
- if contentType == "" {
- contentType = "application/octet-stream"
- }
-
// Set ContentType header.
customHeader := make(http.Header)
- customHeader.Set("Content-Type", contentType)
+ for k, v := range metaData {
+ if len(v) > 0 {
+ customHeader.Set(k, v[0])
+ }
+ }
+
+ // Set a default content-type header if the latter is not provided
+ if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+ customHeader.Set("Content-Type", "application/octet-stream")
+ }
reqMetadata := requestMetadata{
bucketName: bucketName,
diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go
index 0f79e708f..42f8ce4d1 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-progress.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-progress.go
@@ -19,10 +19,19 @@ package minio
import (
"io"
"strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
-// PutObjectWithProgress - With progress.
+// PutObjectWithProgress - with progress.
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
+ metaData := make(map[string][]string)
+ metaData["Content-Type"] = []string{contentType}
+ return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress)
+}
+
+// PutObjectWithMetadata - with metadata.
+func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -50,7 +59,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
- if isGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
@@ -63,11 +72,11 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}
// NOTE: S3 doesn't allow anonymous multipart requests.
- if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
@@ -81,15 +90,15 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
}
// Do not compute MD5 for anonymous requests to Amazon
// S3. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}
// putSmall object.
if size < minPartSize && size >= 0 {
- return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
// For all sizes greater than 5MiB do multipart.
- n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType, progress)
+ n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
@@ -100,7 +109,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
+ return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
return n, err
}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go
index 14fa4b296..4ab1095f6 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-readat.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-readat.go
@@ -32,17 +32,22 @@ type uploadedPartRes struct {
Error error // Any error encountered while uploading the part.
PartNum int // Number of the part uploaded.
Size int64 // Size of the part uploaded.
+ Part *objectPart
+}
+
+type uploadPartReq struct {
+ PartNum int // Number of the part uploaded.
+ Part *objectPart // Part info if this part was previously uploaded, nil otherwise.
}
// shouldUploadPartReadAt - verify if part should be uploaded.
-func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool {
+func shouldUploadPartReadAt(objPart objectPart, uploadReq uploadPartReq) bool {
// If part not found part should be uploaded.
- uploadedPart, found := objectParts[objPart.PartNumber]
- if !found {
+ if uploadReq.Part == nil {
return true
}
// if size mismatches part should be uploaded.
- if uploadedPart.Size != objPart.Size {
+ if uploadReq.Part.Size != objPart.Size {
return true
}
return false
@@ -58,7 +63,7 @@ func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart)
// temporary files for staging all the data, these temporary files are
// cleaned automatically when the caller i.e http client closes the
// stream after uploading all the contents successfully.
-func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -67,9 +72,8 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return 0, err
}
- // Get upload id for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ // Get the upload id of a previously partially uploaded object or initiate a new multipart upload
+ uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
@@ -80,17 +84,6 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
- // A map of all uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // Fetch all parts info previously uploaded.
- if !isNew {
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
@@ -103,7 +96,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Declare a channel that sends the next part number to be uploaded.
// Buffered to 10000 because thats the maximum number of parts allowed
// by S3.
- uploadPartsCh := make(chan int, 10000)
+ uploadPartsCh := make(chan uploadPartReq, 10000)
// Declare a channel that sends back the response of a part upload.
// Buffered to 10000 because thats the maximum number of parts allowed
@@ -112,7 +105,12 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Send each part number to the channel to be processed.
for p := 1; p <= totalPartsCount; p++ {
- uploadPartsCh <- p
+ part, ok := partsInfo[p]
+ if ok {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
+ } else {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+ }
}
close(uploadPartsCh)
@@ -123,64 +121,65 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
readAtBuffer := make([]byte, optimalReadBufferSize)
// Each worker will draw from the part channel and upload in parallel.
- for partNumber := range uploadPartsCh {
+ for uploadReq := range uploadPartsCh {
// Declare a new tmpBuffer.
tmpBuffer := new(bytes.Buffer)
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
+ readOffset := int64(uploadReq.PartNum-1) * partSize
+ missingPartSize := partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if uploadReq.PartNum == lastPartNumber {
+ readOffset = (size - lastPartSize)
+ missingPartSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
+
+ // Choose the needed hash algorithms to be calculated by hashCopyBuffer.
+ // Sha256 is avoided in non-v4 signature requests or HTTPS connections
+ hashSums := make(map[string][]byte)
+ hashAlgos := make(map[string]hash.Hash)
+ hashAlgos["md5"] = md5.New()
+ if c.signature.isV4() && !c.secure {
+ hashAlgos["sha256"] = sha256.New()
+ }
+
+ var prtSize int64
+ var err error
+ prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
+ if err != nil {
+ // Send the error back through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: 0,
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
// Verify object if its uploaded.
verifyObjPart := objectPart{
- PartNumber: partNumber,
+ PartNumber: uploadReq.PartNum,
Size: partSize,
}
// Special case if we see a last part number, save last part
// size as the proper part size.
- if partNumber == lastPartNumber {
+ if uploadReq.PartNum == lastPartNumber {
verifyObjPart.Size = lastPartSize
}
// Only upload the necessary parts. Otherwise return size through channel
// to update any progress bar.
- if shouldUploadPartReadAt(verifyObjPart, partsInfo) {
- // If partNumber was not uploaded we calculate the missing
- // part offset and size. For all other part numbers we
- // calculate offset based on multiples of partSize.
- readOffset := int64(partNumber-1) * partSize
- missingPartSize := partSize
-
- // As a special case if partNumber is lastPartNumber, we
- // calculate the offset based on the last part size.
- if partNumber == lastPartNumber {
- readOffset = (size - lastPartSize)
- missingPartSize = lastPartSize
- }
-
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
-
- // Choose the needed hash algorithms to be calculated by hashCopyBuffer.
- // Sha256 is avoided in non-v4 signature requests or HTTPS connections
- hashSums := make(map[string][]byte)
- hashAlgos := make(map[string]hash.Hash)
- hashAlgos["md5"] = md5.New()
- if c.signature.isV4() && !c.secure {
- hashAlgos["sha256"] = sha256.New()
- }
-
- var prtSize int64
- prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
- if err != nil {
- // Send the error back through the channel.
- uploadedPartsCh <- uploadedPartRes{
- Size: 0,
- Error: err,
- }
- // Exit the goroutine.
- return
- }
-
+ if shouldUploadPartReadAt(verifyObjPart, uploadReq) {
// Proceed to upload the part.
var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
@@ -190,12 +189,13 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return
}
// Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
+ uploadReq.Part = &objPart
}
// Send successful part info through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: verifyObjPart.Size,
- PartNum: partNumber,
+ PartNum: uploadReq.PartNum,
+ Part: uploadReq.Part,
Error: nil,
}
}
@@ -210,8 +210,9 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
- part, ok := partsInfo[uploadRes.PartNum]
- if !ok {
+ // part, ok := partsInfo[uploadRes.PartNum]
+ part := uploadRes.Part
+ if part == nil {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the totalUploadedSize.
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
index f7dd2daf1..a779fbebe 100644
--- a/vendor/github.com/minio/minio-go/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -103,11 +103,10 @@ func getReaderSize(reader io.Reader) (size int64, err error) {
// implement Seekable calls. Ignore them and treat
// them like a stream with unknown length.
switch st.Name() {
- case "stdin":
- fallthrough
- case "stdout":
- fallthrough
- case "stderr":
+ case "stdin", "stdout", "stderr":
+ return
+ // Ignore read/write stream of os.Pipe() which have unknown length too.
+ case "|0", "|1":
return
}
size = st.Size()
@@ -151,7 +150,7 @@ func (c Client) PutObject(bucketName, objectName string, reader io.Reader, conte
// putObjectNoChecksum special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
-func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -169,7 +168,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
+ st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
if err != nil {
return 0, err
}
@@ -181,7 +180,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// putObjectSingle is a special function for uploading single put object request.
// This special function is used as a fallback when multipart upload fails.
-func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@@ -237,7 +236,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
}
}
// Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, contentType)
+ st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
if err != nil {
return 0, err
}
@@ -255,7 +254,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@@ -272,13 +271,20 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
- if strings.TrimSpace(contentType) == "" {
- contentType = "application/octet-stream"
- }
-
// Set headers.
customHeader := make(http.Header)
- customHeader.Set("Content-Type", contentType)
+
+ // Set metadata to headers
+ for k, v := range metaData {
+ if len(v) > 0 {
+ customHeader.Set(k, v[0])
+ }
+ }
+
+ // If Content-Type is not provided, set the default application/octet-stream one
+ if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+ customHeader.Set("Content-Type", "application/octet-stream")
+ }
// Populate request metadata.
reqMetadata := requestMetadata{
@@ -303,13 +309,13 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
}
}
- var metadata ObjectInfo
+ var objInfo ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end.
- metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
- metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
+ objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
// A success here means data was written to server successfully.
- metadata.Size = size
+ objInfo.Size = size
// Return here.
- return metadata, nil
+ return objInfo, nil
}
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
index 110a73e99..2ca84458e 100644
--- a/vendor/github.com/minio/minio-go/api-remove.go
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -71,6 +71,13 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
if err != nil {
return err
}
+ if resp != nil {
+ // if some unexpected error happened and max retry is reached, we want to let client know
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
// DeleteObject always responds with http '204' even for
// objects which do not exist. So no need to handle them
// specifically.
@@ -164,6 +171,10 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
break
}
}
+ if count == 0 {
+ // Multi Objects Delete API doesn't accept an empty object list, quit immediately
+ break
+ }
if count < maxEntries {
// We didn't have 1000 entries, so this is the last batch
finish = true
diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
index 52e8a120d..a34f82e97 100644
--- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
@@ -210,15 +210,16 @@ type createBucketConfiguration struct {
// deleteObject container for Delete element in MultiObjects Delete XML request
type deleteObject struct {
Key string
- VersionId string `xml:"VersionId,omitempty"`
+ VersionID string `xml:"VersionId,omitempty"`
}
// deletedObject container for Deleted element in MultiObjects Delete XML response
type deletedObject struct {
- Key string
- VersionId string `xml:"VersionId,omitempty"`
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+ // These fields are ignored.
DeleteMarker bool
- DeleteMarkerVersionId string
+ DeleteMarkerVersionID string
}
// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
index 976d61241..e3bb115d4 100644
--- a/vendor/github.com/minio/minio-go/api-stat.go
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -21,6 +21,8 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// BucketExists verify if bucket exists and you have permission to access it.
@@ -49,6 +51,31 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
return true, nil
}
+// List of header keys to be filtered, usually
+// from all S3 API http responses.
+var defaultFilterKeys = []string{
+ "Transfer-Encoding",
+ "Accept-Ranges",
+ "Date",
+ "Server",
+ "Vary",
+ "x-amz-request-id",
+ "x-amz-id-2",
+ // Add new headers to be ignored.
+}
+
+// Extract only necessary metadata header key/values by
+// filtering them out with a list of custom header keys.
+func extractObjMetadata(header http.Header) http.Header {
+ filterKeys := append([]string{
+ "ETag",
+ "Content-Length",
+ "Last-Modified",
+ "Content-Type",
+ }, defaultFilterKeys...)
+ return filterHeader(header, filterKeys)
+}
+
// StatObject verifies if object exists and you have permission to access.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Input validation.
@@ -78,17 +105,21 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
- // Parse content length.
- size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
- if err != nil {
- return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
- Message: "Content-Length is invalid. " + reportIssue,
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
+ // Content-Length is not valid for Google Cloud Storage, do not verify.
+ var size int64 = -1
+ if !s3utils.IsGoogleEndpoint(c.endpointURL) {
+ // Parse content length.
+ size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Content-Length is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
}
}
// Parse Last-Modified has http time format.
@@ -109,12 +140,19 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
if contentType == "" {
contentType = "application/octet-stream"
}
+
+ // Extract only the relevant header keys describing the object.
+ // following function filters out a list of standard set of keys
+ // which are not part of object metadata.
+ metadata := extractObjMetadata(resp.Header)
+
// Save object metadata info.
- var objectStat ObjectInfo
- objectStat.ETag = md5sum
- objectStat.Key = objectName
- objectStat.Size = size
- objectStat.LastModified = date
- objectStat.ContentType = contentType
- return objectStat, nil
+ return ObjectInfo{
+ ETag: md5sum,
+ Key: objectName,
+ Size: size,
+ LastModified: date,
+ ContentType: contentType,
+ Metadata: metadata,
+ }, nil
}
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
index 954927084..98829cd2c 100644
--- a/vendor/github.com/minio/minio-go/api.go
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -33,12 +33,18 @@ import (
"strings"
"sync"
"time"
+
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Client implements Amazon S3 compatible methods.
type Client struct {
/// Standard options.
+ // Parsed endpoint url provided by the user.
+ endpointURL url.URL
+
// AccessKeyID required for authorized requests.
accessKeyID string
// SecretAccessKey required for authorized requests.
@@ -53,7 +59,6 @@ type Client struct {
appName string
appVersion string
}
- endpointURL string
// Indicate whether we are using https or not
secure bool
@@ -73,7 +78,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "2.0.2"
+ libraryVersion = "2.0.3"
)
// User Agent should always following the below style.
@@ -116,13 +121,12 @@ func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Cl
if err != nil {
return nil, err
}
- // Google cloud storage should be set to signature V2, force it if
- // not.
- if isGoogleEndpoint(clnt.endpointURL) {
+ // Google cloud storage should be set to signature V2, force it if not.
+ if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV2
}
// If Amazon S3 set to signature v2.n
- if isAmazonEndpoint(clnt.endpointURL) {
+ if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV4
}
return clnt, nil
@@ -151,6 +155,18 @@ func (r *lockedRandSource) Seed(seed int64) {
r.lk.Unlock()
}
+// redirectHeaders copies all headers when following a redirect URL.
+// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
+func redirectHeaders(req *http.Request, via []*http.Request) error {
+ if len(via) == 0 {
+ return nil
+ }
+ for key, val := range via[0].Header {
+ req.Header[key] = val
+ }
+ return nil
+}
+
func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, secure)
@@ -170,11 +186,12 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
clnt.secure = secure
// Save endpoint URL, user agent for future uses.
- clnt.endpointURL = endpointURL.String()
+ clnt.endpointURL = *endpointURL
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
- Transport: http.DefaultTransport,
+ Transport: http.DefaultTransport,
+ CheckRedirect: redirectHeaders,
}
// Instantiae bucket location cache.
@@ -262,6 +279,12 @@ type requestMetadata struct {
contentMD5Bytes []byte
}
+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regCred matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
// Filter out signature value from Authorization header.
func (c Client) filterSignature(req *http.Request) {
// For anonymous requests, no need to filter.
@@ -281,11 +304,9 @@ func (c Client) filterSignature(req *http.Request) {
origAuth := req.Header.Get("Authorization")
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
- regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
// Strip out 256-bit signature from: Signature=<256-bit signature>
- regSign := regexp.MustCompile("Signature=([[0-9a-f]+)")
newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
// Set a temporary redacted auth
@@ -364,20 +385,35 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
- // do the request.
- resp, err := c.httpClient.Do(req)
- if err != nil {
- // Handle this specifically for now until future Golang
- // versions fix this issue properly.
- urlErr, ok := err.(*url.Error)
- if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
- return nil, &url.Error{
- Op: urlErr.Op,
- URL: urlErr.URL,
- Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
+ var resp *http.Response
+ var err error
+ // Do the request in a loop in case of 307 http is met since golang still doesn't
+ // handle properly this situation (https://github.com/golang/go/issues/7912)
+ for {
+ resp, err = c.httpClient.Do(req)
+ if err != nil {
+ // Handle this specifically for now until future Golang
+ // versions fix this issue properly.
+ urlErr, ok := err.(*url.Error)
+ if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
+ return nil, &url.Error{
+ Op: urlErr.Op,
+ URL: urlErr.URL,
+ Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
+ }
}
+ return nil, err
+ }
+ // Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise
+ if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
+ newURL, err := url.Parse(resp.Header.Get("Location"))
+ if err != nil {
+ break
+ }
+ req.URL = newURL
+ } else {
+ break
}
- return nil, err
}
// Response cannot be non-nil, report if its the case.
@@ -467,6 +503,8 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Read the body to be saved later.
errBodyBytes, err := ioutil.ReadAll(res.Body)
+ // res.Body should be closed
+ closeResponse(res)
if err != nil {
return nil, err
}
@@ -512,7 +550,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// Default all requests to "us-east-1" or "cn-north-1" (china region)
location := "us-east-1"
- if isAmazonChinaEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
@@ -550,10 +588,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
}
if c.signature.isV2() {
// Presign URL with signature v2.
- req = preSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
+ req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
} else {
// Presign URL with signature v4.
- req = preSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
+ req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
}
return req, nil
}
@@ -566,7 +604,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// FIXEM: Enable this when Google Cloud Storage properly supports 100-continue.
// Skip setting 'expect' header for Google Cloud Storage, there
// are some known issues - https://github.com/restic/restic/issues/520
- if !isGoogleEndpoint(c.endpointURL) {
+ if !s3utils.IsGoogleEndpoint(c.endpointURL) {
// Set 'Expect' header for the request.
req.Header.Set("Expect", "100-continue")
}
@@ -610,10 +648,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
if !c.anonymous {
if c.signature.isV2() {
// Add signature version '2' authorization header.
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
} else if c.signature.isV4() {
// Add signature version '4' authorization header.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, location)
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
}
}
@@ -631,26 +669,21 @@ func (c Client) setUserAgent(req *http.Request) {
// makeTargetURL make a new target url.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
- // Save host.
- url, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
- }
- host := url.Host
+ host := c.endpointURL.Host
// For Amazon S3 endpoint, try to fetch location based endpoint.
- if isAmazonEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) {
// Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation)
}
// Save scheme.
- scheme := url.Scheme
+ scheme := c.endpointURL.Scheme
urlStr := scheme + "://" + host + "/"
// Make URL only if bucketName is available, otherwise use the
// endpoint URL.
if bucketName != "" {
// Save if target url will have buckets which suppport virtual host.
- isVirtualHostStyle := isVirtualHostSupported(c.endpointURL, bucketName)
+ isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName)
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support
@@ -658,19 +691,19 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
if isVirtualHostStyle {
urlStr = scheme + "://" + bucketName + "." + host + "/"
if objectName != "" {
- urlStr = urlStr + urlEncodePath(objectName)
+ urlStr = urlStr + s3utils.EncodePath(objectName)
}
} else {
// If not fall back to using path style.
urlStr = urlStr + bucketName + "/"
if objectName != "" {
- urlStr = urlStr + urlEncodePath(objectName)
+ urlStr = urlStr + s3utils.EncodePath(objectName)
}
}
}
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
- urlStr = urlStr + "?" + queryEncode(queryValues)
+ urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
}
u, err := url.Parse(urlStr)
if err != nil {
diff --git a/vendor/github.com/minio/minio-go/api_functional_v2_test.go b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
index 2084ffef7..23713732a 100644
--- a/vendor/github.com/minio/minio-go/api_functional_v2_test.go
+++ b/vendor/github.com/minio/minio-go/api_functional_v2_test.go
@@ -42,10 +42,10 @@ func TestMakeBucketErrorV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -88,10 +88,10 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -168,10 +168,10 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -238,10 +238,10 @@ func TestResumablePutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -350,10 +350,10 @@ func TestFPutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -499,10 +499,10 @@ func TestResumableFPutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -577,10 +577,10 @@ func TestMakeBucketRegionsV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -628,10 +628,10 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -761,10 +761,10 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -897,10 +897,10 @@ func TestCopyObjectV2(t *testing.T) {
// Instantiate new minio client object
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1014,10 +1014,10 @@ func TestFunctionalV2(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := NewV2(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
diff --git a/vendor/github.com/minio/minio-go/api_functional_v4_test.go b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
index 64f8a77f8..d19d3e1ae 100644
--- a/vendor/github.com/minio/minio-go/api_functional_v4_test.go
+++ b/vendor/github.com/minio/minio-go/api_functional_v4_test.go
@@ -70,10 +70,10 @@ func TestMakeBucketError(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -116,10 +116,10 @@ func TestMakeBucketRegions(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -167,10 +167,10 @@ func TestPutObjectReadAt(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -204,7 +204,10 @@ func TestPutObjectReadAt(t *testing.T) {
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ // Object content type
+ objectContentType := "binary/octet-stream"
+
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), objectContentType)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -227,6 +230,105 @@ func TestPutObjectReadAt(t *testing.T) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
+ if st.ContentType != objectContentType {
+ t.Fatalf("Error: Content types don't match, expected: %+v, found: %+v\n", objectContentType, st.ContentType)
+ }
+ if err := r.Close(); err != nil {
+ t.Fatal("Error:", err)
+ }
+ if err := r.Close(); err == nil {
+ t.Fatal("Error: object is already closed, should return error")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
+// Test PutObjectWithMetadata using a large multipart upload to verify custom metadata is preserved
+func TestPutObjectWithMetadata(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := New(
+ os.Getenv("S3_ADDRESS"),
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ mustParseBool(os.Getenv("S3_SECURE")),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate data using 2 parts
+ buf := make([]byte, minPartSize*2)
+ // Use crand.Reader for multipart tests to ensure part order at the end.
+ size, err := io.ReadFull(crand.Reader, buf)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if size != minPartSize*2 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*2, size)
+ }
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+
+ n, err := c.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), map[string][]string{"Content-Type": []string{customContentType}}, nil)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+ if st.ContentType != customContentType {
+ t.Fatalf("Error: Expected and found content types do not match, want %v, got %v\n",
+ customContentType, st.ContentType)
+ }
if err := r.Close(); err != nil {
t.Fatal("Error:", err)
}
@@ -255,10 +357,10 @@ func TestListPartiallyUploaded(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -334,10 +436,10 @@ func TestGetOjectSeekEnd(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -429,10 +531,10 @@ func TestGetObjectClosedTwice(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -509,10 +611,10 @@ func TestRemoveMultipleObjects(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
@@ -583,10 +685,10 @@ func TestRemovePartiallyUploaded(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -656,10 +758,10 @@ func TestResumablePutObject(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -702,9 +804,10 @@ func TestResumablePutObject(t *testing.T) {
// New object name.
objectName := bucketName + "-resumable"
+ objectContentType := "application/custom-octet-stream"
// Upload the file.
- n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
+ n, err = c.FPutObject(bucketName, objectName, file.Name(), objectContentType)
if err != nil {
t.Fatal("Error:", err)
}
@@ -718,17 +821,22 @@ func TestResumablePutObject(t *testing.T) {
t.Fatal("Error:", err)
}
- // Upload now cloud to cloud.
- n, err = c.PutObject(bucketName, objectName+"-put", reader, "application/octest-stream")
+ // Get object info.
+ objInfo, err := reader.Stat()
if err != nil {
t.Fatal("Error:", err)
}
- // Get object info.
- objInfo, err := reader.Stat()
+ if objInfo.ContentType != objectContentType {
+ t.Fatalf("Error: Content types don't match, want %v, got %v\n", objectContentType, objInfo.ContentType)
+ }
+
+ // Upload now cloud to cloud.
+ n, err = c.PutObject(bucketName, objectName+"-put", reader, objectContentType)
if err != nil {
t.Fatal("Error:", err)
}
+
if n != objInfo.Size {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n)
}
@@ -766,10 +874,10 @@ func TestResumableFPutObject(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -856,10 +964,10 @@ func TestFPutObjectMultipart(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -912,9 +1020,10 @@ func TestFPutObjectMultipart(t *testing.T) {
// Set base object name
objectName := bucketName + "FPutObject"
+ objectContentType := "testapplication/octet-stream"
// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err := c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+ n, err := c.FPutObject(bucketName, objectName+"-standard", file.Name(), objectContentType)
if err != nil {
t.Fatal("Error:", err)
}
@@ -922,6 +1031,21 @@ func TestFPutObjectMultipart(t *testing.T) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
}
+ r, err := c.GetObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatalf("Unexpected error: %v\n", err)
+ }
+ objInfo, err := r.Stat()
+ if err != nil {
+ t.Fatalf("Unexpected error: %v\n", err)
+ }
+ if objInfo.Size != minPartSize*4 {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
+ }
+ if objInfo.ContentType != objectContentType {
+ t.Fatalf("Error: Content types don't match, want %v, got %v\n", objectContentType, objInfo.ContentType)
+ }
+
// Remove all objects and bucket and temp file
err = c.RemoveObject(bucketName, objectName+"-standard")
if err != nil {
@@ -945,10 +1069,10 @@ func TestFPutObject(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1105,10 +1229,10 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1238,10 +1362,10 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
// Instantiate new minio client object.
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1387,10 +1511,10 @@ func TestPresignedPostPolicy(t *testing.T) {
// Instantiate new minio client object
c, err := NewV4(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1482,10 +1606,10 @@ func TestCopyObject(t *testing.T) {
// Instantiate new minio client object
c, err := NewV4(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1648,10 +1772,10 @@ func TestBucketNotification(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@@ -1724,10 +1848,10 @@ func TestFunctional(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := New(
- "s3.amazonaws.com",
+ os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
- true,
+ mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
diff --git a/vendor/github.com/minio/minio-go/api_unit_test.go b/vendor/github.com/minio/minio-go/api_unit_test.go
index 817a8c2c7..c1db0df5d 100644
--- a/vendor/github.com/minio/minio-go/api_unit_test.go
+++ b/vendor/github.com/minio/minio-go/api_unit_test.go
@@ -18,11 +18,9 @@ package minio
import (
"bytes"
- "fmt"
"io"
"io/ioutil"
"net/http"
- "net/url"
"os"
"strings"
"testing"
@@ -202,49 +200,6 @@ func TestTempFile(t *testing.T) {
}
}
-// Tests url encoding.
-func TestEncodeURL2Path(t *testing.T) {
- type urlStrings struct {
- objName string
- encodedObjName string
- }
-
- bucketName := "bucketName"
- want := []urlStrings{
- {
- objName: "本語",
- encodedObjName: "%E6%9C%AC%E8%AA%9E",
- },
- {
- objName: "本語.1",
- encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
- },
- {
- objName: ">123>3123123",
- encodedObjName: "%3E123%3E3123123",
- },
- {
- objName: "test 1 2.txt",
- encodedObjName: "test%201%202.txt",
- },
- {
- objName: "test++ 1.txt",
- encodedObjName: "test%2B%2B%201.txt",
- },
- }
-
- for _, o := range want {
- u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
- if err != nil {
- t.Fatal("Error:", err)
- }
- urlPath := "/" + bucketName + "/" + o.encodedObjName
- if urlPath != encodeURL2Path(u) {
- t.Fatal("Error")
- }
- }
-}
-
// Tests error response structure.
func TestErrorResponse(t *testing.T) {
var err error
@@ -270,53 +225,6 @@ func TestErrorResponse(t *testing.T) {
}
}
-// Tests signature calculation.
-func TestSignatureCalculation(t *testing.T) {
- req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
- if err != nil {
- t.Fatal("Error:", err)
- }
- req = signV4(*req, "", "", "us-east-1")
- if req.Header.Get("Authorization") != "" {
- t.Fatal("Error: anonymous credentials should not have Authorization header.")
- }
-
- req = preSignV4(*req, "", "", "us-east-1", 0)
- if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
- t.Fatal("Error: anonymous credentials should not have Signature query resource.")
- }
-
- req = signV2(*req, "", "")
- if req.Header.Get("Authorization") != "" {
- t.Fatal("Error: anonymous credentials should not have Authorization header.")
- }
-
- req = preSignV2(*req, "", "", 0)
- if strings.Contains(req.URL.RawQuery, "Signature") {
- t.Fatal("Error: anonymous credentials should not have Signature query resource.")
- }
-
- req = signV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
- if req.Header.Get("Authorization") == "" {
- t.Fatal("Error: normal credentials should have Authorization header.")
- }
-
- req = preSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
- if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
- t.Fatal("Error: normal credentials should have Signature query resource.")
- }
-
- req = signV2(*req, "ACCESS-KEY", "SECRET-KEY")
- if req.Header.Get("Authorization") == "" {
- t.Fatal("Error: normal credentials should have Authorization header.")
- }
-
- req = preSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
- if !strings.Contains(req.URL.RawQuery, "Signature") {
- t.Fatal("Error: normal credentials should not have Signature query resource.")
- }
-}
-
// Tests signature type.
func TestSignatureType(t *testing.T) {
clnt := Client{}
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
index 4ad106959..46dfe9348 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -23,6 +23,9 @@ import (
"path"
"strings"
"sync"
+
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// bucketLocationCache - Provides simple mechanism to hold bucket
@@ -85,7 +88,7 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
return location, nil
}
- if isAmazonChinaEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
@@ -160,10 +163,14 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
urlValues.Set("location", "")
// Set get bucket location always as path style.
- targetURL, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
+ targetURL := c.endpointURL
+
+ // Requesting a bucket location from an accelerate endpoint returns a 400,
+ // so default to us-east-1 for the lookup
+ if s3utils.IsAmazonS3AccelerateEndpoint(c.endpointURL) {
+ targetURL.Host = getS3Endpoint("us-east-1")
}
+
targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()
@@ -189,9 +196,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
// Sign the request.
if c.signature.isV4() {
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil
}
diff --git a/vendor/github.com/minio/minio-go/bucket-cache_test.go b/vendor/github.com/minio/minio-go/bucket-cache_test.go
index 81cfbc097..0c068c966 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache_test.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache_test.go
@@ -26,6 +26,8 @@ import (
"path"
"reflect"
"testing"
+
+ "github.com/minio/minio-go/pkg/s3signer"
)
// Test validates `newBucketLocationCache`.
@@ -70,14 +72,12 @@ func TestGetBucketLocationRequest(t *testing.T) {
urlValues.Set("location", "")
// Set get bucket location always as path style.
- targetURL, err := url.Parse(c.endpointURL)
- if err != nil {
- return nil, err
- }
+ targetURL := c.endpointURL
targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()
// Get a new HTTP request for the method.
+ var err error
req, err = http.NewRequest("GET", targetURL.String(), nil)
if err != nil {
return nil, err
@@ -93,9 +93,9 @@ func TestGetBucketLocationRequest(t *testing.T) {
// Sign the request.
if c.signature.isV4() {
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil
diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go
index 121a63a77..4f60f1c8b 100644
--- a/vendor/github.com/minio/minio-go/bucket-notification.go
+++ b/vendor/github.com/minio/minio-go/bucket-notification.go
@@ -84,7 +84,7 @@ func (arn Arn) String() string {
// NotificationConfig - represents one single notification configuration
// such as topic, queue or lambda configuration.
type NotificationConfig struct {
- Id string `xml:"Id,omitempty"`
+ ID string `xml:"Id,omitempty"`
Arn Arn `xml:"-"`
Events []NotificationEventType `xml:"Event"`
Filter *Filter `xml:"Filter,omitempty"`
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
index 779ed8c7a..057c3eef4 100644
--- a/vendor/github.com/minio/minio-go/constants.go
+++ b/vendor/github.com/minio/minio-go/constants.go
@@ -44,3 +44,9 @@ const optimalReadBufferSize = 1024 * 1024 * 5
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// Signature related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+)
diff --git a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md
index 9977c5df9..0365c7fad 100644
--- a/vendor/github.com/minio/minio-go/docs/API.md
+++ b/vendor/github.com/minio/minio-go/docs/API.md
@@ -9,9 +9,9 @@
package main
import (
- "fmt"
+ "fmt"
- "github.com/minio/minio-go"
+ "github.com/minio/minio-go"
)
func main() {
@@ -22,7 +22,7 @@ func main() {
minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
if err != nil {
fmt.Println(err)
- return
+ return
}
}
@@ -35,9 +35,9 @@ func main() {
package main
import (
- "fmt"
+ "fmt"
- "github.com/minio/minio-go"
+ "github.com/minio/minio-go"
)
func main() {
@@ -48,18 +48,18 @@ func main() {
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ssl)
if err != nil {
fmt.Println(err)
- return
+ return
}
}
```
-| Bucket operations |Object operations | Presigned operations | Bucket Policy/Notification Operations |
-|:---|:---|:---|:---|
-|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) |
-|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) |
-|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) |
-| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) |
+| Bucket operations |Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings |
+|:---|:---|:---|:---|:---|
+|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
+|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
+|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
+| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | [`GetBucketNotification`](#GetBucketNotification) |
|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | [`ListenBucketNotification`](#ListenBucketNotification) |
@@ -77,10 +77,10 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`endpoint` | _string_ |S3 object storage endpoint. |
-| `accessKeyID` |_string_ | Access key for the object storage endpoint. |
-| `secretAccessKey` | _string_ |Secret key for the object storage endpoint. |
-|`ssl` | _bool_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`endpoint` | _string_ |S3 compatible object storage endpoint |
+|`accessKeyID` |_string_ |Access key for the object storage |
+|`secretAccessKey` | _string_ |Secret key for the object storage |
+|`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise |
## 2. Bucket operations
@@ -94,8 +94,8 @@ __Parameters__
| Param | Type | Description |
|---|---|---|
-|`bucketName` | _string_ | Name of the bucket. |
-| `location` | _string_ | Default value is us-east-1 Region where the bucket is created. Valid values are listed below:|
+|`bucketName` | _string_ | Name of the bucket |
+| `location` | _string_ | Region where the bucket is to be created. Default value is us-east-1. Other valid values are listed below. Note: When used with minio server, use the region specified in its config file (defaults to us-east-1).|
| | |us-east-1 |
| | |us-west-1 |
| | |us-west-2 |
@@ -104,7 +104,7 @@ __Parameters__
| | | ap-southeast-1|
| | | ap-northeast-1|
| | | ap-southeast-2|
-| | | sa-east-1|
+| | | sa-east-1|
__Example__
@@ -128,30 +128,30 @@ Lists all buckets.
| Param | Type | Description |
|---|---|---|
-|`bucketList` | _[]BucketInfo_ | Lists bucket in following format shown below: |
+|`bucketList` | _[]BucketInfo_ | Lists of all buckets |
| Param | Type | Description |
|---|---|---|
-|`bucket.Name` | _string_ | bucket name. |
-|`bucket.CreationDate` | _time.Time_ | date when bucket was created. |
+|`bucket.Name` | _string_ | Name of the bucket |
+|`bucket.CreationDate` | _time.Time_ | Date of bucket creation |
- __Example__
+__Example__
-
- ```go
- buckets, err := minioClient.ListBuckets()
-if err != nil {
+```go
+
+buckets, err := minioClient.ListBuckets()
+ if err != nil {
fmt.Println(err)
return
}
for _, bucket := range buckets {
- fmt.Println(bucket)
-}
+ fmt.Println(bucket)
+}
- ```
+```
<a name="BucketExists"></a>
### BucketExists(bucketName string) (found bool, err error)
@@ -163,15 +163,15 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`found` | _bool_ | indicates whether bucket exists or not |
-|`err` | _error_ | standard error |
+|`found` | _bool_ | Indicates whether bucket exists or not |
+|`err` | _error_ | Standard Error |
__Example__
@@ -200,7 +200,7 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
__Example__
@@ -225,24 +225,24 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-| `objectPrefix` |_string_ | the prefix of the objects that should be listed. |
-| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
-|`doneCh` | _chan struct{}_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectPrefix` |_string_ | Prefix of objects to be listed |
+|`recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListObjects iterator. |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all the objects in the bucket, the object is of the format listed below: |
+|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all objects in the bucket, the object is of the format listed below: |
|Param |Type |Description |
|:---|:---| :---|
-|`objectInfo.Key` | _string_ |name of the object. |
-|`objectInfo.Size` | _int64_ |size of the object. |
-|`objectInfo.ETag` | _string_ |etag of the object. |
-|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+|`objectInfo.Key` | _string_ |Name of the object |
+|`objectInfo.Size` | _int64_ |Size of the object |
+|`objectInfo.ETag` | _string_ |MD5 checksum of the object |
+|`objectInfo.LastModified` | _time.Time_ |Time when object was last modified |
```go
@@ -269,17 +269,17 @@ for object := range objectCh {
<a name="ListObjectsV2"></a>
### ListObjectsV2(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
-Lists objects in a bucket using the recommanded listing API v2
+Lists objects in a bucket using the recommended listing API v2
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-| `objectPrefix` |_string_ | the prefix of the objects that should be listed. |
+|`bucketName` | _string_ |Name of the bucket |
+| `objectPrefix` |_string_ | Prefix of objects to be listed |
| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
-|`doneCh` | _chan struct{}_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListObjectsV2 iterator. |
__Return Value__
@@ -290,10 +290,10 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`objectInfo.Key` | _string_ |name of the object. |
-|`objectInfo.Size` | _int64_ |size of the object. |
-|`objectInfo.ETag` | _string_ |etag of the object. |
-|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+|`objectInfo.Key` | _string_ |Name of the object |
+|`objectInfo.Size` | _int64_ |Size of the object |
+|`objectInfo.ETag` | _string_ |MD5 checksum of the object |
+|`objectInfo.LastModified` | _time.Time_ |Time when object was last modified |
```go
@@ -327,25 +327,25 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-| `prefix` |_string_ | prefix of the object names that are partially uploaded |
+|`bucketName` | _string_ |Name of the bucket |
+| `prefix` |_string_ | Prefix of objects that are partially uploaded |
| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
-|`doneCh` | _chan struct{}_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenIncompleteUploads iterator. |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |emits multipart objects of the format listed below: |
+|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |Emits multipart objects of the format listed below: |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`multiPartObjInfo.Key` | _string_ |name of the incomplete object. |
-|`multiPartObjInfo.UploadID` | _string_ |upload ID of the incomplete object.|
-|`multiPartObjInfo.Size` | _int64_ |size of the incompletely uploaded object.|
+|`multiPartObjInfo.Key` | _string_ |Name of incompletely uploaded object |
+|`multiPartObjInfo.UploadID` | _string_ |Upload ID of incompletely uploaded object |
+|`multiPartObjInfo.Size` | _int64_ |Size of incompletely uploaded object |
__Example__
@@ -383,8 +383,8 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
__Return Value__
@@ -427,9 +427,9 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`filePath` | _string_ |path to which the object data will be written to. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to download object to |
__Example__
@@ -446,7 +446,7 @@ if err != nil {
```
<a name="PutObject"></a>
-### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int, err error)
+### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int, err error)
Uploads an object.
@@ -456,16 +456,16 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`reader` | _io.Reader_ |Any golang object implementing io.Reader. |
-|`contentType` | _string_ |content type of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
+|`contentType` | _string_ |Content type of the object |
__Example__
-Uploads objects that are less than 5MiB in a single PUT operation. For objects that are greater than the 5MiB in size, PutObject seamlessly uploads the object in chunks of 5MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
In the event that PutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, PutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
@@ -499,10 +499,10 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`objectSource` | _string_ |name of the object source. |
-|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`].|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`objectSource` | _string_ |Name of the source object |
+|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`]|
__Example__
@@ -537,7 +537,7 @@ if err != nil {
<a name="FPutObject"></a>
### FPutObject(bucketName string, objectName string, filePath string, contentType string) error
-Uploads contents from a file to objectName.
+Uploads contents from a file to objectName.
__Parameters__
@@ -545,16 +545,16 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`filePath` | _string_ |file path of the file to be uploaded. |
-|`contentType` | _string_ |content type of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to file to be uploaded |
+|`contentType` | _string_ |Content type of the object |
__Example__
-FPutObject uploads objects that are less than 5MiB in a single PUT operation. For objects that are greater than the 5MiB in size, FPutObject seamlessly uploads the object in chunks of 5MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than the 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
In the event that FPutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, FPutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
@@ -579,28 +579,28 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`objInfo` | _ObjectInfo_ |object stat info for format listed below: |
+|`objInfo` | _ObjectInfo_ |Object stat information |
|Param |Type |Description |
|:---|:---| :---|
-|`objInfo.LastModified` | _time.Time_ |modified time stamp. |
-|`objInfo.ETag` | _string_ |etag of the object.|
-|`objInfo.ContentType` | _string_ |Content-Type of the object.|
-|`objInfo.Size` | _int64_ |size of the object.|
+|`objInfo.LastModified` | _time.Time_ |Time when object was last modified |
+|`objInfo.ETag` | _string_ |MD5 checksum of the object|
+|`objInfo.ContentType` | _string_ |Content type of the object|
+|`objInfo.Size` | _int64_ |Size of the object|
__Example__
-
+
```go
objInfo, err := minioClient.StatObject("mybucket", "photo.jpg")
@@ -623,8 +623,8 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
```go
@@ -639,22 +639,22 @@ if err != nil {
<a name="RemoveObjects"></a>
### RemoveObjects(bucketName string, objectsCh chan string) errorCh chan minio.RemoveObjectError
-Removes a list of objects obtained from an input channel. The call internally buffers up `1000` at
-a time and initiates a delete request to the server. Upon any error is sent through the error channel.
+Removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time.
+The errors observed are sent over the error channel.
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectsCh` | _chan string_ | write prefixes of objects to be removed |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectsCh` | _chan string_ | Prefix of objects to be removed |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`errorCh` | _chan minio.RemoveObjectError | read objects deletion errors |
+|`errorCh` | _chan minio.RemoveObjectError_ | Channel of errors observed during deletion. |
@@ -679,8 +679,8 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
__Example__
@@ -708,10 +708,10 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`expiry` | _time.Duration_ |expiry in seconds. |
-|`reqParams` | _url.Values_ |additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`expiry` | _time.Duration_ |Expiry of presigned URL in seconds |
+|`reqParams` | _url.Values_ |Additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_. |
__Example__
@@ -738,7 +738,7 @@ if err != nil {
Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
NOTE: you can upload to S3 only with specified object name.
-
+
__Parameters__
@@ -746,9 +746,9 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectName` | _string_ |name of the object. |
-|`expiry` | _time.Duration_ |expiry in seconds. |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`expiry` | _time.Duration_ |Expiry of presigned URL in seconds |
__Example__
@@ -763,7 +763,7 @@ if err != nil {
fmt.Println(err)
return
}
- fmt.Println(presignedURL)
+fmt.Println(presignedURL)
```
@@ -833,9 +833,9 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket.|
-|`objectPrefix` | _string_ |name of the object prefix.|
-|`policy` | _policy.BucketPolicy_ |policy can be:|
+|`bucketName` | _string_ |Name of the bucket|
+|`objectPrefix` | _string_ |Name of the object prefix|
+|`policy` | _policy.BucketPolicy_ |Policy can be one of the following: |
|| |policy.BucketPolicyNone|
| | |policy.BucketPolicyReadOnly|
|| |policy.BucketPolicyReadWrite|
@@ -847,7 +847,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -875,8 +875,8 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectPrefix` | _string_ |name of the object prefix |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectPrefix` | _string_ |Prefix matching objects under the bucket |
__Return Values__
@@ -884,7 +884,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
|`bucketPolicy` | _policy.BucketPolicy_ |string that contains: `none`, `readonly`, `readwrite`, or `writeonly` |
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -910,16 +910,16 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`objectPrefix` | _string_ |name of the object prefix |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectPrefix` | _string_ |Prefix matching objects under the bucket |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketPolicies` | _map[string]BucketPolicy_ |map that contains object resources paths with their associated permissions |
-|`err` | _error_ |standard error |
+|`bucketPolicies` | _map[string]BucketPolicy_ |Map of object resource paths and their permissions |
+|`err` | _error_ |Standard Error |
__Example__
@@ -947,7 +947,7 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
__Return Values__
@@ -955,7 +955,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
|`bucketNotification` | _BucketNotification_ |structure which holds all notification configurations|
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -963,10 +963,11 @@ __Example__
```go
bucketNotification, err := minioClient.GetBucketNotification("mybucket")
if err != nil {
- for _, topicConfig := range bucketNotification.TopicConfigs {
- for _, e := range topicConfig.Events {
- fmt.Println(e + " event is enabled")
- }
+ log.Fatalf("Failed to get bucket notification configurations for mybucket - %v", err)
+}
+for _, topicConfig := range bucketNotification.TopicConfigs {
+ for _, e := range topicConfig.Events {
+ fmt.Println(e + " event is enabled")
}
}
```
@@ -981,15 +982,15 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
-|`bucketNotification` | _BucketNotification_ |bucket notification. |
+|`bucketName` | _string_ |Name of the bucket |
+|`bucketNotification` | _BucketNotification_ |Represents the XML to be sent to the configured web service |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -1006,7 +1007,7 @@ bucketNotification := BucketNotification{}
bucketNotification.AddTopic(topicConfig)
err := c.SetBucketNotification(bucketName, bucketNotification)
if err != nil {
- fmt.Println("Cannot set the bucket notification: " + err)
+ fmt.Println("Unable to set the bucket notification: " + err)
}
```
@@ -1020,14 +1021,14 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`err` | _error_ |standard error |
+|`err` | _error_ |Standard Error |
__Example__
@@ -1035,7 +1036,7 @@ __Example__
```go
err := c.RemoveAllBucketNotification(bucketName)
if err != nil {
- fmt.Println("Cannot remove bucket notifications.")
+ fmt.Println("Unable to remove bucket notifications.", err)
}
```
@@ -1056,20 +1057,20 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ | Bucket to listen notifications from. |
-|`prefix` | _string_ | Object key prefix to filter notifications for. |
-|`suffix` | _string_ | Object key suffix to filter notifications for. |
-|`events` | _[]string_| Enables notifications for specific event types. |
-|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification loop. |
+|`bucketName` | _string_ | Bucket to listen notifications on |
+|`prefix` | _string_ | Object key prefix to filter notifications for |
+|`suffix` | _string_ | Object key suffix to filter notifications for |
+|`events` | _[]string_| Enables notifications for specific event types |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification iterator |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`chan NotificationInfo` | _chan_ | Read channel for all notificatons on bucket. |
-|`NotificationInfo` | _object_ | Notification object represents events info. |
-|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events. |
-|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation. |
+|`chan NotificationInfo` | _chan_ | Read channel for all notifications on bucket |
+|`NotificationInfo` | _object_ | Notification object represents events info |
+|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events |
+|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation |
__Example__
@@ -1085,17 +1086,69 @@ defer close(doneCh)
// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
- "s3:ObjectCreated:*",
- "s3:ObjectRemoved:*",
-}, doneCh) {
- if notificationInfo.Err != nil {
- log.Fatalln(notificationInfo.Err)
- }
- log.Println(notificationInfo)
+ "s3:ObjectCreated:*",
+ "s3:ObjectRemoved:*",
+ }, doneCh) {
+ if notificationInfo.Err != nil {
+ log.Fatalln(notificationInfo.Err)
+ }
+ log.Println(notificationInfo)
}
```
-## 6. Explore Further
+## 6. Client custom settings
-- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app)
+<a name="SetAppInfo"></a>
+### SetAppInfo(appName string, appVersion string)
+Adds application details to User-Agent.
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`appName` | _string_ | Name of the application performing the API requests. |
+| `appVersion`| _string_ | Version of the application performing the API requests. |
+
+__Example__
+
+
+```go
+
+// Set Application name and version to be used in subsequent API requests.
+minioClient.SetAppInfo("myCloudApp", "1.0.0")
+
+```
+
+<a name="SetCustomTransport"></a>
+### SetCustomTransport(customHTTPTransport http.RoundTripper)
+Overrides default HTTP transport. This is usually needed for debugging
+or for adding custom TLS certificates.
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`customHTTPTransport` | _http.RoundTripper_ | Custom transport e.g, to trace API requests and responses for debugging purposes.|
+
+
+<a name="TraceOn"></a>
+### TraceOn(outputStream io.Writer)
+Enables HTTP tracing. The trace is written to the io.Writer
+provided. If outputStream is nil, trace is written to os.Stdout.
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`outputStream` | _io.Writer_ | HTTP trace is written into outputStream.|
+
+
+<a name="TraceOff"></a>
+### TraceOff()
+Disables HTTP tracing.
+
+
+## 7. Explore Further
+
+- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app)
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
index f618059cf..cbb889d8d 100644
--- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
@@ -34,7 +34,7 @@ const (
BucketPolicyWriteOnly = "writeonly"
)
-// isValidBucketPolicy - Is provided policy value supported.
+// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise.
func (p BucketPolicy) IsValidBucketPolicy() bool {
switch p {
case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
@@ -508,7 +508,7 @@ func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
return readOnly, writeOnly
}
-// Returns policy of given bucket name, prefix in given statements.
+// GetPolicy - Returns policy of given bucket name, prefix in given statements.
func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
bucketResource := awsResourcePrefix + bucketName
objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
@@ -563,7 +563,7 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP
return policy
}
-// GetPolicies returns a map of policies rules of given bucket name, prefix in given statements.
+// GetPolicies - returns a map of policies rules of given bucket name, prefix in given statements.
func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
policyRules := map[string]BucketPolicy{}
objResources := set.NewStringSet()
@@ -590,8 +590,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol
return policyRules
}
-// Returns new statements containing policy of given bucket name and
-// prefix are appended.
+// SetPolicy - Returns new statements containing policy of given bucket name and prefix are appended.
func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
out := removeStatements(statements, bucketName, prefix)
// fmt.Println("out = ")
diff --git a/vendor/github.com/minio/minio-go/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
index b9f248253..e1ec6c02c 100644
--- a/vendor/github.com/minio/minio-go/request-signature-v2.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package minio
+package s3signer
import (
"bytes"
@@ -29,6 +29,8 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@@ -45,22 +47,22 @@ func encodeURL2Path(u *url.URL) (path string) {
bucketName := hostSplits[0]
path = "/" + bucketName
path += u.Path
- path = urlEncodePath(path)
+ path = s3utils.EncodePath(path)
return
}
if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
path += u.Path
- path = urlEncodePath(path)
+ path = s3utils.EncodePath(path)
return
}
- path = urlEncodePath(u.Path)
+ path = s3utils.EncodePath(u.Path)
return
}
-// preSignV2 - presign the request in following style.
+// PreSignV2 - presign the request in following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
-func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -95,18 +97,18 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
// Encode query and save.
- req.URL.RawQuery = queryEncode(query)
+ req.URL.RawQuery = s3utils.QueryEncode(query)
// Save signature finally.
- req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
+ req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
// Return.
return &req
}
-// postPresignSignatureV2 - presigned signature for PostPolicy
+// PostPresignSignatureV2 - presigned signature for PostPolicy
// request.
-func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(policyBase64))
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
@@ -129,8 +131,8 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
//
// CanonicalizedProtocolHeaders = <described below>
-// signV2 sign the request before Do() (AWS Signature Version 2).
-func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
+// SignV2 sign the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -287,7 +289,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b
// Get encoded URL path.
if len(requestURL.Query()) > 0 {
// Keep the usual queries unescaped for string to sign.
- query, _ := url.QueryUnescape(queryEncode(requestURL.Query()))
+ query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
path = path + "?" + query
}
buf.WriteString(path)
diff --git a/vendor/github.com/minio/minio-go/request-signature-v2_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go
index 6d861fb81..3c0e0ecea 100644
--- a/vendor/github.com/minio/minio-go/request-signature-v2_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package minio
+package s3signer
import (
"sort"
diff --git a/vendor/github.com/minio/minio-go/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
index 2be3808d6..3322b67cc 100644
--- a/vendor/github.com/minio/minio-go/request-signature-v4.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package minio
+package s3signer
import (
"bytes"
@@ -24,6 +24,8 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@@ -101,8 +103,8 @@ func getScope(location string, t time.Time) string {
return scope
}
-// getCredential generate a credential string.
-func getCredential(accessKeyID, location string, t time.Time) string {
+// GetCredential generate a credential string.
+func GetCredential(accessKeyID, location string, t time.Time) string {
scope := getScope(location, t)
return accessKeyID + "/" + scope
}
@@ -185,7 +187,7 @@ func getCanonicalRequest(req http.Request) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
req.Method,
- urlEncodePath(req.URL.Path),
+ s3utils.EncodePath(req.URL.Path),
req.URL.RawQuery,
getCanonicalHeaders(req),
getSignedHeaders(req),
@@ -202,9 +204,9 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
return stringToSign
}
-// preSignV4 presign the request, in accordance with
+// PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
-func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -214,7 +216,7 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
t := time.Now().UTC()
// Get credential string.
- credential := getCredential(accessKeyID, location, t)
+ credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
@@ -246,9 +248,9 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
return &req
}
-// postPresignSignatureV4 - presigned signature for PostPolicy
+// PostPresignSignatureV4 - presigned signature for PostPolicy
// requests.
-func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
// Get signining key.
signingkey := getSigningKey(secretAccessKey, location, t)
// Calculate signature.
@@ -256,9 +258,9 @@ func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
return signature
}
-// signV4 sign the request before Do(), in accordance with
+// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
-func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -280,7 +282,7 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
signingKey := getSigningKey(secretAccessKey, location, t)
// Get credential string.
- credential := getCredential(accessKeyID, location, t)
+ credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
new file mode 100644
index 000000000..6f5ba1895
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
@@ -0,0 +1,70 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "net/http"
+ "strings"
+ "testing"
+)
+
+// Tests signature calculation.
+func TestSignatureCalculation(t *testing.T) {
+ req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ req = SignV4(*req, "", "", "us-east-1")
+ if req.Header.Get("Authorization") != "" {
+ t.Fatal("Error: anonymous credentials should not have Authorization header.")
+ }
+
+ req = PreSignV4(*req, "", "", "us-east-1", 0)
+ if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
+ t.Fatal("Error: anonymous credentials should not have Signature query resource.")
+ }
+
+ req = SignV2(*req, "", "")
+ if req.Header.Get("Authorization") != "" {
+ t.Fatal("Error: anonymous credentials should not have Authorization header.")
+ }
+
+ req = PreSignV2(*req, "", "", 0)
+ if strings.Contains(req.URL.RawQuery, "Signature") {
+ t.Fatal("Error: anonymous credentials should not have Signature query resource.")
+ }
+
+ req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
+ if req.Header.Get("Authorization") == "" {
+ t.Fatal("Error: normal credentials should have Authorization header.")
+ }
+
+ req = PreSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
+ if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
+ t.Fatal("Error: normal credentials should have Signature query resource.")
+ }
+
+ req = SignV2(*req, "ACCESS-KEY", "SECRET-KEY")
+ if req.Header.Get("Authorization") == "" {
+ t.Fatal("Error: normal credentials should have Authorization header.")
+ }
+
+ req = PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
+ if !strings.Contains(req.URL.RawQuery, "Signature") {
+ t.Fatal("Error: normal credentials should not have Signature query resource.")
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
new file mode 100644
index 000000000..0619b3082
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
@@ -0,0 +1,39 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+)
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// the request payload is excluded from signature calculation.
+// NOTE(review): the upstream comment was truncated mid-sentence — confirm
+// the intended wording against the AWS SigV4 unsigned-payload option.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculates the SHA-256 checksum of the input byte slice and
+// returns the raw 32-byte digest.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumHMAC calculates the HMAC-SHA256 of data using the given key and
+// returns the raw 32-byte digest.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go
new file mode 100644
index 000000000..b266e42a1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go
@@ -0,0 +1,66 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "fmt"
+ "net/url"
+ "testing"
+)
+
+// Tests url encoding.
+//
+// Verifies that encodeURL2Path percent-encodes the object-name part of a
+// virtual-host style S3 URL (UTF-8 runes, '>', spaces, '+') while leaving
+// unreserved characters intact.
+func TestEncodeURL2Path(t *testing.T) {
+ type urlStrings struct {
+ objName string
+ encodedObjName string
+ }
+
+ bucketName := "bucketName"
+ want := []urlStrings{
+ {
+ objName: "本語",
+ encodedObjName: "%E6%9C%AC%E8%AA%9E",
+ },
+ {
+ objName: "本語.1",
+ encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
+ },
+ {
+ objName: ">123>3123123",
+ encodedObjName: "%3E123%3E3123123",
+ },
+ {
+ objName: "test 1 2.txt",
+ encodedObjName: "test%201%202.txt",
+ },
+ {
+ objName: "test++ 1.txt",
+ encodedObjName: "test%2B%2B%201.txt",
+ },
+ }
+
+ for _, o := range want {
+ u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ urlPath := "/" + bucketName + "/" + o.encodedObjName
+ // Report both values on mismatch instead of a bare "Error".
+ if got := encodeURL2Path(u); got != urlPath {
+ t.Fatalf("Error: expected url path %q, got %q", urlPath, got)
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
new file mode 100644
index 000000000..ae1cea337
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -0,0 +1,195 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "bytes"
+ "encoding/hex"
+ "net"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+// It performs cheap syntactic checks only (length, leading/trailing
+// '-'/'_'/'.', and a deny-list of punctuation); it does not fully
+// implement RFC 1035, so some invalid names pass and fail later.
+func IsValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start or end with a "."
+ if host[len(host)-1:] == "." || host[:1] == "." {
+ return false
+ }
+ // All non alphanumeric characters are invalid.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
+ return false
+ }
+ // No need to regexp match, since the list is non-exhaustive.
+ // We let it valid and fail later.
+ return true
+}
+
+// IsValidIP parses input string for ip address validity.
+// Accepts both IPv4 and IPv6 textual forms (anything net.ParseIP accepts).
+func IsValidIP(ip string) bool {
+ return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// virtual host. Currently only Amazon S3 and Google Cloud Storage
+// would support this. Returns false for the zero-value (sentinel) URL
+// and for HTTPS endpoints when the bucket name contains a dot.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ // bucketName can be valid but '.' in the hostname will fail SSL
+ // certificate validation. So do not use host-style for such buckets.
+ if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+ return false
+ }
+ // Return true for all other cases
+ return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+// True for s3.amazonaws.com, the China (Beijing) endpoint, and the
+// S3 Transfer Acceleration endpoint; false for everything else.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+ if IsAmazonChinaEndpoint(endpointURL) {
+ return true
+ }
+
+ if IsAmazonS3AccelerateEndpoint(endpointURL) {
+ return true
+ }
+
+ return endpointURL.Host == "s3.amazonaws.com"
+}
+
+// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
+// Customers who wish to use the new Beijing Region are required
+// to sign up for a separate set of account credentials unique to
+// the China (Beijing) Region. Customers with existing AWS credentials
+// will not be able to access resources in the new Region, and vice versa.
+// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
+func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
+}
+
+// IsAmazonS3AccelerateEndpoint - Match if it is an Amazon S3 Accelerate
+// (Transfer Acceleration) endpoint, i.e. s3-accelerate.amazonaws.com.
+func IsAmazonS3AccelerateEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "s3-accelerate.amazonaws.com"
+}
+
+// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint
+// (storage.googleapis.com); false for the zero-value (sentinel) URL.
+func IsGoogleEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "storage.googleapis.com"
+}
+
+// percentEncodeSlash replaces every '/' with "%2F".
+// Expects ASCII (already percent-encoded) strings — i.e. the output of
+// EncodePath — since it does no other escaping itself.
+func percentEncodeSlash(s string) string {
+ return strings.Replace(s, "/", "%2F", -1)
+}
+
+// QueryEncode - encodes query values in their URL encoded form. In
+// addition to the percent encoding performed by EncodePath() used
+// here, it also percent encodes '/' (forward slash). Keys are sorted
+// so the output is deterministic (required for signature calculation).
+func QueryEncode(v url.Values) string {
+ if v == nil {
+ return ""
+ }
+ var buf bytes.Buffer
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ prefix := percentEncodeSlash(EncodePath(k)) + "="
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(percentEncodeSlash(EncodePath(v)))
+ }
+ }
+ return buf.String()
+}
+
+// if object matches reserved string, no need to encode them
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
+//
+// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
+// non english characters cannot be parsed due to the nature in which url.Encode() is written
+//
+// This function on the other hand is a direct replacement for url.Encode() technique to support
+// pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ // runeLen (renamed from "len", which shadowed the builtin).
+ runeLen := utf8.RuneLen(s)
+ if runeLen < 0 {
+ // if utf8 cannot convert return the same string as is
+ return pathName
+ }
+ u := make([]byte, runeLen)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ // hexStr (renamed from "hex", which shadowed the hex package).
+ hexStr := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hexStr)
+ }
+ }
+ }
+ return encodedPathname
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
new file mode 100644
index 000000000..f790861cd
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
@@ -0,0 +1,284 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "net/url"
+ "testing"
+)
+
+// Tests for 'isValidDomain(host string) bool'.
+func TestIsValidDomain(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ host string
+ // Expected result.
+ result bool
+ }{
+ {"s3.amazonaws.com", true},
+ {"s3.cn-north-1.amazonaws.com.cn", true},
+ {"s3.amazonaws.com_", false},
+ {"%$$$", false},
+ {"s3.amz.test.com", true},
+ {"s3.%%", false},
+ {"localhost", true},
+ {"-localhost", false},
+ {"", false},
+ {"\n \t", false},
+ {" ", false},
+ }
+
+ for i, testCase := range testCases {
+ result := IsValidDomain(testCase.host)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
+ }
+ }
+}
+
+// Tests validate IP address validator.
+func TestIsValidIP(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ ip string
+ // Expected result.
+ result bool
+ }{
+ {"192.168.1.1", true},
+ {"192.168.1", false},
+ {"192.168.1.1.1", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ }
+
+ for i, testCase := range testCases {
+ result := IsValidIP(testCase.ip)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
+ }
+ }
+
+}
+
+// Tests validate virtual host validator.
+func TestIsVirtualHostSupported(t *testing.T) {
+ testCases := []struct {
+ url string
+ bucket string
+ // Expected result.
+ result bool
+ }{
+ {"https://s3.amazonaws.com", "my-bucket", true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
+ {"https://s3.amazonaws.com", "my-bucket.", false},
+ {"https://amazons3.amazonaws.com", "my-bucket.", false},
+ {"https://storage.googleapis.com/", "my-bucket", true},
+ {"https://mystorage.googleapis.com/", "my-bucket", false},
+ }
+
+ for i, testCase := range testCases {
+ u, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ result := IsVirtualHostSupported(*u, testCase.bucket)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
+ }
+ }
+}
+
+// Tests validate Amazon endpoint validator.
+func TestIsAmazonEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"https://192.168.1.1", false},
+ {"192.168.1.1", false},
+ {"http://storage.googleapis.com", false},
+ {"https://storage.googleapis.com", false},
+ {"storage.googleapis.com", false},
+ {"s3.amazonaws.com", false},
+ {"https://amazons3.amazonaws.com", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // valid inputs.
+ {"https://s3.amazonaws.com", true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", true},
+ }
+
+ for i, testCase := range testCases {
+ u, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ result := IsAmazonEndpoint(*u)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate Amazon S3 China endpoint validator.
+func TestIsAmazonChinaEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"https://192.168.1.1", false},
+ {"192.168.1.1", false},
+ {"http://storage.googleapis.com", false},
+ {"https://storage.googleapis.com", false},
+ {"storage.googleapis.com", false},
+ {"s3.amazonaws.com", false},
+ {"https://amazons3.amazonaws.com", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // s3.amazonaws.com is not a valid Amazon S3 China end point.
+ {"https://s3.amazonaws.com", false},
+ // valid input.
+ {"https://s3.cn-north-1.amazonaws.com.cn", true},
+ }
+
+ for i, testCase := range testCases {
+ u, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ result := IsAmazonChinaEndpoint(*u)
+ if testCase.result != result {
+ // Fixed: name the function under test (was "isAmazonEndpoint").
+ t.Errorf("Test %d: Expected isAmazonChinaEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate Google Cloud end point validator.
+func TestIsGoogleEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"192.168.1.1", false},
+ {"https://192.168.1.1", false},
+ {"s3.amazonaws.com", false},
+ {"http://s3.amazonaws.com", false},
+ {"https://s3.amazonaws.com", false},
+ {"https://s3.cn-north-1.amazonaws.com.cn", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // valid inputs.
+ {"http://storage.googleapis.com", true},
+ {"https://storage.googleapis.com", true},
+ }
+
+ for i, testCase := range testCases {
+ u, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ result := IsGoogleEndpoint(*u)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+func TestPercentEncodeSlash(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {"test123", "test123"},
+ {"abc,+_1", "abc,+_1"},
+ {"%40prefix=test%40123", "%40prefix=test%40123"},
+ {"key1=val1/val2", "key1=val1%2Fval2"},
+ {"%40prefix=test%40123/", "%40prefix=test%40123%2F"},
+ }
+
+ for i, testCase := range testCases {
+ receivedOutput := percentEncodeSlash(testCase.input)
+ if testCase.output != receivedOutput {
+ t.Errorf(
+ "Test %d: Input: \"%s\" --> Expected percentEncodeSlash to return \"%s\", but it returned \"%s\" instead!",
+ i+1, testCase.input, testCase.output,
+ receivedOutput,
+ )
+
+ }
+ }
+}
+
+// Tests validate the query encoder.
+func TestQueryEncode(t *testing.T) {
+ testCases := []struct {
+ queryKey string
+ valueToEncode []string
+ // Expected result.
+ result string
+ }{
+ {"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
+ {"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
+ {"@prefix", []string{"a/b/c/"}, "%40prefix=a%2Fb%2Fc%2F"},
+ {"prefix", []string{"test#123"}, "prefix=test%23123"},
+ {"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
+ {"prefix", []string{"test123"}, "prefix=test123"},
+ {"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
+ }
+
+ for i, testCase := range testCases {
+ urlValues := make(url.Values)
+ for _, valueToEncode := range testCase.valueToEncode {
+ urlValues.Add(testCase.queryKey, valueToEncode)
+ }
+ result := QueryEncode(urlValues)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
+ }
+ }
+}
+
+// Tests validate the URL path encoder.
+func TestEncodePath(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ inputStr string
+ // Expected result.
+ result string
+ }{
+ {"thisisthe%url", "thisisthe%25url"},
+ {"本語", "%E6%9C%AC%E8%AA%9E"},
+ {"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
+ {">123", "%3E123"},
+ {"myurl#link", "myurl%23link"},
+ {"space in url", "space%20in%20url"},
+ {"url+path", "url%2Bpath"},
+ }
+
+ for i, testCase := range testCases {
+ result := EncodePath(testCase.inputStr)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go
index 2a675d770..5e716124a 100644
--- a/vendor/github.com/minio/minio-go/post-policy.go
+++ b/vendor/github.com/minio/minio-go/post-policy.go
@@ -149,6 +149,24 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
return nil
}
+// SetSuccessStatusAction - Sets the status success code of the object for this policy
+// based upload. Returns ErrInvalidArgument if status is empty or whitespace-only.
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+ // TrimSpace(status) == "" already covers status == "", so the
+ // previous redundant second check was dropped.
+ if strings.TrimSpace(status) == "" {
+ return ErrInvalidArgument("Status is empty")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$success_action_status",
+ value: status,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["success_action_status"] = status
+ return nil
+}
+
// addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
diff --git a/vendor/github.com/minio/minio-go/retry-continous.go b/vendor/github.com/minio/minio-go/retry-continous.go
new file mode 100644
index 000000000..e300af69c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/retry-continous.go
@@ -0,0 +1,52 @@
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+// It returns a channel that yields the attempt number (0, 1, 2, ...); after each
+// receive the goroutine sleeps for the backoff duration before offering the next
+// attempt. Closing doneCh stops the goroutine and closes the returned channel.
+// NOTE(review): "Continous" is a typo for "Continuous" kept for compatibility.
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+ attemptCh := make(chan int)
+
+ // normalize jitter to the range [0, 1.0]
+ if jitter < NoJitter {
+ jitter = NoJitter
+ }
+ if jitter > MaxJitter {
+ jitter = MaxJitter
+ }
+
+ // computes the exponential backoff duration according to
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html
+ exponentialBackoffWait := func(attempt int) time.Duration {
+ // 1<<uint(attempt) below could overflow, so limit the value of attempt
+ maxAttempt := 30
+ if attempt > maxAttempt {
+ attempt = maxAttempt
+ }
+ //sleep = random_between(0, min(cap, base * 2 ** attempt))
+ sleep := unit * time.Duration(1<<uint(attempt))
+ if sleep > cap {
+ sleep = cap
+ }
+ if jitter != NoJitter {
+ sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+ }
+ return sleep
+ }
+
+ go func() {
+ defer close(attemptCh)
+ var nextBackoff int
+ for {
+ select {
+ // Attempts starts.
+ case attemptCh <- nextBackoff:
+ nextBackoff++
+ case <-doneCh:
+ // Stop the routine.
+ return
+ }
+ time.Sleep(exponentialBackoffWait(nextBackoff))
+ }
+ }()
+ return attemptCh
+}
diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go
index 3f159bd9d..d7fa5e038 100644
--- a/vendor/github.com/minio/minio-go/s3-endpoints.go
+++ b/vendor/github.com/minio/minio-go/s3-endpoints.go
@@ -20,9 +20,12 @@ package minio
// "cn-north-1" adds support for AWS China.
var awsS3EndpointMap = map[string]string{
"us-east-1": "s3.amazonaws.com",
+ "us-east-2": "s3-us-east-2.amazonaws.com",
"us-west-2": "s3-us-west-2.amazonaws.com",
"us-west-1": "s3-us-west-1.amazonaws.com",
+ "ca-central-1": "s3.ca-central-1.amazonaws.com",
"eu-west-1": "s3-eu-west-1.amazonaws.com",
+ "eu-west-2": "s3-eu-west-2.amazonaws.com",
"eu-central-1": "s3-eu-central-1.amazonaws.com",
"ap-south-1": "s3-ap-south-1.amazonaws.com",
"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
diff --git a/vendor/github.com/minio/minio-go/test-utils_test.go b/vendor/github.com/minio/minio-go/test-utils_test.go
index 179c28a23..4134af996 100644
--- a/vendor/github.com/minio/minio-go/test-utils_test.go
+++ b/vendor/github.com/minio/minio-go/test-utils_test.go
@@ -21,6 +21,7 @@ import (
"encoding/xml"
"io/ioutil"
"net/http"
+ "strconv"
)
// Contains common used utilities for tests.
@@ -62,3 +63,12 @@ func encodeResponse(response interface{}) []byte {
encode.Encode(response)
return bytesBuffer.Bytes()
}
+
+// Convert string to bool and always return true if any error
+func mustParseBool(str string) bool {
+ b, err := strconv.ParseBool(str)
+ if err != nil {
+ return true
+ }
+ return b
+}
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
index 2208d3603..93cd1712f 100644
--- a/vendor/github.com/minio/minio-go/utils.go
+++ b/vendor/github.com/minio/minio-go/utils.go
@@ -17,11 +17,8 @@
package minio
import (
- "bytes"
- "crypto/hmac"
"crypto/md5"
"crypto/sha256"
- "encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
@@ -29,10 +26,11 @@ import (
"net/http"
"net/url"
"regexp"
- "sort"
"strings"
"time"
"unicode/utf8"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// xmlDecoder provide decoded value in xml.
@@ -55,13 +53,6 @@ func sumMD5(data []byte) []byte {
return hash.Sum(nil)
}
-// sumHMAC calculate hmac between two input byte array.
-func sumHMAC(key []byte, data []byte) []byte {
- hash := hmac.New(sha256.New, key)
- hash.Write(data)
- return hash.Sum(nil)
-}
-
// getEndpointURL - construct a new endpoint.
func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if strings.Contains(endpoint, ":") {
@@ -69,12 +60,12 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if err != nil {
return nil, err
}
- if !isValidIP(host) && !isValidDomain(host) {
+ if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
} else {
- if !isValidIP(endpoint) && !isValidDomain(endpoint) {
+ if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
@@ -93,45 +84,12 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
}
// Validate incoming endpoint URL.
- if err := isValidEndpointURL(endpointURL.String()); err != nil {
+ if err := isValidEndpointURL(*endpointURL); err != nil {
return nil, err
}
return endpointURL, nil
}
-// isValidDomain validates if input string is a valid domain name.
-func isValidDomain(host string) bool {
- // See RFC 1035, RFC 3696.
- host = strings.TrimSpace(host)
- if len(host) == 0 || len(host) > 255 {
- return false
- }
- // host cannot start or end with "-"
- if host[len(host)-1:] == "-" || host[:1] == "-" {
- return false
- }
- // host cannot start or end with "_"
- if host[len(host)-1:] == "_" || host[:1] == "_" {
- return false
- }
- // host cannot start or end with a "."
- if host[len(host)-1:] == "." || host[:1] == "." {
- return false
- }
- // All non alphanumeric characters are invalid.
- if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
- return false
- }
- // No need to regexp match, since the list is non-exhaustive.
- // We let it valid and fail later.
- return true
-}
-
-// isValidIP parses input string for ip address validity.
-func isValidIP(ip string) bool {
- return net.ParseIP(ip) != nil
-}
-
// closeResponse close non nil response with any response Body.
// convenient wrapper to drain any remaining data on response body.
//
@@ -152,92 +110,24 @@ func closeResponse(resp *http.Response) {
}
}
-// isVirtualHostSupported - verifies if bucketName can be part of
-// virtual host. Currently only Amazon S3 and Google Cloud Storage
-// would support this.
-func isVirtualHostSupported(endpointURL string, bucketName string) bool {
- url, err := url.Parse(endpointURL)
- if err != nil {
- return false
- }
- // bucketName can be valid but '.' in the hostname will fail SSL
- // certificate validation. So do not use host-style for such buckets.
- if url.Scheme == "https" && strings.Contains(bucketName, ".") {
- return false
- }
- // Return true for all other cases
- return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
-}
-
-// Match if it is exactly Amazon S3 endpoint.
-func isAmazonEndpoint(endpointURL string) bool {
- if isAmazonChinaEndpoint(endpointURL) {
- return true
- }
- url, err := url.Parse(endpointURL)
- if err != nil {
- return false
- }
- if url.Host == "s3.amazonaws.com" {
- return true
- }
- return false
-}
-
-// Match if it is exactly Amazon S3 China endpoint.
-// Customers who wish to use the new Beijing Region are required
-// to sign up for a separate set of account credentials unique to
-// the China (Beijing) Region. Customers with existing AWS credentials
-// will not be able to access resources in the new Region, and vice versa.
-// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
-func isAmazonChinaEndpoint(endpointURL string) bool {
- if endpointURL == "" {
- return false
- }
- url, err := url.Parse(endpointURL)
- if err != nil {
- return false
- }
- if url.Host == "s3.cn-north-1.amazonaws.com.cn" {
- return true
- }
- return false
-}
-
-// Match if it is exactly Google cloud storage endpoint.
-func isGoogleEndpoint(endpointURL string) bool {
- if endpointURL == "" {
- return false
- }
- url, err := url.Parse(endpointURL)
- if err != nil {
- return false
- }
- if url.Host == "storage.googleapis.com" {
- return true
- }
- return false
-}
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
// Verify if input endpoint URL is valid.
-func isValidEndpointURL(endpointURL string) error {
- if endpointURL == "" {
+func isValidEndpointURL(endpointURL url.URL) error {
+ if endpointURL == sentinelURL {
return ErrInvalidArgument("Endpoint url cannot be empty.")
}
- url, err := url.Parse(endpointURL)
- if err != nil {
+ if endpointURL.Path != "/" && endpointURL.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
- if url.Path != "/" && url.Path != "" {
- return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
- }
- if strings.Contains(endpointURL, ".amazonaws.com") {
- if !isAmazonEndpoint(endpointURL) {
+ if strings.Contains(endpointURL.Host, ".amazonaws.com") {
+ if !s3utils.IsAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
}
}
- if strings.Contains(endpointURL, ".googleapis.com") {
- if !isGoogleEndpoint(endpointURL) {
+ if strings.Contains(endpointURL.Host, ".googleapis.com") {
+ if !s3utils.IsGoogleEndpoint(endpointURL) {
return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
}
}
@@ -260,6 +150,9 @@ func isValidExpiry(expires time.Duration) error {
// style requests instead for such buckets.
var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+// Invalid bucket name with double dot.
+var invalidDotBucketName = regexp.MustCompile(`\.\.`)
+
// isValidBucketName - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func isValidBucketName(bucketName string) error {
@@ -275,7 +168,7 @@ func isValidBucketName(bucketName string) error {
if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
}
- if match, _ := regexp.MatchString("\\.\\.", bucketName); match {
+ if invalidDotBucketName.MatchString(bucketName) {
return ErrInvalidBucketName("Bucket name cannot have successive periods.")
}
if !validBucketName.MatchString(bucketName) {
@@ -310,74 +203,25 @@ func isValidObjectPrefix(objectPrefix string) error {
return nil
}
-//expects ascii encoded strings - from output of urlEncodePath
-func percentEncodeSlash(s string) string {
- return strings.Replace(s, "/", "%2F", -1)
-}
-
-// queryEncode - encodes query values in their URL encoded form. In
-// addition to the percent encoding performed by urlEncodePath() used
-// here, it also percent encodes '/' (forward slash)
-func queryEncode(v url.Values) string {
- if v == nil {
- return ""
- }
- var buf bytes.Buffer
- keys := make([]string, 0, len(v))
- for k := range v {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- vs := v[k]
- prefix := percentEncodeSlash(urlEncodePath(k)) + "="
- for _, v := range vs {
- if buf.Len() > 0 {
- buf.WriteByte('&')
- }
- buf.WriteString(prefix)
- buf.WriteString(percentEncodeSlash(urlEncodePath(v)))
- }
+// make a copy of http.Header
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
}
- return buf.String()
+ return h2
}
-// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
-//
-// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
-// non english characters cannot be parsed due to the nature in which url.Encode() is written
-//
-// This function on the other hand is a direct replacement for url.Encode() technique to support
-// pretty much every UTF-8 character.
-func urlEncodePath(pathName string) string {
- // if object matches reserved string, no need to encode them
- reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
- if reservedNames.MatchString(pathName) {
- return pathName
- }
- var encodedPathname string
- for _, s := range pathName {
- if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- }
- switch s {
- case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- default:
- len := utf8.RuneLen(s)
- if len < 0 {
- // if utf8 cannot convert return the same string as is
- return pathName
- }
- u := make([]byte, len)
- utf8.EncodeRune(u, s)
- for _, r := range u {
- hex := hex.EncodeToString([]byte{r})
- encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
- }
- }
+// Filter relevant response headers from
+// the HEAD, GET http response. The function takes
+// a list of headers which are filtered out and
+// returned as a new http header.
+func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
+ filteredHeader = cloneHeader(header)
+ for _, key := range filterKeys {
+ filteredHeader.Del(key)
}
- return encodedPathname
+ return filteredHeader
}
diff --git a/vendor/github.com/minio/minio-go/utils_test.go b/vendor/github.com/minio/minio-go/utils_test.go
index 1a30d5441..99bdea329 100644
--- a/vendor/github.com/minio/minio-go/utils_test.go
+++ b/vendor/github.com/minio/minio-go/utils_test.go
@@ -17,11 +17,27 @@ package minio
import (
"fmt"
+ "net/http"
"net/url"
"testing"
"time"
)
+// Tests filter header function by filtering out
+// some custom header keys.
+func TestFilterHeader(t *testing.T) {
+ header := http.Header{}
+ header.Set("Content-Type", "binary/octet-stream")
+ header.Set("Content-Encoding", "gzip")
+ newHeader := filterHeader(header, []string{"Content-Type"})
+ if len(newHeader) > 1 {
+ t.Fatalf("Unexpected size of the returned header, should be 1, got %d", len(newHeader))
+ }
+ if newHeader.Get("Content-Encoding") != "gzip" {
+ t.Fatalf("Unexpected content-encoding value, expected 'gzip', got %s", newHeader.Get("Content-Encoding"))
+ }
+}
+
// Tests for 'getEndpointURL(endpoint string, inSecure bool)'.
func TestGetEndpointURL(t *testing.T) {
testCases := []struct {
@@ -74,35 +90,6 @@ func TestGetEndpointURL(t *testing.T) {
}
}
-// Tests for 'isValidDomain(host string) bool'.
-func TestIsValidDomain(t *testing.T) {
- testCases := []struct {
- // Input.
- host string
- // Expected result.
- result bool
- }{
- {"s3.amazonaws.com", true},
- {"s3.cn-north-1.amazonaws.com.cn", true},
- {"s3.amazonaws.com_", false},
- {"%$$$", false},
- {"s3.amz.test.com", true},
- {"s3.%%", false},
- {"localhost", true},
- {"-localhost", false},
- {"", false},
- {"\n \t", false},
- {" ", false},
- }
-
- for i, testCase := range testCases {
- result := isValidDomain(testCase.host)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
- }
- }
-}
-
// Tests validate end point validator.
func TestIsValidEndpointURL(t *testing.T) {
testCases := []struct {
@@ -125,161 +112,33 @@ func TestIsValidEndpointURL(t *testing.T) {
}
for i, testCase := range testCases {
- err := isValidEndpointURL(testCase.url)
+ var u url.URL
+ if testCase.url == "" {
+ u = sentinelURL
+ } else {
+ u1, err := url.Parse(testCase.url)
+ if err != nil {
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+ }
+ u = *u1
+ }
+ err := isValidEndpointURL(u)
if err != nil && testCase.shouldPass {
- t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+ t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
}
if err == nil && !testCase.shouldPass {
- t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+ t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err)
}
// Failed as expected, but does it fail for the expected reason.
if err != nil && !testCase.shouldPass {
if err.Error() != testCase.err.Error() {
- t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err, err)
}
}
}
}
-// Tests validate IP address validator.
-func TestIsValidIP(t *testing.T) {
- testCases := []struct {
- // Input.
- ip string
- // Expected result.
- result bool
- }{
- {"192.168.1.1", true},
- {"192.168.1", false},
- {"192.168.1.1.1", false},
- {"-192.168.1.1", false},
- {"260.192.1.1", false},
- }
-
- for i, testCase := range testCases {
- result := isValidIP(testCase.ip)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
- }
- }
-
-}
-
-// Tests validate virtual host validator.
-func TestIsVirtualHostSupported(t *testing.T) {
- testCases := []struct {
- url string
- bucket string
- // Expeceted result.
- result bool
- }{
- {"https://s3.amazonaws.com", "my-bucket", true},
- {"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
- {"https://s3.amazonaws.com", "my-bucket.", false},
- {"https://amazons3.amazonaws.com", "my-bucket.", false},
- {"https://storage.googleapis.com/", "my-bucket", true},
- {"https://mystorage.googleapis.com/", "my-bucket", false},
- }
-
- for i, testCase := range testCases {
- result := isVirtualHostSupported(testCase.url, testCase.bucket)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
- }
- }
-}
-
-// Tests validate Amazon endpoint validator.
-func TestIsAmazonEndpoint(t *testing.T) {
- testCases := []struct {
- url string
- // Expected result.
- result bool
- }{
- {"https://192.168.1.1", false},
- {"192.168.1.1", false},
- {"http://storage.googleapis.com", false},
- {"https://storage.googleapis.com", false},
- {"storage.googleapis.com", false},
- {"s3.amazonaws.com", false},
- {"https://amazons3.amazonaws.com", false},
- {"-192.168.1.1", false},
- {"260.192.1.1", false},
- // valid inputs.
- {"https://s3.amazonaws.com", true},
- {"https://s3.cn-north-1.amazonaws.com.cn", true},
- }
-
- for i, testCase := range testCases {
- result := isAmazonEndpoint(testCase.url)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
- }
- }
-
-}
-
-// Tests validate Amazon S3 China endpoint validator.
-func TestIsAmazonChinaEndpoint(t *testing.T) {
- testCases := []struct {
- url string
- // Expected result.
- result bool
- }{
- {"https://192.168.1.1", false},
- {"192.168.1.1", false},
- {"http://storage.googleapis.com", false},
- {"https://storage.googleapis.com", false},
- {"storage.googleapis.com", false},
- {"s3.amazonaws.com", false},
- {"https://amazons3.amazonaws.com", false},
- {"-192.168.1.1", false},
- {"260.192.1.1", false},
- // s3.amazonaws.com is not a valid Amazon S3 China end point.
- {"https://s3.amazonaws.com", false},
- // valid input.
- {"https://s3.cn-north-1.amazonaws.com.cn", true},
- }
-
- for i, testCase := range testCases {
- result := isAmazonChinaEndpoint(testCase.url)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
- }
- }
-
-}
-
-// Tests validate Google Cloud end point validator.
-func TestIsGoogleEndpoint(t *testing.T) {
- testCases := []struct {
- url string
- // Expected result.
- result bool
- }{
- {"192.168.1.1", false},
- {"https://192.168.1.1", false},
- {"s3.amazonaws.com", false},
- {"http://s3.amazonaws.com", false},
- {"https://s3.amazonaws.com", false},
- {"https://s3.cn-north-1.amazonaws.com.cn", false},
- {"-192.168.1.1", false},
- {"260.192.1.1", false},
- // valid inputs.
- {"http://storage.googleapis.com", true},
- {"https://storage.googleapis.com", true},
- }
-
- for i, testCase := range testCases {
- result := isGoogleEndpoint(testCase.url)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
- }
- }
-
-}
-
// Tests validate the expiry time validator.
func TestIsValidExpiry(t *testing.T) {
testCases := []struct {
@@ -355,82 +214,3 @@ func TestIsValidBucketName(t *testing.T) {
}
}
-
-func TestPercentEncodeSlash(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {"test123", "test123"},
- {"abc,+_1", "abc,+_1"},
- {"%40prefix=test%40123", "%40prefix=test%40123"},
- {"key1=val1/val2", "key1=val1%2Fval2"},
- {"%40prefix=test%40123/", "%40prefix=test%40123%2F"},
- }
-
- for i, testCase := range testCases {
- receivedOutput := percentEncodeSlash(testCase.input)
- if testCase.output != receivedOutput {
- t.Errorf(
- "Test %d: Input: \"%s\" --> Expected percentEncodeSlash to return \"%s\", but it returned \"%s\" instead!",
- i+1, testCase.input, testCase.output,
- receivedOutput,
- )
-
- }
- }
-}
-
-// Tests validate the query encoder.
-func TestQueryEncode(t *testing.T) {
- testCases := []struct {
- queryKey string
- valueToEncode []string
- // Expected result.
- result string
- }{
- {"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
- {"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
- {"@prefix", []string{"a/b/c/"}, "%40prefix=a%2Fb%2Fc%2F"},
- {"prefix", []string{"test#123"}, "prefix=test%23123"},
- {"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
- {"prefix", []string{"test123"}, "prefix=test123"},
- {"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
- }
-
- for i, testCase := range testCases {
- urlValues := make(url.Values)
- for _, valueToEncode := range testCase.valueToEncode {
- urlValues.Add(testCase.queryKey, valueToEncode)
- }
- result := queryEncode(urlValues)
- if testCase.result != result {
- t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
- }
- }
-}
-
-// Tests validate the URL path encoder.
-func TestUrlEncodePath(t *testing.T) {
- testCases := []struct {
- // Input.
- inputStr string
- // Expected result.
- result string
- }{
- {"thisisthe%url", "thisisthe%25url"},
- {"本語", "%E6%9C%AC%E8%AA%9E"},
- {"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
- {">123", "%3E123"},
- {"myurl#link", "myurl%23link"},
- {"space in url", "space%20in%20url"},
- {"url+path", "url%2Bpath"},
- }
-
- for i, testCase := range testCases {
- result := urlEncodePath(testCase.inputStr)
- if testCase.result != result {
- t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
- }
- }
-}
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go b/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go
index 8e46fa296..155543bda 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go
@@ -4,14 +4,14 @@ package bundle
import (
"encoding/json"
"fmt"
- "gopkg.in/yaml.v2"
"io/ioutil"
- "reflect"
-
"path/filepath"
+ "reflect"
+ "sync"
"github.com/nicksnyder/go-i18n/i18n/language"
"github.com/nicksnyder/go-i18n/i18n/translation"
+ "gopkg.in/yaml.v2"
)
// TranslateFunc is a copy of i18n.TranslateFunc to avoid a circular dependency.
@@ -24,6 +24,8 @@ type Bundle struct {
// Translations that can be used when an exact language match is not possible.
fallbackTranslations map[string]map[string]translation.Translation
+
+ sync.RWMutex
}
// New returns an empty bundle.
@@ -89,7 +91,7 @@ func parseTranslations(filename string, buf []byte) ([]translation.Translation,
var translationsData []map[string]interface{}
if len(buf) > 0 {
if err := unmarshalFunc(buf, &translationsData); err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to load %s because %s", filename, err)
}
}
@@ -108,6 +110,8 @@ func parseTranslations(filename string, buf []byte) ([]translation.Translation,
//
// It is useful if your translations are in a format not supported by LoadTranslationFile.
func (b *Bundle) AddTranslation(lang *language.Language, translations ...translation.Translation) {
+ b.Lock()
+ defer b.Unlock()
if b.translations[lang.Tag] == nil {
b.translations[lang.Tag] = make(map[string]translation.Translation, len(translations))
}
@@ -128,24 +132,37 @@ func (b *Bundle) AddTranslation(lang *language.Language, translations ...transla
// Translations returns all translations in the bundle.
func (b *Bundle) Translations() map[string]map[string]translation.Translation {
- return b.translations
+ t := make(map[string]map[string]translation.Translation)
+ b.RLock()
+ for tag, translations := range b.translations {
+ t[tag] = make(map[string]translation.Translation)
+ for id, translation := range translations {
+ t[tag][id] = translation
+ }
+ }
+ b.RUnlock()
+ return t
}
// LanguageTags returns the tags of all languages that that have been added.
func (b *Bundle) LanguageTags() []string {
var tags []string
+ b.RLock()
for k := range b.translations {
tags = append(tags, k)
}
+ b.RUnlock()
return tags
}
// LanguageTranslationIDs returns the ids of all translations that have been added for a given language.
func (b *Bundle) LanguageTranslationIDs(languageTag string) []string {
var ids []string
+ b.RLock()
for id := range b.translations[languageTag] {
ids = append(ids, id)
}
+ b.RUnlock()
return ids
}
@@ -212,6 +229,8 @@ func (b *Bundle) supportedLanguage(pref string, prefs ...string) *language.Langu
func (b *Bundle) translatedLanguage(src string) *language.Language {
langs := language.Parse(src)
+ b.RLock()
+ defer b.RUnlock()
for _, lang := range langs {
if len(b.translations[lang.Tag]) > 0 ||
len(b.fallbackTranslations[lang.Tag]) > 0 {
@@ -226,15 +245,7 @@ func (b *Bundle) translate(lang *language.Language, translationID string, args .
return translationID
}
- translations := b.translations[lang.Tag]
- if translations == nil {
- translations = b.fallbackTranslations[lang.Tag]
- if translations == nil {
- return translationID
- }
- }
-
- translation := translations[translationID]
+ translation := b.translation(lang, translationID)
if translation == nil {
return translationID
}
@@ -280,6 +291,19 @@ func (b *Bundle) translate(lang *language.Language, translationID string, args .
return s
}
+func (b *Bundle) translation(lang *language.Language, translationID string) translation.Translation {
+ b.RLock()
+ defer b.RUnlock()
+ translations := b.translations[lang.Tag]
+ if translations == nil {
+ translations = b.fallbackTranslations[lang.Tag]
+ if translations == nil {
+ return nil
+ }
+ }
+ return translations[translationID]
+}
+
func isNumber(n interface{}) bool {
switch n.(type) {
case int, int8, int16, int32, int64, string:
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle_test.go b/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle_test.go
index b241ad1d4..da3830a79 100644
--- a/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle_test.go
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle_test.go
@@ -2,6 +2,8 @@ package bundle
import (
"fmt"
+ "strconv"
+ "sync"
"testing"
"reflect"
@@ -160,6 +162,59 @@ func TestTfuncAndLanguage(t *testing.T) {
}
}
+func TestConcurrent(t *testing.T) {
+ b := New()
+ // bootstrap bundle
+ translationID := "translation_id" // +1
+ englishLanguage := languageWithTag("en-US")
+ addFakeTranslation(t, b, englishLanguage, translationID)
+
+ tf, err := b.Tfunc(englishLanguage.Tag)
+ if err != nil {
+ t.Errorf("Tfunc(%v) = error{%q}; expected no error", []string{englishLanguage.Tag}, err)
+ }
+
+ const iterations = 1000
+ var wg sync.WaitGroup
+ wg.Add(iterations)
+
+ // Using go routines insert 1000 ints into our map.
+ go func() {
+ for i := 0; i < iterations/2; i++ {
+ // Add item to map.
+ translationID := strconv.FormatInt(int64(i), 10)
+ addFakeTranslation(t, b, englishLanguage, translationID)
+
+ // Retrieve item from map.
+ tf(translationID)
+
+ wg.Done()
+ } // Call go routine with current index.
+ }()
+
+ go func() {
+ for i := iterations / 2; i < iterations; i++ {
+ // Add item to map.
+ translationID := strconv.FormatInt(int64(i), 10)
+ addFakeTranslation(t, b, englishLanguage, translationID)
+
+ // Retrieve item from map.
+ tf(translationID)
+
+ wg.Done()
+ } // Call go routine with current index.
+ }()
+
+ // Wait for all go routines to finish.
+ wg.Wait()
+
+ // Make sure map contains 1000+1 elements.
+ count := len(b.Translations()[englishLanguage.Tag])
+ if count != iterations+1 {
+ t.Error("Expecting 1001 elements, got", count)
+ }
+}
+
func addFakeTranslation(t *testing.T, b *Bundle, lang *language.Language, translationID string) string {
translation := fakeTranslation(lang, translationID)
b.AddTranslation(lang, testNewTranslation(t, map[string]interface{}{
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 487fdc6cc..a7a42d5ef 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -31,6 +31,7 @@ type Decoder interface {
Decode(*dto.MetricFamily) error
}
+// DecodeOptions contains options used by the Decoder and in sample extraction.
type DecodeOptions struct {
// Timestamp is added to each value from the stream that has no explicit timestamp set.
Timestamp model.Time
@@ -142,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
return nil
}
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
type SampleDecoder struct {
Dec Decoder
Opts *DecodeOptions
@@ -149,37 +152,51 @@ type SampleDecoder struct {
f dto.MetricFamily
}
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
func (sd *SampleDecoder) Decode(s *model.Vector) error {
- if err := sd.Dec.Decode(&sd.f); err != nil {
+ err := sd.Dec.Decode(&sd.f)
+ if err != nil {
return err
}
- *s = extractSamples(&sd.f, sd.Opts)
- return nil
+ *s, err = extractSamples(&sd.f, sd.Opts)
+ return err
}
-// Extract samples builds a slice of samples from the provided metric families.
-func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
- var all model.Vector
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+ var (
+ all model.Vector
+ lastErr error
+ )
for _, f := range fams {
- all = append(all, extractSamples(f, o)...)
+ some, err := extractSamples(f, o)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ all = append(all, some...)
}
- return all
+ return all, lastErr
}
-func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
switch f.GetType() {
case dto.MetricType_COUNTER:
- return extractCounter(o, f)
+ return extractCounter(o, f), nil
case dto.MetricType_GAUGE:
- return extractGauge(o, f)
+ return extractGauge(o, f), nil
case dto.MetricType_SUMMARY:
- return extractSummary(o, f)
+ return extractSummary(o, f), nil
case dto.MetricType_UNTYPED:
- return extractUntyped(o, f)
+ return extractUntyped(o, f), nil
case dto.MetricType_HISTOGRAM:
- return extractHistogram(o, f)
+ return extractHistogram(o, f), nil
}
- panic("expfmt.extractSamples: unknown metric family type")
+ return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
}
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
diff --git a/vendor/github.com/prometheus/common/expfmt/decode_test.go b/vendor/github.com/prometheus/common/expfmt/decode_test.go
index c27325a9d..82c1130c9 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode_test.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode_test.go
@@ -21,6 +21,9 @@ import (
"strings"
"testing"
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+
"github.com/prometheus/common/model"
)
@@ -365,3 +368,68 @@ func BenchmarkDiscriminatorHTTPHeader(b *testing.B) {
testDiscriminatorHTTPHeader(b)
}
}
+
+func TestExtractSamples(t *testing.T) {
+ var (
+ goodMetricFamily1 = &dto.MetricFamily{
+ Name: proto.String("foo"),
+ Help: proto.String("Help for foo."),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Counter: &dto.Counter{
+ Value: proto.Float64(4711),
+ },
+ },
+ },
+ }
+ goodMetricFamily2 = &dto.MetricFamily{
+ Name: proto.String("bar"),
+ Help: proto.String("Help for bar."),
+ Type: dto.MetricType_GAUGE.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(3.14),
+ },
+ },
+ },
+ }
+ badMetricFamily = &dto.MetricFamily{
+ Name: proto.String("bad"),
+ Help: proto.String("Help for bad."),
+ Type: dto.MetricType(42).Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(2.7),
+ },
+ },
+ },
+ }
+
+ opts = &DecodeOptions{
+ Timestamp: 42,
+ }
+ )
+
+ got, err := ExtractSamples(opts, goodMetricFamily1, goodMetricFamily2)
+ if err != nil {
+ t.Error("Unexpected error from ExtractSamples:", err)
+ }
+ want := model.Vector{
+ &model.Sample{Metric: model.Metric{model.MetricNameLabel: "foo"}, Value: 4711, Timestamp: 42},
+ &model.Sample{Metric: model.Metric{model.MetricNameLabel: "bar"}, Value: 3.14, Timestamp: 42},
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want)
+ }
+
+ got, err = ExtractSamples(opts, goodMetricFamily1, badMetricFamily, goodMetricFamily2)
+ if err == nil {
+ t.Error("Expected error from ExtractSamples")
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index fae10f6eb..371ac7503 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -11,14 +11,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// A package for reading and writing Prometheus metrics.
+// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt
+// Format specifies the HTTP content type of the different wire protocols.
type Format string
+// Constants to assemble the Content-Type values for the different wire protocols.
const (
- TextVersion = "0.0.4"
-
+ TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go
index fd8c6fbee..64f5fdac9 100644
--- a/vendor/github.com/prometheus/common/log/syslog_formatter.go
+++ b/vendor/github.com/prometheus/common/log/syslog_formatter.go
@@ -23,6 +23,8 @@ import (
"github.com/Sirupsen/logrus"
)
+var _ logrus.Formatter = (*syslogger)(nil)
+
func init() {
setSyslogFormatter = func(appname, local string) error {
if appname == "" {
@@ -43,7 +45,7 @@ func init() {
}
}
-var ceeTag = []byte("@cee:")
+var prefixTag []byte
type syslogger struct {
wrap logrus.Formatter
@@ -56,6 +58,11 @@ func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*sys
return nil, err
}
out, err := syslog.New(priority, appname)
+ _, isJSON := fmter.(*logrus.JSONFormatter)
+ if isJSON {
+ // add cee tag to json formatted syslogs
+ prefixTag = []byte("@cee:")
+ }
return &syslogger{
out: out,
wrap: fmter,
@@ -92,7 +99,7 @@ func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
}
// only append tag to data sent to syslog (line), not to what
// is returned
- line := string(append(ceeTag, data...))
+ line := string(append(prefixTag, data...))
switch e.Level {
case logrus.PanicLevel:
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter_test.go b/vendor/github.com/prometheus/common/log/syslog_formatter_test.go
new file mode 100644
index 000000000..b7e68848f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/syslog_formatter_test.go
@@ -0,0 +1,52 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!nacl,!plan9
+
+package log
+
+import (
+ "errors"
+ "log/syslog"
+ "testing"
+)
+
+func TestGetFacility(t *testing.T) {
+ testCases := []struct {
+ facility string
+ expectedPriority syslog.Priority
+ expectedErr error
+ }{
+ {"0", syslog.LOG_LOCAL0, nil},
+ {"1", syslog.LOG_LOCAL1, nil},
+ {"2", syslog.LOG_LOCAL2, nil},
+ {"3", syslog.LOG_LOCAL3, nil},
+ {"4", syslog.LOG_LOCAL4, nil},
+ {"5", syslog.LOG_LOCAL5, nil},
+ {"6", syslog.LOG_LOCAL6, nil},
+ {"7", syslog.LOG_LOCAL7, nil},
+ {"8", syslog.LOG_LOCAL0, errors.New("invalid local(8) for syslog")},
+ }
+ for _, tc := range testCases {
+ priority, err := getFacility(tc.facility)
+ if err != tc.expectedErr {
+ if err.Error() != tc.expectedErr.Error() {
+ t.Errorf("want %s, got %s", tc.expectedErr.Error(), err.Error())
+ }
+ }
+
+ if priority != tc.expectedPriority {
+ t.Errorf("want %q, got %q", tc.expectedPriority, priority)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 9dff899cb..f7250909b 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -44,7 +44,7 @@ func (m Metric) Before(o Metric) bool {
// Clone returns a copy of the Metric.
func (m Metric) Clone() Metric {
- clone := Metric{}
+ clone := make(Metric, len(m))
for k, v := range m {
clone[k] = v
}
diff --git a/vendor/github.com/prometheus/common/route/route.go b/vendor/github.com/prometheus/common/route/route.go
index 930b52d4f..1e5638ed9 100644
--- a/vendor/github.com/prometheus/common/route/route.go
+++ b/vendor/github.com/prometheus/common/route/route.go
@@ -33,18 +33,19 @@ func WithParam(ctx context.Context, p, v string) context.Context {
return context.WithValue(ctx, param(p), v)
}
-type contextFn func(r *http.Request) (context.Context, error)
+// ContextFunc returns a new context for a request.
+type ContextFunc func(r *http.Request) (context.Context, error)
// Router wraps httprouter.Router and adds support for prefixed sub-routers
// and per-request context injections.
type Router struct {
rtr *httprouter.Router
prefix string
- ctxFn contextFn
+ ctxFn ContextFunc
}
// New returns a new Router.
-func New(ctxFn contextFn) *Router {
+func New(ctxFn ContextFunc) *Router {
if ctxFn == nil {
ctxFn = func(r *http.Request) (context.Context, error) {
return context.Background(), nil
diff --git a/vendor/github.com/prometheus/common/route/route_test.go b/vendor/github.com/prometheus/common/route/route_test.go
index 4055d69d5..e7b1cba33 100644
--- a/vendor/github.com/prometheus/common/route/route_test.go
+++ b/vendor/github.com/prometheus/common/route/route_test.go
@@ -29,7 +29,7 @@ func TestRedirect(t *testing.T) {
}
}
-func TestContextFn(t *testing.T) {
+func TestContextFunc(t *testing.T) {
router := New(func(r *http.Request) (context.Context, error) {
return context.WithValue(context.Background(), "testkey", "testvalue"), nil
})
diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml
index 2b4554da5..a9e28bf5d 100644
--- a/vendor/github.com/prometheus/procfs/.travis.yml
+++ b/vendor/github.com/prometheus/procfs/.travis.yml
@@ -1,5 +1,5 @@
sudo: false
language: go
go:
- - 1.5
- - 1.6
+ - 1.6.4
+ - 1.7.4
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
index 0c802dd87..d55863560 100644
--- a/vendor/github.com/prometheus/procfs/AUTHORS.md
+++ b/vendor/github.com/prometheus/procfs/AUTHORS.md
@@ -14,6 +14,7 @@ The following individuals have contributed code to this repository
* Ji-Hoon, Seol <jihoon.seol@gmail.com>
* Jonas Große Sundrup <cherti@letopolis.de>
* Julius Volz <julius.volz@gmail.com>
+* Matt Layher <mdlayher@gmail.com>
* Matthias Rampke <mr@soundcloud.com>
* Nicky Gerritsen <nicky@streamone.nl>
* Rémi Audebert <contact@halfr.net>
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/mountstats b/vendor/github.com/prometheus/procfs/fixtures/26231/mountstats
new file mode 100644
index 000000000..a665c33da
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/mountstats
@@ -0,0 +1,19 @@
+device rootfs mounted on / with fstype rootfs
+device sysfs mounted on /sys with fstype sysfs
+device proc mounted on /proc with fstype proc
+device /dev/sda1 mounted on / with fstype ext4
+device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
+ opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
+ age: 13968
+ caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
+ nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
+ sec: flavor=1,pseudoflavor=1
+ events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
+ bytes: 1207640230 0 0 0 1210214218 0 295483 0
+ RPC iostats version: 1.0 p/v: 100003/4 (nfs)
+ xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
+ per-op statistics
+ NULL: 0 0 0 0 0 0 0 0
+ READ: 1298 1298 0 207680 1210292152 6 79386 79407
+ WRITE: 0 0 0 0 0 0 0 0
+
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 000000000..47ab0a744
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,552 @@
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Constants shared between multiple functions.
+const (
+ deviceEntryLen = 8
+
+ fieldBytesLen = 8
+ fieldEventsLen = 27
+
+ statVersion10 = "1.0"
+ statVersion11 = "1.1"
+
+ fieldTransport10Len = 10
+ fieldTransport11Len = 13
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+ // Name of the device.
+ Device string
+ // The mount point of the device.
+ Mount string
+ // The filesystem type used by the device.
+ Type string
+ // If available additional statistics related to this Mount.
+ // Use a type assertion to determine if additional statistics are available.
+ Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+ mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+ // The version of statistics provided.
+ StatVersion string
+ // The age of the NFS mount.
+ Age time.Duration
+ // Statistics related to byte counters for various operations.
+ Bytes NFSBytesStats
+ // Statistics related to various NFS event occurrences.
+ Events NFSEventsStats
+ // Statistics broken down by filesystem operation.
+ Operations []NFSOperationStats
+ // Statistics about the NFS RPC transport.
+ Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+ // Number of bytes read using the read() syscall.
+ Read uint64
+ // Number of bytes written using the write() syscall.
+ Write uint64
+ // Number of bytes read using the read() syscall in O_DIRECT mode.
+ DirectRead uint64
+ // Number of bytes written using the write() syscall in O_DIRECT mode.
+ DirectWrite uint64
+ // Number of bytes read from the NFS server, in total.
+ ReadTotal uint64
+ // Number of bytes written to the NFS server, in total.
+ WriteTotal uint64
+ // Number of pages read directly via mmap()'d files.
+ ReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+ // Number of times cached inode attributes are re-validated from the server.
+ InodeRevalidate uint64
+ // Number of times cached dentry nodes are re-validated from the server.
+ DnodeRevalidate uint64
+ // Number of times an inode cache is cleared.
+ DataInvalidate uint64
+ // Number of times cached inode attributes are invalidated.
+ AttributeInvalidate uint64
+ // Number of times files or directories have been open()'d.
+ VFSOpen uint64
+ // Number of times a directory lookup has occurred.
+ VFSLookup uint64
+ // Number of times permissions have been checked.
+ VFSAccess uint64
+ // Number of updates (and potential writes) to pages.
+ VFSUpdatePage uint64
+ // Number of pages read directly via mmap()'d files.
+ VFSReadPage uint64
+ // Number of times a group of pages have been read.
+ VFSReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ VFSWritePage uint64
+ // Number of times a group of pages have been written.
+ VFSWritePages uint64
+ // Number of times directory entries have been read with getdents().
+ VFSGetdents uint64
+ // Number of times attributes have been set on inodes.
+ VFSSetattr uint64
+ // Number of pending writes that have been forcefully flushed to the server.
+ VFSFlush uint64
+ // Number of times fsync() has been called on directories and files.
+ VFSFsync uint64
+ // Number of times locking has been attempted on a file.
+ VFSLock uint64
+ // Number of times files have been closed and released.
+ VFSFileRelease uint64
+ // Unknown. Possibly unused.
+ CongestionWait uint64
+ // Number of times files have been truncated.
+ Truncation uint64
+ // Number of times a file has been grown due to writes beyond its existing end.
+ WriteExtension uint64
+ // Number of times a file was removed while still open by another process.
+ SillyRename uint64
+ // Number of times the NFS server gave less data than expected while reading.
+ ShortRead uint64
+ // Number of times the NFS server wrote less data than expected while writing.
+ ShortWrite uint64
+ // Number of times the NFS server indicated EJUKEBOX; retrieving data from
+ // offline storage.
+ JukeboxDelay uint64
+ // Number of NFS v4.1+ pNFS reads.
+ PNFSRead uint64
+ // Number of NFS v4.1+ pNFS writes.
+ PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+ // The name of the operation.
+ Operation string
+ // Number of requests performed for this operation.
+ Requests uint64
+ // Number of times an actual RPC request has been transmitted for this operation.
+ Transmissions uint64
+ // Number of times a request has had a major timeout.
+ MajorTimeouts uint64
+ // Number of bytes sent for this operation, including RPC headers and payload.
+ BytesSent uint64
+ // Number of bytes received for this operation, including RPC headers and payload.
+ BytesReceived uint64
+ // Duration all requests spent queued for transmission before they were sent.
+ CumulativeQueueTime time.Duration
+ // Duration it took to get a reply back after the request was transmitted.
+ CumulativeTotalResponseTime time.Duration
+ // Duration from when a request was enqueued to when it was completely handled.
+ CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+ // The local port used for the NFS mount.
+ Port uint64
+ // Number of times the client has had to establish a connection from scratch
+ // to the NFS server.
+ Bind uint64
+ // Number of times the client has made a TCP connection to the NFS server.
+ Connect uint64
+ // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+ // spent waiting for connections to the server to be established.
+ ConnectIdleTime uint64
+ // Duration since the NFS mount last saw any RPC traffic.
+ IdleTime time.Duration
+ // Number of RPC requests for this mount sent to the NFS server.
+ Sends uint64
+ // Number of RPC responses for this mount received from the NFS server.
+ Receives uint64
+ // Number of times the NFS server sent a response with a transaction ID
+ // unknown to this client.
+ BadTransactionIDs uint64
+ // A running counter, incremented on each request as the current difference
+ // between sends and receives.
+ CumulativeActiveRequests uint64
+ // A running counter, incremented on each request by the current backlog
+ // queue size.
+ CumulativeBacklog uint64
+
+ // Stats below only available with stat version 1.1.
+
+ // Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64
+ // A running counter, incremented on each request as the current size of the
+ // sending queue.
+ CumulativeSendingQueue uint64
+ // A running counter, incremented on each request as the current size of the
+ // pending queue.
+ CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+ const (
+ device = "device"
+ statVersionPrefix = "statvers="
+
+ nfs3Type = "nfs"
+ nfs4Type = "nfs4"
+ )
+
+ var mounts []*Mount
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Only look for device entries in this function
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 || ss[0] != device {
+ continue
+ }
+
+ m, err := parseMount(ss)
+ if err != nil {
+ return nil, err
+ }
+
+ // Does this mount also possess statistics information?
+ if len(ss) > deviceEntryLen {
+ // Only NFSv3 and v4 are supported for parsing statistics
+ if m.Type != nfs3Type && m.Type != nfs4Type {
+ return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ }
+
+ statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+ stats, err := parseMountStatsNFS(s, statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ m.Stats = stats
+ }
+
+ mounts = append(mounts, m)
+ }
+
+ return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+// device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+ if len(ss) < deviceEntryLen {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+
+ // Check for specific words appearing at specific indices to ensure
+ // the format is consistent with what we expect
+ format := []struct {
+ i int
+ s string
+ }{
+ {i: 0, s: "device"},
+ {i: 2, s: "mounted"},
+ {i: 3, s: "on"},
+ {i: 5, s: "with"},
+ {i: 6, s: "fstype"},
+ }
+
+ for _, f := range format {
+ if ss[f.i] != f.s {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+ }
+
+ return &Mount{
+ Device: ss[1],
+ Mount: ss[4],
+ Type: ss[7],
+ }, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+ // Field indicators for parsing specific types of data
+ const (
+ fieldAge = "age:"
+ fieldBytes = "bytes:"
+ fieldEvents = "events:"
+ fieldPerOpStats = "per-op"
+ fieldTransport = "xprt:"
+ )
+
+ stats := &MountStatsNFS{
+ StatVersion: statVersion,
+ }
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ break
+ }
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
+
+ switch ss[0] {
+ case fieldAge:
+ // Age integer is in seconds
+ d, err := time.ParseDuration(ss[1] + "s")
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Age = d
+ case fieldBytes:
+ bstats, err := parseNFSBytesStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Bytes = *bstats
+ case fieldEvents:
+ estats, err := parseNFSEventsStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Events = *estats
+ case fieldTransport:
+ if len(ss) < 3 {
+ return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ }
+
+ tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Transport = *tstats
+ }
+
+ // When encountering "per-operation statistics", we must break this
+ // loop and parse them separately to ensure we can terminate parsing
+ // before reaching another device entry; hence why this 'if' statement
+ // is not just another switch case
+ if ss[0] == fieldPerOpStats {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ // NFS per-operation stats appear last before the next device entry
+ perOpStats, err := parseNFSOperationStats(s)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Operations = perOpStats
+
+ return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+ if len(ss) != fieldBytesLen {
+ return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldBytesLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSBytesStats{
+ Read: ns[0],
+ Write: ns[1],
+ DirectRead: ns[2],
+ DirectWrite: ns[3],
+ ReadTotal: ns[4],
+ WriteTotal: ns[5],
+ ReadPages: ns[6],
+ WritePages: ns[7],
+ }, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+ if len(ss) != fieldEventsLen {
+ return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldEventsLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSEventsStats{
+ InodeRevalidate: ns[0],
+ DnodeRevalidate: ns[1],
+ DataInvalidate: ns[2],
+ AttributeInvalidate: ns[3],
+ VFSOpen: ns[4],
+ VFSLookup: ns[5],
+ VFSAccess: ns[6],
+ VFSUpdatePage: ns[7],
+ VFSReadPage: ns[8],
+ VFSReadPages: ns[9],
+ VFSWritePage: ns[10],
+ VFSWritePages: ns[11],
+ VFSGetdents: ns[12],
+ VFSSetattr: ns[13],
+ VFSFlush: ns[14],
+ VFSFsync: ns[15],
+ VFSLock: ns[16],
+ VFSFileRelease: ns[17],
+ CongestionWait: ns[18],
+ Truncation: ns[19],
+ WriteExtension: ns[20],
+ SillyRename: ns[21],
+ ShortRead: ns[22],
+ ShortWrite: ns[23],
+ JukeboxDelay: ns[24],
+ PNFSRead: ns[25],
+ PNFSWrite: ns[26],
+ }, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+ const (
+ // Number of expected fields in each per-operation statistics set
+ numFields = 9
+ )
+
+ var ops []NFSOperationStats
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ // Must break when reading a blank line after per-operation stats to
+ // enable top-level function to parse the next device entry
+ break
+ }
+
+ if len(ss) != numFields {
+ return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ }
+
+ // Skip string operation name for integers
+ ns := make([]uint64, 0, numFields-1)
+ for _, st := range ss[1:] {
+ n, err := strconv.ParseUint(st, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ ops = append(ops, NFSOperationStats{
+ Operation: strings.TrimSuffix(ss[0], ":"),
+ Requests: ns[0],
+ Transmissions: ns[1],
+ MajorTimeouts: ns[2],
+ BytesSent: ns[3],
+ BytesReceived: ns[4],
+ CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
+ CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
+ CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
+ })
+ }
+
+ return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+ switch statVersion {
+ case statVersion10:
+ if len(ss) != fieldTransport10Len {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ }
+ case statVersion11:
+ if len(ss) != fieldTransport11Len {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ }
+ default:
+ return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ }
+
+ // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+ // in a v1.0 response
+ ns := make([]uint64, 0, fieldTransport11Len)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSTransportStats{
+ Port: ns[0],
+ Bind: ns[1],
+ Connect: ns[2],
+ ConnectIdleTime: ns[3],
+ IdleTime: time.Duration(ns[4]) * time.Second,
+ Sends: ns[5],
+ Receives: ns[6],
+ BadTransactionIDs: ns[7],
+ CumulativeActiveRequests: ns[8],
+ CumulativeBacklog: ns[9],
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+ }, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/mountstats_test.go b/vendor/github.com/prometheus/procfs/mountstats_test.go
new file mode 100644
index 000000000..e65707939
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats_test.go
@@ -0,0 +1,252 @@
+package procfs
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestMountStats(t *testing.T) {
+ tests := []struct {
+ name string
+ s string
+ fs bool
+ mounts []*Mount
+ invalid bool
+ }{
+ {
+ name: "no devices",
+ s: `hello`,
+ },
+ {
+ name: "device has too few fields",
+ s: `device foo`,
+ invalid: true,
+ },
+ {
+ name: "device incorrect format",
+ s: `device rootfs BAD on / with fstype rootfs`,
+ invalid: true,
+ },
+ {
+ name: "device incorrect format",
+ s: `device rootfs mounted BAD / with fstype rootfs`,
+ invalid: true,
+ },
+ {
+ name: "device incorrect format",
+ s: `device rootfs mounted on / BAD fstype rootfs`,
+ invalid: true,
+ },
+ {
+ name: "device incorrect format",
+ s: `device rootfs mounted on / with BAD rootfs`,
+ invalid: true,
+ },
+ {
+ name: "device rootfs cannot have stats",
+ s: `device rootfs mounted on / with fstype rootfs stats`,
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with too little info",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nhello",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad bytes",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nbytes: 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad events",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nevents: 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad per-op stats",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nper-op statistics\nFOO 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad transport stats",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nxprt: tcp",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad transport version",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=foo\nxprt: tcp 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad transport stats version 1.0",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.0\nxprt: tcp 0 0 0 0 0 0 0 0 0 0 0 0 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad transport stats version 1.1",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nxprt: tcp 0 0 0 0 0 0 0 0 0 0",
+ invalid: true,
+ },
+ {
+ name: "device rootfs OK",
+ s: `device rootfs mounted on / with fstype rootfs`,
+ mounts: []*Mount{{
+ Device: "rootfs",
+ Mount: "/",
+ Type: "rootfs",
+ }},
+ },
+ {
+ name: "NFSv3 device with minimal stats OK",
+ s: `device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs statvers=1.1`,
+ mounts: []*Mount{{
+ Device: "192.168.1.1:/srv",
+ Mount: "/mnt/nfs",
+ Type: "nfs",
+ Stats: &MountStatsNFS{
+ StatVersion: "1.1",
+ },
+ }},
+ },
+ {
+ name: "fixtures OK",
+ fs: true,
+ mounts: []*Mount{
+ {
+ Device: "rootfs",
+ Mount: "/",
+ Type: "rootfs",
+ },
+ {
+ Device: "sysfs",
+ Mount: "/sys",
+ Type: "sysfs",
+ },
+ {
+ Device: "proc",
+ Mount: "/proc",
+ Type: "proc",
+ },
+ {
+ Device: "/dev/sda1",
+ Mount: "/",
+ Type: "ext4",
+ },
+ {
+ Device: "192.168.1.1:/srv/test",
+ Mount: "/mnt/nfs/test",
+ Type: "nfs4",
+ Stats: &MountStatsNFS{
+ StatVersion: "1.1",
+ Age: 13968 * time.Second,
+ Bytes: NFSBytesStats{
+ Read: 1207640230,
+ ReadTotal: 1210214218,
+ ReadPages: 295483,
+ },
+ Events: NFSEventsStats{
+ InodeRevalidate: 52,
+ DnodeRevalidate: 226,
+ VFSOpen: 1,
+ VFSLookup: 13,
+ VFSAccess: 398,
+ VFSReadPages: 331,
+ VFSWritePages: 47,
+ VFSFlush: 77,
+ VFSFileRelease: 77,
+ },
+ Operations: []NFSOperationStats{
+ {
+ Operation: "NULL",
+ },
+ {
+ Operation: "READ",
+ Requests: 1298,
+ Transmissions: 1298,
+ BytesSent: 207680,
+ BytesReceived: 1210292152,
+ CumulativeQueueTime: 6 * time.Millisecond,
+ CumulativeTotalResponseTime: 79386 * time.Millisecond,
+ CumulativeTotalRequestTime: 79407 * time.Millisecond,
+ },
+ {
+ Operation: "WRITE",
+ },
+ },
+ Transport: NFSTransportStats{
+ Port: 832,
+ Connect: 1,
+ IdleTime: 11 * time.Second,
+ Sends: 6428,
+ Receives: 6428,
+ CumulativeActiveRequests: 12154,
+ MaximumRPCSlotsUsed: 24,
+ CumulativeSendingQueue: 26,
+ CumulativePendingQueue: 5726,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for i, tt := range tests {
+ t.Logf("[%02d] test %q", i, tt.name)
+
+ var mounts []*Mount
+ var err error
+
+ if tt.s != "" {
+ mounts, err = parseMountStats(strings.NewReader(tt.s))
+ }
+ if tt.fs {
+ proc, err := FS("fixtures").NewProc(26231)
+ if err != nil {
+ t.Fatalf("failed to create proc: %v", err)
+ }
+
+ mounts, err = proc.MountStats()
+ }
+
+ if tt.invalid && err == nil {
+ t.Error("expected an error, but none occurred")
+ }
+ if !tt.invalid && err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+
+ if want, have := tt.mounts, mounts; !reflect.DeepEqual(want, have) {
+ t.Errorf("mounts:\nwant:\n%v\nhave:\n%v", mountsStr(want), mountsStr(have))
+ }
+ }
+}
+
+func mountsStr(mounts []*Mount) string {
+ var out string
+ for i, m := range mounts {
+ out += fmt.Sprintf("[%d] %q on %q (%q)", i, m.Device, m.Mount, m.Type)
+
+ stats, ok := m.Stats.(*MountStatsNFS)
+ if !ok {
+ out += "\n"
+ continue
+ }
+
+ out += fmt.Sprintf("\n\t- v%s, age: %s", stats.StatVersion, stats.Age)
+ out += fmt.Sprintf("\n\t- bytes: %v", stats.Bytes)
+ out += fmt.Sprintf("\n\t- events: %v", stats.Events)
+ out += fmt.Sprintf("\n\t- transport: %v", stats.Transport)
+ out += fmt.Sprintf("\n\t- per-operation stats:")
+
+ for _, o := range stats.Operations {
+ out += fmt.Sprintf("\n\t\t- %v", o)
+ }
+
+ out += "\n"
+ }
+
+ return out
+}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 0d0a6a90f..8717e1fe0 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -192,6 +192,18 @@ func (p Proc) FileDescriptorsLen() (int, error) {
return len(fds), nil
}
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+ f, err := os.Open(p.path("mountstats"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseMountStats(f)
+}
+
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 5d2504b1c..2efda5920 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -157,7 +157,12 @@ In a Cobra app, typically the main.go file is very bare. It serves, one purpose,
```go
package main
-import "{pathToYourApp}/cmd"
+import (
+ "fmt"
+ "os"
+
+ "{pathToYourApp}/cmd"
+)
func main() {
if err := cmd.RootCmd.Execute(); err != nil {
@@ -313,7 +318,12 @@ In a Cobra app, typically the main.go file is very bare. It serves, one purpose,
```go
package main
-import "{pathToYourApp}/cmd"
+import (
+ "fmt"
+ "os"
+
+ "{pathToYourApp}/cmd"
+)
func main() {
if err := cmd.RootCmd.Execute(); err != nil {
@@ -337,6 +347,7 @@ package cmd
import (
"github.com/spf13/cobra"
+ "fmt"
)
func init() {
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 7a5bd4d7d..8820ba8fc 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -10,6 +10,7 @@ import (
"github.com/spf13/pflag"
)
+// Annotations for Bash completion.
const (
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
BashCompCustom = "cobra_annotation_bash_completion_custom"
@@ -22,7 +23,7 @@ func preamble(out io.Writer, name string) error {
if err != nil {
return err
}
- _, err = fmt.Fprint(out, `
+ preamStr := `
__debug()
{
if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
@@ -246,7 +247,8 @@ __handle_word()
__handle_word
}
-`)
+`
+ _, err = fmt.Fprint(out, preamStr)
return err
}
@@ -566,6 +568,7 @@ func gen(cmd *Command, w io.Writer) error {
return nil
}
+// GenBashCompletion generates bash completion file and writes to the passed writer.
func (cmd *Command) GenBashCompletion(w io.Writer) error {
if err := preamble(w, cmd.Name()); err != nil {
return err
@@ -585,6 +588,7 @@ func nonCompletableFlag(flag *pflag.Flag) bool {
return flag.Hidden || len(flag.Deprecated) > 0
}
+// GenBashCompletionFile generates bash completion file.
func (cmd *Command) GenBashCompletionFile(filename string) error {
outFile, err := os.Create(filename)
if err != nil {
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
index 6e3b71f13..7fa970fa2 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.md
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -18,7 +18,7 @@ func main() {
}
```
-That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
+`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
## Creating your own custom functions
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
index b39c715a5..9605b984c 100644
--- a/vendor/github.com/spf13/cobra/cobra.go
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -37,7 +37,8 @@ var templateFuncs = template.FuncMap{
var initializers []func()
-// Automatic prefix matching can be a dangerous thing to automatically enable in CLI tools.
+// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to automatically enable in CLI tools.
// Set this to true to enable it.
var EnablePrefixMatching = false
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 49889318c..3ee1a0a9d 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -129,7 +129,7 @@ type Command struct {
DisableFlagParsing bool
}
-// os.Args[1:] by default, if desired, can be overridden
+// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
// particularly useful when testing.
func (c *Command) SetArgs(a []string) {
c.args = a
@@ -141,12 +141,12 @@ func (c *Command) SetOutput(output io.Writer) {
c.output = &output
}
-// Usage can be defined by application.
+// SetUsageFunc sets usage function. Usage can be defined by application.
func (c *Command) SetUsageFunc(f func(*Command) error) {
c.usageFunc = f
}
-// Can be defined by Application.
+// SetUsageTemplate sets usage template. Can be defined by Application.
func (c *Command) SetUsageTemplate(s string) {
c.usageTemplate = s
}
@@ -157,16 +157,17 @@ func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
c.flagErrorFunc = f
}
-// Can be defined by Application
+// SetHelpFunc sets help function. Can be defined by Application
func (c *Command) SetHelpFunc(f func(*Command, []string)) {
c.helpFunc = f
}
+// SetHelpCommand sets help command.
func (c *Command) SetHelpCommand(cmd *Command) {
c.helpCommand = cmd
}
-// Can be defined by Application.
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
func (c *Command) SetHelpTemplate(s string) {
c.helpTemplate = s
}
@@ -183,10 +184,12 @@ func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string
}
}
+// OutOrStdout returns output to stdout
func (c *Command) OutOrStdout() io.Writer {
return c.getOut(os.Stdout)
}
+// OutOrStderr returns output to stderr
func (c *Command) OutOrStderr() io.Writer {
return c.getOut(os.Stderr)
}
@@ -265,6 +268,7 @@ func (c *Command) Help() error {
return nil
}
+// UsageString return usage string.
func (c *Command) UsageString() string {
tmpOutput := c.output
bb := new(bytes.Buffer)
@@ -292,6 +296,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
var minUsagePadding = 25
+// UsagePadding return padding for the usage.
func (c *Command) UsagePadding() int {
if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
return minUsagePadding
@@ -301,7 +306,7 @@ func (c *Command) UsagePadding() int {
var minCommandPathPadding = 11
-//
+// CommandPathPadding return padding for the command path.
func (c *Command) CommandPathPadding() int {
if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
return minCommandPathPadding
@@ -311,6 +316,7 @@ func (c *Command) CommandPathPadding() int {
var minNamePadding = 11
+// NamePadding returns padding for the name.
func (c *Command) NamePadding() int {
if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
return minNamePadding
@@ -318,6 +324,7 @@ func (c *Command) NamePadding() int {
return c.parent.commandsMaxNameLen
}
+// UsageTemplate returns usage template for the command.
func (c *Command) UsageTemplate() string {
if c.usageTemplate != "" {
return c.usageTemplate
@@ -353,6 +360,7 @@ Use "{{.CommandPath}} [command] --help" for more information about a command.{{e
`
}
+// HelpTemplate return help template for the command.
func (c *Command) HelpTemplate() string {
if c.helpTemplate != "" {
return c.helpTemplate
@@ -418,7 +426,7 @@ func stripFlags(args []string, c *Command) []string {
case inFlag:
inFlag = false
case y == "":
- // strip empty commands, as the go tests expect this to be ok....
+ // strip empty commands, as the go tests expect this to be ok....
case !strings.HasPrefix(y, "-"):
commands = append(commands, y)
inFlag = false
@@ -447,7 +455,7 @@ func argsMinusFirstX(args []string, x string) []string {
return args
}
-// find the target command given the args and command tree
+// Find finds the target command given the args and command tree
// Meant to be run on the highest node. Only searches down.
func (c *Command) Find(args []string) (*Command, []string, error) {
if c == nil {
@@ -515,6 +523,7 @@ func (c *Command) Find(args []string) (*Command, []string, error) {
return commandFound, a, nil
}
+// SuggestionsFor provides suggestions for the typedName.
func (c *Command) SuggestionsFor(typedName string) []string {
suggestions := []string{}
for _, cmd := range c.commands {
@@ -535,6 +544,7 @@ func (c *Command) SuggestionsFor(typedName string) []string {
return suggestions
}
+// VisitParents visits all parents of the command and invokes fn on each parent.
func (c *Command) VisitParents(fn func(*Command)) {
var traverse func(*Command) *Command
@@ -550,6 +560,7 @@ func (c *Command) VisitParents(fn func(*Command)) {
traverse(c)
}
+// Root finds root command.
func (c *Command) Root() *Command {
var findRoot func(*Command) *Command
@@ -674,7 +685,7 @@ func (c *Command) errorMsgFromParse() string {
return ""
}
-// Call execute to use the args (os.Args[1:] by default)
+// Execute Call execute to use the args (os.Args[1:] by default)
// and run through the command tree finding appropriate matches
// for commands and then corresponding flags.
func (c *Command) Execute() error {
@@ -682,6 +693,7 @@ func (c *Command) Execute() error {
return err
}
+// ExecuteC executes the command.
func (c *Command) ExecuteC() (cmd *Command, err error) {
// Regardless of what command execute is called on, run on Root only
@@ -779,7 +791,7 @@ func (c *Command) initHelpCmd() {
c.AddCommand(c.helpCommand)
}
-// Used for testing.
+// ResetCommands used for testing.
func (c *Command) ResetCommands() {
c.commands = nil
c.helpCommand = nil
@@ -902,7 +914,7 @@ func (c *Command) UseLine() string {
return str + c.Use
}
-// For use in determining which flags have been assigned to which commands
+// DebugFlags used to determine which flags have been assigned to which commands
// and which persist.
func (c *Command) DebugFlags() {
c.Println("DebugFlags called on", c.Name())
@@ -970,10 +982,12 @@ func (c *Command) HasAlias(s string) bool {
return false
}
+// NameAndAliases returns string containing name and all aliases
func (c *Command) NameAndAliases() string {
return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
}
+// HasExample determines if the command has example.
func (c *Command) HasExample() bool {
return len(c.Example) > 0
}
@@ -1070,7 +1084,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f
return c.globNormFunc
}
-// Flage returns the complete FlagSet that applies
+// Flags returns the complete FlagSet that applies
// to this command (local and persistent declared here and by all parents).
func (c *Command) Flags() *flag.FlagSet {
if c.flags == nil {
@@ -1170,44 +1184,44 @@ func (c *Command) ResetFlags() {
c.pflags.SetOutput(c.flagErrorBuf)
}
-// Does the command contain any flags (local plus persistent from the entire structure).
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
func (c *Command) HasFlags() bool {
return c.Flags().HasFlags()
}
-// Does the command contain persistent flags.
+// HasPersistentFlags checks if the command contains persistent flags.
func (c *Command) HasPersistentFlags() bool {
return c.PersistentFlags().HasFlags()
}
-// Does the command has flags specifically declared locally.
+// HasLocalFlags checks if the command has flags specifically declared locally.
func (c *Command) HasLocalFlags() bool {
return c.LocalFlags().HasFlags()
}
-// Does the command have flags inherited from its parent command.
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
func (c *Command) HasInheritedFlags() bool {
return c.InheritedFlags().HasFlags()
}
-// Does the command contain any flags (local plus persistent from the entire
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
// structure) which are not hidden or deprecated.
func (c *Command) HasAvailableFlags() bool {
return c.Flags().HasAvailableFlags()
}
-// Does the command contain persistent flags which are not hidden or deprecated.
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
func (c *Command) HasAvailablePersistentFlags() bool {
return c.PersistentFlags().HasAvailableFlags()
}
-// Does the command has flags specifically declared locally which are not hidden
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
// or deprecated.
func (c *Command) HasAvailableLocalFlags() bool {
return c.LocalFlags().HasAvailableFlags()
}
-// Does the command have flags inherited from its parent command which are
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
// not hidden or deprecated.
func (c *Command) HasAvailableInheritedFlags() bool {
return c.InheritedFlags().HasAvailableFlags()
diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go
index 5798d0fbf..fd7107c42 100644
--- a/vendor/github.com/spf13/cobra/doc/man_docs.go
+++ b/vendor/github.com/spf13/cobra/doc/man_docs.go
@@ -37,7 +37,7 @@ func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error {
return GenManTreeFromOpts(cmd, GenManTreeOptions{
Header: header,
Path: dir,
- CommandSeparator: "_",
+ CommandSeparator: "-",
})
}
diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.md b/vendor/github.com/spf13/cobra/doc/md_docs.md
index 480b152f0..beec3e0e8 100644
--- a/vendor/github.com/spf13/cobra/doc/md_docs.md
+++ b/vendor/github.com/spf13/cobra/doc/md_docs.md
@@ -32,15 +32,15 @@ import (
"io/ioutil"
"os"
- kubectlcmd "k8s.io/kubernetes/pkg/kubectl/cmd"
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"github.com/spf13/cobra/doc"
)
func main() {
- cmd := kubectlcmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
- doc.GenMarkdownTree(cmd, "./")
+ kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+ doc.GenMarkdownTree(kubectl, "./")
}
```
diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go
index a1c6b89ba..a7d2765a9 100644
--- a/vendor/github.com/spf13/cobra/doc/util.go
+++ b/vendor/github.com/spf13/cobra/doc/util.go
@@ -13,7 +13,11 @@
package doc
-import "github.com/spf13/cobra"
+import (
+ "strings"
+
+ "github.com/spf13/cobra"
+)
// Test to see if we have a reason to print See Also information in docs
// Basically this is a test for a parent command or a subcommand which is
@@ -31,6 +35,15 @@ func hasSeeAlso(cmd *cobra.Command) bool {
return false
}
+// Temporary workaround for yaml lib generating incorrect yaml with long strings
+// that do not contain \n.
+func forceMultiLine(s string) string {
+ if len(s) > 60 && !strings.Contains(s, "\n") {
+ s = s + "\n"
+ }
+ return s
+}
+
type byName []*cobra.Command
func (s byName) Len() int { return len(s) }
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
new file mode 100644
index 000000000..75474d299
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
@@ -0,0 +1,165 @@
+// Copyright 2016 French Ben. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "gopkg.in/yaml.v2"
+)
+
+type cmdOption struct {
+ Name string
+ Shorthand string `yaml:",omitempty"`
+ DefaultValue string `yaml:"default_value,omitempty"`
+ Usage string `yaml:",omitempty"`
+}
+
+type cmdDoc struct {
+ Name string
+ Synopsis string `yaml:",omitempty"`
+ Description string `yaml:",omitempty"`
+ Options []cmdOption `yaml:",omitempty"`
+ InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"`
+ Example string `yaml:",omitempty"`
+ SeeAlso []string `yaml:"see_also,omitempty"`
+}
+
+// GenYamlTree creates yaml structured ref files for this command and all descendants
+// in the directory given. This function may not work
+// correctly if your command names have - in them. If you have `cmd` with two
+// subcmds, `sub` and `sub-third`. And `sub` has a subcommand called `third`
+// it is undefined which help output will be in the file `cmd-sub-third.yaml`.
+func GenYamlTree(cmd *cobra.Command, dir string) error {
+ identity := func(s string) string { return s }
+ emptyStr := func(s string) string { return "" }
+ return GenYamlTreeCustom(cmd, dir, emptyStr, identity)
+}
+
+// GenYamlTreeCustom creates yaml structured ref files
+func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c.IsHelpCommand() {
+ continue
+ }
+ if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
+ return err
+ }
+ }
+
+ basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml"
+ filename := filepath.Join(dir, basename)
+ f, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
+ return err
+ }
+ if err := GenYamlCustom(cmd, f, linkHandler); err != nil {
+ return err
+ }
+ return nil
+}
+
+// GenYaml creates yaml output
+func GenYaml(cmd *cobra.Command, w io.Writer) error {
+ return GenYamlCustom(cmd, w, func(s string) string { return s })
+}
+
+// GenYamlCustom creates custom yaml output
+func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {
+ yamlDoc := cmdDoc{}
+ yamlDoc.Name = cmd.CommandPath()
+
+ yamlDoc.Synopsis = forceMultiLine(cmd.Short)
+ yamlDoc.Description = forceMultiLine(cmd.Long)
+
+ if len(cmd.Example) > 0 {
+ yamlDoc.Example = cmd.Example
+ }
+
+ flags := cmd.NonInheritedFlags()
+ if flags.HasFlags() {
+ yamlDoc.Options = genFlagResult(flags)
+ }
+ flags = cmd.InheritedFlags()
+ if flags.HasFlags() {
+ yamlDoc.InheritedOptions = genFlagResult(flags)
+ }
+
+ if hasSeeAlso(cmd) {
+ result := []string{}
+ if cmd.HasParent() {
+ parent := cmd.Parent()
+ result = append(result, parent.CommandPath()+" - "+parent.Short)
+ }
+ children := cmd.Commands()
+ sort.Sort(byName(children))
+ for _, child := range children {
+ if !child.IsAvailableCommand() || child.IsHelpCommand() {
+ continue
+ }
+ result = append(result, child.Name()+" - "+child.Short)
+ }
+ yamlDoc.SeeAlso = result
+ }
+
+ final, err := yaml.Marshal(&yamlDoc)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ if _, err := fmt.Fprintf(w, string(final)); err != nil {
+ return err
+ }
+ return nil
+}
+
+func genFlagResult(flags *pflag.FlagSet) []cmdOption {
+ var result []cmdOption
+
+ flags.VisitAll(func(flag *pflag.Flag) {
+ // Todo, when we mark a shorthand is deprecated, but specify an empty message.
+ // The flag.ShorthandDeprecated is empty as the shorthand is deprecated.
+ // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok.
+ if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 {
+ opt := cmdOption{
+ flag.Name,
+ flag.Shorthand,
+ flag.DefValue,
+ forceMultiLine(flag.Usage),
+ }
+ result = append(result, opt)
+ } else {
+ opt := cmdOption{
+ Name: flag.Name,
+ DefaultValue: forceMultiLine(flag.DefValue),
+ Usage: forceMultiLine(flag.Usage),
+ }
+ result = append(result, opt)
+ }
+ })
+
+ return result
+}
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.md b/vendor/github.com/spf13/cobra/doc/yaml_docs.md
new file mode 100644
index 000000000..4d0c75a12
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.md
@@ -0,0 +1,103 @@
+# Generating Yaml Docs For Your Own cobra.Command
+
+Generating yaml files from a cobra command is incredibly easy. An example is as follows:
+
+```go
+package main
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
+)
+
+func main() {
+ cmd := &cobra.Command{
+ Use: "test",
+ Short: "my test program",
+ }
+ doc.GenYamlTree(cmd, "/tmp")
+}
+```
+
+That will get you a Yaml document `/tmp/test.yaml`
+
+## Generate yaml docs for the entire command tree
+
+This program can actually generate docs for the kubectl command in the kubernetes project
+
+```go
+package main
+
+import (
+ "io/ioutil"
+ "os"
+
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
+ cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+
+ "github.com/spf13/cobra/doc"
+)
+
+func main() {
+ kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+ doc.GenYamlTree(kubectl, "./")
+}
+```
+
+This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./")
+
+## Generate yaml docs for a single command
+
+You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenYaml` instead of `GenYamlTree`
+
+```go
+ out := new(bytes.Buffer)
+ doc.GenYaml(cmd, out)
+```
+
+This will write the yaml doc for ONLY "cmd" into the out buffer.
+
+## Customize the output
+
+Both `GenYaml` and `GenYamlTree` have alternate versions with callbacks to get some control of the output:
+
+```go
+func GenYamlTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error {
+ //...
+}
+```
+
+```go
+func GenYamlCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error {
+ //...
+}
+```
+
+The `filePrepender` will prepend the return value given the full filepath to the rendered Yaml file. A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/):
+
+```go
+const fmTemplate = `---
+date: %s
+title: "%s"
+slug: %s
+url: %s
+---
+`
+
+filePrepender := func(filename string) string {
+ now := time.Now().Format(time.RFC3339)
+ name := filepath.Base(filename)
+ base := strings.TrimSuffix(name, path.Ext(name))
+ url := "/commands/" + strings.ToLower(base) + "/"
+ return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
+}
+```
+
+The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:
+
+```go
+linkHandler := func(name string) string {
+ base := strings.TrimSuffix(name, path.Ext(name))
+ return "/commands/" + strings.ToLower(base) + "/"
+}
+```
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs_test.go b/vendor/github.com/spf13/cobra/doc/yaml_docs_test.go
new file mode 100644
index 000000000..a41499e1f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs_test.go
@@ -0,0 +1,88 @@
+package doc
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+)
+
+var _ = fmt.Println
+var _ = os.Stderr
+
+func TestGenYamlDoc(t *testing.T) {
+ c := initializeWithRootCmd()
+ // Need two commands to run the command alphabetical sort
+ cmdEcho.AddCommand(cmdTimes, cmdEchoSub, cmdDeprecated)
+ c.AddCommand(cmdPrint, cmdEcho)
+ cmdRootWithRun.PersistentFlags().StringVarP(&flags2a, "rootflag", "r", "two", strtwoParentHelp)
+
+ out := new(bytes.Buffer)
+
+ // We generate on a subcommand so we have both subcommands and parents
+ if err := GenYaml(cmdEcho, out); err != nil {
+ t.Fatal(err)
+ }
+ found := out.String()
+
+ // Our description
+ expected := cmdEcho.Long
+ if !strings.Contains(found, expected) {
+ t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found)
+ }
+
+ // Better have our example
+ expected = cmdEcho.Example
+ if !strings.Contains(found, expected) {
+ t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found)
+ }
+
+ // A local flag
+ expected = "boolone"
+ if !strings.Contains(found, expected) {
+ t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found)
+ }
+
+ // persistent flag on parent
+ expected = "rootflag"
+ if !strings.Contains(found, expected) {
+ t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found)
+ }
+
+ // We better output info about our parent
+ expected = cmdRootWithRun.Short
+ if !strings.Contains(found, expected) {
+ t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found)
+ }
+
+ // And about subcommands
+ expected = cmdEchoSub.Short
+ if !strings.Contains(found, expected) {
+ t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found)
+ }
+
+ unexpected := cmdDeprecated.Short
+ if strings.Contains(found, unexpected) {
+ t.Errorf("Unexpected response.\nFound: %v\nBut should not have!!\n", unexpected)
+ }
+}
+
+func TestGenYamlNoTag(t *testing.T) {
+ c := initializeWithRootCmd()
+ // Need two commands to run the command alphabetical sort
+ cmdEcho.AddCommand(cmdTimes, cmdEchoSub, cmdDeprecated)
+ c.AddCommand(cmdPrint, cmdEcho)
+ c.DisableAutoGenTag = true
+ cmdRootWithRun.PersistentFlags().StringVarP(&flags2a, "rootflag", "r", "two", strtwoParentHelp)
+ out := new(bytes.Buffer)
+
+ if err := GenYaml(c, out); err != nil {
+ t.Fatal(err)
+ }
+ found := out.String()
+
+ unexpected := "Auto generated"
+ checkStringOmits(t, found, unexpected)
+
+}
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 000000000..c3da29013
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
index 08ad94565..eefb46dec 100644
--- a/vendor/github.com/spf13/pflag/README.md
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -1,4 +1,6 @@
[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
+[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
## Description
@@ -106,9 +108,9 @@ that give one-letter shorthands for flags. You can use these by appending
var ip = flag.IntP("flagname", "f", 1234, "help message")
var flagvar bool
func init() {
- flag.BoolVarP("boolname", "b", true, "help message")
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
}
-flag.VarP(&flagVar, "varname", "v", 1234, "help message")
+flag.VarP(&flagVal, "varname", "v", "help message")
```
Shorthand letters can be used with single dashes on the command line.
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 000000000..5af02f1a7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,147 @@
+package pflag
+
+import (
+ "io"
+ "strconv"
+ "strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+ value *[]bool
+ changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+ bsv := new(boolSliceValue)
+ bsv.value = p
+ *bsv.value = val
+ return bsv
+}
+
+// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse boolean values into slice
+ out := make([]bool, 0, len(boolStrSlice))
+ for _, boolStr := range boolStrSlice {
+ b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+ if err != nil {
+ return err
+ }
+ out = append(out, b)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+ return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+ boolStrSlice := make([]string, len(*s.value))
+ for i, b := range *s.value {
+ boolStrSlice[i] = strconv.FormatBool(b)
+ }
+
+ out, _ := writeAsCSV(boolStrSlice)
+
+ return "[" + out + "]"
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []bool{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]bool, len(ss))
+ for i, t := range ss {
+ var err error
+ out[i], err = strconv.ParseBool(t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+ val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+ if err != nil {
+ return []bool{}, err
+ }
+ return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice_test.go b/vendor/github.com/spf13/pflag/bool_slice_test.go
new file mode 100644
index 000000000..b617dd237
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice_test.go
@@ -0,0 +1,215 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func setUpBSFlagSet(bsp *[]bool) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.BoolSliceVar(bsp, "bs", []bool{}, "Command separated list!")
+ return f
+}
+
+func setUpBSFlagSetWithDefault(bsp *[]bool) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.BoolSliceVar(bsp, "bs", []bool{false, true}, "Command separated list!")
+ return f
+}
+
+func TestEmptyBS(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSet(&bs)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getBS, err := f.GetBoolSlice("bs")
+ if err != nil {
+ t.Fatal("got an error from GetBoolSlice():", err)
+ }
+ if len(getBS) != 0 {
+ t.Fatalf("got bs %v with len=%d but expected length=0", getBS, len(getBS))
+ }
+}
+
+func TestBS(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSet(&bs)
+
+ vals := []string{"1", "F", "TRUE", "0"}
+ arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range bs {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected is[%d] to be %s but got: %t", i, vals[i], v)
+ }
+ }
+ getBS, err := f.GetBoolSlice("bs")
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ for i, v := range getBS {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %s but got: %t from GetBoolSlice", i, vals[i], v)
+ }
+ }
+}
+
+func TestBSDefault(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSetWithDefault(&bs)
+
+ vals := []string{"false", "T"}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range bs {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v)
+ }
+ }
+
+ getBS, err := f.GetBoolSlice("bs")
+ if err != nil {
+ t.Fatal("got an error from GetBoolSlice():", err)
+ }
+ for i, v := range getBS {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatal("got an error from GetBoolSlice():", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v)
+ }
+ }
+}
+
+func TestBSWithDefault(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSetWithDefault(&bs)
+
+ vals := []string{"FALSE", "1"}
+ arg := fmt.Sprintf("--bs=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range bs {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %t but got: %t", i, b, v)
+ }
+ }
+
+ getBS, err := f.GetBoolSlice("bs")
+ if err != nil {
+ t.Fatal("got an error from GetBoolSlice():", err)
+ }
+ for i, v := range getBS {
+ b, err := strconv.ParseBool(vals[i])
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if b != v {
+ t.Fatalf("expected bs[%d] to be %t from GetBoolSlice but got: %t", i, b, v)
+ }
+ }
+}
+
+func TestBSCalledTwice(t *testing.T) {
+ var bs []bool
+ f := setUpBSFlagSet(&bs)
+
+ in := []string{"T,F", "T"}
+ expected := []bool{true, false, true}
+ argfmt := "--bs=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range bs {
+ if expected[i] != v {
+ t.Fatalf("expected bs[%d] to be %t but got %t", i, expected[i], v)
+ }
+ }
+}
+
+func TestBSBadQuoting(t *testing.T) {
+
+ tests := []struct {
+ Want []bool
+ FlagArg []string
+ }{
+ {
+ Want: []bool{true, false, true},
+ FlagArg: []string{"1", "0", "true"},
+ },
+ {
+ Want: []bool{true, false},
+ FlagArg: []string{"True", "F"},
+ },
+ {
+ Want: []bool{true, false},
+ FlagArg: []string{"T", "0"},
+ },
+ {
+ Want: []bool{true, false},
+ FlagArg: []string{"1", "0"},
+ },
+ {
+ Want: []bool{true, false, false},
+ FlagArg: []string{"true,false", "false"},
+ },
+ {
+ Want: []bool{true, false, false, true, false, true, false},
+ FlagArg: []string{`"true,false,false,1,0, T"`, " false "},
+ },
+ {
+ Want: []bool{false, false, true, false, true, false, true},
+ FlagArg: []string{`"0, False, T,false , true,F"`, "true"},
+ },
+ }
+
+ for i, test := range tests {
+
+ var bs []bool
+ f := setUpBSFlagSet(&bs)
+
+ if err := f.Parse([]string{fmt.Sprintf("--bs=%s", strings.Join(test.FlagArg, ","))}); err != nil {
+ t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%#v",
+ err, test.FlagArg, test.Want[i])
+ }
+
+ for j, b := range bs {
+ if b != test.Want[j] {
+ t.Fatalf("bad value parsed for test %d on bool %d:\nwant:\t%t\ngot:\t%t", i, j, test.Want[j], b)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index fa815642e..746af6327 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -487,9 +487,76 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
return
}
-// FlagUsages Returns a string containing the usage information for all flags in
-// the FlagSet
-func (f *FlagSet) FlagUsages() string {
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+ if i+slop > len(s) {
+ return s, ""
+ }
+
+ w := strings.LastIndexAny(s[:i], " \t")
+ if w <= 0 {
+ return s, ""
+ }
+
+ return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). Pass `w` == 0 to do no wrapping
+func wrap(i, w int, s string) string {
+ if w == 0 {
+ return s
+ }
+
+ // space between indent i and end of line width w into which
+ // we should wrap the text.
+ wrap := w - i
+
+ var r, l string
+
+ // Not enough space for sensible wrapping. Wrap as a block on
+ // the next line instead.
+ if wrap < 24 {
+ i = 16
+ wrap = w - i
+ r += "\n" + strings.Repeat(" ", i)
+ }
+ // If still not enough space then don't even try to wrap.
+ if wrap < 24 {
+ return s
+ }
+
+ // Try to avoid short orphan words on the final line, by
+ // allowing wrapN to go a bit over if that would fit in the
+ // remainder of the line.
+ slop := 5
+ wrap = wrap - slop
+
+ // Handle first line, which is indented by the caller (or the
+ // special case above)
+ l, s = wrapN(wrap, slop, s)
+ r = r + l
+
+ // Now wrap the rest
+ for s != "" {
+ var t string
+
+ t, s = wrapN(wrap, slop, s)
+ r = r + "\n" + strings.Repeat(" ", i) + t
+ }
+
+ return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
x := new(bytes.Buffer)
lines := make([]string, 0, len(f.formal))
@@ -546,12 +613,19 @@ func (f *FlagSet) FlagUsages() string {
for _, line := range lines {
sidx := strings.Index(line, "\x00")
spacing := strings.Repeat(" ", maxlen-sidx)
- fmt.Fprintln(x, line[:sidx], spacing, line[sidx+1:])
+ // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+ fmt.Fprintln(x, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
}
return x.String()
}
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ return f.FlagUsagesWrapped(0)
+}
+
// PrintDefaults prints to standard error the default values of all defined command-line flags.
func PrintDefaults() {
CommandLine.PrintDefaults()
@@ -635,7 +709,7 @@ func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
- _ = f.VarPF(value, name, shorthand, usage)
+ f.VarPF(value, name, shorthand, usage)
}
// AddFlag will add the flag to the FlagSet
@@ -752,7 +826,7 @@ func containsShorthand(arg, shorthand string) bool {
return strings.Contains(arg, shorthand)
}
-func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) {
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
a = args
name := s[2:]
if len(name) == 0 || name[0] == '-' || name[0] == '=' {
@@ -786,11 +860,11 @@ func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error)
err = f.failf("flag needs an argument: %s", s)
return
}
- err = f.setFlag(flag, value, s)
+ err = fn(flag, value, s)
return
}
-func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) {
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
if strings.HasPrefix(shorthands, "test.") {
return
}
@@ -825,16 +899,16 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShor
err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
return
}
- err = f.setFlag(flag, value, shorthands)
+ err = fn(flag, value, shorthands)
return
}
-func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) {
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
a = args
shorthands := s[1:]
for len(shorthands) > 0 {
- shorthands, a, err = f.parseSingleShortArg(shorthands, args)
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
if err != nil {
return
}
@@ -843,7 +917,7 @@ func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error)
return
}
-func (f *FlagSet) parseArgs(args []string) (err error) {
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
for len(args) > 0 {
s := args[0]
args = args[1:]
@@ -863,9 +937,9 @@ func (f *FlagSet) parseArgs(args []string) (err error) {
f.args = append(f.args, args...)
break
}
- args, err = f.parseLongArg(s, args)
+ args, err = f.parseLongArg(s, args, fn)
} else {
- args, err = f.parseShortArg(s, args)
+ args, err = f.parseShortArg(s, args, fn)
}
if err != nil {
return
@@ -881,7 +955,41 @@ func (f *FlagSet) parseArgs(args []string) (err error) {
func (f *FlagSet) Parse(arguments []string) error {
f.parsed = true
f.args = make([]string, 0, len(arguments))
- err := f.parseArgs(arguments)
+
+ assign := func(flag *Flag, value, origArg string) error {
+ return f.setFlag(flag, value, origArg)
+ }
+
+ err := f.parseArgs(arguments, assign)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+type parseFunc func(flag *Flag, value, origArg string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
+ assign := func(flag *Flag, value, origArg string) error {
+ return fn(flag, value)
+ }
+
+ err := f.parseArgs(arguments, assign)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
@@ -907,6 +1015,14 @@ func Parse() {
CommandLine.Parse(os.Args[1:])
}
+// ParseAll parses the command-line flags from os.Args[1:] and called fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.ParseAll(os.Args[1:], fn)
+}
+
// SetInterspersed sets whether to support interspersed option/non-option arguments.
func SetInterspersed(interspersed bool) {
CommandLine.SetInterspersed(interspersed)
diff --git a/vendor/github.com/spf13/pflag/flag_test.go b/vendor/github.com/spf13/pflag/flag_test.go
index b294fc768..b83a0ed6a 100644
--- a/vendor/github.com/spf13/pflag/flag_test.go
+++ b/vendor/github.com/spf13/pflag/flag_test.go
@@ -333,6 +333,59 @@ func testParse(f *FlagSet, t *testing.T) {
}
}
+func testParseAll(f *FlagSet, t *testing.T) {
+ if f.Parsed() {
+ fmt.Errorf("f.Parse() = true before Parse")
+ }
+ f.BoolP("boola", "a", false, "bool value")
+ f.BoolP("boolb", "b", false, "bool2 value")
+ f.BoolP("boolc", "c", false, "bool3 value")
+ f.BoolP("boold", "d", false, "bool4 value")
+ f.StringP("stringa", "s", "0", "string value")
+ f.StringP("stringz", "z", "0", "string value")
+ f.StringP("stringx", "x", "0", "string value")
+ f.StringP("stringy", "y", "0", "string value")
+ f.Lookup("stringx").NoOptDefVal = "1"
+ args := []string{
+ "-ab",
+ "-cs=xx",
+ "--stringz=something",
+ "-d=true",
+ "-x",
+ "-y",
+ "ee",
+ }
+ want := []string{
+ "boola", "true",
+ "boolb", "true",
+ "boolc", "true",
+ "stringa", "xx",
+ "stringz", "something",
+ "boold", "true",
+ "stringx", "1",
+ "stringy", "ee",
+ }
+ got := []string{}
+ store := func(flag *Flag, value string) error {
+ got = append(got, flag.Name)
+ if len(value) > 0 {
+ got = append(got, value)
+ }
+ return nil
+ }
+ if err := f.ParseAll(args, store); err != nil {
+ t.Errorf("expected no error, got ", err)
+ }
+ if !f.Parsed() {
+ t.Errorf("f.Parse() = false after Parse")
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("f.ParseAll() fail to restore the args")
+ t.Errorf("Got: %v", got)
+ t.Errorf("Want: %v", want)
+ }
+}
+
func TestShorthand(t *testing.T) {
f := NewFlagSet("shorthand", ContinueOnError)
if f.Parsed() {
@@ -398,16 +451,21 @@ func TestParse(t *testing.T) {
testParse(GetCommandLine(), t)
}
+func TestParseAll(t *testing.T) {
+ ResetForTesting(func() { t.Error("bad parse") })
+ testParseAll(GetCommandLine(), t)
+}
+
func TestFlagSetParse(t *testing.T) {
testParse(NewFlagSet("test", ContinueOnError), t)
}
func TestChangedHelper(t *testing.T) {
f := NewFlagSet("changedtest", ContinueOnError)
- _ = f.Bool("changed", false, "changed bool")
- _ = f.Bool("settrue", true, "true to true")
- _ = f.Bool("setfalse", false, "false to false")
- _ = f.Bool("unchanged", false, "unchanged bool")
+ f.Bool("changed", false, "changed bool")
+ f.Bool("settrue", true, "true to true")
+ f.Bool("setfalse", false, "false to false")
+ f.Bool("unchanged", false, "unchanged bool")
args := []string{"--changed", "--settrue", "--setfalse=false"}
if err := f.Parse(args); err != nil {
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
index b056147fd..c4f47ebe5 100644
--- a/vendor/github.com/spf13/pflag/golangflag.go
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -6,13 +6,10 @@ package pflag
import (
goflag "flag"
- "fmt"
"reflect"
"strings"
)
-var _ = fmt.Print
-
// flagValueWrapper implements pflag.Value around a flag.Value. The main
// difference here is the addition of the Type method that returns a string
// name of the type. As this is generally unknown, we approximate that with
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
index 88a17430a..3d414ba69 100644
--- a/vendor/github.com/spf13/pflag/ip.go
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -6,8 +6,6 @@ import (
"strings"
)
-var _ = strings.TrimSpace
-
// -- net.IP value
type ipValue net.IP
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 000000000..7dd196fe3
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,148 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+ value *[]net.IP
+ changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+ ipsv := new(ipSliceValue)
+ ipsv.value = p
+ *ipsv.value = val
+ return ipsv
+}
+
+// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IP, 0, len(ipStrSlice))
+ for _, ipStr := range ipStrSlice {
+ ip := net.ParseIP(strings.TrimSpace(ipStr))
+ if ip == nil {
+ return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+ }
+ out = append(out, ip)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+ return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+ ipStrSlice := make([]string, len(*s.value))
+ for i, ip := range *s.value {
+ ipStrSlice[i] = ip.String()
+ }
+
+ out, _ := writeAsCSV(ipStrSlice)
+
+ return "[" + out + "]"
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Emtpy string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IP{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IP, len(ss))
+ for i, sval := range ss {
+ ip := net.ParseIP(strings.TrimSpace(sval))
+ if ip == nil {
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+ }
+ out[i] = ip
+ }
+ return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+ val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+ if err != nil {
+ return []net.IP{}, err
+ }
+ return val.([]net.IP), nil
+}
+
+// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/ip_slice_test.go b/vendor/github.com/spf13/pflag/ip_slice_test.go
new file mode 100644
index 000000000..b0c681c5b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice_test.go
@@ -0,0 +1,222 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "testing"
+)
+
+func setUpIPSFlagSet(ipsp *[]net.IP) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.IPSliceVar(ipsp, "ips", []net.IP{}, "Command separated list!")
+ return f
+}
+
+func setUpIPSFlagSetWithDefault(ipsp *[]net.IP) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.IPSliceVar(ipsp, "ips",
+ []net.IP{
+ net.ParseIP("192.168.1.1"),
+ net.ParseIP("0:0:0:0:0:0:0:1"),
+ },
+ "Command separated list!")
+ return f
+}
+
+func TestEmptyIP(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSet(&ips)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getIPS, err := f.GetIPSlice("ips")
+ if err != nil {
+ t.Fatal("got an error from GetIPSlice():", err)
+ }
+ if len(getIPS) != 0 {
+ t.Fatalf("got ips %v with len=%d but expected length=0", getIPS, len(getIPS))
+ }
+}
+
+func TestIPS(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSet(&ips)
+
+ vals := []string{"192.168.1.1", "10.0.0.1", "0:0:0:0:0:0:0:2"}
+ arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ips {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s from GetIPSlice", i, vals[i], v)
+ }
+ }
+}
+
+func TestIPSDefault(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSetWithDefault(&ips)
+
+ vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"}
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ips {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+
+ getIPS, err := f.GetIPSlice("ips")
+ if err != nil {
+ t.Fatal("got an error from GetIPSlice")
+ }
+ for i, v := range getIPS {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+}
+
+func TestIPSWithDefault(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSetWithDefault(&ips)
+
+ vals := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"}
+ arg := fmt.Sprintf("--ips=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ips {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+
+ getIPS, err := f.GetIPSlice("ips")
+ if err != nil {
+ t.Fatal("got an error from GetIPSlice")
+ }
+ for i, v := range getIPS {
+ if ip := net.ParseIP(vals[i]); ip == nil {
+ t.Fatalf("invalid string being converted to IP address: %s", vals[i])
+ } else if !ip.Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, vals[i], v)
+ }
+ }
+}
+
+func TestIPSCalledTwice(t *testing.T) {
+ var ips []net.IP
+ f := setUpIPSFlagSet(&ips)
+
+ in := []string{"192.168.1.2,0:0:0:0:0:0:0:1", "10.0.0.1"}
+ expected := []net.IP{net.ParseIP("192.168.1.2"), net.ParseIP("0:0:0:0:0:0:0:1"), net.ParseIP("10.0.0.1")}
+ argfmt := "ips=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range ips {
+ if !expected[i].Equal(v) {
+ t.Fatalf("expected ips[%d] to be %s but got: %s", i, expected[i], v)
+ }
+ }
+}
+
+func TestIPSBadQuoting(t *testing.T) {
+
+ tests := []struct {
+ Want []net.IP
+ FlagArg []string
+ }{
+ {
+ Want: []net.IP{
+ net.ParseIP("a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568"),
+ net.ParseIP("203.107.49.208"),
+ net.ParseIP("14.57.204.90"),
+ },
+ FlagArg: []string{
+ "a4ab:61d:f03e:5d7d:fad7:d4c2:a1a5:568",
+ "203.107.49.208",
+ "14.57.204.90",
+ },
+ },
+ {
+ Want: []net.IP{
+ net.ParseIP("204.228.73.195"),
+ net.ParseIP("86.141.15.94"),
+ },
+ FlagArg: []string{
+ "204.228.73.195",
+ "86.141.15.94",
+ },
+ },
+ {
+ Want: []net.IP{
+ net.ParseIP("c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f"),
+ net.ParseIP("4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472"),
+ },
+ FlagArg: []string{
+ "c70c:db36:3001:890f:c6ea:3f9b:7a39:cc3f",
+ "4d17:1d6e:e699:bd7a:88c5:5e7e:ac6a:4472",
+ },
+ },
+ {
+ Want: []net.IP{
+ net.ParseIP("5170:f971:cfac:7be3:512a:af37:952c:bc33"),
+ net.ParseIP("93.21.145.140"),
+ net.ParseIP("2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca"),
+ },
+ FlagArg: []string{
+ " 5170:f971:cfac:7be3:512a:af37:952c:bc33 , 93.21.145.140 ",
+ "2cac:61d3:c5ff:6caf:73e0:1b1a:c336:c1ca",
+ },
+ },
+ {
+ Want: []net.IP{
+ net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"),
+ net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"),
+ net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"),
+ net.ParseIP("2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"),
+ },
+ FlagArg: []string{
+ `"2e5e:66b2:6441:848:5b74:76ea:574c:3a7b, 2e5e:66b2:6441:848:5b74:76ea:574c:3a7b,2e5e:66b2:6441:848:5b74:76ea:574c:3a7b "`,
+ " 2e5e:66b2:6441:848:5b74:76ea:574c:3a7b"},
+ },
+ }
+
+ for i, test := range tests {
+
+ var ips []net.IP
+ f := setUpIPSFlagSet(&ips)
+
+ if err := f.Parse([]string{fmt.Sprintf("--ips=%s", strings.Join(test.FlagArg, ","))}); err != nil {
+ t.Fatalf("flag parsing failed with error: %s\nparsing:\t%#v\nwant:\t\t%s",
+ err, test.FlagArg, test.Want[i])
+ }
+
+ for j, b := range ips {
+ if !b.Equal(test.Want[j]) {
+ t.Fatalf("bad value parsed for test %d on net.IP %d:\nwant:\t%s\ngot:\t%s", i, j, test.Want[j], b)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
index 149b764b1..e2c1b8bcd 100644
--- a/vendor/github.com/spf13/pflag/ipnet.go
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -27,8 +27,6 @@ func (*ipNetValue) Type() string {
return "ipNet"
}
-var _ = strings.TrimSpace
-
func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
*p = val
return (*ipNetValue)(p)
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
index 93b4e4329..276b7ed49 100644
--- a/vendor/github.com/spf13/pflag/string_array.go
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -1,11 +1,5 @@
package pflag
-import (
- "fmt"
-)
-
-var _ = fmt.Fprint
-
// -- stringArray Value
type stringArrayValue struct {
value *[]string
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
index 7829cfafb..05eee7543 100644
--- a/vendor/github.com/spf13/pflag/string_slice.go
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -3,12 +3,9 @@ package pflag
import (
"bytes"
"encoding/csv"
- "fmt"
"strings"
)
-var _ = fmt.Fprint
-
// -- stringSlice Value
type stringSliceValue struct {
value *[]string
@@ -39,7 +36,7 @@ func writeAsCSV(vals []string) (string, error) {
return "", err
}
w.Flush()
- return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil
+ return strings.TrimSuffix(b.String(), "\n"), nil
}
func (s *stringSliceValue) Set(val string) error {
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 000000000..edd94c600
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,126 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+ value *[]uint
+ changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+ uisv := new(uintSliceValue)
+ uisv.value = p
+ *uisv.value = val
+ return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return err
+ }
+ out[i] = uint(u)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *uintSliceValue) Type() string {
+ return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []uint{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = uint(u)
+ }
+ return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+ val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+ if err != nil {
+ return []uint{}, err
+ }
+ return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a uint[] flag with specified name, default value, and usage string.
+// The argument p points to a uint[] variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice_test.go b/vendor/github.com/spf13/pflag/uint_slice_test.go
new file mode 100644
index 000000000..db1a19dc2
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice_test.go
@@ -0,0 +1,161 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func setUpUISFlagSet(uisp *[]uint) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.UintSliceVar(uisp, "uis", []uint{}, "Command separated list!")
+ return f
+}
+
+func setUpUISFlagSetWithDefault(uisp *[]uint) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ f.UintSliceVar(uisp, "uis", []uint{0, 1}, "Command separated list!")
+ return f
+}
+
+func TestEmptyUIS(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSet(&uis)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+
+ getUIS, err := f.GetUintSlice("uis")
+ if err != nil {
+ t.Fatal("got an error from GetUintSlice():", err)
+ }
+ if len(getUIS) != 0 {
+ t.Fatalf("got is %v with len=%d but expected length=0", getUIS, len(getUIS))
+ }
+}
+
+func TestUIS(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSet(&uis)
+
+ vals := []string{"1", "2", "4", "3"}
+ arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range uis {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %s but got %d", i, vals[i], v)
+ }
+ }
+ getUIS, err := f.GetUintSlice("uis")
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ for i, v := range getUIS {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %s but got: %d from GetUintSlice", i, vals[i], v)
+ }
+ }
+}
+
+func TestUISDefault(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSetWithDefault(&uis)
+
+ vals := []string{"0", "1"}
+
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range uis {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expect uis[%d] to be %d but got: %d", i, u, v)
+ }
+ }
+
+ getUIS, err := f.GetUintSlice("uis")
+ if err != nil {
+ t.Fatal("got an error from GetUintSlice():", err)
+ }
+ for i, v := range getUIS {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatal("got an error from GetIntSlice():", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v)
+ }
+ }
+}
+
+func TestUISWithDefault(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSetWithDefault(&uis)
+
+ vals := []string{"1", "2"}
+ arg := fmt.Sprintf("--uis=%s", strings.Join(vals, ","))
+ err := f.Parse([]string{arg})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range uis {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v)
+ }
+ }
+
+ getUIS, err := f.GetUintSlice("uis")
+ if err != nil {
+ t.Fatal("got an error from GetUintSlice():", err)
+ }
+ for i, v := range getUIS {
+ u, err := strconv.ParseUint(vals[i], 10, 0)
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+ if uint(u) != v {
+ t.Fatalf("expected uis[%d] to be %d from GetUintSlice but got: %d", i, u, v)
+ }
+ }
+}
+
+func TestUISCalledTwice(t *testing.T) {
+ var uis []uint
+ f := setUpUISFlagSet(&uis)
+
+ in := []string{"1,2", "3"}
+ expected := []int{1, 2, 3}
+ argfmt := "--uis=%s"
+ arg1 := fmt.Sprintf(argfmt, in[0])
+ arg2 := fmt.Sprintf(argfmt, in[1])
+ err := f.Parse([]string{arg1, arg2})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ for i, v := range uis {
+ if uint(expected[i]) != v {
+ t.Fatalf("expected uis[%d] to be %d but got: %d", i, expected[i], v)
+ }
+ }
+}
diff --git a/vendor/github.com/tylerb/graceful/graceful.go b/vendor/github.com/tylerb/graceful/graceful.go
index 07c990489..d6a9ca068 100644
--- a/vendor/github.com/tylerb/graceful/graceful.go
+++ b/vendor/github.com/tylerb/graceful/graceful.go
@@ -172,10 +172,12 @@ func (srv *Server) ListenTLS(certFile, keyFile string) (net.Listener, error) {
}
var err error
- config.Certificates = make([]tls.Certificate, 1)
- config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return nil, err
+ if certFile != "" && keyFile != "" {
+ config.Certificates = make([]tls.Certificate, 1)
+ config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
}
// Enable http2
@@ -455,6 +457,8 @@ func (srv *Server) shutdown(shutdown chan chan struct{}, kill chan struct{}) {
done := make(chan struct{})
shutdown <- done
+ srv.stopLock.Lock()
+ defer srv.stopLock.Unlock()
if srv.Timeout > 0 {
select {
case <-done:
diff --git a/vendor/github.com/xenolf/lego/README.md b/vendor/github.com/xenolf/lego/README.md
index 136bc5548..9be562944 100644
--- a/vendor/github.com/xenolf/lego/README.md
+++ b/vendor/github.com/xenolf/lego/README.md
@@ -244,7 +244,7 @@ if err != nil {
// The acme library takes care of completing the challenges to obtain the certificate(s).
// The domains must resolve to this machine or you have to use the DNS challenge.
bundle := false
-certificates, failures := client.ObtainCertificate([]string{"mydomain.com"}, bundle, nil)
+certificates, failures := client.ObtainCertificate([]string{"mydomain.com"}, bundle, nil, false)
if len(failures) > 0 {
log.Fatal(failures)
}
diff --git a/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/xenolf/lego/acme/client.go
index 9f837af36..e824f5080 100644
--- a/vendor/github.com/xenolf/lego/acme/client.go
+++ b/vendor/github.com/xenolf/lego/acme/client.go
@@ -11,6 +11,7 @@ import (
"io/ioutil"
"log"
"net"
+ "net/http"
"regexp"
"strconv"
"strings"
@@ -22,6 +23,9 @@ var (
Logger *log.Logger
)
+// maxBodySize is the maximum size of body that we will read.
+const maxBodySize = 1024 * 1024
+
// logf writes a log entry. It uses Logger if not
// nil, otherwise it uses the default log.Logger.
func logf(format string, args ...interface{}) {
@@ -49,12 +53,11 @@ type validateFunc func(j *jws, domain, uri string, chlng challenge) error
// Client is the user-friendy way to ACME
type Client struct {
- directory directory
- user User
- jws *jws
- keyType KeyType
- issuerCert []byte
- solvers map[Challenge]solver
+ directory directory
+ user User
+ jws *jws
+ keyType KeyType
+ solvers map[Challenge]solver
}
// NewClient creates a new ACME client on behalf of the user. The client will depend on
@@ -611,89 +614,108 @@ func (c *Client) requestCertificateForCsr(authz []authorizationResource, bundle
return CertificateResource{}, err
}
- cerRes := CertificateResource{
+ certRes := CertificateResource{
Domain: commonName.Domain,
CertURL: resp.Header.Get("Location"),
- PrivateKey: privateKeyPem}
+ PrivateKey: privateKeyPem,
+ }
- for {
- switch resp.StatusCode {
- case 201, 202:
- cert, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
- resp.Body.Close()
- if err != nil {
- return CertificateResource{}, err
- }
+ maxChecks := 1000
+ for i := 0; i < maxChecks; i++ {
+ done, err := c.checkCertResponse(resp, &certRes, bundle)
+ resp.Body.Close()
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ if done {
+ break
+ }
+ if i == maxChecks-1 {
+ return CertificateResource{}, fmt.Errorf("polled for certificate %d times; giving up", i)
+ }
+ resp, err = httpGet(certRes.CertURL)
+ if err != nil {
+ return CertificateResource{}, err
+ }
+ }
+
+ return certRes, nil
+}
+
+// checkCertResponse checks resp to see if a certificate is contained in the
+// response, and if so, loads it into certRes and returns true. If the cert
+// is not yet ready, it returns false. This function honors the waiting period
+// required by the Retry-After header of the response, if specified. This
+// function may read from resp.Body but does NOT close it. The certRes input
+// should already have the Domain (common name) field populated. If bundle is
+// true, the certificate will be bundled with the issuer's cert.
+func (c *Client) checkCertResponse(resp *http.Response, certRes *CertificateResource, bundle bool) (bool, error) {
+ switch resp.StatusCode {
+ case 201, 202:
+ cert, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize))
+ if err != nil {
+ return false, err
+ }
+
+ // The server returns a body with a length of zero if the
+ // certificate was not ready at the time this request completed.
+ // Otherwise the body is the certificate.
+ if len(cert) > 0 {
+ certRes.CertStableURL = resp.Header.Get("Content-Location")
+ certRes.AccountRef = c.user.GetRegistration().URI
- // The server returns a body with a length of zero if the
- // certificate was not ready at the time this request completed.
- // Otherwise the body is the certificate.
- if len(cert) > 0 {
+ issuedCert := pemEncode(derCertificateBytes(cert))
- cerRes.CertStableURL = resp.Header.Get("Content-Location")
- cerRes.AccountRef = c.user.GetRegistration().URI
+ // The issuer certificate link is always supplied via an "up" link
+ // in the response headers of a new certificate.
+ links := parseLinks(resp.Header["Link"])
+ issuerCert, err := c.getIssuerCertificate(links["up"])
+ if err != nil {
+ // If we fail to acquire the issuer cert, return the issued certificate - do not fail.
+ logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", certRes.Domain, err)
+ } else {
+ issuerCert = pemEncode(derCertificateBytes(issuerCert))
- issuedCert := pemEncode(derCertificateBytes(cert))
// If bundle is true, we want to return a certificate bundle.
- // To do this, we need the issuer certificate.
+ // To do this, we append the issuer cert to the issued cert.
if bundle {
- // The issuer certificate link is always supplied via an "up" link
- // in the response headers of a new certificate.
- links := parseLinks(resp.Header["Link"])
- issuerCert, err := c.getIssuerCertificate(links["up"])
- if err != nil {
- // If we fail to acquire the issuer cert, return the issued certificate - do not fail.
- logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", commonName.Domain, err)
- } else {
- // Success - append the issuer cert to the issued cert.
- issuerCert = pemEncode(derCertificateBytes(issuerCert))
- issuedCert = append(issuedCert, issuerCert...)
- }
+ issuedCert = append(issuedCert, issuerCert...)
}
-
- cerRes.Certificate = issuedCert
- logf("[INFO][%s] Server responded with a certificate.", commonName.Domain)
- return cerRes, nil
- }
-
- // The certificate was granted but is not yet issued.
- // Check retry-after and loop.
- ra := resp.Header.Get("Retry-After")
- retryAfter, err := strconv.Atoi(ra)
- if err != nil {
- return CertificateResource{}, err
}
- logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", commonName.Domain, retryAfter)
- time.Sleep(time.Duration(retryAfter) * time.Second)
-
- break
- default:
- return CertificateResource{}, handleHTTPError(resp)
+ certRes.Certificate = issuedCert
+ certRes.IssuerCertificate = issuerCert
+ logf("[INFO][%s] Server responded with a certificate.", certRes.Domain)
+ return true, nil
}
- resp, err = httpGet(cerRes.CertURL)
+ // The certificate was granted but is not yet issued.
+ // Check retry-after and loop.
+ ra := resp.Header.Get("Retry-After")
+ retryAfter, err := strconv.Atoi(ra)
if err != nil {
- return CertificateResource{}, err
+ return false, err
}
+
+ logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", certRes.Domain, retryAfter)
+ time.Sleep(time.Duration(retryAfter) * time.Second)
+
+ return false, nil
+ default:
+ return false, handleHTTPError(resp)
}
}
-// getIssuerCertificate requests the issuer certificate and caches it for
-// subsequent requests.
+// getIssuerCertificate requests the issuer certificate
func (c *Client) getIssuerCertificate(url string) ([]byte, error) {
logf("[INFO] acme: Requesting issuer cert from %s", url)
- if c.issuerCert != nil {
- return c.issuerCert, nil
- }
-
resp, err := httpGet(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
- issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+ issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize))
if err != nil {
return nil, err
}
@@ -703,7 +725,6 @@ func (c *Client) getIssuerCertificate(url string) ([]byte, error) {
return nil, err
}
- c.issuerCert = issuerBytes
return issuerBytes, err
}
diff --git a/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/xenolf/lego/acme/crypto.go
index c63b23b99..fa868a90d 100644
--- a/vendor/github.com/xenolf/lego/acme/crypto.go
+++ b/vendor/github.com/xenolf/lego/acme/crypto.go
@@ -226,7 +226,7 @@ func generateCsr(privateKey crypto.PrivateKey, domain string, san []string, must
}
if mustStaple {
- template.Extensions = append(template.Extensions, pkix.Extension{
+ template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{
Id: tlsFeatureExtensionOID,
Value: ocspMustStapleFeature,
})
diff --git a/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/xenolf/lego/acme/error.go
index 2aa690b33..6d7013cf1 100644
--- a/vendor/github.com/xenolf/lego/acme/error.go
+++ b/vendor/github.com/xenolf/lego/acme/error.go
@@ -8,9 +8,7 @@ import (
"strings"
)
-const (
- tosAgreementError = "Must agree to subscriber agreement before any further actions"
-)
+const tosAgreementError = "Must agree to subscriber agreement before any further actions"
// RemoteError is the base type for all errors specific to the ACME protocol.
type RemoteError struct {
@@ -54,20 +52,17 @@ func (c challengeError) Error() string {
func handleHTTPError(resp *http.Response) error {
var errorDetail RemoteError
- contenType := resp.Header.Get("Content-Type")
- // try to decode the content as JSON
- if contenType == "application/json" || contenType == "application/problem+json" {
- decoder := json.NewDecoder(resp.Body)
- err := decoder.Decode(&errorDetail)
+ contentType := resp.Header.Get("Content-Type")
+ if contentType == "application/json" || contentType == "application/problem+json" {
+ err := json.NewDecoder(resp.Body).Decode(&errorDetail)
if err != nil {
return err
}
} else {
- detailBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
+ detailBytes, err := ioutil.ReadAll(limitReader(resp.Body, maxBodySize))
if err != nil {
return err
}
-
errorDetail.Detail = string(detailBytes)
}
diff --git a/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/xenolf/lego/acme/jws.go
index f70513e38..2a1fc244d 100644
--- a/vendor/github.com/xenolf/lego/acme/jws.go
+++ b/vendor/github.com/xenolf/lego/acme/jws.go
@@ -32,7 +32,9 @@ func keyAsJWK(key interface{}) *jose.JsonWebKey {
}
}
-// Posts a JWS signed message to the specified URL
+// Posts a JWS signed message to the specified URL.
+// It does NOT close the response body, so the caller must
+// do that if no error was returned.
func (j *jws) post(url string, content []byte) (*http.Response, error) {
signedContent, err := j.signContent(content)
if err != nil {
@@ -44,6 +46,8 @@ func (j *jws) post(url string, content []byte) (*http.Response, error) {
return nil, err
}
+ j.Lock()
+ defer j.Unlock()
j.getNonceFromResponse(resp)
return resp, err
@@ -77,8 +81,6 @@ func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) {
}
func (j *jws) getNonceFromResponse(resp *http.Response) error {
- j.Lock()
- defer j.Unlock()
nonce := resp.Header.Get("Replay-Nonce")
if nonce == "" {
return fmt.Errorf("Server did not respond with a proper nonce header.")
@@ -98,6 +100,8 @@ func (j *jws) getNonce() error {
}
func (j *jws) Nonce() (string, error) {
+ j.Lock()
+ defer j.Unlock()
nonce := ""
if len(j.nonces) == 0 {
err := j.getNonce()
@@ -108,8 +112,6 @@ func (j *jws) Nonce() (string, error) {
if len(j.nonces) == 0 {
return "", fmt.Errorf("Can't get nonce")
}
- j.Lock()
- defer j.Unlock()
nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1]
return nonce, nil
}
diff --git a/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/xenolf/lego/acme/messages.go
index 0f6514c3f..36db3b217 100644
--- a/vendor/github.com/xenolf/lego/acme/messages.go
+++ b/vendor/github.com/xenolf/lego/acme/messages.go
@@ -94,16 +94,17 @@ type revokeCertMessage struct {
}
// CertificateResource represents a CA issued certificate.
-// PrivateKey and Certificate are both already PEM encoded
-// and can be directly written to disk. Certificate may
-// be a certificate bundle, depending on the options supplied
-// to create it.
+// PrivateKey, Certificate and IssuerCertificate are all
+// already PEM encoded and can be directly written to disk.
+// Certificate may be a certificate bundle, depending on the
+// options supplied to create it.
type CertificateResource struct {
- Domain string `json:"domain"`
- CertURL string `json:"certUrl"`
- CertStableURL string `json:"certStableUrl"`
- AccountRef string `json:"accountRef,omitempty"`
- PrivateKey []byte `json:"-"`
- Certificate []byte `json:"-"`
- CSR []byte `json:"-"`
+ Domain string `json:"domain"`
+ CertURL string `json:"certUrl"`
+ CertStableURL string `json:"certStableUrl"`
+ AccountRef string `json:"accountRef,omitempty"`
+ PrivateKey []byte `json:"-"`
+ Certificate []byte `json:"-"`
+ IssuerCertificate []byte `json:"-"`
+ CSR []byte `json:"-"`
}
diff --git a/vendor/github.com/xenolf/lego/cli_handlers.go b/vendor/github.com/xenolf/lego/cli_handlers.go
index 858d71000..dad19a144 100644
--- a/vendor/github.com/xenolf/lego/cli_handlers.go
+++ b/vendor/github.com/xenolf/lego/cli_handlers.go
@@ -136,12 +136,20 @@ func saveCertRes(certRes acme.CertificateResource, conf *Configuration) {
privOut := path.Join(conf.CertPath(), certRes.Domain+".key")
pemOut := path.Join(conf.CertPath(), certRes.Domain+".pem")
metaOut := path.Join(conf.CertPath(), certRes.Domain+".json")
+ issuerOut := path.Join(conf.CertPath(), certRes.Domain+".issuer.crt")
err := ioutil.WriteFile(certOut, certRes.Certificate, 0600)
if err != nil {
logger().Fatalf("Unable to save Certificate for domain %s\n\t%s", certRes.Domain, err.Error())
}
+ if certRes.IssuerCertificate != nil {
+ err = ioutil.WriteFile(issuerOut, certRes.IssuerCertificate, 0600)
+ if err != nil {
+ logger().Fatalf("Unable to save IssuerCertificate for domain %s\n\t%s", certRes.Domain, err.Error())
+ }
+ }
+
if certRes.PrivateKey != nil {
// if we were given a CSR, we don't know the private key
err = ioutil.WriteFile(privOut, certRes.PrivateKey, 0600)
diff --git a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go b/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go
index 6742e4f56..9d281cd69 100644
--- a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go
+++ b/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go
@@ -72,7 +72,7 @@ func (c *DNSProvider) Present(domain, token, keyAuth string) error {
relative := toRelativeRecord(fqdn, acme.ToFqdn(zone))
rec := dns.RecordSet{
Name: &relative,
- Properties: &dns.RecordSetProperties{
+ RecordSetProperties: &dns.RecordSetProperties{
TTL: to.Int64Ptr(60),
TXTRecords: &[]dns.TxtRecord{dns.TxtRecord{Value: &[]string{value}}},
},
@@ -103,7 +103,7 @@ func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
relative := toRelativeRecord(fqdn, acme.ToFqdn(zone))
rsc := dns.NewRecordSetsClient(c.subscriptionId)
rsc.Authorizer, err = c.newServicePrincipalTokenFromCredentials(azure.PublicCloud.ResourceManagerEndpoint)
- _, err = rsc.Delete(c.resourceGroup, zone, relative, dns.TXT, "", "")
+ _, err = rsc.Delete(c.resourceGroup, zone, relative, dns.TXT, "")
if err != nil {
return err
}
diff --git a/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go
index 53804e270..bc2067579 100644
--- a/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go
+++ b/vendor/github.com/xenolf/lego/providers/dns/vultr/vultr.go
@@ -49,7 +49,7 @@ func (c *DNSProvider) Present(domain, token, keyAuth string) error {
name := c.extractRecordName(fqdn, zoneDomain)
- err = c.client.CreateDnsRecord(zoneDomain, name, "TXT", `"`+value+`"`, 0, ttl)
+ err = c.client.CreateDNSRecord(zoneDomain, name, "TXT", `"`+value+`"`, 0, ttl)
if err != nil {
return fmt.Errorf("Vultr API call failed: %v", err)
}
@@ -67,7 +67,7 @@ func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
}
for _, rec := range records {
- err := c.client.DeleteDnsRecord(zoneDomain, rec.RecordID)
+ err := c.client.DeleteDNSRecord(zoneDomain, rec.RecordID)
if err != nil {
return err
}
@@ -76,12 +76,12 @@ func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
}
func (c *DNSProvider) getHostedZone(domain string) (string, error) {
- domains, err := c.client.GetDnsDomains()
+ domains, err := c.client.GetDNSDomains()
if err != nil {
return "", fmt.Errorf("Vultr API call failed: %v", err)
}
- var hostedDomain vultr.DnsDomain
+ var hostedDomain vultr.DNSDomain
for _, d := range domains {
if strings.HasSuffix(domain, d.Domain) {
if len(d.Domain) > len(hostedDomain.Domain) {
@@ -96,14 +96,14 @@ func (c *DNSProvider) getHostedZone(domain string) (string, error) {
return hostedDomain.Domain, nil
}
-func (c *DNSProvider) findTxtRecords(domain, fqdn string) (string, []vultr.DnsRecord, error) {
+func (c *DNSProvider) findTxtRecords(domain, fqdn string) (string, []vultr.DNSRecord, error) {
zoneDomain, err := c.getHostedZone(domain)
if err != nil {
return "", nil, err
}
- var records []vultr.DnsRecord
- result, err := c.client.GetDnsRecords(zoneDomain)
+ var records []vultr.DNSRecord
+ result, err := c.client.GetDNSRecords(zoneDomain)
if err != nil {
return "", records, fmt.Errorf("Vultr API call has failed: %v", err)
}