author     Christopher Speller <crspeller@gmail.com>   2018-08-28 10:05:26 -0700
committer  GitHub <noreply@github.com>                 2018-08-28 10:05:26 -0700
commit     61e27beabc9804fdcf59ed9df2180802175a4f70 (patch)
tree       52c86f5cdbd4e13d05b8f9dddad1a01b88e26cab /vendor/github.com
parent     347ee1d205c95f5fd766e206cc65bfb9782a2623 (diff)
download   chat-61e27beabc9804fdcf59ed9df2180802175a4f70.tar.gz
           chat-61e27beabc9804fdcf59ed9df2180802175a4f70.tar.bz2
           chat-61e27beabc9804fdcf59ed9df2180802175a4f70.zip
Updating dependencies. (#9303)
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/avct/uasurfer/browser.go | 5
-rw-r--r--  vendor/github.com/avct/uasurfer/device.go | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/LICENSE | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/bypass.go | 187
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/bypasssafe.go | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/common.go | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/dump.go | 10
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/format.go | 4
-rw-r--r--  vendor/github.com/disintegration/imaging/effects.go | 36
-rw-r--r--  vendor/github.com/disintegration/imaging/helpers.go | 272
-rw-r--r--  vendor/github.com/disintegration/imaging/io.go | 463
-rw-r--r--  vendor/github.com/disintegration/imaging/tools.go | 34
-rw-r--r--  vendor/github.com/go-ini/ini/file.go | 37
-rw-r--r--  vendor/github.com/go-ini/ini/ini.go | 4
-rw-r--r--  vendor/github.com/go-redis/redis/CHANGELOG.md | 21
-rw-r--r--  vendor/github.com/go-redis/redis/README.md | 33
-rw-r--r--  vendor/github.com/go-redis/redis/cluster.go | 325
-rw-r--r--  vendor/github.com/go-redis/redis/command.go | 956
-rw-r--r--  vendor/github.com/go-redis/redis/commands.go | 296
-rw-r--r--  vendor/github.com/go-redis/redis/internal/error.go | 29
-rw-r--r--  vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go | 6
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/conn.go | 41
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/pool.go | 132
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/reader.go | 172
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/write_buffer.go | 101
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/writer.go | 159
-rw-r--r--  vendor/github.com/go-redis/redis/internal/util/safe.go | 4
-rw-r--r--  vendor/github.com/go-redis/redis/internal/util/unsafe.go | 10
-rw-r--r--  vendor/github.com/go-redis/redis/options.go | 29
-rw-r--r--  vendor/github.com/go-redis/redis/parser.go | 394
-rw-r--r--  vendor/github.com/go-redis/redis/pipeline.go | 1
-rw-r--r--  vendor/github.com/go-redis/redis/pubsub.go | 290
-rw-r--r--  vendor/github.com/go-redis/redis/redis.go | 91
-rw-r--r--  vendor/github.com/go-redis/redis/result.go | 2
-rw-r--r--  vendor/github.com/go-redis/redis/ring.go | 79
-rw-r--r--  vendor/github.com/go-redis/redis/sentinel.go | 82
-rw-r--r--  vendor/github.com/go-redis/redis/tx.go | 12
-rw-r--r--  vendor/github.com/go-redis/redis/universal.go | 102
-rw-r--r--  vendor/github.com/golang/protobuf/LICENSE | 3
-rw-r--r--  vendor/github.com/golang/protobuf/proto/encode.go | 18
-rw-r--r--  vendor/github.com/golang/protobuf/proto/lib.go | 62
-rw-r--r--  vendor/github.com/golang/protobuf/proto/properties.go | 16
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_marshal.go | 184
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_unmarshal.go | 142
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text.go | 4
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_parser.go | 6
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any.go | 10
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any/any.pb.go | 2
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go | 4
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go | 4
-rw-r--r--  vendor/github.com/gorilla/handlers/cors.go | 29
-rw-r--r--  vendor/github.com/gorilla/handlers/handlers.go | 225
-rw-r--r--  vendor/github.com/gorilla/handlers/logging.go | 252
-rw-r--r--  vendor/github.com/gorilla/websocket/.travis.yml | 3
-rw-r--r--  vendor/github.com/gorilla/websocket/client.go | 13
-rw-r--r--  vendor/github.com/gorilla/websocket/conn.go | 100
-rw-r--r--  vendor/github.com/gorilla/websocket/conn_read.go | 18
-rw-r--r--  vendor/github.com/gorilla/websocket/conn_read_legacy.go | 21
-rw-r--r--  vendor/github.com/gorilla/websocket/server.go | 79
-rw-r--r--  vendor/github.com/hashicorp/errwrap/go.mod | 1
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/go.mod | 3
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/go.sum | 4
-rw-r--r--  vendor/github.com/hashicorp/memberlist/Makefile | 2
-rw-r--r--  vendor/github.com/hashicorp/memberlist/memberlist.go | 32
-rw-r--r--  vendor/github.com/hashicorp/memberlist/net.go | 92
-rw-r--r--  vendor/github.com/lib/pq/go.mod | 1
-rw-r--r--  vendor/github.com/lib/pq/notify.go | 3
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/lexer.go | 13
-rw-r--r--  vendor/github.com/mattn/go-runewidth/runewidth.go | 14
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/go.mod | 1
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/homedir.go | 10
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/go.mod | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_parse.go | 2
-rw-r--r--  vendor/github.com/prometheus/procfs/.travis.yml | 3
-rw-r--r--  vendor/github.com/rs/cors/README.md | 8
-rw-r--r--  vendor/github.com/rs/cors/cors.go | 6
-rw-r--r--  vendor/github.com/sirupsen/logrus/.travis.yml | 6
-rw-r--r--  vendor/github.com/sirupsen/logrus/README.md | 60
-rw-r--r--  vendor/github.com/sirupsen/logrus/entry.go | 26
-rw-r--r--  vendor/github.com/sirupsen/logrus/exported.go | 20
-rw-r--r--  vendor/github.com/sirupsen/logrus/formatter.go | 20
-rw-r--r--  vendor/github.com/sirupsen/logrus/json_formatter.go | 12
-rw-r--r--  vendor/github.com/sirupsen/logrus/logger.go | 16
-rw-r--r--  vendor/github.com/sirupsen/logrus/text_formatter.go | 31
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/README.md | 2
-rw-r--r--  vendor/github.com/spf13/pflag/bytes.go | 104
-rw-r--r--  vendor/github.com/spf13/pflag/flag.go | 3
87 files changed, 3987 insertions, 2131 deletions
diff --git a/vendor/github.com/avct/uasurfer/browser.go b/vendor/github.com/avct/uasurfer/browser.go
index e156818ab..4bfe638ca 100644
--- a/vendor/github.com/avct/uasurfer/browser.go
+++ b/vendor/github.com/avct/uasurfer/browser.go
@@ -28,6 +28,9 @@ func (u *UserAgent) evalBrowserName(ua string) bool {
if strings.Contains(ua, "applewebkit") {
switch {
+ case strings.Contains(ua, "googlebot"):
+ u.Browser.Name = BrowserGoogleBot
+
case strings.Contains(ua, "opr/") || strings.Contains(ua, "opios/"):
u.Browser.Name = BrowserOpera
@@ -189,4 +192,4 @@ func (u *UserAgent) evalBrowserVersion(ua string) {
case BrowserSpotify:
_ = u.Browser.Version.findVersionNumber(ua, "spotify/")
}
-}
+}
\ No newline at end of file
diff --git a/vendor/github.com/avct/uasurfer/device.go b/vendor/github.com/avct/uasurfer/device.go
index 70c00b112..52a65ed86 100644
--- a/vendor/github.com/avct/uasurfer/device.go
+++ b/vendor/github.com/avct/uasurfer/device.go
@@ -21,7 +21,7 @@ func (u *UserAgent) evalDevice(ua string) {
u.DeviceType = DevicePhone
// long list of smarttv and tv dongle identifiers
- case strings.Contains(ua, "tv") || strings.Contains(ua, "crkey") || strings.Contains(ua, "googletv") || strings.Contains(ua, "aftb") || strings.Contains(ua, "adt-") || strings.Contains(ua, "roku") || strings.Contains(ua, "viera") || strings.Contains(ua, "aquos") || strings.Contains(ua, "dtv") || strings.Contains(ua, "appletv") || strings.Contains(ua, "smarttv") || strings.Contains(ua, "tuner") || strings.Contains(ua, "smart-tv") || strings.Contains(ua, "hbbtv") || strings.Contains(ua, "netcast") || strings.Contains(ua, "vizio"):
+ case strings.Contains(ua, "tv") || strings.Contains(ua, "crkey") || strings.Contains(ua, "googletv") || strings.Contains(ua, "aftb") || strings.Contains(ua, "aftt") || strings.Contains(ua, "aftm") || strings.Contains(ua, "adt-") || strings.Contains(ua, "roku") || strings.Contains(ua, "viera") || strings.Contains(ua, "aquos") || strings.Contains(ua, "dtv") || strings.Contains(ua, "appletv") || strings.Contains(ua, "smarttv") || strings.Contains(ua, "tuner") || strings.Contains(ua, "smart-tv") || strings.Contains(ua, "hbbtv") || strings.Contains(ua, "netcast") || strings.Contains(ua, "vizio"):
u.DeviceType = DeviceTV
case u.OS.Name == OSAndroid:
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
index c83641619..bc52e96f2 100644
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -2,7 +2,7 @@ ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
-Permission to use, copy, modify, and distribute this software for any
+Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
index 8a4a6589a..792994785 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -16,7 +16,9 @@
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
@@ -34,80 +36,49 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil))
)
+type flag uintptr
+
var (
- // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
- // internal reflect.Value fields. These values are valid before golang
- // commit ecccf07e7f9d which changed the format. The are also valid
- // after commit 82f48826c6c7 which changed the format again to mirror
- // the original format. Code in the init function updates these offsets
- // as necessary.
- offsetPtr = uintptr(ptrSize)
- offsetScalar = uintptr(0)
- offsetFlag = uintptr(ptrSize * 2)
-
- // flagKindWidth and flagKindShift indicate various bits that the
- // reflect package uses internally to track kind information.
- //
- // flagRO indicates whether or not the value field of a reflect.Value is
- // read-only.
- //
- // flagIndir indicates whether the value field of a reflect.Value is
- // the actual data or a pointer to the data.
- //
- // These values are valid before golang commit 90a7c3c86944 which
- // changed their positions. Code in the init function updates these
- // flags as necessary.
- flagKindWidth = uintptr(5)
- flagKindShift = uintptr(flagKindWidth - 1)
- flagRO = uintptr(1 << 0)
- flagIndir = uintptr(1 << 1)
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
)
-func init() {
- // Older versions of reflect.Value stored small integers directly in the
- // ptr field (which is named val in the older versions). Versions
- // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
- // scalar for this purpose which unfortunately came before the flag
- // field, so the offset of the flag field is different for those
- // versions.
- //
- // This code constructs a new reflect.Value from a known small integer
- // and checks if the size of the reflect.Value struct indicates it has
- // the scalar field. When it does, the offsets are updated accordingly.
- vv := reflect.ValueOf(0xf00)
- if unsafe.Sizeof(vv) == (ptrSize * 4) {
- offsetScalar = ptrSize * 2
- offsetFlag = ptrSize * 3
- }
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
- // Commit 90a7c3c86944 changed the flag positions such that the low
- // order bits are the kind. This code extracts the kind from the flags
- // field and ensures it's the correct type. When it's not, the flag
- // order has been changed to the newer format, so the flags are updated
- // accordingly.
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
- upfv := *(*uintptr)(upf)
- flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
- if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
- flagKindShift = 0
- flagRO = 1 << 5
- flagIndir = 1 << 6
-
- // Commit adf9b30e5594 modified the flags to separate the
- // flagRO flag into two bits which specifies whether or not the
- // field is embedded. This causes flagIndir to move over a bit
- // and means that flagRO is the combination of either of the
- // original flagRO bit and the new bit.
- //
- // This code detects the change by extracting what used to be
- // the indirect bit to ensure it's set. When it's not, the flag
- // order has been changed to the newer format, so the flags are
- // updated accordingly.
- if upfv&flagIndir == 0 {
- flagRO = 3 << 5
- flagIndir = 1 << 7
- }
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
}
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@@ -119,34 +90,56 @@ func init() {
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
- indirects := 1
- vt := v.Type()
- upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
- rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
- if rvf&flagIndir != 0 {
- vt = reflect.PtrTo(v.Type())
- indirects++
- } else if offsetScalar != 0 {
- // The value is in the scalar field when it's not one of the
- // reference types.
- switch vt.Kind() {
- case reflect.Uintptr:
- case reflect.Chan:
- case reflect.Func:
- case reflect.Map:
- case reflect.Ptr:
- case reflect.UnsafePointer:
- default:
- upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
- offsetScalar)
- }
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
}
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
- pv := reflect.NewAt(vt, upv)
- rv = pv
- for i := 0; i < indirects; i++ {
- rv = rv.Elem()
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
}
- return rv
+ panic("reflect.Value read-only flag has changed semantics")
}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
index 1fe3cf3d5..205c28d68 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -16,7 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe
+// +build js appengine safe disableunsafe !go1.4
package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
index 7c519ff47..1be8ce945 100644
--- a/vendor/github.com/davecgh/go-spew/spew/common.go
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
w.Write(closeParenBytes)
}
-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
index df1d582a7..f78d89fc1 100644
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -35,16 +35,16 @@ var (
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
- cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
- cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
- cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
- case nilFound == true:
+ case nilFound:
d.w.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
d.w.Write(circularBytes)
default:
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
index c49875bac..b04edb7d7 100644
--- a/vendor/github.com/davecgh/go-spew/spew/format.go
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
// Display dereferenced value.
switch {
- case nilFound == true:
+ case nilFound:
f.fs.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
f.fs.Write(circularShortBytes)
default:
diff --git a/vendor/github.com/disintegration/imaging/effects.go b/vendor/github.com/disintegration/imaging/effects.go
index b16781f12..149cfeb0f 100644
--- a/vendor/github.com/disintegration/imaging/effects.go
+++ b/vendor/github.com/disintegration/imaging/effects.go
@@ -38,9 +38,13 @@ func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
parallel(0, src.h, func(ys <-chan int) {
scanLine := make([]uint8, src.w*4)
+ scanLineF := make([]float64, len(scanLine))
for y := range ys {
src.scan(0, y, src.w, y+1, scanLine)
- for x := 0; x < src.w; x++ {
+ for i, v := range scanLine {
+ scanLineF[i] = float64(v)
+ }
+ for x, idx := 0, 0; x < src.w; x, idx = x+1, idx+4 {
min := x - radius
if min < 0 {
min = 0
@@ -55,10 +59,10 @@ func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
i := ix * 4
weight := kernel[absint(x-ix)]
wsum += weight
- wa := float64(scanLine[i+3]) * weight
- r += float64(scanLine[i+0]) * wa
- g += float64(scanLine[i+1]) * wa
- b += float64(scanLine[i+2]) * wa
+ wa := scanLineF[i+3] * weight
+ r += scanLineF[i+0] * wa
+ g += scanLineF[i+1] * wa
+ b += scanLineF[i+2] * wa
a += wa
}
if a != 0 {
@@ -67,12 +71,12 @@ func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
b /= a
}
- j := y*dst.Stride + x*4
- dst.Pix[j+0] = clamp(r)
- dst.Pix[j+1] = clamp(g)
- dst.Pix[j+2] = clamp(b)
- dst.Pix[j+3] = clamp(a / wsum)
+ scanLine[idx+0] = clamp(r)
+ scanLine[idx+1] = clamp(g)
+ scanLine[idx+2] = clamp(b)
+ scanLine[idx+3] = clamp(a / wsum)
}
+ copy(dst.Pix[y*dst.Stride:], scanLine)
}
})
@@ -86,8 +90,12 @@ func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
parallel(0, src.w, func(xs <-chan int) {
scanLine := make([]uint8, src.h*4)
+ scanLineF := make([]float64, len(scanLine))
for x := range xs {
src.scan(x, 0, x+1, src.h, scanLine)
+ for i, v := range scanLine {
+ scanLineF[i] = float64(v)
+ }
for y := 0; y < src.h; y++ {
min := y - radius
if min < 0 {
@@ -103,10 +111,10 @@ func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
i := iy * 4
weight := kernel[absint(y-iy)]
wsum += weight
- wa := float64(scanLine[i+3]) * weight
- r += float64(scanLine[i+0]) * wa
- g += float64(scanLine[i+1]) * wa
- b += float64(scanLine[i+2]) * wa
+ wa := scanLineF[i+3] * weight
+ r += scanLineF[i+0] * wa
+ g += scanLineF[i+1] * wa
+ b += scanLineF[i+2] * wa
a += wa
}
if a != 0 {
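A tiny sketch (not part of this commit) of the refactor above: the scanline is converted from uint8 to float64 once per row, so the inner convolution loop no longer performs a conversion for every kernel tap.

```go
package main

import "fmt"

func main() {
	scanLine := []uint8{10, 20, 30, 40}
	kernel := []float64{0.5, 0.25}

	// One upfront pass replaces repeated float64(scanLine[i]) calls.
	scanLineF := make([]float64, len(scanLine))
	for i, v := range scanLine {
		scanLineF[i] = float64(v)
	}

	// The inner loop now works purely on float64 values.
	var sum float64
	for i, w := range kernel {
		sum += scanLineF[i] * w
	}
	fmt.Println(sum) // 10*0.5 + 20*0.25 = 10
}
```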
diff --git a/vendor/github.com/disintegration/imaging/helpers.go b/vendor/github.com/disintegration/imaging/helpers.go
deleted file mode 100644
index dcb4d7ebb..000000000
--- a/vendor/github.com/disintegration/imaging/helpers.go
+++ /dev/null
@@ -1,272 +0,0 @@
-package imaging
-
-import (
- "bytes"
- "errors"
- "image"
- "image/color"
- "image/draw"
- "image/gif"
- "image/jpeg"
- "image/png"
- "io"
- "os"
- "path/filepath"
- "strings"
-
- "golang.org/x/image/bmp"
- "golang.org/x/image/tiff"
-)
-
-// Format is an image file format.
-type Format int
-
-// Image file formats.
-const (
- JPEG Format = iota
- PNG
- GIF
- TIFF
- BMP
-)
-
-func (f Format) String() string {
- switch f {
- case JPEG:
- return "JPEG"
- case PNG:
- return "PNG"
- case GIF:
- return "GIF"
- case TIFF:
- return "TIFF"
- case BMP:
- return "BMP"
- default:
- return "Unsupported"
- }
-}
-
-var formatFromExt = map[string]Format{
- ".jpg": JPEG,
- ".jpeg": JPEG,
- ".png": PNG,
- ".tif": TIFF,
- ".tiff": TIFF,
- ".bmp": BMP,
- ".gif": GIF,
-}
-
-// FormatFromFilename parses image format from filename extension:
-// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
-func FormatFromFilename(filename string) (Format, error) {
- ext := strings.ToLower(filepath.Ext(filename))
- if f, ok := formatFromExt[ext]; ok {
- return f, nil
- }
- return -1, ErrUnsupportedFormat
-}
-
-var (
- // ErrUnsupportedFormat means the given image format (or file extension) is unsupported.
- ErrUnsupportedFormat = errors.New("imaging: unsupported image format")
-)
-
-type fileSystem interface {
- Create(string) (io.WriteCloser, error)
- Open(string) (io.ReadCloser, error)
-}
-
-type localFS struct{}
-
-func (localFS) Create(name string) (io.WriteCloser, error) { return os.Create(name) }
-func (localFS) Open(name string) (io.ReadCloser, error) { return os.Open(name) }
-
-var fs fileSystem = localFS{}
-
-// Decode reads an image from r.
-func Decode(r io.Reader) (image.Image, error) {
- img, _, err := image.Decode(r)
- return img, err
-}
-
-// Open loads an image from file
-func Open(filename string) (image.Image, error) {
- file, err := fs.Open(filename)
- if err != nil {
- return nil, err
- }
- defer file.Close()
- return Decode(file)
-}
-
-type encodeConfig struct {
- jpegQuality int
- gifNumColors int
- gifQuantizer draw.Quantizer
- gifDrawer draw.Drawer
- pngCompressionLevel png.CompressionLevel
-}
-
-var defaultEncodeConfig = encodeConfig{
- jpegQuality: 95,
- gifNumColors: 256,
- gifQuantizer: nil,
- gifDrawer: nil,
- pngCompressionLevel: png.DefaultCompression,
-}
-
-// EncodeOption sets an optional parameter for the Encode and Save functions.
-type EncodeOption func(*encodeConfig)
-
-// JPEGQuality returns an EncodeOption that sets the output JPEG quality.
-// Quality ranges from 1 to 100 inclusive, higher is better. Default is 95.
-func JPEGQuality(quality int) EncodeOption {
- return func(c *encodeConfig) {
- c.jpegQuality = quality
- }
-}
-
-// GIFNumColors returns an EncodeOption that sets the maximum number of colors
-// used in the GIF-encoded image. It ranges from 1 to 256. Default is 256.
-func GIFNumColors(numColors int) EncodeOption {
- return func(c *encodeConfig) {
- c.gifNumColors = numColors
- }
-}
-
-// GIFQuantizer returns an EncodeOption that sets the quantizer that is used to produce
-// a palette of the GIF-encoded image.
-func GIFQuantizer(quantizer draw.Quantizer) EncodeOption {
- return func(c *encodeConfig) {
- c.gifQuantizer = quantizer
- }
-}
-
-// GIFDrawer returns an EncodeOption that sets the drawer that is used to convert
-// the source image to the desired palette of the GIF-encoded image.
-func GIFDrawer(drawer draw.Drawer) EncodeOption {
- return func(c *encodeConfig) {
- c.gifDrawer = drawer
- }
-}
-
-// PNGCompressionLevel returns an EncodeOption that sets the compression level
-// of the PNG-encoded image. Default is png.DefaultCompression.
-func PNGCompressionLevel(level png.CompressionLevel) EncodeOption {
- return func(c *encodeConfig) {
- c.pngCompressionLevel = level
- }
-}
-
-// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP).
-func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) error {
- cfg := defaultEncodeConfig
- for _, option := range opts {
- option(&cfg)
- }
-
- var err error
- switch format {
- case JPEG:
- var rgba *image.RGBA
- if nrgba, ok := img.(*image.NRGBA); ok {
- if nrgba.Opaque() {
- rgba = &image.RGBA{
- Pix: nrgba.Pix,
- Stride: nrgba.Stride,
- Rect: nrgba.Rect,
- }
- }
- }
- if rgba != nil {
- err = jpeg.Encode(w, rgba, &jpeg.Options{Quality: cfg.jpegQuality})
- } else {
- err = jpeg.Encode(w, img, &jpeg.Options{Quality: cfg.jpegQuality})
- }
-
- case PNG:
- enc := png.Encoder{CompressionLevel: cfg.pngCompressionLevel}
- err = enc.Encode(w, img)
-
- case GIF:
- err = gif.Encode(w, img, &gif.Options{
- NumColors: cfg.gifNumColors,
- Quantizer: cfg.gifQuantizer,
- Drawer: cfg.gifDrawer,
- })
-
- case TIFF:
- err = tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true})
-
- case BMP:
- err = bmp.Encode(w, img)
-
- default:
- err = ErrUnsupportedFormat
- }
- return err
-}
-
-// Save saves the image to file with the specified filename.
-// The format is determined from the filename extension: "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
-//
-// Examples:
-//
-// // Save the image as PNG.
-// err := imaging.Save(img, "out.png")
-//
-// // Save the image as JPEG with optional quality parameter set to 80.
-// err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
-//
-func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
- f, err := FormatFromFilename(filename)
- if err != nil {
- return err
- }
- file, err := fs.Create(filename)
- if err != nil {
- return err
- }
-
- defer func() {
- cerr := file.Close()
- if err == nil {
- err = cerr
- }
- }()
-
- return Encode(file, img, f, opts...)
-}
-
-// New creates a new image with the specified width and height, and fills it with the specified color.
-func New(width, height int, fillColor color.Color) *image.NRGBA {
- if width <= 0 || height <= 0 {
- return &image.NRGBA{}
- }
-
- c := color.NRGBAModel.Convert(fillColor).(color.NRGBA)
- if (c == color.NRGBA{0, 0, 0, 0}) {
- return image.NewNRGBA(image.Rect(0, 0, width, height))
- }
-
- return &image.NRGBA{
- Pix: bytes.Repeat([]byte{c.R, c.G, c.B, c.A}, width*height),
- Stride: 4 * width,
- Rect: image.Rect(0, 0, width, height),
- }
-}
-
-// Clone returns a copy of the given image.
-func Clone(img image.Image) *image.NRGBA {
- src := newScanner(img)
- dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
- size := src.w * 4
- parallel(0, src.h, func(ys <-chan int) {
- for y := range ys {
- i := y * dst.Stride
- src.scan(0, y, src.w, y+1, dst.Pix[i:i+size])
- }
- })
- return dst
-}
diff --git a/vendor/github.com/disintegration/imaging/io.go b/vendor/github.com/disintegration/imaging/io.go
new file mode 100644
index 000000000..557bf2f3d
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/io.go
@@ -0,0 +1,463 @@
+package imaging
+
+import (
+ "encoding/binary"
+ "errors"
+ "image"
+ "image/draw"
+ "image/gif"
+ "image/jpeg"
+ "image/png"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/image/bmp"
+ "golang.org/x/image/tiff"
+)
+
+// Format is an image file format.
+type Format int
+
+// Image file formats.
+const (
+ JPEG Format = iota
+ PNG
+ GIF
+ TIFF
+ BMP
+)
+
+func (f Format) String() string {
+ switch f {
+ case JPEG:
+ return "JPEG"
+ case PNG:
+ return "PNG"
+ case GIF:
+ return "GIF"
+ case TIFF:
+ return "TIFF"
+ case BMP:
+ return "BMP"
+ default:
+ return "Unsupported"
+ }
+}
+
+var formatFromExt = map[string]Format{
+ "jpg": JPEG,
+ "jpeg": JPEG,
+ "png": PNG,
+ "tif": TIFF,
+ "tiff": TIFF,
+ "bmp": BMP,
+ "gif": GIF,
+}
+
+// FormatFromExtension parses image format from extension:
+// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
+func FormatFromExtension(ext string) (Format, error) {
+ if f, ok := formatFromExt[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok {
+ return f, nil
+ }
+ return -1, ErrUnsupportedFormat
+}
+
+// FormatFromFilename parses image format from filename extension:
+// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
+func FormatFromFilename(filename string) (Format, error) {
+ ext := filepath.Ext(filename)
+ return FormatFromExtension(ext)
+}
+
+var (
+ // ErrUnsupportedFormat means the given image format (or file extension) is unsupported.
+ ErrUnsupportedFormat = errors.New("imaging: unsupported image format")
+)
+
+type fileSystem interface {
+ Create(string) (io.WriteCloser, error)
+ Open(string) (io.ReadCloser, error)
+}
+
+type localFS struct{}
+
+func (localFS) Create(name string) (io.WriteCloser, error) { return os.Create(name) }
+func (localFS) Open(name string) (io.ReadCloser, error) { return os.Open(name) }
+
+var fs fileSystem = localFS{}
+
+type decodeConfig struct {
+ autoOrientation bool
+}
+
+var defaultDecodeConfig = decodeConfig{
+ autoOrientation: false,
+}
+
+// DecodeOption sets an optional parameter for the Decode and Open functions.
+type DecodeOption func(*decodeConfig)
+
+// AutoOrientation returns a DecodeOption that sets the auto-orientation mode.
+// If auto-orientation is enabled, the image will be transformed after decoding
+// according to the EXIF orientation tag (if present). By default it's disabled.
+func AutoOrientation(enabled bool) DecodeOption {
+ return func(c *decodeConfig) {
+ c.autoOrientation = enabled
+ }
+}
+
+// Decode reads an image from r.
+func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
+ cfg := defaultDecodeConfig
+ for _, option := range opts {
+ option(&cfg)
+ }
+
+ if !cfg.autoOrientation {
+ img, _, err := image.Decode(r)
+ return img, err
+ }
+
+ var orient orientation
+ pr, pw := io.Pipe()
+ r = io.TeeReader(r, pw)
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ orient = readOrientation(pr)
+ io.Copy(ioutil.Discard, pr)
+ }()
+
+ img, _, err := image.Decode(r)
+ pw.Close()
+ <-done
+ if err != nil {
+ return nil, err
+ }
+
+ return fixOrientation(img, orient), nil
+}
+
+// Open loads an image from file.
+//
+// Examples:
+//
+// // Load an image from file.
+// img, err := imaging.Open("test.jpg")
+//
+// // Load an image and transform it depending on the EXIF orientation tag (if present).
+// img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
+//
+func Open(filename string, opts ...DecodeOption) (image.Image, error) {
+ file, err := fs.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return Decode(file, opts...)
+}
+
+type encodeConfig struct {
+ jpegQuality int
+ gifNumColors int
+ gifQuantizer draw.Quantizer
+ gifDrawer draw.Drawer
+ pngCompressionLevel png.CompressionLevel
+}
+
+var defaultEncodeConfig = encodeConfig{
+ jpegQuality: 95,
+ gifNumColors: 256,
+ gifQuantizer: nil,
+ gifDrawer: nil,
+ pngCompressionLevel: png.DefaultCompression,
+}
+
+// EncodeOption sets an optional parameter for the Encode and Save functions.
+type EncodeOption func(*encodeConfig)
+
+// JPEGQuality returns an EncodeOption that sets the output JPEG quality.
+// Quality ranges from 1 to 100 inclusive, higher is better. Default is 95.
+func JPEGQuality(quality int) EncodeOption {
+ return func(c *encodeConfig) {
+ c.jpegQuality = quality
+ }
+}
+
+// GIFNumColors returns an EncodeOption that sets the maximum number of colors
+// used in the GIF-encoded image. It ranges from 1 to 256. Default is 256.
+func GIFNumColors(numColors int) EncodeOption {
+ return func(c *encodeConfig) {
+ c.gifNumColors = numColors
+ }
+}
+
+// GIFQuantizer returns an EncodeOption that sets the quantizer that is used to produce
+// a palette of the GIF-encoded image.
+func GIFQuantizer(quantizer draw.Quantizer) EncodeOption {
+ return func(c *encodeConfig) {
+ c.gifQuantizer = quantizer
+ }
+}
+
+// GIFDrawer returns an EncodeOption that sets the drawer that is used to convert
+// the source image to the desired palette of the GIF-encoded image.
+func GIFDrawer(drawer draw.Drawer) EncodeOption {
+ return func(c *encodeConfig) {
+ c.gifDrawer = drawer
+ }
+}
+
+// PNGCompressionLevel returns an EncodeOption that sets the compression level
+// of the PNG-encoded image. Default is png.DefaultCompression.
+func PNGCompressionLevel(level png.CompressionLevel) EncodeOption {
+ return func(c *encodeConfig) {
+ c.pngCompressionLevel = level
+ }
+}
+
+// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP).
+func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) error {
+ cfg := defaultEncodeConfig
+ for _, option := range opts {
+ option(&cfg)
+ }
+
+ var err error
+ switch format {
+ case JPEG:
+ var rgba *image.RGBA
+ if nrgba, ok := img.(*image.NRGBA); ok {
+ if nrgba.Opaque() {
+ rgba = &image.RGBA{
+ Pix: nrgba.Pix,
+ Stride: nrgba.Stride,
+ Rect: nrgba.Rect,
+ }
+ }
+ }
+ if rgba != nil {
+ err = jpeg.Encode(w, rgba, &jpeg.Options{Quality: cfg.jpegQuality})
+ } else {
+ err = jpeg.Encode(w, img, &jpeg.Options{Quality: cfg.jpegQuality})
+ }
+
+ case PNG:
+ enc := png.Encoder{CompressionLevel: cfg.pngCompressionLevel}
+ err = enc.Encode(w, img)
+
+ case GIF:
+ err = gif.Encode(w, img, &gif.Options{
+ NumColors: cfg.gifNumColors,
+ Quantizer: cfg.gifQuantizer,
+ Drawer: cfg.gifDrawer,
+ })
+
+ case TIFF:
+ err = tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true})
+
+ case BMP:
+ err = bmp.Encode(w, img)
+
+ default:
+ err = ErrUnsupportedFormat
+ }
+ return err
+}
+
+// Save saves the image to file with the specified filename.
+// The format is determined from the filename extension:
+// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
+//
+// Examples:
+//
+// // Save the image as PNG.
+// err := imaging.Save(img, "out.png")
+//
+// // Save the image as JPEG with optional quality parameter set to 80.
+// err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
+//
+func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
+ f, err := FormatFromFilename(filename)
+ if err != nil {
+ return err
+ }
+ file, err := fs.Create(filename)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ cerr := file.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ return Encode(file, img, f, opts...)
+}
+
+// orientation is an EXIF flag that specifies the transformation
+// that should be applied to image to display it correctly.
+type orientation int
+
+const (
+ orientationUnspecified = 0
+ orientationNormal = 1
+ orientationFlipH = 2
+ orientationRotate180 = 3
+ orientationFlipV = 4
+ orientationTranspose = 5
+ orientationRotate270 = 6
+ orientationTransverse = 7
+ orientationRotate90 = 8
+)
+
+// readOrientation tries to read the orientation EXIF flag from image data in r.
+// If the EXIF data block is not found or the orientation flag is not found
+// or any other error occures while reading the data, it returns the
+// orientationUnspecified (0) value.
+func readOrientation(r io.Reader) orientation {
+ const (
+ markerSOI = 0xffd8
+ markerAPP1 = 0xffe1
+ exifHeader = 0x45786966
+ byteOrderBE = 0x4d4d
+ byteOrderLE = 0x4949
+ orientationTag = 0x0112
+ )
+
+ // Check if JPEG SOI marker is present.
+ var soi uint16
+ if err := binary.Read(r, binary.BigEndian, &soi); err != nil {
+ return orientationUnspecified
+ }
+ if soi != markerSOI {
+ return orientationUnspecified // Missing JPEG SOI marker.
+ }
+
+ // Find JPEG APP1 marker.
+ for {
+ var marker, size uint16
+ if err := binary.Read(r, binary.BigEndian, &marker); err != nil {
+ return orientationUnspecified
+ }
+ if err := binary.Read(r, binary.BigEndian, &size); err != nil {
+ return orientationUnspecified
+ }
+ if marker>>8 != 0xff {
+ return orientationUnspecified // Invalid JPEG marker.
+ }
+ if marker == markerAPP1 {
+ break
+ }
+ if size < 2 {
+ return orientationUnspecified // Invalid block size.
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, int64(size-2)); err != nil {
+ return orientationUnspecified
+ }
+ }
+
+ // Check if EXIF header is present.
+ var header uint32
+ if err := binary.Read(r, binary.BigEndian, &header); err != nil {
+ return orientationUnspecified
+ }
+ if header != exifHeader {
+ return orientationUnspecified
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
+ return orientationUnspecified
+ }
+
+ // Read byte order information.
+ var (
+ byteOrderTag uint16
+ byteOrder binary.ByteOrder
+ )
+ if err := binary.Read(r, binary.BigEndian, &byteOrderTag); err != nil {
+ return orientationUnspecified
+ }
+ switch byteOrderTag {
+ case byteOrderBE:
+ byteOrder = binary.BigEndian
+ case byteOrderLE:
+ byteOrder = binary.LittleEndian
+ default:
+ return orientationUnspecified // Invalid byte order flag.
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
+ return orientationUnspecified
+ }
+
+ // Skip the EXIF offset.
+ var offset uint32
+ if err := binary.Read(r, byteOrder, &offset); err != nil {
+ return orientationUnspecified
+ }
+ if offset < 8 {
+ return orientationUnspecified // Invalid offset value.
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, int64(offset-8)); err != nil {
+ return orientationUnspecified
+ }
+
+ // Read the number of tags.
+ var numTags uint16
+ if err := binary.Read(r, byteOrder, &numTags); err != nil {
+ return orientationUnspecified
+ }
+
+ // Find the orientation tag.
+ for i := 0; i < int(numTags); i++ {
+ var tag uint16
+ if err := binary.Read(r, byteOrder, &tag); err != nil {
+ return orientationUnspecified
+ }
+ if tag != orientationTag {
+ if _, err := io.CopyN(ioutil.Discard, r, 10); err != nil {
+ return orientationUnspecified
+ }
+ continue
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, 6); err != nil {
+ return orientationUnspecified
+ }
+ var val uint16
+ if err := binary.Read(r, byteOrder, &val); err != nil {
+ return orientationUnspecified
+ }
+ if val < 1 || val > 8 {
+ return orientationUnspecified // Invalid tag value.
+ }
+ return orientation(val)
+ }
+ return orientationUnspecified // Missing orientation tag.
+}
+
+// fixOrientation applies a transform to img corresponding to the given orientation flag.
+func fixOrientation(img image.Image, o orientation) image.Image {
+ switch o {
+ case orientationNormal:
+ case orientationFlipH:
+ img = FlipH(img)
+ case orientationFlipV:
+ img = FlipV(img)
+ case orientationRotate90:
+ img = Rotate90(img)
+ case orientationRotate180:
+ img = Rotate180(img)
+ case orientationRotate270:
+ img = Rotate270(img)
+ case orientationTranspose:
+ img = Transpose(img)
+ case orientationTransverse:
+ img = Transverse(img)
+ }
+ return img
+}
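A usage sketch (not part of this commit) for the new AutoOrientation decode option defined above; the file names are hypothetical.

```go
package main

import (
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	// With AutoOrientation enabled, Open applies the transform implied by the
	// EXIF orientation tag (readOrientation + fixOrientation above).
	img, err := imaging.Open("photo.jpg", imaging.AutoOrientation(true))
	if err != nil {
		log.Fatal(err)
	}

	// Save picks the encoder from the extension; the quality option applies to JPEG.
	if err := imaging.Save(img, "photo-oriented.jpg", imaging.JPEGQuality(80)); err != nil {
		log.Fatal(err)
	}
}
```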
diff --git a/vendor/github.com/disintegration/imaging/tools.go b/vendor/github.com/disintegration/imaging/tools.go
index fae1fa153..788794619 100644
--- a/vendor/github.com/disintegration/imaging/tools.go
+++ b/vendor/github.com/disintegration/imaging/tools.go
@@ -1,10 +1,44 @@
package imaging
import (
+ "bytes"
"image"
+ "image/color"
"math"
)
+// New creates a new image with the specified width and height, and fills it with the specified color.
+func New(width, height int, fillColor color.Color) *image.NRGBA {
+ if width <= 0 || height <= 0 {
+ return &image.NRGBA{}
+ }
+
+ c := color.NRGBAModel.Convert(fillColor).(color.NRGBA)
+ if (c == color.NRGBA{0, 0, 0, 0}) {
+ return image.NewNRGBA(image.Rect(0, 0, width, height))
+ }
+
+ return &image.NRGBA{
+ Pix: bytes.Repeat([]byte{c.R, c.G, c.B, c.A}, width*height),
+ Stride: 4 * width,
+ Rect: image.Rect(0, 0, width, height),
+ }
+}
+
+// Clone returns a copy of the given image.
+func Clone(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
+ size := src.w * 4
+ parallel(0, src.h, func(ys <-chan int) {
+ for y := range ys {
+ i := y * dst.Stride
+ src.scan(0, y, src.w, y+1, dst.Pix[i:i+size])
+ }
+ })
+ return dst
+}
+
// Anchor is the anchor point for image alignment.
type Anchor int
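A short sketch (not part of this commit) of the New and Clone helpers that were moved into tools.go above.

```go
package main

import (
	"fmt"
	"image/color"

	"github.com/disintegration/imaging"
)

func main() {
	// New allocates a 100x50 canvas pre-filled with opaque red.
	canvas := imaging.New(100, 50, color.NRGBA{R: 255, A: 255})

	// Clone copies any image.Image into a fresh *image.NRGBA.
	dup := imaging.Clone(canvas)
	fmt.Println(dup.Bounds())
}
```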
diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go
index d7982c323..1a3186b9f 100644
--- a/vendor/github.com/go-ini/ini/file.go
+++ b/vendor/github.com/go-ini/ini/file.go
@@ -237,13 +237,18 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
for i, sname := range f.sectionList {
sec := f.Section(sname)
if len(sec.Comment) > 0 {
- if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
- sec.Comment = "; " + sec.Comment
- } else {
- sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:])
- }
- if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil {
- return nil, err
+ // Support multiline comments
+ lines := strings.Split(sec.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + lines[i]
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
}
}
@@ -300,17 +305,19 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
if len(indent) > 0 && sname != DEFAULT_SECTION {
buf.WriteString(indent)
}
- if key.Comment[0] != '#' && key.Comment[0] != ';' {
- key.Comment = "; " + key.Comment
- } else {
- key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:])
- }
// Support multiline comments
- key.Comment = strings.Replace(key.Comment, "\n", "\n; ", -1)
+ lines := strings.Split(key.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + lines[i]
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
- if _, err := buf.WriteString(key.Comment + LineBreak); err != nil {
- return nil, err
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
}
}
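A sketch (not part of this commit) exercising the multiline-comment handling added above, assuming Unix line breaks: each comment line is prefixed with "; " when the file is written out.

```go
package main

import (
	"os"

	ini "github.com/go-ini/ini"
)

func main() {
	f := ini.Empty()

	sec, _ := f.NewSection("server")
	sec.Comment = "Primary listener.\nEdit with care." // two comment lines
	_, _ = sec.NewKey("port", "8065")

	// Each comment line is emitted as "; ..." on its own line.
	_, _ = f.WriteTo(os.Stdout)
}
```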
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
index 595f6002f..cb55997a3 100644
--- a/vendor/github.com/go-ini/ini/ini.go
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -34,7 +34,7 @@ const (
// Maximum allowed depth when recursively substituing variable names.
_DEPTH_VALUES = 99
- _VERSION = "1.38.1"
+ _VERSION = "1.38.2"
)
// Version returns current package version literal.
@@ -204,7 +204,7 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}
-// InsensitiveLoad has exactly same functionality as Load function
+// ShadowLoad has exactly same functionality as Load function
// except it allows have shadow keys.
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
diff --git a/vendor/github.com/go-redis/redis/CHANGELOG.md b/vendor/github.com/go-redis/redis/CHANGELOG.md
new file mode 100644
index 000000000..7c40d5e38
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/CHANGELOG.md
@@ -0,0 +1,21 @@
+# Changelog
+
+## 6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better keys distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout occurres. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis Servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
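A sketch (not part of this commit) of two items from the changelog above, Options.MinIdleConns and the Client.Do / Cmd helpers; it assumes a Redis server at localhost:6379.

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{
		Addr:         "localhost:6379",
		MinIdleConns: 4, // keep a few idle connections warm (new in 6.14)
	})
	defer client.Close()

	// Client.Do builds an arbitrary command from its arguments.
	if err := client.Do("set", "counter", 10).Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}

	// Cmd helpers such as Int64 convert the generic reply.
	n, err := client.Do("incrby", "counter", 5).Int64()
	fmt.Println(n, err) // 15 <nil>
}
```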
diff --git a/vendor/github.com/go-redis/redis/README.md b/vendor/github.com/go-redis/redis/README.md
index 9f349764a..7d05b4466 100644
--- a/vendor/github.com/go-redis/redis/README.md
+++ b/vendor/github.com/go-redis/redis/README.md
@@ -15,6 +15,7 @@ Supports:
- [Timeouts](https://godoc.org/github.com/go-redis/redis#Options).
- [Redis Sentinel](https://godoc.org/github.com/go-redis/redis#NewFailoverClient).
- [Redis Cluster](https://godoc.org/github.com/go-redis/redis#NewClusterClient).
+- [Cluster of Redis Servers](https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup) without using cluster mode and Redis Sentinel.
- [Ring](https://godoc.org/github.com/go-redis/redis#NewRing).
- [Instrumentation](https://godoc.org/github.com/go-redis/redis#ex-package--Instrumentation).
- [Cache friendly](https://github.com/go-redis/cache).
@@ -86,25 +87,27 @@ Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-exa
Some corner cases:
- SET key value EX 10 NX
- set, err := client.SetNX("key", "value", 10*time.Second).Result()
+```go
+// SET key value EX 10 NX
+set, err := client.SetNX("key", "value", 10*time.Second).Result()
- SORT list LIMIT 0 2 ASC
- vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+// SORT list LIMIT 0 2 ASC
+vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
- ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
- vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
- Min: "-inf",
- Max: "+inf",
- Offset: 0,
- Count: 2,
- }).Result()
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
- ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
- vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
- EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
- vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+```
## Benchmark
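A sketch (not part of this commit) of the "Cluster of Redis Servers" setup referenced above, built on the ClusterSlots option added in cluster.go below; the addresses are hypothetical.

```go
package main

import "github.com/go-redis/redis"

func main() {
	client := redis.NewClusterClient(&redis.ClusterOptions{
		// Map the 16384 hash slots onto two standalone servers.
		ClusterSlots: func() ([]redis.ClusterSlot, error) {
			return []redis.ClusterSlot{
				{Start: 0, End: 8191, Nodes: []redis.ClusterNode{{Addr: "10.0.0.1:6379"}}},
				{Start: 8192, End: 16383, Nodes: []redis.ClusterNode{{Addr: "10.0.0.2:6379"}}},
			}, nil
		},
	})
	defer client.Close()

	// ReloadState re-invokes ClusterSlots when the topology changes.
	_ = client.ReloadState()
}
```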
diff --git a/vendor/github.com/go-redis/redis/cluster.go b/vendor/github.com/go-redis/redis/cluster.go
index 0c58c8532..6b05f1a56 100644
--- a/vendor/github.com/go-redis/redis/cluster.go
+++ b/vendor/github.com/go-redis/redis/cluster.go
@@ -8,7 +8,8 @@ import (
"math"
"math/rand"
"net"
- "strings"
+ "runtime"
+ "sort"
"sync"
"sync/atomic"
"time"
@@ -30,7 +31,7 @@ type ClusterOptions struct {
// The maximum number of retries before giving up. Command is retried
// on network errors and MOVED/ASK redirects.
- // Default is 8.
+ // Default is 8 retries.
MaxRedirects int
// Enables read-only commands on slave nodes.
@@ -39,16 +40,25 @@ type ClusterOptions struct {
// It automatically enables ReadOnly.
RouteByLatency bool
// Allows routing read-only commands to the random master or slave node.
+ // It automatically enables ReadOnly.
RouteRandomly bool
+ // Optional function that returns cluster slots information.
+ // It is useful to manually create cluster of standalone Redis servers
+ // and load-balance read/write operations between master and slaves.
+ // It can use service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ ClusterSlots func() ([]ClusterSlot, error)
+
// Following options are copied from Options struct.
OnConnect func(*Conn) error
+ Password string
+
MaxRetries int
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
- Password string
DialTimeout time.Duration
ReadTimeout time.Duration
@@ -56,6 +66,8 @@ type ClusterOptions struct {
// PoolSize applies per cluster node and not for the whole cluster.
PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
@@ -70,10 +82,14 @@ func (opt *ClusterOptions) init() {
opt.MaxRedirects = 8
}
- if opt.RouteByLatency {
+ if opt.RouteByLatency || opt.RouteRandomly {
opt.ReadOnly = true
}
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 5 * runtime.NumCPU()
+ }
+
switch opt.ReadTimeout {
case -1:
opt.ReadTimeout = 0
@@ -117,10 +133,11 @@ func (opt *ClusterOptions) clientOptions() *Options {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
-
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: disableIdleCheck,
TLSConfig: opt.TLSConfig,
@@ -160,10 +177,6 @@ func (n *clusterNode) Close() error {
return n.Client.Close()
}
-func (n *clusterNode) Test() error {
- return n.Client.ClusterInfo().Err()
-}
-
func (n *clusterNode) updateLatency() {
const probes = 10
@@ -330,7 +343,7 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
v, err := c.nodeCreateGroup.Do(addr, func() (interface{}, error) {
node := newClusterNode(c.opt, addr)
- return node, node.Test()
+ return node, nil
})
c.mu.Lock()
@@ -383,12 +396,31 @@ func (c *clusterNodes) Random() (*clusterNode, error) {
//------------------------------------------------------------------------------
+type clusterSlot struct {
+ start, end int
+ nodes []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+ return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+ return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
type clusterState struct {
nodes *clusterNodes
Masters []*clusterNode
Slaves []*clusterNode
- slots [][]*clusterNode
+ slots []*clusterSlot
generation uint32
createdAt time.Time
@@ -400,19 +432,21 @@ func newClusterState(
c := clusterState{
nodes: nodes,
- slots: make([][]*clusterNode, hashtag.SlotNumber),
+ slots: make([]*clusterSlot, 0, len(slots)),
generation: nodes.NextGeneration(),
createdAt: time.Now(),
}
- isLoopbackOrigin := isLoopbackAddr(origin)
+ originHost, _, _ := net.SplitHostPort(origin)
+ isLoopbackOrigin := isLoopback(originHost)
+
for _, slot := range slots {
var nodes []*clusterNode
for i, slotNode := range slot.Nodes {
addr := slotNode.Addr
- if !isLoopbackOrigin && useOriginAddr(origin, addr) {
- addr = origin
+ if !isLoopbackOrigin {
+ addr = replaceLoopbackHost(addr, originHost)
}
node, err := c.nodes.GetOrCreate(addr)
@@ -430,11 +464,15 @@ func newClusterState(
}
}
- for i := slot.Start; i <= slot.End; i++ {
- c.slots[i] = nodes
- }
+ c.slots = append(c.slots, &clusterSlot{
+ start: slot.Start,
+ end: slot.End,
+ nodes: nodes,
+ })
}
+ sort.Sort(clusterSlotSlice(c.slots))
+
time.AfterFunc(time.Minute, func() {
nodes.GC(c.generation)
})
@@ -442,6 +480,33 @@ func newClusterState(
return &c, nil
}
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+ nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+ if err != nil {
+ return nodeAddr
+ }
+
+ nodeIP := net.ParseIP(nodeHost)
+ if nodeIP == nil {
+ return nodeAddr
+ }
+
+ if !nodeIP.IsLoopback() {
+ return nodeAddr
+ }
+
+ // Use origin host which is not loopback and node port.
+ return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return true
+ }
+ return ip.IsLoopback()
+}
+
func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
nodes := c.slotNodes(slot)
if len(nodes) > 0 {
@@ -502,32 +567,24 @@ func (c *clusterState) slotRandomNode(slot int) *clusterNode {
}
func (c *clusterState) slotNodes(slot int) []*clusterNode {
- if slot >= 0 && slot < len(c.slots) {
- return c.slots[slot]
+ i := sort.Search(len(c.slots), func(i int) bool {
+ return c.slots[i].end >= slot
+ })
+ if i >= len(c.slots) {
+ return nil
+ }
+ x := c.slots[i]
+ if slot >= x.start && slot <= x.end {
+ return x.nodes
}
return nil
}
func (c *clusterState) IsConsistent() bool {
- if len(c.Masters) > len(c.Slaves) {
- return false
- }
-
- for _, master := range c.Masters {
- s := master.Client.Info("replication").Val()
- if !strings.Contains(s, "role:master") {
- return false
- }
- }
-
- for _, slave := range c.Slaves {
- s := slave.Client.Info("replication").Val()
- if !strings.Contains(s, "role:slave") {
- return false
- }
+ if c.nodes.opt.ClusterSlots != nil {
+ return true
}
-
- return true
+ return len(c.Masters) <= len(c.Slaves)
}
//------------------------------------------------------------------------------
@@ -555,7 +612,7 @@ func (c *clusterStateHolder) Reload() (*clusterState, error) {
return nil, err
}
if !state.IsConsistent() {
- c.LazyReload()
+ time.AfterFunc(time.Second, c.LazyReload)
}
return state, nil
}
@@ -614,6 +671,14 @@ func (c *clusterStateHolder) Get() (*clusterState, error) {
return nil, errors.New("redis: cluster has no state")
}
+func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) {
+ state, err := c.Reload()
+ if err == nil {
+ return state, nil
+ }
+ return c.Get()
+}
+
//------------------------------------------------------------------------------
// ClusterClient is a Redis Cluster client representing a pool of zero
@@ -653,6 +718,8 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
c.init()
_, _ = c.state.Reload()
+ _, _ = c.cmdsInfoCache.Get()
+
if opt.IdleCheckFrequency > 0 {
go c.reaper(opt.IdleCheckFrequency)
}
@@ -660,6 +727,13 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
return c
}
+// ReloadState reloads cluster state. It calls ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState() error {
+ _, err := c.state.Reload()
+ return err
+}
+
func (c *ClusterClient) init() {
c.cmdable.setProcessor(c.Process)
}
@@ -818,6 +892,7 @@ func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
}
if internal.IsRetryableError(err, true) {
+ c.state.LazyReload()
continue
}
@@ -853,6 +928,13 @@ func (c *ClusterClient) Close() error {
return c.nodes.Close()
}
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(args ...interface{}) *Cmd {
+ cmd := NewCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
func (c *ClusterClient) WrapProcess(
fn func(oldProcess func(Cmder) error) func(Cmder) error,
) {
@@ -904,12 +986,14 @@ func (c *ClusterClient) defaultProcess(cmd Cmder) error {
}
if internal.IsRetryableError(err, true) {
- // Firstly retry the same node.
+ c.state.LazyReload()
+
+ // First retry the same node.
if attempt == 0 {
continue
}
- // Secondly try random node.
+ // Second try random node.
node, err = c.nodes.Random()
if err != nil {
break
@@ -944,12 +1028,9 @@ func (c *ClusterClient) defaultProcess(cmd Cmder) error {
// ForEachMaster concurrently calls the fn on each master node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
- state, err := c.state.Reload()
+ state, err := c.state.ReloadOrGet()
if err != nil {
- state, err = c.state.Get()
- if err != nil {
- return err
- }
+ return err
}
var wg sync.WaitGroup
@@ -980,12 +1061,9 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
// ForEachSlave concurrently calls the fn on each slave node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
- state, err := c.state.Reload()
+ state, err := c.state.ReloadOrGet()
if err != nil {
- state, err = c.state.Get()
- if err != nil {
- return err
- }
+ return err
}
var wg sync.WaitGroup
@@ -1016,12 +1094,9 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
// ForEachNode concurrently calls the fn on each known node in the cluster.
// It returns the first error if any.
func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
- state, err := c.state.Reload()
+ state, err := c.state.ReloadOrGet()
if err != nil {
- state, err = c.state.Get()
- if err != nil {
- return err
- }
+ return err
}
var wg sync.WaitGroup
@@ -1071,7 +1146,7 @@ func (c *ClusterClient) PoolStats() *PoolStats {
acc.Timeouts += s.Timeouts
acc.TotalConns += s.TotalConns
- acc.FreeConns += s.FreeConns
+ acc.IdleConns += s.IdleConns
acc.StaleConns += s.StaleConns
}
@@ -1082,7 +1157,7 @@ func (c *ClusterClient) PoolStats() *PoolStats {
acc.Timeouts += s.Timeouts
acc.TotalConns += s.TotalConns
- acc.FreeConns += s.FreeConns
+ acc.IdleConns += s.IdleConns
acc.StaleConns += s.StaleConns
}
@@ -1090,6 +1165,14 @@ func (c *ClusterClient) PoolStats() *PoolStats {
}
func (c *ClusterClient) loadState() (*clusterState, error) {
+ if c.opt.ClusterSlots != nil {
+ slots, err := c.opt.ClusterSlots()
+ if err != nil {
+ return nil, err
+ }
+ return newClusterState(c.nodes, slots, "")
+ }
+
addrs, err := c.nodes.Addrs()
if err != nil {
return nil, err
@@ -1196,7 +1279,7 @@ func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error {
cmdsMap = failedCmds
}
- return firstCmdsErr(cmds)
+ return cmdsFirstErr(cmds)
}
func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, error) {
@@ -1207,9 +1290,16 @@ func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, e
}
cmdsMap := make(map[*clusterNode][]Cmder)
+ cmdsAreReadOnly := c.cmdsAreReadOnly(cmds)
for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
- node, err := state.slotMasterNode(slot)
+ var node *clusterNode
+ var err error
+ if cmdsAreReadOnly {
+ _, node, err = c.cmdSlotAndNode(cmd)
+ } else {
+ slot := c.cmdSlot(cmd)
+ node, err = state.slotMasterNode(slot)
+ }
if err != nil {
return nil, err
}
@@ -1218,6 +1308,16 @@ func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, e
return cmdsMap, nil
}
+func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ if cmdInfo == nil || !cmdInfo.ReadOnly {
+ return false
+ }
+ }
+ return true
+}
+
func (c *ClusterClient) remapCmds(cmds []Cmder, failedCmds map[*clusterNode][]Cmder) {
remappedCmds, err := c.mapCmdsByNode(cmds)
if err != nil {
@@ -1233,26 +1333,26 @@ func (c *ClusterClient) remapCmds(cmds []Cmder, failedCmds map[*clusterNode][]Cm
func (c *ClusterClient) pipelineProcessCmds(
node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
) error {
- _ = cn.SetWriteTimeout(c.opt.WriteTimeout)
-
- err := writeCmd(cn, cmds...)
+ err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmds...)
+ })
if err != nil {
setCmdsErr(cmds, err)
failedCmds[node] = cmds
return err
}
- // Set read timeout for all commands.
- _ = cn.SetReadTimeout(c.opt.ReadTimeout)
-
- return c.pipelineReadCmds(cn, cmds, failedCmds)
+ err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(rd, cmds, failedCmds)
+ })
+ return err
}
func (c *ClusterClient) pipelineReadCmds(
- cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+ rd *proto.Reader, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
) error {
for _, cmd := range cmds {
- err := cmd.readReply(cn)
+ err := cmd.readReply(rd)
if err == nil {
continue
}
@@ -1361,7 +1461,7 @@ func (c *ClusterClient) defaultProcessTxPipeline(cmds []Cmder) error {
}
}
- return firstCmdsErr(cmds)
+ return cmdsFirstErr(cmds)
}
func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
@@ -1376,35 +1476,37 @@ func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
func (c *ClusterClient) txPipelineProcessCmds(
node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
) error {
- cn.SetWriteTimeout(c.opt.WriteTimeout)
- if err := txPipelineWriteMulti(cn, cmds); err != nil {
+ err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return txPipelineWriteMulti(wr, cmds)
+ })
+ if err != nil {
setCmdsErr(cmds, err)
failedCmds[node] = cmds
return err
}
- // Set read timeout for all commands.
- cn.SetReadTimeout(c.opt.ReadTimeout)
-
- if err := c.txPipelineReadQueued(cn, cmds, failedCmds); err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- return pipelineReadCmds(cn, cmds)
+ err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ err := c.txPipelineReadQueued(rd, cmds, failedCmds)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ return pipelineReadCmds(rd, cmds)
+ })
+ return err
}
func (c *ClusterClient) txPipelineReadQueued(
- cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+ rd *proto.Reader, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
) error {
// Parse queued replies.
var statusCmd StatusCmd
- if err := statusCmd.readReply(cn); err != nil {
+ if err := statusCmd.readReply(rd); err != nil {
return err
}
for _, cmd := range cmds {
- err := statusCmd.readReply(cn)
+ err := statusCmd.readReply(rd)
if err == nil {
continue
}
@@ -1417,7 +1519,7 @@ func (c *ClusterClient) txPipelineReadQueued(
}
// Parse number of replies.
- line, err := cn.Rd.ReadLine()
+ line, err := rd.ReadLine()
if err != nil {
if err == Nil {
err = TxFailedErr
@@ -1445,11 +1547,9 @@ func (c *ClusterClient) txPipelineReadQueued(
}
func (c *ClusterClient) pubSub(channels []string) *PubSub {
- opt := c.opt.clientOptions()
-
var node *clusterNode
- return &PubSub{
- opt: opt,
+ pubsub := &PubSub{
+ opt: c.opt.clientOptions(),
newConn: func(channels []string) (*pool.Conn, error) {
if node == nil {
@@ -1472,6 +1572,8 @@ func (c *ClusterClient) pubSub(channels []string) *PubSub {
return node.Client.connPool.CloseConn(cn)
},
}
+ pubsub.init()
+ return pubsub
}
// Subscribe subscribes the client to the specified channels.
@@ -1494,43 +1596,6 @@ func (c *ClusterClient) PSubscribe(channels ...string) *PubSub {
return pubsub
}
-func useOriginAddr(originAddr, nodeAddr string) bool {
- nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
- if err != nil {
- return false
- }
-
- nodeIP := net.ParseIP(nodeHost)
- if nodeIP == nil {
- return false
- }
-
- if !nodeIP.IsLoopback() {
- return false
- }
-
- _, originPort, err := net.SplitHostPort(originAddr)
- if err != nil {
- return false
- }
-
- return nodePort == originPort
-}
-
-func isLoopbackAddr(addr string) bool {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return false
- }
-
- ip := net.ParseIP(host)
- if ip == nil {
- return false
- }
-
- return ip.IsLoopback()
-}
-
func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
for _, n := range nodes {
if n == node {
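The cluster.go changes above add two related hooks: loadState consults an opt.ClusterSlots callback before falling back to the CLUSTER SLOTS command, and ReloadState re-runs state loading on demand. A hedged sketch of wiring this up; the ClusterSlots field on ClusterOptions is inferred from the c.opt.ClusterSlots call and from this commit's options.go change, so treat the field name and signature as an assumption:

    // Assumes: import "github.com/go-redis/redis"
    func newManualCluster() *redis.ClusterClient {
        c := redis.NewClusterClient(&redis.ClusterOptions{
            // Supply a static topology instead of querying CLUSTER SLOTS.
            ClusterSlots: func() ([]redis.ClusterSlot, error) {
                return []redis.ClusterSlot{{
                    Start: 0,
                    End:   16383,
                    Nodes: []redis.ClusterNode{
                        {Addr: "127.0.0.1:7000"}, // placeholder master
                        {Addr: "127.0.0.1:7001"}, // placeholder replica
                    },
                }}, nil
            },
        })

        // ReloadState re-runs the callback, e.g. after the topology changes.
        _ = c.ReloadState()
        return c
    }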
diff --git a/vendor/github.com/go-redis/redis/command.go b/vendor/github.com/go-redis/redis/command.go
index 552c897bb..ca44d7c8b 100644
--- a/vendor/github.com/go-redis/redis/command.go
+++ b/vendor/github.com/go-redis/redis/command.go
@@ -1,16 +1,14 @@
package redis
import (
- "bytes"
"fmt"
+ "net"
"strconv"
"strings"
"time"
"github.com/go-redis/redis/internal"
- "github.com/go-redis/redis/internal/pool"
"github.com/go-redis/redis/internal/proto"
- "github.com/go-redis/redis/internal/util"
)
type Cmder interface {
@@ -18,13 +16,12 @@ type Cmder interface {
Args() []interface{}
stringArg(int) string
- readReply(*pool.Conn) error
+ readReply(rd *proto.Reader) error
setErr(error)
readTimeout() *time.Duration
Err() error
- fmt.Stringer
}
func setCmdsErr(cmds []Cmder, e error) {
@@ -35,7 +32,7 @@ func setCmdsErr(cmds []Cmder, e error) {
}
}
-func firstCmdsErr(cmds []Cmder) error {
+func cmdsFirstErr(cmds []Cmder) error {
for _, cmd := range cmds {
if err := cmd.Err(); err != nil {
return err
@@ -44,16 +41,14 @@ func firstCmdsErr(cmds []Cmder) error {
return nil
}
-func writeCmd(cn *pool.Conn, cmds ...Cmder) error {
- cn.Wb.Reset()
+func writeCmd(wr *proto.Writer, cmds ...Cmder) error {
for _, cmd := range cmds {
- if err := cn.Wb.Append(cmd.Args()); err != nil {
+ err := wr.WriteArgs(cmd.Args())
+ if err != nil {
return err
}
}
-
- _, err := cn.Write(cn.Wb.Bytes())
- return err
+ return nil
}
func cmdString(cmd Cmder, val interface{}) string {
@@ -165,20 +160,124 @@ func (cmd *Cmd) Result() (interface{}, error) {
return cmd.val, cmd.err
}
-func (cmd *Cmd) String() string {
- return cmdString(cmd, cmd.val)
+func (cmd *Cmd) String() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
}
-func (cmd *Cmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadReply(sliceParser)
+func (cmd *Cmd) Int() (int, error) {
if cmd.err != nil {
- return cmd.err
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
}
- if b, ok := cmd.val.([]byte); ok {
- // Bytes must be copied, because underlying memory is reused.
- cmd.val = string(b)
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
}
- return nil
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadReply(sliceParser)
+ return cmd.err
+}
+
+// Implements proto.MultiBulkParse
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ vals := make([]interface{}, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(sliceParser)
+ if err != nil {
+ if err == Nil {
+ vals = append(vals, nil)
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ vals = append(vals, err)
+ continue
+ }
+ return nil, err
+ }
+
+ switch v := v.(type) {
+ case string:
+ vals = append(vals, v)
+ default:
+ vals = append(vals, v)
+ }
+ }
+ return vals, nil
}
//------------------------------------------------------------------------------
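The typed accessors added to Cmd above (String, Int, Int64, Uint64, Float64, Bool) convert the raw interface{} reply without a manual type switch. A short usage sketch; the key name is made up:

    // Assumes: import "github.com/go-redis/redis"
    func bumpCounter(c *redis.ClusterClient) (int64, error) {
        cmd := redis.NewCmd("incr", "page:views") // generic command
        if err := c.Process(cmd); err != nil {
            return 0, err
        }
        // Int64 handles both int64 and string replies, per the switch above.
        return cmd.Int64()
    }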
@@ -209,9 +308,9 @@ func (cmd *SliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *SliceCmd) readReply(cn *pool.Conn) error {
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(sliceParser)
+ v, cmd.err = rd.ReadArrayReply(sliceParser)
if cmd.err != nil {
return cmd.err
}
@@ -247,8 +346,8 @@ func (cmd *StatusCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StatusCmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadStringReply()
+func (cmd *StatusCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadString()
return cmd.err
}
@@ -280,8 +379,8 @@ func (cmd *IntCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *IntCmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadIntReply()
+func (cmd *IntCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadIntReply()
return cmd.err
}
@@ -315,9 +414,9 @@ func (cmd *DurationCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *DurationCmd) readReply(cn *pool.Conn) error {
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
var n int64
- n, cmd.err = cn.Rd.ReadIntReply()
+ n, cmd.err = rd.ReadIntReply()
if cmd.err != nil {
return cmd.err
}
@@ -353,9 +452,9 @@ func (cmd *TimeCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *TimeCmd) readReply(cn *pool.Conn) error {
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(timeParser)
+ v, cmd.err = rd.ReadArrayReply(timeParser)
if cmd.err != nil {
return cmd.err
}
@@ -363,6 +462,25 @@ func (cmd *TimeCmd) readReply(cn *pool.Conn) error {
return nil
}
+// Implements proto.MultiBulkParse
+func timeParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d elements, expected 2", n)
+ }
+
+ sec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ microsec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ return time.Unix(sec, microsec*1000), nil
+}
+
//------------------------------------------------------------------------------
type BoolCmd struct {
@@ -391,11 +509,9 @@ func (cmd *BoolCmd) String() string {
return cmdString(cmd, cmd.val)
}
-var ok = []byte("OK")
-
-func (cmd *BoolCmd) readReply(cn *pool.Conn) error {
+func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadReply(nil)
+ v, cmd.err = rd.ReadReply(nil)
// `SET key value NX` returns nil when key already exists. But
// `SETNX key value` returns bool (0/1). So convert nil to bool.
// TODO: is this okay?
@@ -411,8 +527,8 @@ func (cmd *BoolCmd) readReply(cn *pool.Conn) error {
case int64:
cmd.val = v == 1
return nil
- case []byte:
- cmd.val = bytes.Equal(v, ok)
+ case string:
+ cmd.val = v == "OK"
return nil
default:
cmd.err = fmt.Errorf("got %T, wanted int64 or string", v)
@@ -425,7 +541,7 @@ func (cmd *BoolCmd) readReply(cn *pool.Conn) error {
type StringCmd struct {
baseCmd
- val []byte
+ val string
}
var _ Cmder = (*StringCmd)(nil)
@@ -437,7 +553,7 @@ func NewStringCmd(args ...interface{}) *StringCmd {
}
func (cmd *StringCmd) Val() string {
- return util.BytesToString(cmd.val)
+ return cmd.val
}
func (cmd *StringCmd) Result() (string, error) {
@@ -445,7 +561,14 @@ func (cmd *StringCmd) Result() (string, error) {
}
func (cmd *StringCmd) Bytes() ([]byte, error) {
- return cmd.val, cmd.err
+ return []byte(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
}
func (cmd *StringCmd) Int64() (int64, error) {
@@ -473,15 +596,15 @@ func (cmd *StringCmd) Scan(val interface{}) error {
if cmd.err != nil {
return cmd.err
}
- return proto.Scan(cmd.val, val)
+ return proto.Scan([]byte(cmd.val), val)
}
func (cmd *StringCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StringCmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadBytesReply()
+func (cmd *StringCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadString()
return cmd.err
}
@@ -513,8 +636,8 @@ func (cmd *FloatCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *FloatCmd) readReply(cn *pool.Conn) error {
- cmd.val, cmd.err = cn.Rd.ReadFloatReply()
+func (cmd *FloatCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadFloatReply()
return cmd.err
}
@@ -550,9 +673,9 @@ func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
return proto.ScanSlice(cmd.Val(), container)
}
-func (cmd *StringSliceCmd) readReply(cn *pool.Conn) error {
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(stringSliceParser)
+ v, cmd.err = rd.ReadArrayReply(stringSliceParser)
if cmd.err != nil {
return cmd.err
}
@@ -560,6 +683,22 @@ func (cmd *StringSliceCmd) readReply(cn *pool.Conn) error {
return nil
}
+// Implements proto.MultiBulkParse
+func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ ss := make([]string, 0, n)
+ for i := int64(0); i < n; i++ {
+ s, err := rd.ReadString()
+ if err == Nil {
+ ss = append(ss, "")
+ } else if err != nil {
+ return nil, err
+ } else {
+ ss = append(ss, s)
+ }
+ }
+ return ss, nil
+}
+
//------------------------------------------------------------------------------
type BoolSliceCmd struct {
@@ -588,9 +727,9 @@ func (cmd *BoolSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *BoolSliceCmd) readReply(cn *pool.Conn) error {
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(boolSliceParser)
+ v, cmd.err = rd.ReadArrayReply(boolSliceParser)
if cmd.err != nil {
return cmd.err
}
@@ -598,6 +737,19 @@ func (cmd *BoolSliceCmd) readReply(cn *pool.Conn) error {
return nil
}
+// Implements proto.MultiBulkParse
+func boolSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ bools := make([]bool, 0, n)
+ for i := int64(0); i < n; i++ {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ bools = append(bools, n == 1)
+ }
+ return bools, nil
+}
+
//------------------------------------------------------------------------------
type StringStringMapCmd struct {
@@ -626,9 +778,9 @@ func (cmd *StringStringMapCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StringStringMapCmd) readReply(cn *pool.Conn) error {
+func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(stringStringMapParser)
+ v, cmd.err = rd.ReadArrayReply(stringStringMapParser)
if cmd.err != nil {
return cmd.err
}
@@ -636,6 +788,25 @@ func (cmd *StringStringMapCmd) readReply(cn *pool.Conn) error {
return nil
}
+// Implements proto.MultiBulkParse
+func stringStringMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]string, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
//------------------------------------------------------------------------------
type StringIntMapCmd struct {
@@ -664,9 +835,9 @@ func (cmd *StringIntMapCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StringIntMapCmd) readReply(cn *pool.Conn) error {
+func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(stringIntMapParser)
+ v, cmd.err = rd.ReadArrayReply(stringIntMapParser)
if cmd.err != nil {
return cmd.err
}
@@ -674,6 +845,25 @@ func (cmd *StringIntMapCmd) readReply(cn *pool.Conn) error {
return nil
}
+// Implements proto.MultiBulkParse
+func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]int64, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = n
+ }
+ return m, nil
+}
+
//------------------------------------------------------------------------------
type StringStructMapCmd struct {
@@ -702,9 +892,9 @@ func (cmd *StringStructMapCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StringStructMapCmd) readReply(cn *pool.Conn) error {
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(stringStructMapParser)
+ v, cmd.err = rd.ReadArrayReply(stringStructMapParser)
if cmd.err != nil {
return cmd.err
}
@@ -712,6 +902,380 @@ func (cmd *StringStructMapCmd) readReply(cn *pool.Conn) error {
return nil
}
+// Implements proto.MultiBulkParse
+func stringStructMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]struct{}, n)
+ for i := int64(0); i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = struct{}{}
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(xMessageSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]XMessage)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ msgs := make([]XMessage, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+ if err != nil {
+ return nil, err
+ }
+
+ msgs = append(msgs, XMessage{
+ ID: id,
+ Values: v.(map[string]interface{}),
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]interface{}, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(xStreamSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]XStream)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func xStreamSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ ret := make([]XStream, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ stream, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ v, err := rd.ReadArrayReply(xMessageSliceParser)
+ if err != nil {
+ return nil, err
+ }
+
+ ret = append(ret, XStream{
+ Stream: stream,
+ Messages: v.([]XMessage),
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ret, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ var info interface{}
+ info, cmd.err = rd.ReadArrayReply(xPendingParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = info.(*XPending)
+ return nil
+}
+
+func xPendingParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ count, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ lower, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ higher, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ pending := &XPending{
+ Count: count,
+ Lower: lower,
+ Higher: higher,
+ }
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ for i := int64(0); i < n; i++ {
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ if pending.Consumers == nil {
+ pending.Consumers = make(map[string]int64)
+ }
+ pending.Consumers[consumerName] = consumerPending
+
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ return pending, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ Id string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ var info interface{}
+ info, cmd.err = rd.ReadArrayReply(xPendingExtSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = info.([]XPendingExt)
+ return nil
+}
+
+func xPendingExtSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ ret := make([]XPendingExt, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumer, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ idle, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ retryCount, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ ret = append(ret, XPendingExt{
+ Id: id,
+ Consumer: consumer,
+ Idle: time.Duration(idle) * time.Millisecond,
+ RetryCount: retryCount,
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ret, nil
+}
+
+//------------------------------------------------------------------------------
+
//------------------------------------------------------------------------------
type ZSliceCmd struct {
@@ -740,9 +1304,9 @@ func (cmd *ZSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *ZSliceCmd) readReply(cn *pool.Conn) error {
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(zSliceParser)
+ v, cmd.err = rd.ReadArrayReply(zSliceParser)
if cmd.err != nil {
return cmd.err
}
@@ -750,6 +1314,27 @@ func (cmd *ZSliceCmd) readReply(cn *pool.Conn) error {
return nil
}
+// Implements proto.MultiBulkParse
+func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ zz := make([]Z, n/2)
+ for i := int64(0); i < n; i += 2 {
+ var err error
+
+ z := &zz[i/2]
+
+ z.Member, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ z.Score, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return zz, nil
+}
+
//------------------------------------------------------------------------------
type ScanCmd struct {
@@ -782,8 +1367,8 @@ func (cmd *ScanCmd) String() string {
return cmdString(cmd, cmd.page)
}
-func (cmd *ScanCmd) readReply(cn *pool.Conn) error {
- cmd.page, cmd.cursor, cmd.err = cn.Rd.ReadScanReply()
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+ cmd.page, cmd.cursor, cmd.err = rd.ReadScanReply()
return cmd.err
}
@@ -833,9 +1418,9 @@ func (cmd *ClusterSlotsCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *ClusterSlotsCmd) readReply(cn *pool.Conn) error {
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(clusterSlotsParser)
+ v, cmd.err = rd.ReadArrayReply(clusterSlotsParser)
if cmd.err != nil {
return cmd.err
}
@@ -843,6 +1428,70 @@ func (cmd *ClusterSlotsCmd) readReply(cn *pool.Conn) error {
return nil
}
+// Implements proto.MultiBulkParse
+func clusterSlotsParser(rd *proto.Reader, n int64) (interface{}, error) {
+ slots := make([]ClusterSlot, n)
+ for i := 0; i < len(slots); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 2 {
+ err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ return nil, err
+ }
+
+ start, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ end, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := make([]ClusterNode, n-2)
+ for j := 0; j < len(nodes); j++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 && n != 3 {
+ err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+ return nil, err
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if n == 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ nodes[j].Id = id
+ }
+ }
+
+ slots[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+ return slots, nil
+}
+
//------------------------------------------------------------------------------
// GeoLocation is used with GeoAdd to add geospatial location.
@@ -924,9 +1573,9 @@ func (cmd *GeoLocationCmd) String() string {
return cmdString(cmd, cmd.locations)
}
-func (cmd *GeoLocationCmd) readReply(cn *pool.Conn) error {
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+ v, cmd.err = rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
if cmd.err != nil {
return cmd.err
}
@@ -934,6 +1583,73 @@ func (cmd *GeoLocationCmd) readReply(cn *pool.Conn) error {
return nil
}
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ var loc GeoLocation
+ var err error
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if q.WithDist {
+ loc.Dist, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithGeoHash {
+ loc.GeoHash, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithCoord {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 {
+ return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+ }
+
+ loc.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ loc.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &loc, nil
+ }
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ locs := make([]GeoLocation, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(newGeoLocationParser(q))
+ if err != nil {
+ return nil, err
+ }
+ switch vv := v.(type) {
+ case string:
+ locs = append(locs, GeoLocation{
+ Name: vv,
+ })
+ case *GeoLocation:
+ locs = append(locs, *vv)
+ default:
+ return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+ }
+ }
+ return locs, nil
+ }
+}
+
//------------------------------------------------------------------------------
type GeoPos struct {
@@ -966,9 +1682,9 @@ func (cmd *GeoPosCmd) String() string {
return cmdString(cmd, cmd.positions)
}
-func (cmd *GeoPosCmd) readReply(cn *pool.Conn) error {
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(geoPosSliceParser)
+ v, cmd.err = rd.ReadArrayReply(geoPosSliceParser)
if cmd.err != nil {
return cmd.err
}
@@ -976,6 +1692,44 @@ func (cmd *GeoPosCmd) readReply(cn *pool.Conn) error {
return nil
}
+func geoPosSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ positions := make([]*GeoPos, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(geoPosParser)
+ if err != nil {
+ if err == Nil {
+ positions = append(positions, nil)
+ continue
+ }
+ return nil, err
+ }
+ switch v := v.(type) {
+ case *GeoPos:
+ positions = append(positions, v)
+ default:
+ return nil, fmt.Errorf("got %T, expected *GeoPos", v)
+ }
+ }
+ return positions, nil
+}
+
+func geoPosParser(rd *proto.Reader, n int64) (interface{}, error) {
+ var pos GeoPos
+ var err error
+
+ pos.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ pos.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ return &pos, nil
+}
+
//------------------------------------------------------------------------------
type CommandInfo struct {
@@ -1014,9 +1768,9 @@ func (cmd *CommandsInfoCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *CommandsInfoCmd) readReply(cn *pool.Conn) error {
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
var v interface{}
- v, cmd.err = cn.Rd.ReadArrayReply(commandInfoSliceParser)
+ v, cmd.err = rd.ReadArrayReply(commandInfoSliceParser)
if cmd.err != nil {
return cmd.err
}
@@ -1024,6 +1778,74 @@ func (cmd *CommandsInfoCmd) readReply(cn *pool.Conn) error {
return nil
}
+// Implements proto.MultiBulkParse
+func commandInfoSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]*CommandInfo, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(commandInfoParser)
+ if err != nil {
+ return nil, err
+ }
+ vv := v.(*CommandInfo)
+ m[vv.Name] = vv
+
+ }
+ return m, nil
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ var cmd CommandInfo
+ var err error
+
+ if n != 6 {
+ return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n)
+ }
+
+ cmd.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ arity, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.Arity = int8(arity)
+
+ flags, err := rd.ReadReply(stringSliceParser)
+ if err != nil {
+ return nil, err
+ }
+ cmd.Flags = flags.([]string)
+
+ firstKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.StepCount = int8(stepCount)
+
+ for _, flag := range cmd.Flags {
+ if flag == "readonly" {
+ cmd.ReadOnly = true
+ break
+ }
+ }
+
+ return &cmd, nil
+}
+
//------------------------------------------------------------------------------
type cmdsInfoCache struct {
diff --git a/vendor/github.com/go-redis/redis/commands.go b/vendor/github.com/go-redis/redis/commands.go
index c6a88154e..b259e3a8c 100644
--- a/vendor/github.com/go-redis/redis/commands.go
+++ b/vendor/github.com/go-redis/redis/commands.go
@@ -62,6 +62,7 @@ type Cmdable interface {
TxPipelined(fn func(Pipeliner) error) ([]Cmder, error)
TxPipeline() Pipeliner
+ Command() *CommandsInfoCmd
ClientGetName() *StringCmd
Echo(message interface{}) *StringCmd
Ping() *StatusCmd
@@ -171,6 +172,26 @@ type Cmdable interface {
SRem(key string, members ...interface{}) *IntCmd
SUnion(keys ...string) *StringSliceCmd
SUnionStore(destination string, keys ...string) *IntCmd
+ XAdd(a *XAddArgs) *StringCmd
+ XLen(stream string) *IntCmd
+ XRange(stream, start, stop string) *XMessageSliceCmd
+ XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(streams ...string) *XStreamSliceCmd
+ XGroupCreate(stream, group, start string) *StatusCmd
+ XGroupSetID(stream, group, start string) *StatusCmd
+ XGroupDestroy(stream, group string) *IntCmd
+ XGroupDelConsumer(stream, group, consumer string) *IntCmd
+ XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(stream, group string, ids ...string) *IntCmd
+ XPending(stream, group string) *XPendingCmd
+ XPendingExt(a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(a *XClaimArgs) *StringSliceCmd
+ XTrim(key string, maxLen int64) *IntCmd
+ XTrimApprox(key string, maxLen int64) *IntCmd
ZAdd(key string, members ...Z) *IntCmd
ZAddNX(key string, members ...Z) *IntCmd
ZAddXX(key string, members ...Z) *IntCmd
@@ -209,6 +230,7 @@ type Cmdable interface {
BgRewriteAOF() *StatusCmd
BgSave() *StatusCmd
ClientKill(ipPort string) *StatusCmd
+ ClientKillByFilter(keys ...string) *IntCmd
ClientList() *StringCmd
ClientPause(dur time.Duration) *BoolCmd
ConfigGet(parameter string) *SliceCmd
@@ -265,9 +287,9 @@ type Cmdable interface {
GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
GeoDist(key string, member1, member2, unit string) *FloatCmd
GeoHash(key string, members ...string) *StringSliceCmd
- Command() *CommandsInfoCmd
ReadOnly() *StatusCmd
ReadWrite() *StatusCmd
+ MemoryUsage(key string, samples ...int) *IntCmd
}
type StatefulCmdable interface {
@@ -345,6 +367,12 @@ func (c *statefulCmdable) SwapDB(index1, index2 int) *StatusCmd {
//------------------------------------------------------------------------------
+func (c *cmdable) Command() *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd("command")
+ c.process(cmd)
+ return cmd
+}
+
func (c *cmdable) Del(keys ...string) *IntCmd {
args := make([]interface{}, 1+len(keys))
args[0] = "del"
@@ -411,7 +439,7 @@ func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duratio
db,
formatMs(timeout),
)
- cmd.setReadTimeout(readTimeout(timeout))
+ cmd.setReadTimeout(timeout)
c.process(cmd)
return cmd
}
@@ -985,7 +1013,7 @@ func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd {
}
args[len(args)-1] = formatSec(timeout)
cmd := NewStringSliceCmd(args...)
- cmd.setReadTimeout(readTimeout(timeout))
+ cmd.setReadTimeout(timeout)
c.process(cmd)
return cmd
}
@@ -998,7 +1026,7 @@ func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd {
}
args[len(keys)+1] = formatSec(timeout)
cmd := NewStringSliceCmd(args...)
- cmd.setReadTimeout(readTimeout(timeout))
+ cmd.setReadTimeout(timeout)
c.process(cmd)
return cmd
}
@@ -1010,7 +1038,7 @@ func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration)
destination,
formatSec(timeout),
)
- cmd.setReadTimeout(readTimeout(timeout))
+ cmd.setReadTimeout(timeout)
c.process(cmd)
return cmd
}
@@ -1282,6 +1310,239 @@ func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd {
//------------------------------------------------------------------------------
+type XAddArgs struct {
+ Stream string
+ MaxLen int64 // MAXLEN N
+ MaxLenApprox int64 // MAXLEN ~ N
+ ID string
+ Values map[string]interface{}
+}
+
+func (c *cmdable) XAdd(a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 6+len(a.Values)*2)
+ args = append(args, "xadd")
+ args = append(args, a.Stream)
+ if a.MaxLen > 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ } else if a.MaxLenApprox > 0 {
+ args = append(args, "maxlen", "~", a.MaxLenApprox)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ for k, v := range a.Values {
+ args = append(args, k)
+ args = append(args, v)
+ }
+
+ cmd := NewStringCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XLen(stream string) *IntCmd {
+ cmd := NewIntCmd("xlen", stream)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XRange(stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrange", stream, start, stop)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count)
+ c.process(cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string
+ Count int64
+ Block time.Duration
+}
+
+func (c *cmdable) XRead(a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 5+len(a.Streams))
+ args = append(args, "xread")
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ }
+ args = append(args, "streams")
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XReadStreams(streams ...string) *XStreamSliceCmd {
+ return c.XRead(&XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
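XAdd and XRead above wrap the Redis 5.0 XADD and XREAD stream commands. A minimal producer/consumer sketch, assuming a local Redis 5 server; stream and field names are invented:

    // Assumes: import ("fmt"; "time"; "github.com/go-redis/redis")
    func streamRoundTrip(c *redis.Client) error {
        // XADD events * kind signup user u-1 ("*" is sent when ID is left empty).
        err := c.XAdd(&redis.XAddArgs{
            Stream: "events",
            Values: map[string]interface{}{"kind": "signup", "user": "u-1"},
        }).Err()
        if err != nil {
            return err
        }

        // XREAD COUNT 10 BLOCK 1000 STREAMS events 0
        // Streams lists the stream names first, then one ID per stream.
        streams, err := c.XRead(&redis.XReadArgs{
            Streams: []string{"events", "0"},
            Count:   10,
            Block:   time.Second,
        }).Result()
        if err != nil {
            return err
        }
        for _, s := range streams {
            for _, m := range s.Messages {
                fmt.Println(s.Stream, m.ID, m.Values)
            }
        }
        return nil
    }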
+func (c *cmdable) XGroupCreate(stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd("xgroup", "create", stream, group, start)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XGroupSetID(stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd("xgroup", "setid", stream, group, start)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XGroupDestroy(stream, group string) *IntCmd {
+ cmd := NewIntCmd("xgroup", "destroy", stream, group)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XGroupDelConsumer(stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd("xgroup", "delconsumer", stream, group, consumer)
+ c.process(cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string
+ Count int64
+ Block time.Duration
+}
+
+func (c *cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 8+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ }
+ args = append(args, "streams")
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XAck(stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
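XGroupCreate, XReadGroup and XAck compose into the usual consumer-group loop. A hedged end-to-end sketch; group and consumer names are invented, and the stream is assumed to already exist (XGROUP CREATE errors on a missing stream):

    // Assumes: import ("strings"; "time"; "github.com/go-redis/redis")
    func consumeGroup(c *redis.Client) error {
        // XGROUP CREATE events workers 0; a BUSYGROUP error just means it exists.
        err := c.XGroupCreate("events", "workers", "0").Err()
        if err != nil && !strings.Contains(err.Error(), "BUSYGROUP") {
            return err
        }

        // ">" asks only for entries never delivered to this group.
        streams, err := c.XReadGroup(&redis.XReadGroupArgs{
            Group:    "workers",
            Consumer: "worker-1",
            Streams:  []string{"events", ">"},
            Count:    10,
            Block:    time.Second,
        }).Result()
        if err == redis.Nil {
            return nil // nothing new within the block timeout
        }
        if err != nil {
            return err
        }

        for _, s := range streams {
            for _, m := range s.Messages {
                // ... process m.Values, then acknowledge ...
                if err := c.XAck(s.Stream, "workers", m.ID).Err(); err != nil {
                    return err
                }
            }
        }
        return nil
    }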
+func (c *cmdable) XPending(stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd("xpending", stream, group)
+ c.process(cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c *cmdable) XPendingExt(a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c *cmdable) XClaim(a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XClaimJustID(a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 4+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+func (c *cmdable) XTrim(key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd("xtrim", key, "maxlen", maxLen)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XTrimApprox(key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd("xtrim", key, "maxlen", "~", maxLen)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
// Z represents sorted set member.
type Z struct {
Score float64
@@ -1682,6 +1943,20 @@ func (c *cmdable) ClientKill(ipPort string) *StatusCmd {
return cmd
}
+// ClientKillByFilter is the new-style syntax, while ClientKill is the old one:
+// CLIENT KILL <option> [value] ... <option> [value]
+func (c *cmdable) ClientKillByFilter(keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "client"
+ args[1] = "kill"
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
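ClientKillByFilter forwards its arguments verbatim as CLIENT KILL filter pairs, so callers pass option/value strings. For example (the address is a placeholder):

    // Assumes: import "github.com/go-redis/redis"
    func killClientByAddr(c *redis.Client) (int64, error) {
        // CLIENT KILL ADDR 127.0.0.1:50212 TYPE normal
        return c.ClientKillByFilter("ADDR", "127.0.0.1:50212", "TYPE", "normal").Result()
    }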
func (c *cmdable) ClientList() *StringCmd {
cmd := NewStringCmd("client", "list")
c.process(cmd)
@@ -2168,8 +2443,15 @@ func (c *cmdable) GeoPos(key string, members ...string) *GeoPosCmd {
//------------------------------------------------------------------------------
-func (c *cmdable) Command() *CommandsInfoCmd {
- cmd := NewCommandsInfoCmd("command")
+func (c *cmdable) MemoryUsage(key string, samples ...int) *IntCmd {
+ args := []interface{}{"memory", "usage", key}
+ if len(samples) > 0 {
+ if len(samples) != 1 {
+ panic("MemoryUsage expects single sample count")
+ }
+ args = append(args, "SAMPLES", samples[0])
+ }
+ cmd := NewIntCmd(args...)
c.process(cmd)
return cmd
}
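MemoryUsage wraps MEMORY USAGE (Redis 4.0+) and forwards at most one SAMPLES count; passing more than one panics, as the guard above shows. A short sketch with an invented key name:

    // Assumes: import "github.com/go-redis/redis"
    func keyFootprint(c *redis.Client) (int64, error) {
        // MEMORY USAGE big-hash SAMPLES 5
        return c.MemoryUsage("big-hash", 5).Result()
    }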
diff --git a/vendor/github.com/go-redis/redis/internal/error.go b/vendor/github.com/go-redis/redis/internal/error.go
index 7b419577e..bda97baa6 100644
--- a/vendor/github.com/go-redis/redis/internal/error.go
+++ b/vendor/github.com/go-redis/redis/internal/error.go
@@ -8,9 +8,15 @@ import (
"github.com/go-redis/redis/internal/proto"
)
-func IsRetryableError(err error, retryNetError bool) bool {
- if IsNetworkError(err) {
- return retryNetError
+func IsRetryableError(err error, retryTimeout bool) bool {
+ if err == io.EOF {
+ return true
+ }
+ if netErr, ok := err.(net.Error); ok {
+ if netErr.Timeout() {
+ return retryTimeout
+ }
+ return true
}
s := err.Error()
if s == "ERR max number of clients reached" {
@@ -19,6 +25,9 @@ func IsRetryableError(err error, retryNetError bool) bool {
if strings.HasPrefix(s, "LOADING ") {
return true
}
+ if strings.HasPrefix(s, "READONLY ") {
+ return true
+ }
if strings.HasPrefix(s, "CLUSTERDOWN ") {
return true
}
@@ -30,24 +39,12 @@ func IsRedisError(err error) bool {
return ok
}
-func IsNetworkError(err error) bool {
- if err == io.EOF {
- return true
- }
- _, ok := err.(net.Error)
- return ok
-}
-
-func IsReadOnlyError(err error) bool {
- return strings.HasPrefix(err.Error(), "READONLY ")
-}
-
func IsBadConn(err error, allowTimeout bool) bool {
if err == nil {
return false
}
if IsRedisError(err) {
- return false
+ return strings.HasPrefix(err.Error(), "READONLY ")
}
if allowTimeout {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
diff --git a/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
index 8c7ebbfa6..22f5b3981 100644
--- a/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
+++ b/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
@@ -5,7 +5,7 @@ import (
"strings"
)
-const SlotNumber = 16384
+const slotNumber = 16384
// CRC16 implementation according to CCITT standards.
// Copyright 2001-2010 Georges Menie (www.menie.org)
@@ -56,7 +56,7 @@ func Key(key string) string {
}
func RandomSlot() int {
- return rand.Intn(SlotNumber)
+ return rand.Intn(slotNumber)
}
// hashSlot returns a consistent slot number between 0 and 16383
@@ -66,7 +66,7 @@ func Slot(key string) int {
return RandomSlot()
}
key = Key(key)
- return int(crc16sum(key)) % SlotNumber
+ return int(crc16sum(key)) % slotNumber
}
func crc16sum(key string) (crc uint16) {
diff --git a/vendor/github.com/go-redis/redis/internal/pool/conn.go b/vendor/github.com/go-redis/redis/internal/pool/conn.go
index 8af51d9de..1095bfe59 100644
--- a/vendor/github.com/go-redis/redis/internal/pool/conn.go
+++ b/vendor/github.com/go-redis/redis/internal/pool/conn.go
@@ -13,19 +13,21 @@ var noDeadline = time.Time{}
type Conn struct {
netConn net.Conn
- Rd *proto.Reader
- Wb *proto.WriteBuffer
+ rd *proto.Reader
+ rdLocked bool
+ wr *proto.Writer
- Inited bool
- usedAt atomic.Value
+ InitedAt time.Time
+ pooled bool
+ usedAt atomic.Value
}
func NewConn(netConn net.Conn) *Conn {
cn := &Conn{
netConn: netConn,
- Wb: proto.NewWriteBuffer(),
}
- cn.Rd = proto.NewReader(cn.netConn)
+ cn.rd = proto.NewReader(netConn)
+ cn.wr = proto.NewWriter(netConn)
cn.SetUsedAt(time.Now())
return cn
}
@@ -40,14 +42,11 @@ func (cn *Conn) SetUsedAt(tm time.Time) {
func (cn *Conn) SetNetConn(netConn net.Conn) {
cn.netConn = netConn
- cn.Rd.Reset(netConn)
+ cn.rd.Reset(netConn)
+ cn.wr.Reset(netConn)
}
-func (cn *Conn) IsStale(timeout time.Duration) bool {
- return timeout > 0 && time.Since(cn.UsedAt()) > timeout
-}
-
-func (cn *Conn) SetReadTimeout(timeout time.Duration) error {
+func (cn *Conn) setReadTimeout(timeout time.Duration) error {
now := time.Now()
cn.SetUsedAt(now)
if timeout > 0 {
@@ -56,7 +55,7 @@ func (cn *Conn) SetReadTimeout(timeout time.Duration) error {
return cn.netConn.SetReadDeadline(noDeadline)
}
-func (cn *Conn) SetWriteTimeout(timeout time.Duration) error {
+func (cn *Conn) setWriteTimeout(timeout time.Duration) error {
now := time.Now()
cn.SetUsedAt(now)
if timeout > 0 {
@@ -73,6 +72,22 @@ func (cn *Conn) RemoteAddr() net.Addr {
return cn.netConn.RemoteAddr()
}
+func (cn *Conn) WithReader(timeout time.Duration, fn func(rd *proto.Reader) error) error {
+ _ = cn.setReadTimeout(timeout)
+ return fn(cn.rd)
+}
+
+func (cn *Conn) WithWriter(timeout time.Duration, fn func(wr *proto.Writer) error) error {
+ _ = cn.setWriteTimeout(timeout)
+
+ firstErr := fn(cn.wr)
+ err := cn.wr.Flush()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
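WithReader and WithWriter replace the exported Rd/Wb fields and SetReadTimeout/SetWriteTimeout calls with a callback style: the connection sets the deadline, lends out its buffered reader or writer, and flushes after writes. Since pool.Conn is internal, the following is only a standalone illustration of the same pattern over a raw net.Conn, not go-redis API:

    package main

    import (
        "bufio"
        "net"
        "time"
    )

    // conn mimics the callback style above: the deadline and the flush are
    // handled in one place so callers cannot forget either of them.
    type conn struct {
        netConn net.Conn
        wr      *bufio.Writer
    }

    func (c *conn) WithWriter(timeout time.Duration, fn func(wr *bufio.Writer) error) error {
        if timeout > 0 {
            _ = c.netConn.SetWriteDeadline(time.Now().Add(timeout))
        }
        firstErr := fn(c.wr)
        if err := c.wr.Flush(); err != nil && firstErr == nil {
            firstErr = err
        }
        return firstErr
    }

    func main() {
        nc, err := net.Dial("tcp", "127.0.0.1:6379") // placeholder address
        if err != nil {
            return
        }
        defer nc.Close()

        c := &conn{netConn: nc, wr: bufio.NewWriter(nc)}
        _ = c.WithWriter(time.Second, func(wr *bufio.Writer) error {
            _, err := wr.WriteString("PING\r\n")
            return err
        })
    }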
func (cn *Conn) Close() error {
return cn.netConn.Close()
}
diff --git a/vendor/github.com/go-redis/redis/internal/pool/pool.go b/vendor/github.com/go-redis/redis/internal/pool/pool.go
index cab66904a..9cecee8ad 100644
--- a/vendor/github.com/go-redis/redis/internal/pool/pool.go
+++ b/vendor/github.com/go-redis/redis/internal/pool/pool.go
@@ -28,7 +28,6 @@ type Stats struct {
Timeouts uint32 // number of times a wait timeout occurred
TotalConns uint32 // number of total connections in the pool
- FreeConns uint32 // deprecated - use IdleConns
IdleConns uint32 // number of idle connections in the pool
StaleConns uint32 // number of stale connections removed from the pool
}
@@ -53,6 +52,8 @@ type Options struct {
OnClose func(*Conn) error
PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
@@ -63,16 +64,16 @@ type ConnPool struct {
dialErrorsNum uint32 // atomic
- lastDialError error
lastDialErrorMu sync.RWMutex
+ lastDialError error
queue chan struct{}
- connsMu sync.Mutex
- conns []*Conn
-
- idleConnsMu sync.RWMutex
- idleConns []*Conn
+ connsMu sync.Mutex
+ conns []*Conn
+ idleConns []*Conn
+ poolSize int
+ idleConnsLen int
stats Stats
@@ -90,6 +91,10 @@ func NewConnPool(opt *Options) *ConnPool {
idleConns: make([]*Conn, 0, opt.PoolSize),
}
+ for i := 0; i < opt.MinIdleConns; i++ {
+ p.checkMinIdleConns()
+ }
+
if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
go p.reaper(opt.IdleCheckFrequency)
}
@@ -97,19 +102,53 @@ func NewConnPool(opt *Options) *ConnPool {
return p
}
+func (p *ConnPool) checkMinIdleConns() {
+ if p.opt.MinIdleConns == 0 {
+ return
+ }
+ if p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
+ p.poolSize++
+ p.idleConnsLen++
+ go p.addIdleConn()
+ }
+}
+
+func (p *ConnPool) addIdleConn() {
+ cn, err := p.newConn(true)
+ if err != nil {
+ return
+ }
+
+ p.connsMu.Lock()
+ p.conns = append(p.conns, cn)
+ p.idleConns = append(p.idleConns, cn)
+ p.connsMu.Unlock()
+}
+
func (p *ConnPool) NewConn() (*Conn, error) {
- cn, err := p.newConn()
+ return p._NewConn(false)
+}
+
+func (p *ConnPool) _NewConn(pooled bool) (*Conn, error) {
+ cn, err := p.newConn(pooled)
if err != nil {
return nil, err
}
p.connsMu.Lock()
p.conns = append(p.conns, cn)
+ if pooled {
+ if p.poolSize < p.opt.PoolSize {
+ p.poolSize++
+ } else {
+ cn.pooled = false
+ }
+ }
p.connsMu.Unlock()
return cn, nil
}
-func (p *ConnPool) newConn() (*Conn, error) {
+func (p *ConnPool) newConn(pooled bool) (*Conn, error) {
if p.closed() {
return nil, ErrClosed
}
@@ -127,7 +166,9 @@ func (p *ConnPool) newConn() (*Conn, error) {
return nil, err
}
- return NewConn(netConn), nil
+ cn := NewConn(netConn)
+ cn.pooled = pooled
+ return cn, nil
}
func (p *ConnPool) tryDial() {
@@ -174,16 +215,16 @@ func (p *ConnPool) Get() (*Conn, error) {
}
for {
- p.idleConnsMu.Lock()
+ p.connsMu.Lock()
cn := p.popIdle()
- p.idleConnsMu.Unlock()
+ p.connsMu.Unlock()
if cn == nil {
break
}
- if cn.IsStale(p.opt.IdleTimeout) {
- p.CloseConn(cn)
+ if p.isStaleConn(cn) {
+ _ = p.CloseConn(cn)
continue
}
@@ -193,7 +234,7 @@ func (p *ConnPool) Get() (*Conn, error) {
atomic.AddUint32(&p.stats.Misses, 1)
- newcn, err := p.NewConn()
+ newcn, err := p._NewConn(true)
if err != nil {
p.freeTurn()
return nil, err
@@ -241,21 +282,21 @@ func (p *ConnPool) popIdle() *Conn {
idx := len(p.idleConns) - 1
cn := p.idleConns[idx]
p.idleConns = p.idleConns[:idx]
-
+ p.idleConnsLen--
+ p.checkMinIdleConns()
return cn
}
func (p *ConnPool) Put(cn *Conn) {
- buf := cn.Rd.PeekBuffered()
- if buf != nil {
- internal.Logf("connection has unread data: %.100q", buf)
+ if !cn.pooled {
p.Remove(cn)
return
}
- p.idleConnsMu.Lock()
+ p.connsMu.Lock()
p.idleConns = append(p.idleConns, cn)
- p.idleConnsMu.Unlock()
+ p.idleConnsLen++
+ p.connsMu.Unlock()
p.freeTurn()
}
@@ -275,6 +316,10 @@ func (p *ConnPool) removeConn(cn *Conn) {
for i, c := range p.conns {
if c == cn {
p.conns = append(p.conns[:i], p.conns[i+1:]...)
+ if cn.pooled {
+ p.poolSize--
+ p.checkMinIdleConns()
+ }
break
}
}
@@ -291,17 +336,17 @@ func (p *ConnPool) closeConn(cn *Conn) error {
// Len returns total number of connections.
func (p *ConnPool) Len() int {
p.connsMu.Lock()
- l := len(p.conns)
+ n := len(p.conns)
p.connsMu.Unlock()
- return l
+ return n
}
-// FreeLen returns number of idle connections.
+// IdleLen returns number of idle connections.
func (p *ConnPool) IdleLen() int {
- p.idleConnsMu.RLock()
- l := len(p.idleConns)
- p.idleConnsMu.RUnlock()
- return l
+ p.connsMu.Lock()
+ n := p.idleConnsLen
+ p.connsMu.Unlock()
+ return n
}
func (p *ConnPool) Stats() *Stats {
@@ -312,7 +357,6 @@ func (p *ConnPool) Stats() *Stats {
Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
TotalConns: uint32(p.Len()),
- FreeConns: uint32(idleLen),
IdleConns: uint32(idleLen),
StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
}
@@ -349,11 +393,10 @@ func (p *ConnPool) Close() error {
}
}
p.conns = nil
- p.connsMu.Unlock()
-
- p.idleConnsMu.Lock()
+ p.poolSize = 0
p.idleConns = nil
- p.idleConnsMu.Unlock()
+ p.idleConnsLen = 0
+ p.connsMu.Unlock()
return firstErr
}
@@ -364,11 +407,12 @@ func (p *ConnPool) reapStaleConn() *Conn {
}
cn := p.idleConns[0]
- if !cn.IsStale(p.opt.IdleTimeout) {
+ if !p.isStaleConn(cn) {
return nil
}
p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
+ p.idleConnsLen--
return cn
}
@@ -378,9 +422,9 @@ func (p *ConnPool) ReapStaleConns() (int, error) {
for {
p.getTurn()
- p.idleConnsMu.Lock()
+ p.connsMu.Lock()
cn := p.reapStaleConn()
- p.idleConnsMu.Unlock()
+ p.connsMu.Unlock()
if cn != nil {
p.removeConn(cn)
@@ -414,3 +458,19 @@ func (p *ConnPool) reaper(frequency time.Duration) {
atomic.AddUint32(&p.stats.StaleConns, uint32(n))
}
}
+
+func (p *ConnPool) isStaleConn(cn *Conn) bool {
+ if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+ return false
+ }
+
+ now := time.Now()
+ if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
+ return true
+ }
+ if p.opt.MaxConnAge > 0 && now.Sub(cn.InitedAt) >= p.opt.MaxConnAge {
+ return true
+ }
+
+ return false
+}
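
The staleness rule introduced here combines IdleTimeout and MaxConnAge. A standalone sketch of the same check, using illustrative timestamps rather than the pool's internal API:

package main

import (
	"fmt"
	"time"
)

// isStale mirrors the rule the new isStaleConn helper applies: a connection
// is dropped once it has sat idle longer than idleTimeout or has existed
// longer than maxConnAge; a zero duration disables either check.
func isStale(usedAt, initedAt time.Time, idleTimeout, maxConnAge time.Duration) bool {
	if idleTimeout == 0 && maxConnAge == 0 {
		return false
	}
	now := time.Now()
	if idleTimeout > 0 && now.Sub(usedAt) >= idleTimeout {
		return true
	}
	if maxConnAge > 0 && now.Sub(initedAt) >= maxConnAge {
		return true
	}
	return false
}

func main() {
	inited := time.Now().Add(-45 * time.Minute) // illustrative timestamps
	used := time.Now().Add(-10 * time.Second)
	fmt.Println(isStale(used, inited, 5*time.Minute, 30*time.Minute)) // true: exceeds MaxConnAge
}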
diff --git a/vendor/github.com/go-redis/redis/internal/proto/reader.go b/vendor/github.com/go-redis/redis/internal/proto/reader.go
index d5d695358..896b6f654 100644
--- a/vendor/github.com/go-redis/redis/internal/proto/reader.go
+++ b/vendor/github.com/go-redis/redis/internal/proto/reader.go
@@ -9,8 +9,6 @@ import (
"github.com/go-redis/redis/internal/util"
)
-const bytesAllocLimit = 1024 * 1024 // 1mb
-
const (
ErrorReply = '-'
StatusReply = '+'
@@ -32,40 +30,23 @@ func (e RedisError) Error() string { return string(e) }
type MultiBulkParse func(*Reader, int64) (interface{}, error)
type Reader struct {
- src *bufio.Reader
- buf []byte
+ rd *bufio.Reader
+ _buf []byte
}
func NewReader(rd io.Reader) *Reader {
return &Reader{
- src: bufio.NewReader(rd),
- buf: make([]byte, 4096),
+ rd: bufio.NewReader(rd),
+ _buf: make([]byte, 64),
}
}
func (r *Reader) Reset(rd io.Reader) {
- r.src.Reset(rd)
-}
-
-func (r *Reader) PeekBuffered() []byte {
- if n := r.src.Buffered(); n != 0 {
- b, _ := r.src.Peek(n)
- return b
- }
- return nil
-}
-
-func (r *Reader) ReadN(n int) ([]byte, error) {
- b, err := readN(r.src, r.buf, n)
- if err != nil {
- return nil, err
- }
- r.buf = b
- return b, nil
+ r.rd.Reset(rd)
}
func (r *Reader) ReadLine() ([]byte, error) {
- line, isPrefix, err := r.src.ReadLine()
+ line, isPrefix, err := r.rd.ReadLine()
if err != nil {
return nil, err
}
@@ -91,11 +72,11 @@ func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
case ErrorReply:
return nil, ParseErrorReply(line)
case StatusReply:
- return parseStatusValue(line), nil
+ return string(line[1:]), nil
case IntReply:
return util.ParseInt(line[1:], 10, 64)
case StringReply:
- return r.readTmpBytesValue(line)
+ return r.readStringReply(line)
case ArrayReply:
n, err := parseArrayLen(line)
if err != nil {
@@ -121,47 +102,42 @@ func (r *Reader) ReadIntReply() (int64, error) {
}
}
-func (r *Reader) ReadTmpBytesReply() ([]byte, error) {
+func (r *Reader) ReadString() (string, error) {
line, err := r.ReadLine()
if err != nil {
- return nil, err
+ return "", err
}
switch line[0] {
case ErrorReply:
- return nil, ParseErrorReply(line)
+ return "", ParseErrorReply(line)
case StringReply:
- return r.readTmpBytesValue(line)
+ return r.readStringReply(line)
case StatusReply:
- return parseStatusValue(line), nil
+ return string(line[1:]), nil
+ case IntReply:
+ return string(line[1:]), nil
default:
- return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+ return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
}
}
-func (r *Reader) ReadBytesReply() ([]byte, error) {
- b, err := r.ReadTmpBytesReply()
- if err != nil {
- return nil, err
+func (r *Reader) readStringReply(line []byte) (string, error) {
+ if isNilReply(line) {
+ return "", Nil
}
- cp := make([]byte, len(b))
- copy(cp, b)
- return cp, nil
-}
-func (r *Reader) ReadStringReply() (string, error) {
- b, err := r.ReadTmpBytesReply()
+ replyLen, err := strconv.Atoi(string(line[1:]))
if err != nil {
return "", err
}
- return string(b), nil
-}
-func (r *Reader) ReadFloatReply() (float64, error) {
- b, err := r.ReadTmpBytesReply()
+ b := make([]byte, replyLen+2)
+ _, err = io.ReadFull(r.rd, b)
if err != nil {
- return 0, err
+ return "", err
}
- return util.ParseFloat(b, 64)
+
+ return util.BytesToString(b[:replyLen]), nil
}
func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
@@ -219,7 +195,7 @@ func (r *Reader) ReadScanReply() ([]string, uint64, error) {
keys := make([]string, n)
for i := int64(0); i < n; i++ {
- key, err := r.ReadStringReply()
+ key, err := r.ReadString()
if err != nil {
return nil, 0, err
}
@@ -229,25 +205,8 @@ func (r *Reader) ReadScanReply() ([]string, uint64, error) {
return keys, cursor, err
}
-func (r *Reader) readTmpBytesValue(line []byte) ([]byte, error) {
- if isNilReply(line) {
- return nil, Nil
- }
-
- replyLen, err := strconv.Atoi(string(line[1:]))
- if err != nil {
- return nil, err
- }
-
- b, err := r.ReadN(replyLen + 2)
- if err != nil {
- return nil, err
- }
- return b[:replyLen], nil
-}
-
func (r *Reader) ReadInt() (int64, error) {
- b, err := r.ReadTmpBytesReply()
+ b, err := r.readTmpBytesReply()
if err != nil {
return 0, err
}
@@ -255,55 +214,62 @@ func (r *Reader) ReadInt() (int64, error) {
}
func (r *Reader) ReadUint() (uint64, error) {
- b, err := r.ReadTmpBytesReply()
+ b, err := r.readTmpBytesReply()
if err != nil {
return 0, err
}
return util.ParseUint(b, 10, 64)
}
-// --------------------------------------------------------------------
-
-func readN(r io.Reader, b []byte, n int) ([]byte, error) {
- if n == 0 && b == nil {
- return make([]byte, 0), nil
+func (r *Reader) ReadFloatReply() (float64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
}
+ return util.ParseFloat(b, 64)
+}
- if cap(b) >= n {
- b = b[:n]
- _, err := io.ReadFull(r, b)
- return b, err
+func (r *Reader) readTmpBytesReply() ([]byte, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
}
- b = b[:cap(b)]
-
- pos := 0
- for pos < n {
- diff := n - len(b)
- if diff > bytesAllocLimit {
- diff = bytesAllocLimit
- }
- b = append(b, make([]byte, diff)...)
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StringReply:
+ return r._readTmpBytesReply(line)
+ case StatusReply:
+ return line[1:], nil
+ default:
+ return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+ }
+}
- nn, err := io.ReadFull(r, b[pos:])
- if err != nil {
- return nil, err
- }
- pos += nn
+func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
+ if isNilReply(line) {
+ return nil, Nil
}
- return b, nil
-}
+ replyLen, err := strconv.Atoi(string(line[1:]))
+ if err != nil {
+ return nil, err
+ }
-func formatInt(n int64) string {
- return strconv.FormatInt(n, 10)
-}
+ buf := r.buf(replyLen + 2)
+ _, err = io.ReadFull(r.rd, buf)
+ if err != nil {
+ return nil, err
+ }
-func formatUint(u uint64) string {
- return strconv.FormatUint(u, 10)
+ return buf[:replyLen], nil
}
-func formatFloat(f float64) string {
- return strconv.FormatFloat(f, 'f', -1, 64)
+func (r *Reader) buf(n int) []byte {
+ if d := n - cap(r._buf); d > 0 {
+ r._buf = append(r._buf, make([]byte, d)...)
+ }
+ return r._buf[:n]
}
func isNilReply(b []byte) bool {
@@ -316,10 +282,6 @@ func ParseErrorReply(line []byte) error {
return RedisError(string(line[1:]))
}
-func parseStatusValue(line []byte) []byte {
- return line[1:]
-}
-
func parseArrayLen(line []byte) (int64, error) {
if isNilReply(line) {
return 0, Nil
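
The new readStringReply reads a length header and then the payload plus the trailing CRLF in a single ReadFull. A self-contained sketch of that parsing step for a RESP bulk string; nil replies ("$-1") are not handled here:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// readBulkString parses a RESP bulk string such as "$5\r\nhello\r\n":
// read the "$<len>" header line, then the payload plus CRLF at once.
func readBulkString(rd *bufio.Reader) (string, error) {
	header, err := rd.ReadString('\n')
	if err != nil {
		return "", err
	}
	header = strings.TrimRight(header, "\r\n")
	if len(header) == 0 || header[0] != '$' {
		return "", fmt.Errorf("not a bulk string: %q", header)
	}
	n, err := strconv.Atoi(header[1:])
	if err != nil {
		return "", err
	}
	buf := make([]byte, n+2) // payload + trailing CRLF
	if _, err := io.ReadFull(rd, buf); err != nil {
		return "", err
	}
	return string(buf[:n]), nil
}

func main() {
	rd := bufio.NewReader(strings.NewReader("$5\r\nhello\r\n"))
	fmt.Println(readBulkString(rd)) // hello <nil>
}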
diff --git a/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go b/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go
deleted file mode 100644
index cc4014fb4..000000000
--- a/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package proto
-
-import (
- "encoding"
- "fmt"
- "strconv"
-)
-
-type WriteBuffer struct {
- b []byte
-}
-
-func NewWriteBuffer() *WriteBuffer {
- return &WriteBuffer{
- b: make([]byte, 0, 4096),
- }
-}
-
-func (w *WriteBuffer) Len() int { return len(w.b) }
-func (w *WriteBuffer) Bytes() []byte { return w.b }
-func (w *WriteBuffer) Reset() { w.b = w.b[:0] }
-
-func (w *WriteBuffer) Append(args []interface{}) error {
- w.b = append(w.b, ArrayReply)
- w.b = strconv.AppendUint(w.b, uint64(len(args)), 10)
- w.b = append(w.b, '\r', '\n')
-
- for _, arg := range args {
- if err := w.append(arg); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (w *WriteBuffer) append(val interface{}) error {
- switch v := val.(type) {
- case nil:
- w.AppendString("")
- case string:
- w.AppendString(v)
- case []byte:
- w.AppendBytes(v)
- case int:
- w.AppendString(formatInt(int64(v)))
- case int8:
- w.AppendString(formatInt(int64(v)))
- case int16:
- w.AppendString(formatInt(int64(v)))
- case int32:
- w.AppendString(formatInt(int64(v)))
- case int64:
- w.AppendString(formatInt(v))
- case uint:
- w.AppendString(formatUint(uint64(v)))
- case uint8:
- w.AppendString(formatUint(uint64(v)))
- case uint16:
- w.AppendString(formatUint(uint64(v)))
- case uint32:
- w.AppendString(formatUint(uint64(v)))
- case uint64:
- w.AppendString(formatUint(v))
- case float32:
- w.AppendString(formatFloat(float64(v)))
- case float64:
- w.AppendString(formatFloat(v))
- case bool:
- if v {
- w.AppendString("1")
- } else {
- w.AppendString("0")
- }
- case encoding.BinaryMarshaler:
- b, err := v.MarshalBinary()
- if err != nil {
- return err
- }
- w.AppendBytes(b)
- default:
- return fmt.Errorf(
- "redis: can't marshal %T (consider implementing encoding.BinaryMarshaler)", val)
- }
- return nil
-}
-
-func (w *WriteBuffer) AppendString(s string) {
- w.b = append(w.b, StringReply)
- w.b = strconv.AppendUint(w.b, uint64(len(s)), 10)
- w.b = append(w.b, '\r', '\n')
- w.b = append(w.b, s...)
- w.b = append(w.b, '\r', '\n')
-}
-
-func (w *WriteBuffer) AppendBytes(p []byte) {
- w.b = append(w.b, StringReply)
- w.b = strconv.AppendUint(w.b, uint64(len(p)), 10)
- w.b = append(w.b, '\r', '\n')
- w.b = append(w.b, p...)
- w.b = append(w.b, '\r', '\n')
-}
diff --git a/vendor/github.com/go-redis/redis/internal/proto/writer.go b/vendor/github.com/go-redis/redis/internal/proto/writer.go
new file mode 100644
index 000000000..d106ce0ee
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/proto/writer.go
@@ -0,0 +1,159 @@
+package proto
+
+import (
+ "bufio"
+ "encoding"
+ "fmt"
+ "io"
+ "strconv"
+
+ "github.com/go-redis/redis/internal/util"
+)
+
+type Writer struct {
+ wr *bufio.Writer
+
+ lenBuf []byte
+ numBuf []byte
+}
+
+func NewWriter(wr io.Writer) *Writer {
+ return &Writer{
+ wr: bufio.NewWriter(wr),
+
+ lenBuf: make([]byte, 64),
+ numBuf: make([]byte, 64),
+ }
+}
+
+func (w *Writer) WriteArgs(args []interface{}) error {
+ err := w.wr.WriteByte(ArrayReply)
+ if err != nil {
+ return err
+ }
+
+ err = w.writeLen(len(args))
+ if err != nil {
+ return err
+ }
+
+ for _, arg := range args {
+ err := w.writeArg(arg)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *Writer) writeLen(n int) error {
+ w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
+ w.lenBuf = append(w.lenBuf, '\r', '\n')
+ _, err := w.wr.Write(w.lenBuf)
+ return err
+}
+
+func (w *Writer) writeArg(v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return w.string("")
+ case string:
+ return w.string(v)
+ case []byte:
+ return w.bytes(v)
+ case int:
+ return w.int(int64(v))
+ case int8:
+ return w.int(int64(v))
+ case int16:
+ return w.int(int64(v))
+ case int32:
+ return w.int(int64(v))
+ case int64:
+ return w.int(v)
+ case uint:
+ return w.uint(uint64(v))
+ case uint8:
+ return w.uint(uint64(v))
+ case uint16:
+ return w.uint(uint64(v))
+ case uint32:
+ return w.uint(uint64(v))
+ case uint64:
+ return w.uint(v)
+ case float32:
+ return w.float(float64(v))
+ case float64:
+ return w.float(v)
+ case bool:
+ if v {
+ return w.int(1)
+ } else {
+ return w.int(0)
+ }
+ case encoding.BinaryMarshaler:
+ b, err := v.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ return w.bytes(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
+ }
+}
+
+func (w *Writer) bytes(b []byte) error {
+ err := w.wr.WriteByte(StringReply)
+ if err != nil {
+ return err
+ }
+
+ err = w.writeLen(len(b))
+ if err != nil {
+ return err
+ }
+
+ _, err = w.wr.Write(b)
+ if err != nil {
+ return err
+ }
+
+ return w.crlf()
+}
+
+func (w *Writer) string(s string) error {
+ return w.bytes(util.StringToBytes(s))
+}
+
+func (w *Writer) uint(n uint64) error {
+ w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) int(n int64) error {
+ w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) float(f float64) error {
+ w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) crlf() error {
+ err := w.wr.WriteByte('\r')
+ if err != nil {
+ return err
+ }
+ return w.wr.WriteByte('\n')
+}
+
+func (w *Writer) Reset(wr io.Writer) {
+ w.wr.Reset(wr)
+}
+
+func (w *Writer) Flush() error {
+ return w.wr.Flush()
+}
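
The new Writer emits the RESP wire format straight into a bufio.Writer. A small sketch of the same encoding for string arguments, built against an in-memory byte slice rather than the internal Writer type:

package main

import (
	"fmt"
	"strconv"
)

// encodeCommand shows the wire format WriteArgs produces: an array header
// followed by one bulk string per argument, each terminated by CRLF.
func encodeCommand(args ...string) []byte {
	b := []byte{'*'}
	b = strconv.AppendInt(b, int64(len(args)), 10)
	b = append(b, '\r', '\n')
	for _, arg := range args {
		b = append(b, '$')
		b = strconv.AppendInt(b, int64(len(arg)), 10)
		b = append(b, '\r', '\n')
		b = append(b, arg...)
		b = append(b, '\r', '\n')
	}
	return b
}

func main() {
	fmt.Printf("%q\n", encodeCommand("SET", "key", "value"))
	// "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n"
}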
diff --git a/vendor/github.com/go-redis/redis/internal/util/safe.go b/vendor/github.com/go-redis/redis/internal/util/safe.go
index cd8918330..1b3060ebc 100644
--- a/vendor/github.com/go-redis/redis/internal/util/safe.go
+++ b/vendor/github.com/go-redis/redis/internal/util/safe.go
@@ -5,3 +5,7 @@ package util
func BytesToString(b []byte) string {
return string(b)
}
+
+func StringToBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/internal/util/unsafe.go
index 93a89c55c..c9868aac2 100644
--- a/vendor/github.com/go-redis/redis/internal/util/unsafe.go
+++ b/vendor/github.com/go-redis/redis/internal/util/unsafe.go
@@ -10,3 +10,13 @@ import (
func BytesToString(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
+
+// StringToBytes converts string to byte slice.
+func StringToBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
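
A sketch of the zero-copy conversion used on the unsafe build path; the resulting slice aliases the string's backing memory, so it must be treated as read-only and must not outlive the string:

package main

import (
	"fmt"
	"unsafe"
)

// stringToBytes mirrors the unsafe-build variant added above. Never write
// to the returned slice: strings are immutable and may live in read-only memory.
func stringToBytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}

func main() {
	b := stringToBytes("PING")
	fmt.Println(len(b), cap(b), string(b)) // 4 4 PING
}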
diff --git a/vendor/github.com/go-redis/redis/options.go b/vendor/github.com/go-redis/redis/options.go
index 35ce06195..2b5bcb58a 100644
--- a/vendor/github.com/go-redis/redis/options.go
+++ b/vendor/github.com/go-redis/redis/options.go
@@ -59,16 +59,24 @@ type Options struct {
// Maximum number of socket connections.
// Default is 10 connections per every CPU as reported by runtime.NumCPU.
PoolSize int
+ // Minimum number of idle connections, which is useful when establishing
+ // a new connection is slow.
+ MinIdleConns int
+ // Connection age at which client retires (closes) the connection.
+ // Default is to not close aged connections.
+ MaxConnAge time.Duration
// Amount of time client waits for connection if all connections
// are busy before returning an error.
// Default is ReadTimeout + 1 second.
PoolTimeout time.Duration
// Amount of time after which client closes idle connections.
// Should be less than server's timeout.
- // Default is 5 minutes.
+ // Default is 5 minutes. -1 disables idle timeout check.
IdleTimeout time.Duration
- // Frequency of idle checks.
- // Default is 1 minute. -1 disables idle check.
+ // Frequency of idle checks made by the idle connections reaper.
+ // Default is 1 minute. -1 disables the idle connections reaper,
+ // but idle connections are still discarded by the client
+ // if IdleTimeout is set.
IdleCheckFrequency time.Duration
// Enables read only queries on slave nodes.
@@ -84,12 +92,15 @@ func (opt *Options) init() {
}
if opt.Dialer == nil {
opt.Dialer = func() (net.Conn, error) {
- conn, err := net.DialTimeout(opt.Network, opt.Addr, opt.DialTimeout)
- if opt.TLSConfig == nil || err != nil {
- return conn, err
+ netDialer := &net.Dialer{
+ Timeout: opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if opt.TLSConfig == nil {
+ return netDialer.Dial(opt.Network, opt.Addr)
+ } else {
+ return tls.DialWithDialer(netDialer, opt.Network, opt.Addr, opt.TLSConfig)
}
- t := tls.Client(conn, opt.TLSConfig)
- return t, t.Handshake()
}
}
if opt.PoolSize == 0 {
@@ -192,6 +203,8 @@ func newConnPool(opt *Options) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: opt.Dialer,
PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
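
A minimal client configuration using the new MinIdleConns and MaxConnAge options; the address and durations below are illustrative, not defaults:

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{
		Addr:         "localhost:6379", // assumed local server
		MinIdleConns: 4,                // keep a few warm connections ready
		MaxConnAge:   30 * time.Minute, // retire connections after 30 minutes
		IdleTimeout:  5 * time.Minute,
	})
	defer client.Close()

	pong, err := client.Ping().Result()
	fmt.Println(pong, err)
}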
diff --git a/vendor/github.com/go-redis/redis/parser.go b/vendor/github.com/go-redis/redis/parser.go
deleted file mode 100644
index f0dc67f0e..000000000
--- a/vendor/github.com/go-redis/redis/parser.go
+++ /dev/null
@@ -1,394 +0,0 @@
-package redis
-
-import (
- "fmt"
- "net"
- "strconv"
- "time"
-
- "github.com/go-redis/redis/internal/proto"
-)
-
-// Implements proto.MultiBulkParse
-func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- vals := make([]interface{}, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(sliceParser)
- if err != nil {
- if err == Nil {
- vals = append(vals, nil)
- continue
- }
- if err, ok := err.(proto.RedisError); ok {
- vals = append(vals, err)
- continue
- }
- return nil, err
- }
-
- switch v := v.(type) {
- case []byte:
- vals = append(vals, string(v))
- default:
- vals = append(vals, v)
- }
- }
- return vals, nil
-}
-
-// Implements proto.MultiBulkParse
-func boolSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- bools := make([]bool, 0, n)
- for i := int64(0); i < n; i++ {
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- bools = append(bools, n == 1)
- }
- return bools, nil
-}
-
-// Implements proto.MultiBulkParse
-func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- ss := make([]string, 0, n)
- for i := int64(0); i < n; i++ {
- s, err := rd.ReadStringReply()
- if err == Nil {
- ss = append(ss, "")
- } else if err != nil {
- return nil, err
- } else {
- ss = append(ss, s)
- }
- }
- return ss, nil
-}
-
-// Implements proto.MultiBulkParse
-func stringStringMapParser(rd *proto.Reader, n int64) (interface{}, error) {
- m := make(map[string]string, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadStringReply()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadStringReply()
- if err != nil {
- return nil, err
- }
-
- m[key] = value
- }
- return m, nil
-}
-
-// Implements proto.MultiBulkParse
-func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) {
- m := make(map[string]int64, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadStringReply()
- if err != nil {
- return nil, err
- }
-
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- m[key] = n
- }
- return m, nil
-}
-
-// Implements proto.MultiBulkParse
-func stringStructMapParser(rd *proto.Reader, n int64) (interface{}, error) {
- m := make(map[string]struct{}, n)
- for i := int64(0); i < n; i++ {
- key, err := rd.ReadStringReply()
- if err != nil {
- return nil, err
- }
-
- m[key] = struct{}{}
- }
- return m, nil
-}
-
-// Implements proto.MultiBulkParse
-func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- zz := make([]Z, n/2)
- for i := int64(0); i < n; i += 2 {
- var err error
-
- z := &zz[i/2]
-
- z.Member, err = rd.ReadStringReply()
- if err != nil {
- return nil, err
- }
-
- z.Score, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
- return zz, nil
-}
-
-// Implements proto.MultiBulkParse
-func clusterSlotsParser(rd *proto.Reader, n int64) (interface{}, error) {
- slots := make([]ClusterSlot, n)
- for i := 0; i < len(slots); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 2 {
- err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
- return nil, err
- }
-
- start, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- end, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- nodes := make([]ClusterNode, n-2)
- for j := 0; j < len(nodes); j++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 && n != 3 {
- err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
- return nil, err
- }
-
- ip, err := rd.ReadStringReply()
- if err != nil {
- return nil, err
- }
-
- port, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- nodes[j].Addr = net.JoinHostPort(ip, strconv.FormatInt(port, 10))
-
- if n == 3 {
- id, err := rd.ReadStringReply()
- if err != nil {
- return nil, err
- }
- nodes[j].Id = id
- }
- }
-
- slots[i] = ClusterSlot{
- Start: int(start),
- End: int(end),
- Nodes: nodes,
- }
- }
- return slots, nil
-}
-
-func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- var loc GeoLocation
- var err error
-
- loc.Name, err = rd.ReadStringReply()
- if err != nil {
- return nil, err
- }
- if q.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithGeoHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithCoord {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 {
- return nil, fmt.Errorf("got %d coordinates, expected 2", n)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
-
- return &loc, nil
- }
-}
-
-func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- locs := make([]GeoLocation, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(newGeoLocationParser(q))
- if err != nil {
- return nil, err
- }
- switch vv := v.(type) {
- case []byte:
- locs = append(locs, GeoLocation{
- Name: string(vv),
- })
- case *GeoLocation:
- locs = append(locs, *vv)
- default:
- return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
- }
- }
- return locs, nil
- }
-}
-
-func geoPosParser(rd *proto.Reader, n int64) (interface{}, error) {
- var pos GeoPos
- var err error
-
- pos.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- pos.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- return &pos, nil
-}
-
-func geoPosSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- positions := make([]*GeoPos, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(geoPosParser)
- if err != nil {
- if err == Nil {
- positions = append(positions, nil)
- continue
- }
- return nil, err
- }
- switch v := v.(type) {
- case *GeoPos:
- positions = append(positions, v)
- default:
- return nil, fmt.Errorf("got %T, expected *GeoPos", v)
- }
- }
- return positions, nil
-}
-
-func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- var cmd CommandInfo
- var err error
-
- if n != 6 {
- return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n)
- }
-
- cmd.Name, err = rd.ReadStringReply()
- if err != nil {
- return nil, err
- }
-
- arity, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.Arity = int8(arity)
-
- flags, err := rd.ReadReply(stringSliceParser)
- if err != nil {
- return nil, err
- }
- cmd.Flags = flags.([]string)
-
- firstKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.FirstKeyPos = int8(firstKeyPos)
-
- lastKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.LastKeyPos = int8(lastKeyPos)
-
- stepCount, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.StepCount = int8(stepCount)
-
- for _, flag := range cmd.Flags {
- if flag == "readonly" {
- cmd.ReadOnly = true
- break
- }
- }
-
- return &cmd, nil
-}
-
-// Implements proto.MultiBulkParse
-func commandInfoSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- m := make(map[string]*CommandInfo, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(commandInfoParser)
- if err != nil {
- return nil, err
- }
- vv := v.(*CommandInfo)
- m[vv.Name] = vv
-
- }
- return m, nil
-}
-
-// Implements proto.MultiBulkParse
-func timeParser(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d elements, expected 2", n)
- }
-
- sec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- microsec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- return time.Unix(sec, microsec*1000), nil
-}
diff --git a/vendor/github.com/go-redis/redis/pipeline.go b/vendor/github.com/go-redis/redis/pipeline.go
index 9349ef553..ba852283e 100644
--- a/vendor/github.com/go-redis/redis/pipeline.go
+++ b/vendor/github.com/go-redis/redis/pipeline.go
@@ -31,6 +31,7 @@ type Pipeline struct {
closed bool
}
+// Process queues the cmd for later execution.
func (c *Pipeline) Process(cmd Cmder) error {
c.mu.Lock()
c.cmds = append(c.cmds, cmd)
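
Process only queues the command; nothing is sent until the pipeline executes. A short usage sketch with an assumed counter key and local address:

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis"
)

// bumpCounter queues two commands and sends them in one round trip; each
// pipe call only records the command until the pipeline is executed.
func bumpCounter(client *redis.Client) error {
	_, err := client.Pipelined(func(pipe redis.Pipeliner) error {
		pipe.Incr("counter") // "counter" is a placeholder key
		pipe.Expire("counter", time.Hour)
		return nil
	})
	return err
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()
	fmt.Println(bumpCounter(client))
}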
diff --git a/vendor/github.com/go-redis/redis/pubsub.go b/vendor/github.com/go-redis/redis/pubsub.go
index b56728f3e..b08f34ad2 100644
--- a/vendor/github.com/go-redis/redis/pubsub.go
+++ b/vendor/github.com/go-redis/redis/pubsub.go
@@ -2,20 +2,20 @@ package redis
import (
"fmt"
- "net"
"sync"
"time"
"github.com/go-redis/redis/internal"
"github.com/go-redis/redis/internal/pool"
+ "github.com/go-redis/redis/internal/proto"
)
-// PubSub implements Pub/Sub commands as described in
-// http://redis.io/topics/pubsub. It's NOT safe for concurrent use by
-// multiple goroutines.
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
//
-// PubSub automatically resubscribes to the channels and patterns
-// when Redis becomes unavailable.
+// PubSub automatically reconnects to Redis Server and resubscribes
+// to the channels in case of network errors.
type PubSub struct {
opt *Options
@@ -27,11 +27,17 @@ type PubSub struct {
channels map[string]struct{}
patterns map[string]struct{}
closed bool
+ exit chan struct{}
cmd *Cmd
chOnce sync.Once
ch chan *Message
+ ping chan struct{}
+}
+
+func (c *PubSub) init() {
+ c.exit = make(chan struct{})
}
func (c *PubSub) conn() (*pool.Conn, error) {
@@ -41,7 +47,7 @@ func (c *PubSub) conn() (*pool.Conn, error) {
return cn, err
}
-func (c *PubSub) _conn(channels []string) (*pool.Conn, error) {
+func (c *PubSub) _conn(newChannels []string) (*pool.Conn, error) {
if c.closed {
return nil, pool.ErrClosed
}
@@ -50,6 +56,9 @@ func (c *PubSub) _conn(channels []string) (*pool.Conn, error) {
return c.cn, nil
}
+ channels := mapKeys(c.channels)
+ channels = append(channels, newChannels...)
+
cn, err := c.newConn(channels)
if err != nil {
return nil, err
@@ -64,61 +73,81 @@ func (c *PubSub) _conn(channels []string) (*pool.Conn, error) {
return cn, nil
}
+func (c *PubSub) writeCmd(cn *pool.Conn, cmd Cmder) error {
+ return cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+}
+
func (c *PubSub) resubscribe(cn *pool.Conn) error {
var firstErr error
+
if len(c.channels) > 0 {
- channels := make([]string, len(c.channels))
- i := 0
- for channel := range c.channels {
- channels[i] = channel
- i++
- }
- if err := c._subscribe(cn, "subscribe", channels...); err != nil && firstErr == nil {
+ err := c._subscribe(cn, "subscribe", mapKeys(c.channels))
+ if err != nil && firstErr == nil {
firstErr = err
}
}
+
if len(c.patterns) > 0 {
- patterns := make([]string, len(c.patterns))
- i := 0
- for pattern := range c.patterns {
- patterns[i] = pattern
- i++
- }
- if err := c._subscribe(cn, "psubscribe", patterns...); err != nil && firstErr == nil {
+ err := c._subscribe(cn, "psubscribe", mapKeys(c.patterns))
+ if err != nil && firstErr == nil {
firstErr = err
}
}
+
return firstErr
}
-func (c *PubSub) _subscribe(cn *pool.Conn, redisCmd string, channels ...string) error {
- args := make([]interface{}, 1+len(channels))
- args[0] = redisCmd
- for i, channel := range channels {
- args[1+i] = channel
+func mapKeys(m map[string]struct{}) []string {
+ s := make([]string, len(m))
+ i := 0
+ for k := range m {
+ s[i] = k
+ i++
}
- cmd := NewSliceCmd(args...)
+ return s
+}
- cn.SetWriteTimeout(c.opt.WriteTimeout)
- return writeCmd(cn, cmd)
+func (c *PubSub) _subscribe(
+ cn *pool.Conn, redisCmd string, channels []string,
+) error {
+ args := make([]interface{}, 0, 1+len(channels))
+ args = append(args, redisCmd)
+ for _, channel := range channels {
+ args = append(args, channel)
+ }
+ cmd := NewSliceCmd(args...)
+ return c.writeCmd(cn, cmd)
}
-func (c *PubSub) releaseConn(cn *pool.Conn, err error) {
+func (c *PubSub) releaseConn(cn *pool.Conn, err error, allowTimeout bool) {
c.mu.Lock()
- c._releaseConn(cn, err)
+ c._releaseConn(cn, err, allowTimeout)
c.mu.Unlock()
}
-func (c *PubSub) _releaseConn(cn *pool.Conn, err error) {
+func (c *PubSub) _releaseConn(cn *pool.Conn, err error, allowTimeout bool) {
if c.cn != cn {
return
}
- if internal.IsBadConn(err, true) {
- _ = c.closeTheCn()
+ if internal.IsBadConn(err, allowTimeout) {
+ c._reconnect(err)
}
}
-func (c *PubSub) closeTheCn() error {
+func (c *PubSub) _reconnect(reason error) {
+ _ = c._closeTheCn(reason)
+ _, _ = c._conn(nil)
+}
+
+func (c *PubSub) _closeTheCn(reason error) error {
+ if c.cn == nil {
+ return nil
+ }
+ if !c.closed {
+ internal.Logf("redis: discarding bad PubSub connection: %s", reason)
+ }
err := c.closeConn(c.cn)
c.cn = nil
return err
@@ -132,25 +161,25 @@ func (c *PubSub) Close() error {
return pool.ErrClosed
}
c.closed = true
+ close(c.exit)
- if c.cn != nil {
- return c.closeTheCn()
- }
- return nil
+ err := c._closeTheCn(pool.ErrClosed)
+ return err
}
// Subscribe the client to the specified channels. It returns
// empty subscription if there are no channels.
func (c *PubSub) Subscribe(channels ...string) error {
c.mu.Lock()
+ defer c.mu.Unlock()
+
err := c.subscribe("subscribe", channels...)
if c.channels == nil {
c.channels = make(map[string]struct{})
}
- for _, channel := range channels {
- c.channels[channel] = struct{}{}
+ for _, s := range channels {
+ c.channels[s] = struct{}{}
}
- c.mu.Unlock()
return err
}
@@ -158,14 +187,15 @@ func (c *PubSub) Subscribe(channels ...string) error {
// empty subscription if there are no patterns.
func (c *PubSub) PSubscribe(patterns ...string) error {
c.mu.Lock()
+ defer c.mu.Unlock()
+
err := c.subscribe("psubscribe", patterns...)
if c.patterns == nil {
c.patterns = make(map[string]struct{})
}
- for _, pattern := range patterns {
- c.patterns[pattern] = struct{}{}
+ for _, s := range patterns {
+ c.patterns[s] = struct{}{}
}
- c.mu.Unlock()
return err
}
@@ -173,11 +203,12 @@ func (c *PubSub) PSubscribe(patterns ...string) error {
// them if none is given.
func (c *PubSub) Unsubscribe(channels ...string) error {
c.mu.Lock()
- err := c.subscribe("unsubscribe", channels...)
+ defer c.mu.Unlock()
+
for _, channel := range channels {
delete(c.channels, channel)
}
- c.mu.Unlock()
+ err := c.subscribe("unsubscribe", channels...)
return err
}
@@ -185,11 +216,12 @@ func (c *PubSub) Unsubscribe(channels ...string) error {
// them if none is given.
func (c *PubSub) PUnsubscribe(patterns ...string) error {
c.mu.Lock()
- err := c.subscribe("punsubscribe", patterns...)
+ defer c.mu.Unlock()
+
for _, pattern := range patterns {
delete(c.patterns, pattern)
}
- c.mu.Unlock()
+ err := c.subscribe("punsubscribe", patterns...)
return err
}
@@ -199,8 +231,8 @@ func (c *PubSub) subscribe(redisCmd string, channels ...string) error {
return err
}
- err = c._subscribe(cn, redisCmd, channels...)
- c._releaseConn(cn, err)
+ err = c._subscribe(cn, redisCmd, channels)
+ c._releaseConn(cn, err, false)
return err
}
@@ -216,9 +248,8 @@ func (c *PubSub) Ping(payload ...string) error {
return err
}
- cn.SetWriteTimeout(c.opt.WriteTimeout)
- err = writeCmd(cn, cmd)
- c.releaseConn(cn, err)
+ err = c.writeCmd(cn, cmd)
+ c.releaseConn(cn, err, false)
return err
}
@@ -297,8 +328,8 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
}
// ReceiveTimeout acts like Receive but returns an error if message
-// is not received in time. This is low-level API and most clients
-// should use ReceiveMessage.
+// is not received in time. This is a low-level API and in most cases
+// Channel should be used instead.
func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
if c.cmd == nil {
c.cmd = NewCmd()
@@ -309,9 +340,11 @@ func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
return nil, err
}
- cn.SetReadTimeout(timeout)
- err = c.cmd.readReply(cn)
- c.releaseConn(cn, err)
+ err = cn.WithReader(timeout, func(rd *proto.Reader) error {
+ return c.cmd.readReply(rd)
+ })
+
+ c.releaseConn(cn, err, timeout > 0)
if err != nil {
return nil, err
}
@@ -320,49 +353,23 @@ func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
}
// Receive returns a message as a Subscription, Message, Pong or error.
-// See PubSub example for details. This is low-level API and most clients
-// should use ReceiveMessage.
+// See PubSub example for details. This is a low-level API and in most cases
+// Channel should be used instead.
func (c *PubSub) Receive() (interface{}, error) {
return c.ReceiveTimeout(0)
}
-// ReceiveMessage returns a Message or error ignoring Subscription or Pong
-// messages. It automatically reconnects to Redis Server and resubscribes
-// to channels in case of network errors.
+// ReceiveMessage returns a Message or an error, ignoring Subscription and Pong
+// messages. This is a low-level API and in most cases Channel should be used
+// instead.
func (c *PubSub) ReceiveMessage() (*Message, error) {
- return c.receiveMessage(5 * time.Second)
-}
-
-func (c *PubSub) receiveMessage(timeout time.Duration) (*Message, error) {
- var errNum uint
for {
- msgi, err := c.ReceiveTimeout(timeout)
+ msg, err := c.Receive()
if err != nil {
- if !internal.IsNetworkError(err) {
- return nil, err
- }
-
- errNum++
- if errNum < 3 {
- if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
- err := c.Ping()
- if err != nil {
- internal.Logf("PubSub.Ping failed: %s", err)
- }
- }
- } else {
- // 3 consequent errors - connection is broken or
- // Redis Server is down.
- // Sleep to not exceed max number of open connections.
- time.Sleep(time.Second)
- }
- continue
+ return nil, err
}
- // Reset error number, because we received a message.
- errNum = 0
-
- switch msg := msgi.(type) {
+ switch msg := msg.(type) {
case *Subscription:
// Ignore.
case *Pong:
@@ -370,30 +377,93 @@ func (c *PubSub) receiveMessage(timeout time.Duration) (*Message, error) {
case *Message:
return msg, nil
default:
- return nil, fmt.Errorf("redis: unknown message: %T", msgi)
+ err := fmt.Errorf("redis: unknown message: %T", msg)
+ return nil, err
}
}
}
// Channel returns a Go channel for concurrently receiving messages.
-// The channel is closed with PubSub. Receive or ReceiveMessage APIs
-// can not be used after channel is created.
+// It periodically sends Ping messages to test connection health.
+// The channel is closed together with PubSub. Receive* APIs cannot be used
+// after the channel is created.
func (c *PubSub) Channel() <-chan *Message {
- c.chOnce.Do(func() {
- c.ch = make(chan *Message, 100)
- go func() {
- for {
- msg, err := c.ReceiveMessage()
- if err != nil {
- if err == pool.ErrClosed {
- break
- }
- continue
+ c.chOnce.Do(c.initChannel)
+ return c.ch
+}
+
+func (c *PubSub) initChannel() {
+ c.ch = make(chan *Message, 100)
+ c.ping = make(chan struct{}, 10)
+
+ go func() {
+ var errCount int
+ for {
+ msg, err := c.Receive()
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.ch)
+ return
}
+ if errCount > 0 {
+ time.Sleep(c.retryBackoff(errCount))
+ }
+ errCount++
+ continue
+ }
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
c.ch <- msg
+ default:
+ internal.Logf("redis: unknown message: %T", msg)
}
- close(c.ch)
- }()
- })
- return c.ch
+ }
+ }()
+
+ go func() {
+ const timeout = 5 * time.Second
+
+ timer := time.NewTimer(timeout)
+ timer.Stop()
+
+ healthy := true
+ var pingErr error
+ for {
+ timer.Reset(timeout)
+ select {
+ case <-c.ping:
+ healthy = true
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ pingErr = c.Ping()
+ if healthy {
+ healthy = false
+ } else {
+ c.mu.Lock()
+ c._reconnect(pingErr)
+ c.mu.Unlock()
+ }
+ case <-c.exit:
+ return
+ }
+ }
+ }()
+}
+
+func (c *PubSub) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
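
A usage sketch of the channel-based API, which now also drives the ping-based health check added above; the channel name and address are placeholders:

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	pubsub := client.Subscribe("mychannel")
	defer pubsub.Close()

	// Channel starts the receive loop and the periodic ping; reconnection
	// on network errors is handled internally.
	for msg := range pubsub.Channel() {
		fmt.Println(msg.Channel, msg.Payload)
	}
}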
diff --git a/vendor/github.com/go-redis/redis/redis.go b/vendor/github.com/go-redis/redis/redis.go
index beb632e1e..3e72bf060 100644
--- a/vendor/github.com/go-redis/redis/redis.go
+++ b/vendor/github.com/go-redis/redis/redis.go
@@ -50,7 +50,7 @@ func (c *baseClient) newConn() (*pool.Conn, error) {
return nil, err
}
- if !cn.Inited {
+ if cn.InitedAt.IsZero() {
if err := c.initConn(cn); err != nil {
_ = c.connPool.CloseConn(cn)
return nil, err
@@ -66,7 +66,7 @@ func (c *baseClient) getConn() (*pool.Conn, error) {
return nil, err
}
- if !cn.Inited {
+ if cn.InitedAt.IsZero() {
err := c.initConn(cn)
if err != nil {
c.connPool.Remove(cn)
@@ -88,7 +88,7 @@ func (c *baseClient) releaseConn(cn *pool.Conn, err error) bool {
}
func (c *baseClient) initConn(cn *pool.Conn) error {
- cn.Inited = true
+ cn.InitedAt = time.Now()
if c.opt.Password == "" &&
c.opt.DB == 0 &&
@@ -123,8 +123,17 @@ func (c *baseClient) initConn(cn *pool.Conn) error {
return nil
}
+// Do creates a Cmd from the args and processes the cmd.
+func (c *baseClient) Do(args ...interface{}) *Cmd {
+ cmd := NewCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
// WrapProcess wraps function that processes Redis commands.
-func (c *baseClient) WrapProcess(fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error) {
+func (c *baseClient) WrapProcess(
+ fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error,
+) {
c.process = fn(c.process)
}
@@ -147,8 +156,10 @@ func (c *baseClient) defaultProcess(cmd Cmder) error {
return err
}
- cn.SetWriteTimeout(c.opt.WriteTimeout)
- if err := writeCmd(cn, cmd); err != nil {
+ err = cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+ if err != nil {
c.releaseConn(cn, err)
cmd.setErr(err)
if internal.IsRetryableError(err, true) {
@@ -157,8 +168,9 @@ func (c *baseClient) defaultProcess(cmd Cmder) error {
return err
}
- cn.SetReadTimeout(c.cmdTimeout(cmd))
- err = cmd.readReply(cn)
+ err = cn.WithReader(c.cmdTimeout(cmd), func(rd *proto.Reader) error {
+ return cmd.readReply(rd)
+ })
c.releaseConn(cn, err)
if err != nil && internal.IsRetryableError(err, cmd.readTimeout() == nil) {
continue
@@ -176,9 +188,8 @@ func (c *baseClient) retryBackoff(attempt int) time.Duration {
func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
if timeout := cmd.readTimeout(); timeout != nil {
- return *timeout
+ return readTimeout(*timeout)
}
-
return c.opt.ReadTimeout
}
@@ -244,24 +255,27 @@ func (c *baseClient) generalProcessPipeline(cmds []Cmder, p pipelineProcessor) e
break
}
}
- return firstCmdsErr(cmds)
+ return cmdsFirstErr(cmds)
}
func (c *baseClient) pipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) {
- cn.SetWriteTimeout(c.opt.WriteTimeout)
- if err := writeCmd(cn, cmds...); err != nil {
+ err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmds...)
+ })
+ if err != nil {
setCmdsErr(cmds, err)
return true, err
}
- // Set read timeout for all commands.
- cn.SetReadTimeout(c.opt.ReadTimeout)
- return true, pipelineReadCmds(cn, cmds)
+ err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ })
+ return true, err
}
-func pipelineReadCmds(cn *pool.Conn, cmds []Cmder) error {
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
for _, cmd := range cmds {
- err := cmd.readReply(cn)
+ err := cmd.readReply(rd)
if err != nil && !internal.IsRedisError(err) {
return err
}
@@ -270,47 +284,50 @@ func pipelineReadCmds(cn *pool.Conn, cmds []Cmder) error {
}
func (c *baseClient) txPipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) {
- cn.SetWriteTimeout(c.opt.WriteTimeout)
- if err := txPipelineWriteMulti(cn, cmds); err != nil {
+ err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return txPipelineWriteMulti(wr, cmds)
+ })
+ if err != nil {
setCmdsErr(cmds, err)
return true, err
}
- // Set read timeout for all commands.
- cn.SetReadTimeout(c.opt.ReadTimeout)
-
- if err := c.txPipelineReadQueued(cn, cmds); err != nil {
- setCmdsErr(cmds, err)
- return false, err
- }
-
- return false, pipelineReadCmds(cn, cmds)
+ err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ err := txPipelineReadQueued(rd, cmds)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ return pipelineReadCmds(rd, cmds)
+ })
+ return false, err
}
-func txPipelineWriteMulti(cn *pool.Conn, cmds []Cmder) error {
+func txPipelineWriteMulti(wr *proto.Writer, cmds []Cmder) error {
multiExec := make([]Cmder, 0, len(cmds)+2)
multiExec = append(multiExec, NewStatusCmd("MULTI"))
multiExec = append(multiExec, cmds...)
multiExec = append(multiExec, NewSliceCmd("EXEC"))
- return writeCmd(cn, multiExec...)
+ return writeCmd(wr, multiExec...)
}
-func (c *baseClient) txPipelineReadQueued(cn *pool.Conn, cmds []Cmder) error {
+func txPipelineReadQueued(rd *proto.Reader, cmds []Cmder) error {
// Parse queued replies.
var statusCmd StatusCmd
- if err := statusCmd.readReply(cn); err != nil {
+ err := statusCmd.readReply(rd)
+ if err != nil {
return err
}
for _ = range cmds {
- err := statusCmd.readReply(cn)
+ err = statusCmd.readReply(rd)
if err != nil && !internal.IsRedisError(err) {
return err
}
}
// Parse number of replies.
- line, err := cn.Rd.ReadLine()
+ line, err := rd.ReadLine()
if err != nil {
if err == Nil {
err = TxFailedErr
@@ -424,7 +441,7 @@ func (c *Client) TxPipeline() Pipeliner {
}
func (c *Client) pubSub() *PubSub {
- return &PubSub{
+ pubsub := &PubSub{
opt: c.opt,
newConn: func(channels []string) (*pool.Conn, error) {
@@ -432,6 +449,8 @@ func (c *Client) pubSub() *PubSub {
},
closeConn: c.connPool.CloseConn,
}
+ pubsub.init()
+ return pubsub
}
// Subscribe subscribes the client to the specified channels.
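
A short sketch of the new Do helper, which wraps arbitrary arguments in a generic Cmd and runs it through the usual Process path; the key name and address are placeholders:

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	// Handy for commands that have no typed helper method.
	v, err := client.Do("set", "greeting", "hello").Result()
	fmt.Println(v, err)

	v, err = client.Do("get", "greeting").Result()
	fmt.Println(v, err)
}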
diff --git a/vendor/github.com/go-redis/redis/result.go b/vendor/github.com/go-redis/redis/result.go
index e086e8e34..e438f260b 100644
--- a/vendor/github.com/go-redis/redis/result.go
+++ b/vendor/github.com/go-redis/redis/result.go
@@ -53,7 +53,7 @@ func NewBoolResult(val bool, err error) *BoolCmd {
// NewStringResult returns a StringCmd initialised with val and err for testing
func NewStringResult(val string, err error) *StringCmd {
var cmd StringCmd
- cmd.val = []byte(val)
+ cmd.val = val
cmd.setErr(err)
return &cmd
}
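
Since StringCmd now stores a plain string, NewStringResult keeps working as a test stub. A sketch of stubbing a reply without a live server; the key name is hypothetical:

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

// fetch is the code under test; its Redis dependency is injected so a test
// can substitute a canned result.
func fetch(get func(key string) *redis.StringCmd) (string, error) {
	return get("config:mode").Result()
}

func main() {
	stub := func(key string) *redis.StringCmd {
		return redis.NewStringResult("standalone", nil) // canned reply
	}
	v, err := fetch(stub)
	fmt.Println(v, err)
}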
diff --git a/vendor/github.com/go-redis/redis/ring.go b/vendor/github.com/go-redis/redis/ring.go
index b47a1094e..3ded28060 100644
--- a/vendor/github.com/go-redis/redis/ring.go
+++ b/vendor/github.com/go-redis/redis/ring.go
@@ -16,7 +16,8 @@ import (
"github.com/go-redis/redis/internal/pool"
)
-const nreplicas = 100
+// Hash is type of hash function used in consistent hash.
+type Hash consistenthash.Hash
var errRingShardsDown = errors.New("redis: all ring shards are down")
@@ -30,6 +31,27 @@ type RingOptions struct {
// Shard is considered down after 3 subsequent failed checks.
HeartbeatFrequency time.Duration
+ // Hash function used in consistent hash.
+ // Default is crc32.ChecksumIEEE.
+ Hash Hash
+
+ // Number of replicas in consistent hash.
+ // Default is 100 replicas.
+ //
+ // A higher number of replicas gives less deviation, that is, keys are
+ // distributed across nodes more evenly.
+ //
+ // Following is deviation for common nreplicas:
+ // --------------------------------------------------------
+ // | nreplicas | standard error | 99% confidence interval |
+ // | 10 | 0.3152 | (0.37, 1.98) |
+ // | 100 | 0.0997 | (0.76, 1.28) |
+ // | 1000 | 0.0316 | (0.92, 1.09) |
+ // --------------------------------------------------------
+ //
+ // See https://arxiv.org/abs/1406.2294 for reference
+ HashReplicas int
+
// Following options are copied from Options struct.
OnConnect func(*Conn) error
@@ -46,6 +68,8 @@ type RingOptions struct {
WriteTimeout time.Duration
PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
@@ -56,6 +80,10 @@ func (opt *RingOptions) init() {
opt.HeartbeatFrequency = 500 * time.Millisecond
}
+ if opt.HashReplicas == 0 {
+ opt.HashReplicas = 100
+ }
+
switch opt.MinRetryBackoff {
case -1:
opt.MinRetryBackoff = 0
@@ -82,6 +110,8 @@ func (opt *RingOptions) clientOptions() *Options {
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
@@ -133,16 +163,21 @@ func (shard *ringShard) Vote(up bool) bool {
//------------------------------------------------------------------------------
type ringShards struct {
+ opt *RingOptions
+
mu sync.RWMutex
hash *consistenthash.Map
shards map[string]*ringShard // read only
list []*ringShard // read only
+ len int
closed bool
}
-func newRingShards() *ringShards {
+func newRingShards(opt *RingOptions) *ringShards {
return &ringShards{
- hash: consistenthash.New(nreplicas, nil),
+ opt: opt,
+
+ hash: newConsistentHash(opt),
shards: make(map[string]*ringShard),
}
}
@@ -238,18 +273,28 @@ func (c *ringShards) Heartbeat(frequency time.Duration) {
// rebalance removes dead shards from the Ring.
func (c *ringShards) rebalance() {
- hash := consistenthash.New(nreplicas, nil)
+ hash := newConsistentHash(c.opt)
+ var shardsNum int
for name, shard := range c.shards {
if shard.IsUp() {
hash.Add(name)
+ shardsNum++
}
}
c.mu.Lock()
c.hash = hash
+ c.len = shardsNum
c.mu.Unlock()
}
+func (c *ringShards) Len() int {
+ c.mu.RLock()
+ l := c.len
+ c.mu.RUnlock()
+ return l
+}
+
func (c *ringShards) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
@@ -305,7 +350,7 @@ func NewRing(opt *RingOptions) *Ring {
ring := &Ring{
opt: opt,
- shards: newRingShards(),
+ shards: newRingShards(opt),
}
ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
@@ -363,11 +408,16 @@ func (c *Ring) PoolStats() *PoolStats {
acc.Misses += s.Misses
acc.Timeouts += s.Timeouts
acc.TotalConns += s.TotalConns
- acc.FreeConns += s.FreeConns
+ acc.IdleConns += s.IdleConns
}
return &acc
}
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+ return c.shards.Len()
+}
+
// Subscribe subscribes the client to the specified channels.
func (c *Ring) Subscribe(channels ...string) *PubSub {
if len(channels) == 0 {
@@ -466,7 +516,16 @@ func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
return c.shards.GetByKey(firstKey)
}
-func (c *Ring) WrapProcess(fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error) {
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Ring) Do(args ...interface{}) *Cmd {
+ cmd := NewCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Ring) WrapProcess(
+ fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error,
+) {
c.ForEachShard(func(c *Client) error {
c.WrapProcess(fn)
return nil
@@ -552,7 +611,7 @@ func (c *Ring) defaultProcessPipeline(cmds []Cmder) error {
cmdsMap = failedCmdsMap
}
- return firstCmdsErr(cmds)
+ return cmdsFirstErr(cmds)
}
func (c *Ring) TxPipeline() Pipeliner {
@@ -570,3 +629,7 @@ func (c *Ring) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
func (c *Ring) Close() error {
return c.shards.Close()
}
+
+func newConsistentHash(opt *RingOptions) *consistenthash.Map {
+ return consistenthash.New(opt.HashReplicas, consistenthash.Hash(opt.Hash))
+}
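
A sketch of the new ring options; shard names, addresses, and the replica count are illustrative, and Hash is left at its crc32.ChecksumIEEE default:

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{ // shard name -> address
			"shard1": "localhost:7000",
			"shard2": "localhost:7001",
		},
		HashReplicas: 1000, // trade memory for a more even key distribution
	})
	defer ring.Close()

	// Len reports how many shards are currently considered up.
	fmt.Println("live shards:", ring.Len())
}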
diff --git a/vendor/github.com/go-redis/redis/sentinel.go b/vendor/github.com/go-redis/redis/sentinel.go
index 3cedf36ee..c5f71493d 100644
--- a/vendor/github.com/go-redis/redis/sentinel.go
+++ b/vendor/github.com/go-redis/redis/sentinel.go
@@ -29,13 +29,17 @@ type FailoverOptions struct {
Password string
DB int
- MaxRetries int
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
@@ -92,7 +96,7 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
},
}
c.baseClient.init()
- c.setProcessor(c.Process)
+ c.cmdable.setProcessor(c.Process)
return &c
}
@@ -116,7 +120,7 @@ func NewSentinelClient(opt *Options) *SentinelClient {
}
func (c *SentinelClient) PubSub() *PubSub {
- return &PubSub{
+ pubsub := &PubSub{
opt: c.opt,
newConn: func(channels []string) (*pool.Conn, error) {
@@ -124,6 +128,8 @@ func (c *SentinelClient) PubSub() *PubSub {
},
closeConn: c.connPool.CloseConn,
}
+ pubsub.init()
+ return pubsub
}
func (c *SentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
@@ -180,10 +186,7 @@ func (d *sentinelFailover) MasterAddr() (string, error) {
if err != nil {
return "", err
}
-
- if d._masterAddr != addr {
- d.switchMaster(addr)
- }
+ d._switchMaster(addr)
return addr, nil
}
@@ -194,11 +197,11 @@ func (d *sentinelFailover) masterAddr() (string, error) {
addr, err := d.sentinel.GetMasterAddrByName(d.masterName).Result()
if err == nil {
addr := net.JoinHostPort(addr[0], addr[1])
- internal.Logf("sentinel: master=%q addr=%q", d.masterName, addr)
return addr, nil
}
- internal.Logf("sentinel: GetMasterAddrByName name=%q failed: %s", d.masterName, err)
+ internal.Logf("sentinel: GetMasterAddrByName name=%q failed: %s",
+ d.masterName, err)
d._resetSentinel()
}
@@ -234,15 +237,23 @@ func (d *sentinelFailover) masterAddr() (string, error) {
return "", errors.New("redis: all sentinels are unreachable")
}
-func (d *sentinelFailover) switchMaster(masterAddr string) {
- internal.Logf(
- "sentinel: new master=%q addr=%q",
- d.masterName, masterAddr,
- )
- _ = d.Pool().Filter(func(cn *pool.Conn) bool {
- return cn.RemoteAddr().String() != masterAddr
+func (c *sentinelFailover) switchMaster(addr string) {
+ c.mu.Lock()
+ c._switchMaster(addr)
+ c.mu.Unlock()
+}
+
+func (c *sentinelFailover) _switchMaster(addr string) {
+ if c._masterAddr == addr {
+ return
+ }
+
+ internal.Logf("sentinel: new master=%q addr=%q",
+ c.masterName, addr)
+ _ = c.Pool().Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != addr
})
- d._masterAddr = masterAddr
+ c._masterAddr = addr
}
func (d *sentinelFailover) setSentinel(sentinel *SentinelClient) {
@@ -292,27 +303,25 @@ func (d *sentinelFailover) discoverSentinels(sentinel *SentinelClient) {
}
func (d *sentinelFailover) listen(sentinel *SentinelClient) {
- var pubsub *PubSub
- for {
- if pubsub == nil {
- pubsub = sentinel.PubSub()
+ pubsub := sentinel.PubSub()
+ defer pubsub.Close()
- if err := pubsub.Subscribe("+switch-master"); err != nil {
- internal.Logf("sentinel: Subscribe failed: %s", err)
- pubsub.Close()
- d.resetSentinel()
- return
- }
- }
+ err := pubsub.Subscribe("+switch-master")
+ if err != nil {
+ internal.Logf("sentinel: Subscribe failed: %s", err)
+ d.resetSentinel()
+ return
+ }
+ for {
msg, err := pubsub.ReceiveMessage()
if err != nil {
- if err != pool.ErrClosed {
- internal.Logf("sentinel: ReceiveMessage failed: %s", err)
- pubsub.Close()
+ if err == pool.ErrClosed {
+ d.resetSentinel()
+ return
}
- d.resetSentinel()
- return
+ internal.Logf("sentinel: ReceiveMessage failed: %s", err)
+ continue
}
switch msg.Channel {
@@ -323,12 +332,7 @@ func (d *sentinelFailover) listen(sentinel *SentinelClient) {
continue
}
addr := net.JoinHostPort(parts[3], parts[4])
-
- d.mu.Lock()
- if d._masterAddr != addr {
- d.switchMaster(addr)
- }
- d.mu.Unlock()
+ d.switchMaster(addr)
}
}
}
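
A failover client sketch using the newly added pool fields; the master name and sentinel address are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster", // assumed sentinel master name
		SentinelAddrs: []string{"localhost:26379"},
		MinIdleConns:  2,
		MaxConnAge:    time.Hour,
	})
	defer client.Close()

	fmt.Println(client.Ping().Result())
}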
diff --git a/vendor/github.com/go-redis/redis/tx.go b/vendor/github.com/go-redis/redis/tx.go
index 6a753b6a0..6a7da99dd 100644
--- a/vendor/github.com/go-redis/redis/tx.go
+++ b/vendor/github.com/go-redis/redis/tx.go
@@ -29,6 +29,10 @@ func (c *Client) newTx() *Tx {
return &tx
}
+// Watch prepares a transaction and marks the keys to be watched
+// for conditional execution if there are any keys.
+//
+// The transaction is automatically closed when the fn exits.
func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
tx := c.newTx()
if len(keys) > 0 {
@@ -74,6 +78,7 @@ func (c *Tx) Unwatch(keys ...string) *StatusCmd {
return cmd
}
+// Pipeline creates a new pipeline. It is more convenient to use Pipelined.
func (c *Tx) Pipeline() Pipeliner {
pipe := Pipeline{
exec: c.processTxPipeline,
@@ -82,23 +87,24 @@ func (c *Tx) Pipeline() Pipeliner {
return &pipe
}
-// Pipelined executes commands queued in the fn in a transaction
-// and restores the connection state to normal.
+// Pipelined executes commands queued in the fn in a transaction.
//
// When using WATCH, EXEC will execute commands only if the watched keys
// were not modified, allowing for a check-and-set mechanism.
//
// Exec always returns list of commands. If transaction fails
-// TxFailedErr is returned. Otherwise Exec returns error of the first
+// TxFailedErr is returned. Otherwise Exec returns an error of the first
// failed command or nil.
func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipeline().Pipelined(fn)
}
+// TxPipelined is an alias for Pipelined.
func (c *Tx) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipelined(fn)
}
+// TxPipeline is an alias for Pipeline.
func (c *Tx) TxPipeline() Pipeliner {
return c.Pipeline()
}
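
A check-and-set sketch built on Watch as documented above; the counter key is hypothetical, and a missing key (redis.Nil) is treated as zero:

package main

import (
	"fmt"
	"strconv"

	"github.com/go-redis/redis"
)

// increment performs a check-and-set: EXEC only succeeds if "counter" was
// not modified by another client between WATCH and EXEC.
func increment(client *redis.Client) error {
	return client.Watch(func(tx *redis.Tx) error {
		n := 0
		s, err := tx.Get("counter").Result()
		if err != nil && err != redis.Nil {
			return err
		}
		if err == nil {
			n, err = strconv.Atoi(s)
			if err != nil {
				return err
			}
		}
		_, err = tx.Pipelined(func(pipe redis.Pipeliner) error {
			pipe.Set("counter", n+1, 0)
			return nil
		})
		return err
	}, "counter")
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()
	fmt.Println(increment(client))
}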
diff --git a/vendor/github.com/go-redis/redis/universal.go b/vendor/github.com/go-redis/redis/universal.go
index 9e30c81d9..a60756246 100644
--- a/vendor/github.com/go-redis/redis/universal.go
+++ b/vendor/github.com/go-redis/redis/universal.go
@@ -12,35 +12,38 @@ type UniversalOptions struct {
// of cluster/sentinel nodes.
Addrs []string
- // The sentinel master name.
- // Only failover clients.
- MasterName string
-
// Database to be selected after connecting to the server.
// Only single-node and failover clients.
DB int
- // Only cluster clients.
-
- // Enables read only queries on slave nodes.
- ReadOnly bool
-
- MaxRedirects int
- RouteByLatency bool
-
- // Common options
+ // Common options.
OnConnect func(*Conn) error
- MaxRetries int
Password string
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
TLSConfig *tls.Config
+
+ // Only cluster clients.
+
+ MaxRedirects int
+ ReadOnly bool
+ RouteByLatency bool
+ RouteRandomly bool
+
+ // The sentinel master name.
+ // Only failover clients.
+ MasterName string
}
func (o *UniversalOptions) cluster() *ClusterOptions {
@@ -49,22 +52,31 @@ func (o *UniversalOptions) cluster() *ClusterOptions {
}
return &ClusterOptions{
- Addrs: o.Addrs,
+ Addrs: o.Addrs,
+ OnConnect: o.OnConnect,
+
+ Password: o.Password,
+
MaxRedirects: o.MaxRedirects,
- RouteByLatency: o.RouteByLatency,
ReadOnly: o.ReadOnly,
+ RouteByLatency: o.RouteByLatency,
+ RouteRandomly: o.RouteRandomly,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
- OnConnect: o.OnConnect,
- MaxRetries: o.MaxRetries,
- Password: o.Password,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
PoolTimeout: o.PoolTimeout,
IdleTimeout: o.IdleTimeout,
IdleCheckFrequency: o.IdleCheckFrequency,
- TLSConfig: o.TLSConfig,
+
+ TLSConfig: o.TLSConfig,
}
}
@@ -76,19 +88,27 @@ func (o *UniversalOptions) failover() *FailoverOptions {
return &FailoverOptions{
SentinelAddrs: o.Addrs,
MasterName: o.MasterName,
- DB: o.DB,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
- OnConnect: o.OnConnect,
- MaxRetries: o.MaxRetries,
- Password: o.Password,
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
PoolTimeout: o.PoolTimeout,
IdleTimeout: o.IdleTimeout,
IdleCheckFrequency: o.IdleCheckFrequency,
- TLSConfig: o.TLSConfig,
+
+ TLSConfig: o.TLSConfig,
}
}
@@ -99,20 +119,28 @@ func (o *UniversalOptions) simple() *Options {
}
return &Options{
- Addr: addr,
- DB: o.DB,
+ Addr: addr,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
- OnConnect: o.OnConnect,
- MaxRetries: o.MaxRetries,
- Password: o.Password,
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
PoolTimeout: o.PoolTimeout,
IdleTimeout: o.IdleTimeout,
IdleCheckFrequency: o.IdleCheckFrequency,
- TLSConfig: o.TLSConfig,
+
+ TLSConfig: o.TLSConfig,
}
}
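A short sketch of how the reorganized UniversalOptions above is consumed, assuming the package's NewUniversalClient constructor (not shown in this hunk), which dispatches to cluster(), failover() or simple() based on MasterName and the number of addresses; all values are placeholders.

package main

import (
	"time"

	"github.com/go-redis/redis"
)

// newClient shows the new retry and pool fields being forwarded to whichever
// concrete client is selected.
func newClient(addrs []string, master string) redis.UniversalClient {
	return redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs:           addrs,
		MasterName:      master, // set -> failover(); else >1 address -> cluster(); else simple()
		MaxRetries:      3,
		MinRetryBackoff: 8 * time.Millisecond,
		MaxRetryBackoff: 512 * time.Millisecond,
		MinIdleConns:    4, // new pool knobs, forwarded by all three builders
		MaxConnAge:      30 * time.Minute,
	})
}

func main() {
	c := newClient([]string{"localhost:6379"}, "")
	defer c.Close()
}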
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
index 1b1b1921e..0f646931a 100644
--- a/vendor/github.com/golang/protobuf/LICENSE
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -1,7 +1,4 @@
-Go support for Protocol Buffers - Google's data interchange format
-
Copyright 2010 The Go Authors. All rights reserved.
-https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
index c27d35f86..3abfed2cf 100644
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -37,27 +37,9 @@ package proto
import (
"errors"
- "fmt"
"reflect"
)
-// RequiredNotSetError is the error returned if Marshal is called with
-// a protocol buffer struct whose required fields have not
-// all been initialized. It is also the error returned if Unmarshal is
-// called with an encoded protocol buffer that does not include all the
-// required fields.
-//
-// When printed, RequiredNotSetError reports the first unset required field in a
-// message. If the field cannot be precisely determined, it is reported as
-// "{Unknown}".
-type RequiredNotSetError struct {
- field string
-}
-
-func (e *RequiredNotSetError) Error() string {
- return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-
var (
// errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element.
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
index 0e2191b8a..75565cc6d 100644
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -265,7 +265,6 @@ package proto
import (
"encoding/json"
- "errors"
"fmt"
"log"
"reflect"
@@ -274,7 +273,66 @@ import (
"sync"
)
-var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+ if e.field == "" {
+ return fmt.Sprintf("proto: required field not set")
+ }
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+ if e.field == "" {
+ return "proto: invalid UTF-8 detected"
+ }
+ return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+ return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+ if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+ return true
+ }
+ if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+ return true
+ }
+ return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether the merge was successful.
+// It returns false for any fatal non-nil error.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+ if err == nil {
+ return true // not an error
+ }
+ if !isNonFatal(err) {
+ return false // fatal error
+ }
+ if nf.E == nil {
+ nf.E = err // store first instance of non-fatal error
+ }
+ return true
+}
// Message is implemented by generated protocol buffer messages.
type Message interface {
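The RequiredNotSetError/nonFatal machinery above means Marshal can now hand back complete wire bytes together with a non-fatal error. A hypothetical caller-side helper, under the assumption that the returned bytes remain usable in that case:

package protoutil

import (
	"log"

	"github.com/golang/protobuf/proto"
)

// marshalLenient treats a RequiredNotSetError as a warning, in line with the
// non-fatal handling introduced above, and fails on anything else.
func marshalLenient(msg proto.Message) ([]byte, error) {
	data, err := proto.Marshal(msg)
	if _, ok := err.(*proto.RequiredNotSetError); ok {
		log.Printf("required field not set: %v (encoded %d bytes anyway)", err, len(data))
		return data, nil
	}
	return data, err
}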
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index f710adab0..50b99b83a 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -139,7 +139,7 @@ type Properties struct {
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ proto3 bool // whether this is known to be a proto3 field
oneof bool // whether this is a oneof field
Default string // default value
@@ -148,9 +148,9 @@ type Properties struct {
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
- mtype reflect.Type // set for map types only
- mkeyprop *Properties // set for map types only
- mvalprop *Properties // set for map types only
+ mtype reflect.Type // set for map types only
+ MapKeyProp *Properties // set for map types only
+ MapValProp *Properties // set for map types only
}
// String formats the properties in the protobuf struct field tag style.
@@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc
case reflect.Map:
p.mtype = t1
- p.mkeyprop = &Properties{}
- p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.mvalprop = &Properties{}
+ p.MapKeyProp = &Properties{}
+ p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.MapValProp = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
- p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
if p.stype != nil {
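Since mkeyprop/mvalprop are now exported as MapKeyProp/MapValProp, code outside the proto package can inspect map fields via reflection. A small illustrative helper; the message passed in is any generated struct value (hypothetical, not part of this diff):

package protoutil

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

// dumpMapFields prints the wire encodings of the key and value of every map
// field in a generated message struct.
func dumpMapFields(msg interface{}) {
	sprops := proto.GetProperties(reflect.TypeOf(msg)) // panics if msg is not a struct
	for _, p := range sprops.Prop {
		if p.MapKeyProp != nil {
			fmt.Printf("%s: key wire=%q, value wire=%q\n", p.Name, p.MapKeyProp.Wire, p.MapValProp.Wire)
		}
	}
}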
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
index 0f212b302..b16794496 100644
--- a/vendor/github.com/golang/protobuf/proto/table_marshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -231,7 +231,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
return b, err
}
- var err, errreq error
+ var err, errLater error
// The old marshaler encodes extensions at beginning.
if u.extensions.IsValid() {
e := ptr.offset(u.extensions).toExtensions()
@@ -252,11 +252,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
}
}
for _, f := range u.fields {
- if f.required && errreq == nil {
+ if f.required {
if ptr.offset(f.field).getPointer().isNil() {
// Required field is not set.
// We record the error but keep going, to give a complete marshaling.
- errreq = &RequiredNotSetError{f.name}
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name}
+ }
continue
}
}
@@ -269,14 +271,21 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
if err1, ok := err.(*RequiredNotSetError); ok {
// Required field in submessage is not set.
// We record the error but keep going, to give a complete marshaling.
- if errreq == nil {
- errreq = &RequiredNotSetError{f.name + "." + err1.field}
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name + "." + err1.field}
}
continue
}
if err == errRepeatedHasNil {
err = errors.New("proto: repeated field " + f.name + " has nil element")
}
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
return b, err
}
}
@@ -284,7 +293,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
s := *ptr.offset(u.unrecognized).toBytes()
b = append(b, s...)
}
- return b, errreq
+ return b, errLater
}
// computeMarshalInfo initializes the marshal info.
@@ -530,6 +539,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
packed := false
proto3 := false
+ validateUTF8 := true
for i := 2; i < len(tags); i++ {
if tags[i] == "packed" {
packed = true
@@ -538,6 +548,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
proto3 = true
}
}
+ validateUTF8 = validateUTF8 && proto3
switch t.Kind() {
case reflect.Bool:
@@ -735,6 +746,18 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
}
return sizeFloat64Value, appendFloat64Value
case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return sizeStringPtr, appendUTF8StringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendUTF8StringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendUTF8StringValueNoZero
+ }
+ return sizeStringValue, appendUTF8StringValue
+ }
if pointer {
return sizeStringPtr, appendStringPtr
}
@@ -1984,51 +2007,104 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt
}
func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
v := *ptr.toString()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
return b, nil
}
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
v := *ptr.toString()
if v == "" {
return b, nil
}
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
return b, nil
}
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
p := *ptr.toStringPtr()
if p == nil {
return b, nil
}
v := *p
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
return b, nil
}
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
s := *ptr.toStringSlice()
for _, v := range s {
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
}
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
return b, nil
}
func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
@@ -2107,7 +2183,8 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getPointerSlice()
- var err, errreq error
+ var err error
+ var nerr nonFatal
for _, v := range s {
if v.isNil() {
return b, errRepeatedHasNil
@@ -2115,22 +2192,14 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
b = appendVarint(b, wiretag) // start group
b, err = u.marshal(b, v, deterministic)
b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- if err != nil {
- if _, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errreq == nil {
- errreq = err
- }
- continue
- }
+ if !nerr.Merge(err) {
if err == ErrNil {
err = errRepeatedHasNil
}
return b, err
}
}
- return b, errreq
+ return b, nerr.E
}
}
@@ -2174,7 +2243,8 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getPointerSlice()
- var err, errreq error
+ var err error
+ var nerr nonFatal
for _, v := range s {
if v.isNil() {
return b, errRepeatedHasNil
@@ -2184,22 +2254,14 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
b = appendVarint(b, uint64(siz))
b, err = u.marshal(b, v, deterministic)
- if err != nil {
- if _, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errreq == nil {
- errreq = err
- }
- continue
- }
+ if !nerr.Merge(err) {
if err == ErrNil {
err = errRepeatedHasNil
}
return b, err
}
}
- return b, errreq
+ return b, nerr.E
}
}
@@ -2223,6 +2285,25 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
// value.
// Key cannot be pointer-typed.
valIsPtr := valType.Kind() == reflect.Ptr
+
+ // If value is a message with nested maps, calling
+ // valSizer in marshal may be quadratic. We should use
+ // cached version in marshal (but not in size).
+ // If value is not message type, we don't have size cache,
+ // but it cannot be nested either. Just use valSizer.
+ valCachedSizer := valSizer
+ if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+ u := getMarshalInfo(valType.Elem())
+ valCachedSizer = func(ptr pointer, tagsize int) int {
+ // Same as message sizer, but use cache.
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.cachedsize(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ }
return func(ptr pointer, tagsize int) int {
m := ptr.asPointerTo(t).Elem() // the map
n := 0
@@ -2243,24 +2324,26 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
if len(keys) > 1 && deterministic {
sort.Sort(mapKeys(keys))
}
+
+ var nerr nonFatal
for _, k := range keys {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
kaddr := toAddrPointer(&ki, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
b = appendVarint(b, tag)
- siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
b = appendVarint(b, uint64(siz))
b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
- if err != nil && err != ErrNil { // allow nil value in map
+ if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
}
@@ -2333,6 +2416,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
defer mu.Unlock()
var err error
+ var nerr nonFatal
// Fast-path for common cases: zero or one extensions.
// Don't bother sorting the keys.
@@ -2352,11 +2436,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
v := e.value
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
// Sort the keys to provide a deterministic encoding.
@@ -2383,11 +2467,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
v := e.value
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
// message set format is:
@@ -2444,6 +2528,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
defer mu.Unlock()
var err error
+ var nerr nonFatal
// Fast-path for common cases: zero or one extensions.
// Don't bother sorting the keys.
@@ -2470,12 +2555,12 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
v := e.value
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
b = append(b, 1<<3|WireEndGroup)
}
- return b, nil
+ return b, nerr.E
}
// Sort the keys to provide a deterministic encoding.
@@ -2509,11 +2594,11 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
b = append(b, 1<<3|WireEndGroup)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
@@ -2556,6 +2641,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
sort.Ints(keys)
var err error
+ var nerr nonFatal
for _, k := range keys {
e := m[int32(k)]
if e.value == nil || e.desc == nil {
@@ -2572,11 +2658,11 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
v := e.value
p := toAddrPointer(&v, ei.isptr)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
// newMarshaler is the interface representing objects that can marshal themselves.
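The appendUTF8String* variants above change the proto3 contract: invalid UTF-8 in a string field no longer aborts marshaling; the bytes are written and a field-qualified, non-fatal error is returned. A hedged sketch of what a caller sees (the unexported error type is only reachable through its InvalidUTF8 method):

package protoutil

import (
	"log"

	"github.com/golang/protobuf/proto"
)

// marshalKeepBytes logs, but does not fail on, invalid UTF-8 in proto3 string
// fields; other marshaling errors are ignored here for brevity.
func marshalKeepBytes(msg proto.Message) []byte {
	data, err := proto.Marshal(msg)
	if e, ok := err.(interface{ InvalidUTF8() bool }); ok && e.InvalidUTF8() {
		log.Printf("non-fatal: %v", err) // e.g. proto: field "pkg.Msg.name" contains invalid UTF-8
	}
	return data
}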
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
index 55f0340a3..ebf1caa56 100644
--- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -97,6 +97,8 @@ type unmarshalFieldInfo struct {
// if a required field, contains a single set bit at this field's index in the required field list.
reqMask uint64
+
+ name string // name of the field, for error reporting
}
var (
@@ -136,8 +138,8 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
if u.isMessageSet {
return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
}
- var reqMask uint64 // bitmask of required fields we've seen.
- var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage.
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var errLater error
for len(b) > 0 {
// Read tag and wire type.
// Special case 1 and 2 byte varints.
@@ -176,11 +178,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
if r, ok := err.(*RequiredNotSetError); ok {
// Remember this error, but keep parsing. We need to produce
// a full parse even if a required field is missing.
- rnse = r
+ if errLater == nil {
+ errLater = r
+ }
reqMask |= f.reqMask
continue
}
if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
return err
}
// Fragments with bad wire type are treated as unknown fields.
@@ -239,20 +250,16 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
emap[int32(tag)] = e
}
}
- if rnse != nil {
- // A required field of a submessage/group is missing. Return that error.
- return rnse
- }
- if reqMask != u.reqMask {
+ if reqMask != u.reqMask && errLater == nil {
// A required field of this message is missing.
for _, n := range u.reqFields {
if reqMask&1 == 0 {
- return &RequiredNotSetError{n}
+ errLater = &RequiredNotSetError{n}
}
reqMask >>= 1
}
}
- return nil
+ return errLater
}
// computeUnmarshalInfo fills in u with information for use
@@ -351,7 +358,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
}
// Store the info in the correct slot in the message.
- u.setTag(tag, toField(&f), unmarshal, reqMask)
+ u.setTag(tag, toField(&f), unmarshal, reqMask, name)
}
// Find any types associated with oneof fields.
@@ -366,10 +373,17 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
f := typ.Field(0) // oneof implementers have one field
baseUnmarshal := fieldUnmarshaler(&f)
- tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
- tag, err := strconv.Atoi(tagstr)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
if err != nil {
- panic("protobuf tag field not an integer: " + tagstr)
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
+ }
}
// Find the oneof field that this struct implements.
@@ -380,7 +394,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// That lets us know where this struct should be stored
// when we encounter it during unmarshaling.
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
- u.setTag(tag, of.field, unmarshal, 0)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
}
}
}
@@ -401,7 +415,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
- }, 0)
+ }, 0, "")
// Set mask for required field check.
u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
@@ -413,8 +427,9 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// tag = tag # for field
// field/unmarshal = unmarshal info for that field.
// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
-func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
- i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
n := u.typ.NumField()
if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
for len(u.dense) <= tag {
@@ -442,11 +457,17 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
tagArray := strings.Split(tags, ",")
encoding := tagArray[0]
name := "unknown"
+ proto3 := false
+ validateUTF8 := true
for _, tag := range tagArray[3:] {
if strings.HasPrefix(tag, "name=") {
name = tag[5:]
}
+ if tag == "proto3" {
+ proto3 = true
+ }
}
+ validateUTF8 = validateUTF8 && proto3
// Figure out packaging (pointer, slice, or both)
slice := false
@@ -594,6 +615,15 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
}
return unmarshalBytesValue
case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return unmarshalUTF8StringPtr
+ }
+ if slice {
+ return unmarshalUTF8StringSlice
+ }
+ return unmarshalUTF8StringValue
+ }
if pointer {
return unmarshalStringPtr
}
@@ -1448,9 +1478,6 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
- if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
- }
*f.toString() = v
return b[x:], nil
}
@@ -1468,9 +1495,6 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
- if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
- }
*f.toStringPtr() = &v
return b[x:], nil
}
@@ -1488,11 +1512,69 @@ func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ return b[x:], errInvalidUTF8
}
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
s := f.toStringSlice()
*s = append(*s, v)
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
return b[x:], nil
}
@@ -1674,6 +1756,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
// Maps will be somewhat slow. Oh well.
// Read key and value from data.
+ var nerr nonFatal
k := reflect.New(kt)
v := reflect.New(vt)
for len(b) > 0 {
@@ -1694,7 +1777,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
err = errInternalBadWireType // skip unknown tag
}
- if err == nil {
+ if nerr.Merge(err) {
continue
}
if err != errInternalBadWireType {
@@ -1717,7 +1800,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
// Insert into map.
m.SetMapIndex(k.Elem(), v.Elem())
- return r, nil
+ return r, nerr.E
}
}
@@ -1743,15 +1826,16 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal
// Unmarshal data into holder.
// We unmarshal into the first field of the holder object.
var err error
+ var nerr nonFatal
b, err = unmarshal(b, valToPointer(v).offset(field0), w)
- if err != nil {
+ if !nerr.Merge(err) {
return nil, err
}
// Write pointer to holder into target field.
f.asPointerTo(ityp).Elem().Set(v)
- return b, nil
+ return b, nerr.E
}
}
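On the decode side the same errLater pattern applies: Unmarshal keeps parsing past a missing required field or invalid UTF-8 and reports the first such error only after the message has been fully populated. A hypothetical lenient wrapper:

package protoutil

import "github.com/golang/protobuf/proto"

// unmarshalLenient downgrades a missing required field to a non-error; the
// decoded fields in msg are already populated when that error is returned.
func unmarshalLenient(data []byte, msg proto.Message) error {
	err := proto.Unmarshal(data, msg)
	if _, ok := err.(*proto.RequiredNotSetError); ok {
		return nil
	}
	return err
}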
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
index 2205fdaad..1aaee725b 100644
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
- if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
- if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ if err := tm.writeAny(w, val, props.MapValProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
index 0685bae36..bb55a3af2 100644
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
if err := p.consumeToken(":"); err != nil {
return err
}
- if err := p.readAny(key, props.mkeyprop); err != nil {
+ if err := p.readAny(key, props.MapKeyProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
- if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
return err
}
- if err := p.readAny(val, props.mvalprop); err != nil {
+ if err := p.readAny(val, props.MapValProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index b2af97f4a..70276e8f5 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -130,10 +130,12 @@ func UnmarshalAny(any *any.Any, pb proto.Message) error {
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
- aname, err := AnyMessageName(any)
- if err != nil {
+ // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+ // but it avoids scanning TypeUrl for the slash.
+ if any == nil {
return false
}
-
- return aname == proto.MessageName(pb)
+ name := proto.MessageName(pb)
+ prefix := len(any.TypeUrl) - len(name)
+ return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
}
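A quick sketch of the fast path above: Is now matches the tail of TypeUrl against the message name instead of splitting the URL. Using the ptypes helpers already in this vendor tree:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/duration"
	"github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	a, err := ptypes.MarshalAny(ptypes.TimestampNow())
	if err != nil {
		panic(err)
	}
	fmt.Println(ptypes.Is(a, &timestamp.Timestamp{})) // true: TypeUrl ends in "/google.protobuf.Timestamp"
	fmt.Println(ptypes.Is(a, &duration.Duration{}))   // false: the name after the last '/' differs
}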
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index f67edc7dc..e3c56d3ff 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -121,7 +121,7 @@ type Any struct {
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
- TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index 4d75473b8..a7beb2c41 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -82,14 +82,14 @@ type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index e9c222282..8e76ae976 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -100,12 +100,12 @@ type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
- Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go
index 1cf7581ce..1acf80d1b 100644
--- a/vendor/github.com/gorilla/handlers/cors.go
+++ b/vendor/github.com/gorilla/handlers/cors.go
@@ -48,7 +48,10 @@ const (
func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
origin := r.Header.Get(corsOriginHeader)
if !ch.isOriginAllowed(origin) {
- ch.h.ServeHTTP(w, r)
+ if r.Method != corsOptionMethod || ch.ignoreOptions {
+ ch.h.ServeHTTP(w, r)
+ }
+
return
}
@@ -111,13 +114,17 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
returnOrigin := origin
- for _, o := range ch.allowedOrigins {
- // A configuration of * is different than explicitly setting an allowed
- // origin. Returning arbitrary origin headers an an access control allow
- // origin header is unsafe and is not required by any use case.
- if o == corsOriginMatchAll {
- returnOrigin = "*"
- break
+ if ch.allowedOriginValidator == nil && len(ch.allowedOrigins) == 0 {
+ returnOrigin = "*"
+ } else {
+ for _, o := range ch.allowedOrigins {
+ // A configuration of * is different than explicitly setting an allowed
+ // origin. Returning arbitrary origin headers in an access control allow
+ // origin header is unsafe and is not required by any use case.
+ if o == corsOriginMatchAll {
+ returnOrigin = "*"
+ break
+ }
}
}
w.Header().Set(corsAllowOriginHeader, returnOrigin)
@@ -159,7 +166,7 @@ func parseCORSOptions(opts ...CORSOption) *cors {
ch := &cors{
allowedMethods: defaultCorsMethods,
allowedHeaders: defaultCorsHeaders,
- allowedOrigins: []string{corsOriginMatchAll},
+ allowedOrigins: []string{},
}
for _, option := range opts {
@@ -307,6 +314,10 @@ func (ch *cors) isOriginAllowed(origin string) bool {
return ch.allowedOriginValidator(origin)
}
+ if len(ch.allowedOrigins) == 0 {
+ return true
+ }
+
for _, allowedOrigin := range ch.allowedOrigins {
if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll {
return true
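The behavioral effect of the CORS changes above, sketched with the package's public options (addresses and origins are placeholders): with no AllowedOrigins configured every origin is now allowed and echoed back as "*", while with an explicit list a preflight from a disallowed origin is answered by the middleware instead of being forwarded to the wrapped handler.

package main

import (
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	// Explicit allow-list: OPTIONS preflights from other origins stop here.
	cors := handlers.CORS(
		handlers.AllowedOrigins([]string{"https://example.com"}),
		handlers.AllowedMethods([]string{"GET", "POST", "OPTIONS"}),
	)
	http.ListenAndServe(":8080", cors(api))
}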
diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go
index 75db7f87b..d03f2bf13 100644
--- a/vendor/github.com/gorilla/handlers/handlers.go
+++ b/vendor/github.com/gorilla/handlers/handlers.go
@@ -7,15 +7,10 @@ package handlers
import (
"bufio"
"fmt"
- "io"
"net"
"net/http"
- "net/url"
"sort"
- "strconv"
"strings"
- "time"
- "unicode/utf8"
)
// MethodHandler is an http.Handler that dispatches to a handler whose key in the
@@ -48,59 +43,6 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
}
}
-// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its
-// friends
-type loggingHandler struct {
- writer io.Writer
- handler http.Handler
-}
-
-// combinedLoggingHandler is the http.Handler implementation for LoggingHandlerTo
-// and its friends
-type combinedLoggingHandler struct {
- writer io.Writer
- handler http.Handler
-}
-
-func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- t := time.Now()
- logger := makeLogger(w)
- url := *req.URL
- h.handler.ServeHTTP(logger, req)
- writeLog(h.writer, req, url, t, logger.Status(), logger.Size())
-}
-
-func (h combinedLoggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- t := time.Now()
- logger := makeLogger(w)
- url := *req.URL
- h.handler.ServeHTTP(logger, req)
- writeCombinedLog(h.writer, req, url, t, logger.Status(), logger.Size())
-}
-
-func makeLogger(w http.ResponseWriter) loggingResponseWriter {
- var logger loggingResponseWriter = &responseLogger{w: w, status: http.StatusOK}
- if _, ok := w.(http.Hijacker); ok {
- logger = &hijackLogger{responseLogger{w: w, status: http.StatusOK}}
- }
- h, ok1 := logger.(http.Hijacker)
- c, ok2 := w.(http.CloseNotifier)
- if ok1 && ok2 {
- return hijackCloseNotifier{logger, h, c}
- }
- if ok2 {
- return &closeNotifyWriter{logger, c}
- }
- return logger
-}
-
-type commonLoggingResponseWriter interface {
- http.ResponseWriter
- http.Flusher
- Status() int
- Size() int
-}
-
// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP
// status code and body size
type responseLogger struct {
@@ -165,173 +107,6 @@ type hijackCloseNotifier struct {
http.CloseNotifier
}
-const lowerhex = "0123456789abcdef"
-
-func appendQuoted(buf []byte, s string) []byte {
- var runeTmp [utf8.UTFMax]byte
- for width := 0; len(s) > 0; s = s[width:] {
- r := rune(s[0])
- width = 1
- if r >= utf8.RuneSelf {
- r, width = utf8.DecodeRuneInString(s)
- }
- if width == 1 && r == utf8.RuneError {
- buf = append(buf, `\x`...)
- buf = append(buf, lowerhex[s[0]>>4])
- buf = append(buf, lowerhex[s[0]&0xF])
- continue
- }
- if r == rune('"') || r == '\\' { // always backslashed
- buf = append(buf, '\\')
- buf = append(buf, byte(r))
- continue
- }
- if strconv.IsPrint(r) {
- n := utf8.EncodeRune(runeTmp[:], r)
- buf = append(buf, runeTmp[:n]...)
- continue
- }
- switch r {
- case '\a':
- buf = append(buf, `\a`...)
- case '\b':
- buf = append(buf, `\b`...)
- case '\f':
- buf = append(buf, `\f`...)
- case '\n':
- buf = append(buf, `\n`...)
- case '\r':
- buf = append(buf, `\r`...)
- case '\t':
- buf = append(buf, `\t`...)
- case '\v':
- buf = append(buf, `\v`...)
- default:
- switch {
- case r < ' ':
- buf = append(buf, `\x`...)
- buf = append(buf, lowerhex[s[0]>>4])
- buf = append(buf, lowerhex[s[0]&0xF])
- case r > utf8.MaxRune:
- r = 0xFFFD
- fallthrough
- case r < 0x10000:
- buf = append(buf, `\u`...)
- for s := 12; s >= 0; s -= 4 {
- buf = append(buf, lowerhex[r>>uint(s)&0xF])
- }
- default:
- buf = append(buf, `\U`...)
- for s := 28; s >= 0; s -= 4 {
- buf = append(buf, lowerhex[r>>uint(s)&0xF])
- }
- }
- }
- }
- return buf
-
-}
-
-// buildCommonLogLine builds a log entry for req in Apache Common Log Format.
-// ts is the timestamp with which the entry should be logged.
-// status and size are used to provide the response HTTP status and size.
-func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte {
- username := "-"
- if url.User != nil {
- if name := url.User.Username(); name != "" {
- username = name
- }
- }
-
- host, _, err := net.SplitHostPort(req.RemoteAddr)
-
- if err != nil {
- host = req.RemoteAddr
- }
-
- uri := req.RequestURI
-
- // Requests using the CONNECT method over HTTP/2.0 must use
- // the authority field (aka r.Host) to identify the target.
- // Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT
- if req.ProtoMajor == 2 && req.Method == "CONNECT" {
- uri = req.Host
- }
- if uri == "" {
- uri = url.RequestURI()
- }
-
- buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2)
- buf = append(buf, host...)
- buf = append(buf, " - "...)
- buf = append(buf, username...)
- buf = append(buf, " ["...)
- buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...)
- buf = append(buf, `] "`...)
- buf = append(buf, req.Method...)
- buf = append(buf, " "...)
- buf = appendQuoted(buf, uri)
- buf = append(buf, " "...)
- buf = append(buf, req.Proto...)
- buf = append(buf, `" `...)
- buf = append(buf, strconv.Itoa(status)...)
- buf = append(buf, " "...)
- buf = append(buf, strconv.Itoa(size)...)
- return buf
-}
-
-// writeLog writes a log entry for req to w in Apache Common Log Format.
-// ts is the timestamp with which the entry should be logged.
-// status and size are used to provide the response HTTP status and size.
-func writeLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) {
- buf := buildCommonLogLine(req, url, ts, status, size)
- buf = append(buf, '\n')
- w.Write(buf)
-}
-
-// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
-// ts is the timestamp with which the entry should be logged.
-// status and size are used to provide the response HTTP status and size.
-func writeCombinedLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) {
- buf := buildCommonLogLine(req, url, ts, status, size)
- buf = append(buf, ` "`...)
- buf = appendQuoted(buf, req.Referer())
- buf = append(buf, `" "`...)
- buf = appendQuoted(buf, req.UserAgent())
- buf = append(buf, '"', '\n')
- w.Write(buf)
-}
-
-// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in
-// Apache Combined Log Format.
-//
-// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format.
-//
-// LoggingHandler always sets the ident field of the log to -
-func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
- return combinedLoggingHandler{out, h}
-}
-
-// LoggingHandler return a http.Handler that wraps h and logs requests to out in
-// Apache Common Log Format (CLF).
-//
-// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format.
-//
-// LoggingHandler always sets the ident field of the log to -
-//
-// Example:
-//
-// r := mux.NewRouter()
-// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-// w.Write([]byte("This is a catch-all route"))
-// })
-// loggedRouter := handlers.LoggingHandler(os.Stdout, r)
-// http.ListenAndServe(":1123", loggedRouter)
-//
-func LoggingHandler(out io.Writer, h http.Handler) http.Handler {
- return loggingHandler{out, h}
-}
-
// isContentType validates the Content-Type header matches the supplied
// contentType. That is, its type and subtype match.
func isContentType(h http.Header, contentType string) bool {
diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go
new file mode 100644
index 000000000..cbd182f3a
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/logging.go
@@ -0,0 +1,252 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+ "unicode/utf8"
+)
+
+// Logging
+
+// LogFormatterParams is the structure any formatter will be handed when it is time to log a request.
+type LogFormatterParams struct {
+ Request *http.Request
+ URL url.URL
+ TimeStamp time.Time
+ StatusCode int
+ Size int
+}
+
+// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler
+type LogFormatter func(writer io.Writer, params LogFormatterParams)
+
+// loggingHandler is the http.Handler implementation for LoggingHandler and
+// its friends (CombinedLoggingHandler, CustomLoggingHandler).
+
+type loggingHandler struct {
+ writer io.Writer
+ handler http.Handler
+ formatter LogFormatter
+}
+
+func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ t := time.Now()
+ logger := makeLogger(w)
+ url := *req.URL
+
+ h.handler.ServeHTTP(logger, req)
+
+ params := LogFormatterParams{
+ Request: req,
+ URL: url,
+ TimeStamp: t,
+ StatusCode: logger.Status(),
+ Size: logger.Size(),
+ }
+
+ h.formatter(h.writer, params)
+}
+
+func makeLogger(w http.ResponseWriter) loggingResponseWriter {
+ var logger loggingResponseWriter = &responseLogger{w: w, status: http.StatusOK}
+ if _, ok := w.(http.Hijacker); ok {
+ logger = &hijackLogger{responseLogger{w: w, status: http.StatusOK}}
+ }
+ h, ok1 := logger.(http.Hijacker)
+ c, ok2 := w.(http.CloseNotifier)
+ if ok1 && ok2 {
+ return hijackCloseNotifier{logger, h, c}
+ }
+ if ok2 {
+ return &closeNotifyWriter{logger, c}
+ }
+ return logger
+}
+
+type commonLoggingResponseWriter interface {
+ http.ResponseWriter
+ http.Flusher
+ Status() int
+ Size() int
+}
+
+const lowerhex = "0123456789abcdef"
+
+func appendQuoted(buf []byte, s string) []byte {
+ var runeTmp [utf8.UTFMax]byte
+ for width := 0; len(s) > 0; s = s[width:] {
+ r := rune(s[0])
+ width = 1
+ if r >= utf8.RuneSelf {
+ r, width = utf8.DecodeRuneInString(s)
+ }
+ if width == 1 && r == utf8.RuneError {
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ continue
+ }
+ if r == rune('"') || r == '\\' { // always backslashed
+ buf = append(buf, '\\')
+ buf = append(buf, byte(r))
+ continue
+ }
+ if strconv.IsPrint(r) {
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+ continue
+ }
+ switch r {
+ case '\a':
+ buf = append(buf, `\a`...)
+ case '\b':
+ buf = append(buf, `\b`...)
+ case '\f':
+ buf = append(buf, `\f`...)
+ case '\n':
+ buf = append(buf, `\n`...)
+ case '\r':
+ buf = append(buf, `\r`...)
+ case '\t':
+ buf = append(buf, `\t`...)
+ case '\v':
+ buf = append(buf, `\v`...)
+ default:
+ switch {
+ case r < ' ':
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ case r > utf8.MaxRune:
+ r = 0xFFFD
+ fallthrough
+ case r < 0x10000:
+ buf = append(buf, `\u`...)
+ for s := 12; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ default:
+ buf = append(buf, `\U`...)
+ for s := 28; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ }
+ }
+ }
+ return buf
+
+}
+
+// buildCommonLogLine builds a log entry for req in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte {
+ username := "-"
+ if url.User != nil {
+ if name := url.User.Username(); name != "" {
+ username = name
+ }
+ }
+
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+
+ if err != nil {
+ host = req.RemoteAddr
+ }
+
+ uri := req.RequestURI
+
+ // Requests using the CONNECT method over HTTP/2.0 must use
+ // the authority field (aka r.Host) to identify the target.
+ // Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT
+ if req.ProtoMajor == 2 && req.Method == "CONNECT" {
+ uri = req.Host
+ }
+ if uri == "" {
+ uri = url.RequestURI()
+ }
+
+ buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2)
+ buf = append(buf, host...)
+ buf = append(buf, " - "...)
+ buf = append(buf, username...)
+ buf = append(buf, " ["...)
+ buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...)
+ buf = append(buf, `] "`...)
+ buf = append(buf, req.Method...)
+ buf = append(buf, " "...)
+ buf = appendQuoted(buf, uri)
+ buf = append(buf, " "...)
+ buf = append(buf, req.Proto...)
+ buf = append(buf, `" `...)
+ buf = append(buf, strconv.Itoa(status)...)
+ buf = append(buf, " "...)
+ buf = append(buf, strconv.Itoa(size)...)
+ return buf
+}
+
+// writeLog writes a log entry for req to w in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeLog(writer io.Writer, params LogFormatterParams) {
+ buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
+ buf = append(buf, '\n')
+ writer.Write(buf)
+}
+
+// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeCombinedLog(writer io.Writer, params LogFormatterParams) {
+ buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
+ buf = append(buf, ` "`...)
+ buf = appendQuoted(buf, params.Request.Referer())
+ buf = append(buf, `" "`...)
+ buf = appendQuoted(buf, params.Request.UserAgent())
+ buf = append(buf, '"', '\n')
+ writer.Write(buf)
+}
+
+// CombinedLoggingHandler returns an http.Handler that wraps h and logs requests to out in
+// Apache Combined Log Format.
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format.
+//
+// CombinedLoggingHandler always sets the ident field of the log to -
+func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return loggingHandler{out, h, writeCombinedLog}
+}
+
+// LoggingHandler returns an http.Handler that wraps h and logs requests to out in
+// Apache Common Log Format (CLF).
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format.
+//
+// LoggingHandler always sets the ident field of the log to -
+//
+// Example:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// w.Write([]byte("This is a catch-all route"))
+// })
+// loggedRouter := handlers.LoggingHandler(os.Stdout, r)
+// http.ListenAndServe(":1123", loggedRouter)
+//
+func LoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return loggingHandler{out, h, writeLog}
+}
+
+// CustomLoggingHandler provides a way to supply a custom log formatter
+// while taking advantage of the mechanisms in this package.
+func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler {
+ return loggingHandler{out, h, f}
+}
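The new LogFormatter/LogFormatterParams types make the log format pluggable; CombinedLoggingHandler and LoggingHandler are now just presets over the same loggingHandler. A minimal sketch of CustomLoggingHandler with a custom one-line formatter (port and output are placeholders):

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/gorilla/handlers"
)

// tinyFormatter writes one line per request: method, path, status and size.
func tinyFormatter(w io.Writer, p handlers.LogFormatterParams) {
	fmt.Fprintf(w, "%s %s -> %d (%d bytes)\n", p.Request.Method, p.URL.Path, p.StatusCode, p.Size)
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	http.ListenAndServe(":8080", handlers.CustomLoggingHandler(os.Stdout, h, tinyFormatter))
}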
diff --git a/vendor/github.com/gorilla/websocket/.travis.yml b/vendor/github.com/gorilla/websocket/.travis.yml
index 1f730470a..a880cd982 100644
--- a/vendor/github.com/gorilla/websocket/.travis.yml
+++ b/vendor/github.com/gorilla/websocket/.travis.yml
@@ -3,9 +3,6 @@ sudo: false
matrix:
include:
- - go: 1.4
- - go: 1.5.x
- - go: 1.6.x
- go: 1.7.x
- go: 1.8.x
- go: 1.9.x
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
index 41f8ed53d..7a24309ce 100644
--- a/vendor/github.com/gorilla/websocket/client.go
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -69,6 +69,17 @@ type Dialer struct {
// do not limit the size of the messages that can be sent or received.
ReadBufferSize, WriteBufferSize int
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
// Subprotocols specifies the client's requested subprotocols.
Subprotocols []string
@@ -277,7 +288,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
}
}
- conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)
+ conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
if err := req.Write(netConn); err != nil {
return nil, nil, err
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
index 5f46bf4a5..d2a21c148 100644
--- a/vendor/github.com/gorilla/websocket/conn.go
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -223,6 +223,20 @@ func isValidReceivedCloseCode(code int) bool {
return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
}
+// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
+// interface. The type of the value stored in a pool is not specified.
+type BufferPool interface {
+ // Get gets a value from the pool or returns nil if the pool is empty.
+ Get() interface{}
+ // Put adds a value to the pool.
+ Put(interface{})
+}
+
+// writePoolData is the type added to the write buffer pool. This wrapper is
+// used to prevent applications from peeking at and depending on the values
+// added to the pool.
+type writePoolData struct{ buf []byte }
+
// The Conn type represents a WebSocket connection.
type Conn struct {
conn net.Conn
@@ -232,6 +246,8 @@ type Conn struct {
// Write fields
mu chan bool // used as mutex to protect write to conn
writeBuf []byte // frame is constructed in this buffer.
+ writePool BufferPool
+ writeBufSize int
writeDeadline time.Time
writer io.WriteCloser // the current writer returned to the application
isWriting bool // for best-effort concurrent write detection
@@ -263,64 +279,29 @@ type Conn struct {
newDecompressionReader func(io.Reader) io.ReadCloser
}
-func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
- return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil)
-}
-
-type writeHook struct {
- p []byte
-}
-
-func (wh *writeHook) Write(p []byte) (int, error) {
- wh.p = p
- return len(p), nil
-}
-
-func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn {
- mu := make(chan bool, 1)
- mu <- true
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn {
- var br *bufio.Reader
- if readBufferSize == 0 && brw != nil && brw.Reader != nil {
- // Reuse the supplied bufio.Reader if the buffer has a useful size.
- // This code assumes that peek on a reader returns
- // bufio.Reader.buf[:0].
- brw.Reader.Reset(conn)
- if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 {
- br = brw.Reader
- }
- }
if br == nil {
if readBufferSize == 0 {
readBufferSize = defaultReadBufferSize
- }
- if readBufferSize < maxControlFramePayloadSize {
+ } else if readBufferSize < maxControlFramePayloadSize {
+ // must be large enough for control frame
readBufferSize = maxControlFramePayloadSize
}
br = bufio.NewReaderSize(conn, readBufferSize)
}
- var writeBuf []byte
- if writeBufferSize == 0 && brw != nil && brw.Writer != nil {
- // Use the bufio.Writer's buffer if the buffer has a useful size. This
- // code assumes that bufio.Writer.buf[:1] is passed to the
- // bufio.Writer's underlying writer.
- var wh writeHook
- brw.Writer.Reset(&wh)
- brw.Writer.WriteByte(0)
- brw.Flush()
- if cap(wh.p) >= maxFrameHeaderSize+256 {
- writeBuf = wh.p[:cap(wh.p)]
- }
+ if writeBufferSize <= 0 {
+ writeBufferSize = defaultWriteBufferSize
}
+ writeBufferSize += maxFrameHeaderSize
- if writeBuf == nil {
- if writeBufferSize == 0 {
- writeBufferSize = defaultWriteBufferSize
- }
- writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize)
+ if writeBuf == nil && writeBufferPool == nil {
+ writeBuf = make([]byte, writeBufferSize)
}
+ mu := make(chan bool, 1)
+ mu <- true
c := &Conn{
isServer: isServer,
br: br,
@@ -328,6 +309,8 @@ func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize in
mu: mu,
readFinal: true,
writeBuf: writeBuf,
+ writePool: writeBufferPool,
+ writeBufSize: writeBufferSize,
enableWriteCompression: true,
compressionLevel: defaultCompressionLevel,
}
@@ -370,6 +353,15 @@ func (c *Conn) writeFatal(err error) error {
return err
}
+func (c *Conn) read(n int) ([]byte, error) {
+ p, err := c.br.Peek(n)
+ if err == io.EOF {
+ err = errUnexpectedEOF
+ }
+ c.br.Discard(len(p))
+ return p, err
+}
+
func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
<-c.mu
defer func() { c.mu <- true }()
@@ -475,7 +467,19 @@ func (c *Conn) prepWrite(messageType int) error {
c.writeErrMu.Lock()
err := c.writeErr
c.writeErrMu.Unlock()
- return err
+ if err != nil {
+ return err
+ }
+
+ if c.writeBuf == nil {
+ wpd, ok := c.writePool.Get().(writePoolData)
+ if ok {
+ c.writeBuf = wpd.buf
+ } else {
+ c.writeBuf = make([]byte, c.writeBufSize)
+ }
+ }
+ return nil
}
// NextWriter returns a writer for the next message to send. The writer's Close
@@ -601,6 +605,10 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error {
if final {
c.writer = nil
+ if c.writePool != nil {
+ c.writePool.Put(writePoolData{buf: c.writeBuf})
+ c.writeBuf = nil
+ }
return nil
}
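The `BufferPool` interface added above is deliberately small: anything with `Get`/`Put` works, and the doc comment notes that `*sync.Pool` satisfies it. A minimal, hypothetical sketch of an alternative implementation just to show the shape (the `chanPool` type is illustrative, not part of the package):

```go
package main

import (
	"fmt"

	"github.com/gorilla/websocket"
)

// chanPool is a toy BufferPool backed by a bounded channel. In practice a
// *sync.Pool is the intended implementation; this only illustrates the
// two-method contract.
type chanPool chan interface{}

func (p chanPool) Get() interface{} {
	select {
	case v := <-p:
		return v
	default:
		return nil // empty pool: the connection allocates a fresh write buffer
	}
}

func (p chanPool) Put(v interface{}) {
	select {
	case p <- v:
	default: // full pool: drop the value and let the GC reclaim it
	}
}

func main() {
	var pool websocket.BufferPool = make(chanPool, 8) // interface is satisfied
	fmt.Println(pool.Get())                           // <nil>: nothing pooled yet
}
```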
diff --git a/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/gorilla/websocket/conn_read.go
deleted file mode 100644
index 1ea15059e..000000000
--- a/vendor/github.com/gorilla/websocket/conn_read.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.5
-
-package websocket
-
-import "io"
-
-func (c *Conn) read(n int) ([]byte, error) {
- p, err := c.br.Peek(n)
- if err == io.EOF {
- err = errUnexpectedEOF
- }
- c.br.Discard(len(p))
- return p, err
-}
diff --git a/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/gorilla/websocket/conn_read_legacy.go
deleted file mode 100644
index 018541cf6..000000000
--- a/vendor/github.com/gorilla/websocket/conn_read_legacy.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.5
-
-package websocket
-
-import "io"
-
-func (c *Conn) read(n int) ([]byte, error) {
- p, err := c.br.Peek(n)
- if err == io.EOF {
- err = errUnexpectedEOF
- }
- if len(p) > 0 {
- // advance over the bytes just read
- io.ReadFull(c.br, p)
- }
- return p, err
-}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
index aee270528..3bd627bc1 100644
--- a/vendor/github.com/gorilla/websocket/server.go
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -7,6 +7,7 @@ package websocket
import (
"bufio"
"errors"
+ "io"
"net"
"net/http"
"net/url"
@@ -33,10 +34,23 @@ type Upgrader struct {
// or received.
ReadBufferSize, WriteBufferSize int
+ // WriteBufferPool is a pool of buffers for write operations. If the value
+ // is not set, then write buffers are allocated to the connection for the
+ // lifetime of the connection.
+ //
+ // A pool is most useful when the application has a modest volume of writes
+ // across a large number of connections.
+ //
+ // Applications should use a single pool for each unique value of
+ // WriteBufferSize.
+ WriteBufferPool BufferPool
+
// Subprotocols specifies the server's supported protocols in order of
- // preference. If this field is set, then the Upgrade method negotiates a
+ // preference. If this field is not nil, then the Upgrade method negotiates a
// subprotocol by selecting the first match in this list with a protocol
- // requested by the client.
+ // requested by the client. If there's no match, then no protocol is
+ // negotiated (the Sec-Websocket-Protocol header is not included in the
+ // handshake response).
Subprotocols []string
// Error specifies the function for generating HTTP error responses. If Error
@@ -177,7 +191,21 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
return nil, errors.New("websocket: client sent data before handshake is complete")
}
- c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw)
+ var br *bufio.Reader
+ if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
+ // Reuse hijacked buffered reader as connection reader.
+ br = brw.Reader
+ }
+
+ buf := bufioWriterBuffer(netConn, brw.Writer)
+
+ var writeBuf []byte
+ if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
+ // Reuse hijacked write buffer as connection buffer.
+ writeBuf = buf
+ }
+
+ c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
c.subprotocol = subprotocol
if compress {
@@ -185,7 +213,13 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
c.newDecompressionReader = decompressNoContextTakeover
}
- p := c.writeBuf[:0]
+ // Use larger of hijacked buffer and connection write buffer for header.
+ p := buf
+ if len(c.writeBuf) > len(p) {
+ p = c.writeBuf
+ }
+ p = p[:0]
+
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
p = append(p, computeAcceptKey(challengeKey)...)
p = append(p, "\r\n"...)
@@ -296,3 +330,40 @@ func IsWebSocketUpgrade(r *http.Request) bool {
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
tokenListContainsValue(r.Header, "Upgrade", "websocket")
}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+ // This code assumes that peek on a reset reader returns
+ // bufio.Reader.buf[:0].
+ // TODO: Use bufio.Reader.Size() after Go 1.10
+ br.Reset(originalReader)
+ if p, err := br.Peek(0); err == nil {
+ return cap(p)
+ }
+ return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+ p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+ wh.p = p
+ return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+ // This code assumes that bufio.Writer.buf[:1] is passed to the
+ // bufio.Writer's underlying writer.
+ var wh writeHook
+ bw.Reset(&wh)
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(originalWriter)
+
+ return wh.p[:cap(wh.p)]
+}
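Taken together, the server changes let an `Upgrader` draw write buffers from a shared pool instead of pinning one buffer per connection. A minimal sketch of the intended wiring, assuming this vendored revision of gorilla/websocket (route and sizes are illustrative):

```go
package main

import (
	"net/http"
	"sync"

	"github.com/gorilla/websocket"
)

// One pool per distinct WriteBufferSize, shared across connections, as the
// WriteBufferPool field documentation recommends.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	WriteBufferPool: &sync.Pool{}, // *sync.Pool satisfies BufferPool
}

func echo(w http.ResponseWriter, r *http.Request) {
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return // Upgrade has already written an HTTP error response
	}
	defer c.Close()
	// The write buffer is taken from the pool for this message and returned
	// to it when the final frame is flushed.
	_ = c.WriteMessage(websocket.TextMessage, []byte("hello"))
}

func main() {
	http.HandleFunc("/ws", echo)
	_ = http.ListenAndServe(":8080", nil)
}
```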
diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod
new file mode 100644
index 000000000..c9b84022c
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/errwrap
diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod
new file mode 100644
index 000000000..2534331d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/go.mod
@@ -0,0 +1,3 @@
+module github.com/hashicorp/go-multierror
+
+require github.com/hashicorp/errwrap v1.0.0
diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum
new file mode 100644
index 000000000..85b1f8ff3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/go.sum
@@ -0,0 +1,4 @@
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
diff --git a/vendor/github.com/hashicorp/memberlist/Makefile b/vendor/github.com/hashicorp/memberlist/Makefile
index 891e8364a..e34a0818d 100644
--- a/vendor/github.com/hashicorp/memberlist/Makefile
+++ b/vendor/github.com/hashicorp/memberlist/Makefile
@@ -16,4 +16,4 @@ deps:
go get -d -v ./...
echo $(DEPS) | xargs -n1 go get -d
-.PNONY: test cov integ
+.PHONY: test cov integ
diff --git a/vendor/github.com/hashicorp/memberlist/memberlist.go b/vendor/github.com/hashicorp/memberlist/memberlist.go
index e9084f9fd..bd8abd23f 100644
--- a/vendor/github.com/hashicorp/memberlist/memberlist.go
+++ b/vendor/github.com/hashicorp/memberlist/memberlist.go
@@ -15,6 +15,7 @@ multiple routes.
package memberlist
import (
+ "container/list"
"fmt"
"log"
"net"
@@ -34,6 +35,7 @@ type Memberlist struct {
sequenceNum uint32 // Local sequence number
incarnation uint32 // Local incarnation number
numNodes uint32 // Number of known nodes (estimate)
+ pushPullReq uint32 // Number of push/pull requests
config *Config
shutdown int32 // Used as an atomic boolean value
@@ -45,7 +47,11 @@ type Memberlist struct {
leaveLock sync.Mutex // Serializes calls to Leave
transport Transport
- handoff chan msgHandoff
+
+ handoffCh chan struct{}
+ highPriorityMsgQueue *list.List
+ lowPriorityMsgQueue *list.List
+ msgQueueLock sync.Mutex
nodeLock sync.RWMutex
nodes []*nodeState // Known nodes
@@ -160,17 +166,19 @@ func newMemberlist(conf *Config) (*Memberlist, error) {
}
m := &Memberlist{
- config: conf,
- shutdownCh: make(chan struct{}),
- leaveBroadcast: make(chan struct{}, 1),
- transport: transport,
- handoff: make(chan msgHandoff, conf.HandoffQueueDepth),
- nodeMap: make(map[string]*nodeState),
- nodeTimers: make(map[string]*suspicion),
- awareness: newAwareness(conf.AwarenessMaxMultiplier),
- ackHandlers: make(map[uint32]*ackHandler),
- broadcasts: &TransmitLimitedQueue{RetransmitMult: conf.RetransmitMult},
- logger: logger,
+ config: conf,
+ shutdownCh: make(chan struct{}),
+ leaveBroadcast: make(chan struct{}, 1),
+ transport: transport,
+ handoffCh: make(chan struct{}, 1),
+ highPriorityMsgQueue: list.New(),
+ lowPriorityMsgQueue: list.New(),
+ nodeMap: make(map[string]*nodeState),
+ nodeTimers: make(map[string]*suspicion),
+ awareness: newAwareness(conf.AwarenessMaxMultiplier),
+ ackHandlers: make(map[uint32]*ackHandler),
+ broadcasts: &TransmitLimitedQueue{RetransmitMult: conf.RetransmitMult},
+ logger: logger,
}
m.broadcasts.NumNodes = func() int {
return m.estNumNodes()
diff --git a/vendor/github.com/hashicorp/memberlist/net.go b/vendor/github.com/hashicorp/memberlist/net.go
index a4330c4d2..f6a0d45fe 100644
--- a/vendor/github.com/hashicorp/memberlist/net.go
+++ b/vendor/github.com/hashicorp/memberlist/net.go
@@ -8,9 +8,10 @@ import (
"hash/crc32"
"io"
"net"
+ "sync/atomic"
"time"
- "github.com/armon/go-metrics"
+ metrics "github.com/armon/go-metrics"
"github.com/hashicorp/go-msgpack/codec"
)
@@ -71,7 +72,8 @@ const (
compoundOverhead = 2 // Assumed overhead per entry in compoundHeader
userMsgOverhead = 1
blockingWarning = 10 * time.Millisecond // Warn if a UDP packet takes this long to process
- maxPushStateBytes = 10 * 1024 * 1024
+ maxPushStateBytes = 20 * 1024 * 1024
+ maxPushPullRequests = 128 // Maximum number of concurrent push/pull requests
)
// ping request sent directly to node
@@ -238,6 +240,16 @@ func (m *Memberlist) handleConn(conn net.Conn) {
m.logger.Printf("[ERR] memberlist: Failed to receive user message: %s %s", err, LogConn(conn))
}
case pushPullMsg:
+ // Increment counter of pending push/pulls
+ numConcurrent := atomic.AddUint32(&m.pushPullReq, 1)
+ defer atomic.AddUint32(&m.pushPullReq, ^uint32(0))
+
+ // Check if we have too many open push/pull requests
+ if numConcurrent >= maxPushPullRequests {
+ m.logger.Printf("[ERR] memberlist: Too many pending push/pull requests")
+ return
+ }
+
join, remoteNodes, userState, err := m.readRemoteState(bufConn, dec)
if err != nil {
m.logger.Printf("[ERR] memberlist: Failed to read remote state: %s %s", err, LogConn(conn))
@@ -357,10 +369,25 @@ func (m *Memberlist) handleCommand(buf []byte, from net.Addr, timestamp time.Tim
case deadMsg:
fallthrough
case userMsg:
+ // Determine the message queue, prioritize alive
+ queue := m.lowPriorityMsgQueue
+ if msgType == aliveMsg {
+ queue = m.highPriorityMsgQueue
+ }
+
+ // Check for overflow and append if not full
+ m.msgQueueLock.Lock()
+ if queue.Len() >= m.config.HandoffQueueDepth {
+ m.logger.Printf("[WARN] memberlist: handler queue full, dropping message (%d) %s", msgType, LogAddress(from))
+ } else {
+ queue.PushBack(msgHandoff{msgType, buf, from})
+ }
+ m.msgQueueLock.Unlock()
+
+ // Notify of pending message
select {
- case m.handoff <- msgHandoff{msgType, buf, from}:
+ case m.handoffCh <- struct{}{}:
default:
- m.logger.Printf("[WARN] memberlist: handler queue full, dropping message (%d) %s", msgType, LogAddress(from))
}
default:
@@ -368,28 +395,51 @@ func (m *Memberlist) handleCommand(buf []byte, from net.Addr, timestamp time.Tim
}
}
+// getNextMessage returns the next message to process in priority order, using LIFO
+func (m *Memberlist) getNextMessage() (msgHandoff, bool) {
+ m.msgQueueLock.Lock()
+ defer m.msgQueueLock.Unlock()
+
+ if el := m.highPriorityMsgQueue.Back(); el != nil {
+ m.highPriorityMsgQueue.Remove(el)
+ msg := el.Value.(msgHandoff)
+ return msg, true
+ } else if el := m.lowPriorityMsgQueue.Back(); el != nil {
+ m.lowPriorityMsgQueue.Remove(el)
+ msg := el.Value.(msgHandoff)
+ return msg, true
+ }
+ return msgHandoff{}, false
+}
+
// packetHandler is a long running goroutine that processes messages received
// over the packet interface, but is decoupled from the listener to avoid
// blocking the listener which may cause ping/ack messages to be delayed.
func (m *Memberlist) packetHandler() {
for {
select {
- case msg := <-m.handoff:
- msgType := msg.msgType
- buf := msg.buf
- from := msg.from
-
- switch msgType {
- case suspectMsg:
- m.handleSuspect(buf, from)
- case aliveMsg:
- m.handleAlive(buf, from)
- case deadMsg:
- m.handleDead(buf, from)
- case userMsg:
- m.handleUser(buf, from)
- default:
- m.logger.Printf("[ERR] memberlist: Message type (%d) not supported %s (packet handler)", msgType, LogAddress(from))
+ case <-m.handoffCh:
+ for {
+ msg, ok := m.getNextMessage()
+ if !ok {
+ break
+ }
+ msgType := msg.msgType
+ buf := msg.buf
+ from := msg.from
+
+ switch msgType {
+ case suspectMsg:
+ m.handleSuspect(buf, from)
+ case aliveMsg:
+ m.handleAlive(buf, from)
+ case deadMsg:
+ m.handleDead(buf, from)
+ case userMsg:
+ m.handleUser(buf, from)
+ default:
+ m.logger.Printf("[ERR] memberlist: Message type (%d) not supported %s (packet handler)", msgType, LogAddress(from))
+ }
}
case <-m.shutdownCh:
@@ -1094,7 +1144,7 @@ func (m *Memberlist) sendPingAndWaitForAck(addr string, ping ping, deadline time
}
if ack.SeqNo != ping.SeqNo {
- return false, fmt.Errorf("Sequence number from ack (%d) doesn't match ping (%d)", ack.SeqNo, ping.SeqNo, LogConn(conn))
+ return false, fmt.Errorf("Sequence number from ack (%d) doesn't match ping (%d)", ack.SeqNo, ping.SeqNo)
}
return true, nil
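The memberlist change replaces a single bounded handoff channel with two `container/list` queues drained newest-first, so `alive` messages are never starved by a backlog of user messages. A generic sketch of that pattern (type and field names here are illustrative, not the library's):

```go
package main

import (
	"container/list"
	"fmt"
	"sync"
)

type handoff struct{ payload []byte }

type dispatcher struct {
	mu   sync.Mutex
	high *list.List    // e.g. alive messages
	low  *list.List    // everything else
	wake chan struct{} // buffered: coalesces notifications
}

func newDispatcher() *dispatcher {
	return &dispatcher{high: list.New(), low: list.New(), wake: make(chan struct{}, 1)}
}

func (d *dispatcher) enqueue(m handoff, highPriority bool, depth int) {
	d.mu.Lock()
	q := d.low
	if highPriority {
		q = d.high
	}
	if q.Len() < depth { // drop instead of blocking the packet listener
		q.PushBack(m)
	}
	d.mu.Unlock()

	select {
	case d.wake <- struct{}{}:
	default: // a wake-up is already pending
	}
}

func (d *dispatcher) next() (handoff, bool) {
	d.mu.Lock()
	defer d.mu.Unlock()
	for _, q := range []*list.List{d.high, d.low} {
		if el := q.Back(); el != nil { // LIFO: newest message first
			q.Remove(el)
			return el.Value.(handoff), true
		}
	}
	return handoff{}, false
}

func main() {
	d := newDispatcher()
	d.enqueue(handoff{payload: []byte("user msg")}, false, 1024)
	d.enqueue(handoff{payload: []byte("alive msg")}, true, 1024)
	for m, ok := d.next(); ok; m, ok = d.next() {
		fmt.Printf("%s\n", m.payload) // alive msg drains before user msg
	}
}
```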
diff --git a/vendor/github.com/lib/pq/go.mod b/vendor/github.com/lib/pq/go.mod
new file mode 100644
index 000000000..edf0b343f
--- /dev/null
+++ b/vendor/github.com/lib/pq/go.mod
@@ -0,0 +1 @@
+module github.com/lib/pq
diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go
index 947d189f4..850bb9040 100644
--- a/vendor/github.com/lib/pq/notify.go
+++ b/vendor/github.com/lib/pq/notify.go
@@ -725,6 +725,9 @@ func (l *Listener) Close() error {
}
l.isClosed = true
+ // Unblock calls to Listen()
+ l.reconnectCond.Broadcast()
+
return nil
}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
index 90525e665..51f056615 100644
--- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -240,7 +240,7 @@ func (r *Lexer) fetchNumber() {
// findStringLen tries to scan into the string literal for ending quote char to determine required size.
// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
-func findStringLen(data []byte) (hasEscapes bool, length int) {
+func findStringLen(data []byte) (isValid, hasEscapes bool, length int) {
delta := 0
for i := 0; i < len(data); i++ {
@@ -252,11 +252,11 @@ func findStringLen(data []byte) (hasEscapes bool, length int) {
delta++
}
case '"':
- return (delta > 0), (i - delta)
+ return true, (delta > 0), (i - delta)
}
}
- return false, len(data)
+ return false, false, len(data)
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
@@ -342,7 +342,12 @@ func (r *Lexer) fetchString() {
r.pos++
data := r.Data[r.pos:]
- hasEscapes, length := findStringLen(data)
+ isValid, hasEscapes, length := findStringLen(data)
+ if !isValid {
+ r.pos += length
+ r.errParse("unterminated string literal")
+ return
+ }
if !hasEscapes {
r.token.byteValue = data[:length]
r.pos += length + 1
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go
index 2164497ad..82568a1bb 100644
--- a/vendor/github.com/mattn/go-runewidth/runewidth.go
+++ b/vendor/github.com/mattn/go-runewidth/runewidth.go
@@ -1,13 +1,24 @@
package runewidth
+import "os"
+
var (
// EastAsianWidth will be set true if the current locale is CJK
- EastAsianWidth = IsEastAsian()
+ EastAsianWidth bool
// DefaultCondition is a condition in current locale
DefaultCondition = &Condition{EastAsianWidth}
)
+func init() {
+ env := os.Getenv("RUNEWIDTH_EASTASIAN")
+ if env == "" {
+ EastAsianWidth = IsEastAsian()
+ } else {
+ EastAsianWidth = env == "1"
+ }
+}
+
type interval struct {
first rune
last rune
@@ -55,6 +66,7 @@ var private = table{
var nonprint = table{
{0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
{0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
+ {0x2028, 0x2029},
{0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
}
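With this change the East Asian width handling can be forced via the `RUNEWIDTH_EASTASIAN` environment variable; since it is read in `init`, it must be set before the program starts. A small sketch of the observable effect (output values assume a non-CJK locale unless the variable is set):

```go
// Run as: RUNEWIDTH_EASTASIAN=1 go run main.go   (or =0 to force it off)
package main

import (
	"fmt"

	runewidth "github.com/mattn/go-runewidth"
)

func main() {
	fmt.Println(runewidth.StringWidth("世界")) // 4: double-width in either mode
	fmt.Println(runewidth.StringWidth("±"))    // 2 when EastAsianWidth, else 1
	fmt.Println(runewidth.EastAsianWidth)      // reflects the env var / locale
}
```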
diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod
new file mode 100644
index 000000000..7efa09a04
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/go.mod
@@ -0,0 +1 @@
+module github.com/mitchellh/go-homedir
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
index acbb605d5..fb87bef94 100644
--- a/vendor/github.com/mitchellh/go-homedir/homedir.go
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -141,14 +141,16 @@ func dirWindows() (string, error) {
return home, nil
}
+ // Prefer standard environment variable USERPROFILE
+ if home := os.Getenv("USERPROFILE"); home != "" {
+ return home, nil
+ }
+
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home := drive + path
if drive == "" || path == "" {
- home = os.Getenv("USERPROFILE")
- }
- if home == "" {
- return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
+ return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE is blank")
}
return home, nil
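The go-homedir change only reorders the Windows lookup to prefer `USERPROFILE`; the public API is untouched, so callers keep using `Dir` as before. A minimal usage sketch:

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	home, err := homedir.Dir() // on Windows this now prefers %USERPROFILE%
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(home)
}
```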
diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod
new file mode 100644
index 000000000..d2a712562
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/go.mod
@@ -0,0 +1 @@
+module github.com/mitchellh/mapstructure
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index b86290afa..ec3d86ba7 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -359,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn {
}
return p.readingValue
default:
- p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
return nil
}
}
diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml
index 5416cf8a2..66a0b7cf7 100644
--- a/vendor/github.com/prometheus/procfs/.travis.yml
+++ b/vendor/github.com/prometheus/procfs/.travis.yml
@@ -3,11 +3,8 @@ sudo: false
language: go
go:
-- 1.7.x
-- 1.8.x
- 1.9.x
- 1.10.x
-- 1.x
go_import_path: github.com/prometheus/procfs
diff --git a/vendor/github.com/rs/cors/README.md b/vendor/github.com/rs/cors/README.md
index 425ed9624..87413cca1 100644
--- a/vendor/github.com/rs/cors/README.md
+++ b/vendor/github.com/rs/cors/README.md
@@ -49,6 +49,14 @@ The server now runs on `localhost:8080`:
{"hello": "world"}
+### Allow * With Credentials Security Protection
+
+This library has been modified to avoid a well-known security issue when configured with `AllowedOrigins` to `*` and `AllowCredentials` to `true`. Such a setup used to make the library reflect the request `Origin` header value, working around a security protection embedded in the standard that makes clients refuse such a configuration. This behavior has been removed with [#55](https://github.com/rs/cors/issues/55) and [#57](https://github.com/rs/cors/issues/57).
+
+If you depend on this behavior and understand the implications, you can restore it by setting `AllowOriginFunc` to `func(origin string) bool { return true }`.
+
+Please refer to [#55](https://github.com/rs/cors/issues/55) for more information about the security implications.
+
### More Examples
* `net/http`: [examples/nethttp/server.go](https://github.com/rs/cors/blob/master/examples/nethttp/server.go)
diff --git a/vendor/github.com/rs/cors/cors.go b/vendor/github.com/rs/cors/cors.go
index 0aa4f51f1..15181089a 100644
--- a/vendor/github.com/rs/cors/cors.go
+++ b/vendor/github.com/rs/cors/cors.go
@@ -174,7 +174,7 @@ func AllowAll() *Cors {
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"HEAD", "GET", "POST", "PUT", "PATCH", "DELETE"},
AllowedHeaders: []string{"*"},
- AllowCredentials: true,
+ AllowCredentials: false,
})
}
@@ -269,7 +269,7 @@ func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) {
c.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders)
return
}
- if c.allowedOriginsAll && !c.allowCredentials {
+ if c.allowedOriginsAll {
headers.Set("Access-Control-Allow-Origin", "*")
} else {
headers.Set("Access-Control-Allow-Origin", origin)
@@ -321,7 +321,7 @@ func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) {
return
}
- if c.allowedOriginsAll && !c.allowCredentials {
+ if c.allowedOriginsAll {
headers.Set("Access-Control-Allow-Origin", "*")
} else {
headers.Set("Access-Control-Allow-Origin", origin)
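After these changes `AllowAll()` no longer enables credentials and the `*` origin is sent as-is rather than reflecting the request origin. For callers that depended on the old behavior, the README above points at `AllowOriginFunc`; a hedged sketch of that explicit opt-in (only appropriate if the implications in rs/cors#55 are understood):

```go
package main

import (
	"net/http"

	"github.com/rs/cors"
)

func main() {
	// Explicitly reflect every Origin while allowing credentials. This is the
	// behavior the library stopped doing implicitly.
	c := cors.New(cors.Options{
		AllowOriginFunc:  func(origin string) bool { return true },
		AllowCredentials: true,
	})

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"hello": "world"}`))
	})

	_ = http.ListenAndServe(":8080", c.Handler(mux))
}
```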
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
index a23296a53..2f19b4a75 100644
--- a/vendor/github.com/sirupsen/logrus/.travis.yml
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -1,9 +1,7 @@
language: go
go:
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - tip
+ - 1.9.x
+ - 1.10.x
env:
- GOMAXPROCS=4 GORACE=halt_on_error=1
install:
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
index f77819b16..072e99be3 100644
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -241,60 +241,8 @@ func init() {
```
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
-| Hook | Description |
-| ----- | ----------- |
-| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
-| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
-| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
-| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
-| [Application Insights](https://github.com/jjcollinge/logrus-appinsights) | Hook for logging to [Application Insights](https://azure.microsoft.com/en-us/services/application-insights/)
-| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage|
-| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
-| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
-| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
-| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
-| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
-| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
-| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
-| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
-| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
-| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
-| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
-| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
-| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
-| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka |
-| [Kafka REST Proxy](https://github.com/Nordstrom/logrus-kafka-rest-proxy) | Hook for logging to [Kafka REST Proxy](https://docs.confluent.io/current/kafka-rest/docs) |
-| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
-| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) |
-| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
-| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
-| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
-| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
-| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
-| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
-| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
-| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
-| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
-| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
-| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
-| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
-| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics |
-| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
-| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
-| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
-| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
-| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
-| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
-| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
-| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
-| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
-| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
-| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
-| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) |
-| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
-| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
-| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
-| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
+A list of currently known service hooks can be found on this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks).
+
#### Level logging
@@ -372,6 +320,8 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * When colors are enabled, levels are truncated to 4 characters by default. To disable
+ truncation, set the `DisableLevelTruncation` field to `true`.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
@@ -495,7 +445,7 @@ logrus.RegisterExitHandler(handler)
#### Thread safety
-By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
Situation when locking is not needed includes:
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index 778f4c9f0..473bd1a0d 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -48,7 +48,7 @@ type Entry struct {
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
- // Default is three fields, give a little extra room
+ // Default is five fields, give a little extra room
Data: make(Fields, 5),
}
}
@@ -83,14 +83,28 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range fields {
data[k] = v
}
- return &Entry{Logger: entry.Logger, Data: data}
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+ return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
- entry.Time = time.Now()
+
+ // Default to now, but allow users to override if they want.
+ //
+ // We don't have to worry about polluting future calls to Entry#log()
+ // with this assignment because this function is declared with a
+ // non-pointer receiver.
+ if entry.Time.IsZero() {
+ entry.Time = time.Now()
+ }
+
entry.Level = level
entry.Message = msg
@@ -113,12 +127,10 @@ func (entry Entry) log(level Level, msg string) {
}
}
-// This function is not declared with a pointer value because otherwise
-// race conditions will occur when using multiple goroutines
-func (entry Entry) fireHooks() {
+func (entry *Entry) fireHooks() {
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
- err := entry.Logger.Hooks.Fire(entry.Level, &entry)
+ err := entry.Logger.Hooks.Fire(entry.Level, entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
index 013183eda..eb612a6f3 100644
--- a/vendor/github.com/sirupsen/logrus/exported.go
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -2,6 +2,7 @@ package logrus
import (
"io"
+ "time"
)
var (
@@ -15,9 +16,7 @@ func StandardLogger() *Logger {
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Out = out
+ std.SetOutput(out)
}
// SetFormatter sets the standard logger formatter.
@@ -72,6 +71,15 @@ func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
+// WithTime creates an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+ return std.WithTime(t)
+}
+
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
@@ -107,7 +115,7 @@ func Panic(args ...interface{}) {
std.Panic(args...)
}
-// Fatal logs a message at level Fatal on the standard logger.
+// Fatal logs a message at level Fatal on the standard logger, then the process will exit with status set to 1.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
@@ -147,7 +155,7 @@ func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
-// Fatalf logs a message at level Fatal on the standard logger.
+// Fatalf logs a message at level Fatal on the standard logger, then the process will exit with status set to 1.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
@@ -187,7 +195,7 @@ func Panicln(args ...interface{}) {
std.Panicln(args...)
}
-// Fatalln logs a message at level Fatal on the standard logger.
+// Fatalln logs a message at level Fatal on the standard logger, then the process will exit with status set to 1.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}
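The new package-level `WithTime` (and `Entry.WithTime` above) lets callers stamp a log line with an explicit time instead of `time.Now()`, which is handy when replaying or backfilling events. A short sketch:

```go
package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	evt := time.Date(2018, 8, 1, 12, 0, 0, 0, time.UTC)
	// The entry is logged with evt as its timestamp rather than time.Now().
	log.WithTime(evt).WithField("source", "replay").Info("backfilled event")
}
```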
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
index b183ff5b1..83c74947b 100644
--- a/vendor/github.com/sirupsen/logrus/formatter.go
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -30,16 +30,22 @@ type Formatter interface {
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields) {
- if t, ok := data["time"]; ok {
- data["fields.time"] = t
+func prefixFieldClashes(data Fields, fieldMap FieldMap) {
+ timeKey := fieldMap.resolve(FieldKeyTime)
+ if t, ok := data[timeKey]; ok {
+ data["fields."+timeKey] = t
+ delete(data, timeKey)
}
- if m, ok := data["msg"]; ok {
- data["fields.msg"] = m
+ msgKey := fieldMap.resolve(FieldKeyMsg)
+ if m, ok := data[msgKey]; ok {
+ data["fields."+msgKey] = m
+ delete(data, msgKey)
}
- if l, ok := data["level"]; ok {
- data["fields.level"] = l
+ levelKey := fieldMap.resolve(FieldKeyLevel)
+ if l, ok := data[levelKey]; ok {
+ data["fields."+levelKey] = l
+ delete(data, levelKey)
}
}
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
index fb01c1b10..dab17610f 100644
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -33,6 +33,9 @@ type JSONFormatter struct {
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
+ // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+ DataKey string
+
// FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &JSONFormatter{
@@ -58,7 +61,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data[k] = v
}
}
- prefixFieldClashes(data)
+
+ if f.DataKey != "" {
+ newData := make(Fields, 4)
+ newData[f.DataKey] = data
+ data = newData
+ }
+
+ prefixFieldClashes(data, f.FieldMap)
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
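The new `DataKey` option nests all `WithField` data under a single JSON key so user fields cannot collide with the standard `time`/`level`/`msg` keys. A sketch (the output shown in the comment is approximate):

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{DataKey: "fields"})
	log.WithField("user", "alice").Info("login")
	// => {"fields":{"user":"alice"},"level":"info","msg":"login","time":"..."}
}
```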
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index fdaf8a653..342f7977d 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -5,6 +5,7 @@ import (
"os"
"sync"
"sync/atomic"
+ "time"
)
type Logger struct {
@@ -88,7 +89,7 @@ func (logger *Logger) releaseEntry(entry *Entry) {
}
// Adds a field to the log entry, note that it doesn't log until you call
-// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry()
@@ -112,6 +113,13 @@ func (logger *Logger) WithError(err error) *Entry {
return entry.WithError(err)
}
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithTime(t)
+}
+
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
@@ -316,6 +324,12 @@ func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
+func (logger *Logger) SetOutput(out io.Writer) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Out = out
+}
+
func (logger *Logger) AddHook(hook Hook) {
logger.mu.Lock()
defer logger.mu.Unlock()
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
index 61b21caea..3e5504030 100644
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -20,6 +20,7 @@ const (
var (
baseTimestamp time.Time
+ emptyFieldMap FieldMap
)
func init() {
@@ -50,12 +51,24 @@ type TextFormatter struct {
// be desired.
DisableSorting bool
+ // Disables the truncation of the level text to 4 characters.
+ DisableLevelTruncation bool
+
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// Whether the logger's out is to a terminal
isTerminal bool
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &TextFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message"}}
+ FieldMap FieldMap
+
sync.Once
}
@@ -67,7 +80,8 @@ func (f *TextFormatter) init(entry *Entry) {
// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
- var b *bytes.Buffer
+ prefixFieldClashes(entry.Data, f.FieldMap)
+
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
@@ -76,14 +90,14 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
if !f.DisableSorting {
sort.Strings(keys)
}
+
+ var b *bytes.Buffer
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
- prefixFieldClashes(entry.Data)
-
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
@@ -96,11 +110,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
- f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyTime), entry.Time.Format(timestampFormat))
}
- f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyLevel), entry.Level.String())
if entry.Message != "" {
- f.appendKeyValue(b, "msg", entry.Message)
+ f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyMsg), entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
@@ -124,7 +138,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelColor = blue
}
- levelText := strings.ToUpper(entry.Level.String())[0:4]
+ levelText := strings.ToUpper(entry.Level.String())
+ if !f.DisableLevelTruncation {
+ levelText = levelText[0:4]
+ }
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
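Two `TextFormatter` options are added here: `DisableLevelTruncation`, which only matters for colored output where levels are otherwise cut to four characters, and `FieldMap`, which renames the default keys in the plain `key=value` output. A sketch setting both (behavior notes are in the comments):

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.TextFormatter{
		// Only affects colored (terminal) output: "WARNING" instead of "WARN".
		DisableLevelTruncation: true,
		// Renames the default keys in non-colored key=value output.
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
	})
	log.WithField("component", "auth").Warn("token is about to expire")
}
```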
diff --git a/vendor/github.com/spf13/jwalterweatherman/README.md b/vendor/github.com/spf13/jwalterweatherman/README.md
index d8cfd27ab..932a23fc6 100644
--- a/vendor/github.com/spf13/jwalterweatherman/README.md
+++ b/vendor/github.com/spf13/jwalterweatherman/README.md
@@ -18,7 +18,7 @@ provides a few advantages over using the standard log library alone.
I really wanted a very straightforward library that could seamlessly do
the following things.
-1. Replace all the println, printf, etc statements thought my code with
+1. Replace all the println, printf, etc statements throughout my code with
something more useful
2. Allow the user to easily control what levels are printed to stdout
3. Allow the user to easily control what levels are logged
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
index 12c58db9f..67d530457 100644
--- a/vendor/github.com/spf13/pflag/bytes.go
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -1,6 +1,7 @@
package pflag
import (
+ "encoding/base64"
"encoding/hex"
"fmt"
"strings"
@@ -9,10 +10,12 @@ import (
// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
type bytesHexValue []byte
+// String implements pflag.Value.String.
func (bytesHex bytesHexValue) String() string {
return fmt.Sprintf("%X", []byte(bytesHex))
}
+// Set implements pflag.Value.Set.
func (bytesHex *bytesHexValue) Set(value string) error {
bin, err := hex.DecodeString(strings.TrimSpace(value))
@@ -25,6 +28,7 @@ func (bytesHex *bytesHexValue) Set(value string) error {
return nil
}
+// Type implements pflag.Value.Type.
func (*bytesHexValue) Type() string {
return "bytesHex"
}
@@ -103,3 +107,103 @@ func BytesHex(name string, value []byte, usage string) *[]byte {
func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
return CommandLine.BytesHexP(name, shorthand, value, usage)
}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+ bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesBase64 = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+ return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+ *p = val
+ return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+ bin, err := base64.StdEncoding.DecodeString(sval)
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 return the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
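pflag gains base64-encoded byte-slice flags alongside the existing hex ones. A minimal sketch using the package-level `CommandLine` helpers added above:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// e.g. go run main.go --secret aGVsbG8=
	secret := flag.BytesBase64("secret", nil, "base64-encoded secret value")
	flag.Parse()
	fmt.Printf("decoded %d bytes: %q\n", len(*secret), string(*secret))
}
```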
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index 5eadc84e3..5cc710ccd 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -990,11 +990,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
}
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+
if strings.HasPrefix(shorthands, "test.") {
return
}
- outArgs = args
outShorts = shorthands[1:]
c := shorthands[0]