Diffstat (limited to 'Godeps/_workspace/src/gopkg.in')
-rw-r--r--  Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml | 11
-rw-r--r--  Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE | 27
-rw-r--r--  Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile | 2
-rw-r--r--  Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go | 413
-rw-r--r--  Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go | 527
-rw-r--r--  Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go | 728
-rw-r--r--  Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go | 1418
-rw-r--r--  Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go | 9
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore | 6
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml | 15
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS | 34
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md | 263
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE | 28
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace | 0
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml | 26
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go | 42
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go | 62
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go | 306
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go | 186
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go | 228
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go | 292
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go | 1135
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go | 463
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go | 12
-rw-r--r--  Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go | 561
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml | 19
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE | 27
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/Makefile | 3
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/command.go | 597
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/commands.go | 1246
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/doc.go | 4
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/error.go | 23
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go | 180
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go | 5
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/multi.go | 138
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/parser.go | 262
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go | 54
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go | 91
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/pool.go | 405
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go | 134
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go | 53
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go | 31
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/redis.go | 231
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go | 3333
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/script.go | 52
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go | 291
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go | 185
-rw-r--r--  Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf | 6
50 files changed, 14252 insertions, 0 deletions
diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml
new file mode 100644
index 000000000..ccca6bb4a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+go:
+ - 1.0
+ - 1.1
+ - 1.2
+ - tip
+
+install:
+ - go get launchpad.net/gocheck
+ - go get gopkg.in/bufio.v1
diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE
new file mode 100644
index 000000000..07a316cbf
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The bufio Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile b/Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile
new file mode 100644
index 000000000..038ed47e9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile
@@ -0,0 +1,2 @@
+all:
+ go test gopkg.in/bufio.v1
diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go
new file mode 100644
index 000000000..8b915605b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go
@@ -0,0 +1,413 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bufio
+
+// Simple byte buffer for marshaling data.
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
+ bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
+ lastRead readOp // last read operation, so that Unread* can work correctly.
+}
+
+// The readOp constants describe the last action performed on
+// the buffer, so that UnreadRune and UnreadByte can
+// check for invalid usage.
+type readOp int
+
+const (
+ opInvalid readOp = iota // Non-read operation.
+ opReadRune // Read rune.
+ opRead // Any other read operation.
+)
+
+// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
+var ErrTooLarge = errors.New("bytes.Buffer: too large")
+
+// Bytes returns a slice of the contents of the unread portion of the buffer;
+// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
+// returned slice, the contents of the buffer will change provided there
+// are no intervening method calls on the Buffer.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return "<nil>"
+ }
+ return string(b.buf[b.off:])
+}
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Truncate discards all but the first n unread bytes from the buffer.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+ b.lastRead = opInvalid
+ switch {
+ case n < 0 || n > b.Len():
+ panic("bytes.Buffer: truncation out of range")
+ case n == 0:
+ // Reuse buffer space.
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+n]
+}
+
+// Reset resets the buffer so it has no content.
+// b.Reset() is the same as b.Truncate(0).
+func (b *Buffer) Reset() { b.Truncate(0) }
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+ m := b.Len()
+ // If buffer is empty, reset to recover space.
+ if m == 0 && b.off != 0 {
+ b.Truncate(0)
+ }
+ if len(b.buf)+n > cap(b.buf) {
+ var buf []byte
+ if b.buf == nil && n <= len(b.bootstrap) {
+ buf = b.bootstrap[0:]
+ } else if m+n <= cap(b.buf)/2 {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= cap(b.buf) to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf[:], b.buf[b.off:])
+ buf = b.buf[:m]
+ } else {
+ // not enough space anywhere
+ buf = makeSlice(2*cap(b.buf) + n)
+ copy(buf, b.buf[b.off:])
+ }
+ b.buf = buf
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+m+n]
+ return b.off + m
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+ if n < 0 {
+ panic("bytes.Buffer.Grow: negative count")
+ }
+ m := b.grow(n)
+ b.buf = b.buf[0:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ b.lastRead = opInvalid
+ m := b.grow(len(p))
+ return copy(b.buf[m:], p), nil
+}
+
+// WriteString appends the contents of s to the buffer, growing the buffer as
+// needed. The return value n is the length of s; err is always nil. If the
+// buffer becomes too large, WriteString will panic with ErrTooLarge.
+func (b *Buffer) WriteString(s string) (n int, err error) {
+ b.lastRead = opInvalid
+ m := b.grow(len(s))
+ return copy(b.buf[m:], s), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+ b.lastRead = opInvalid
+ // If buffer is empty, reset to recover space.
+ if b.off >= len(b.buf) {
+ b.Truncate(0)
+ }
+ for {
+ if free := cap(b.buf) - len(b.buf); free < MinRead {
+ // not enough space at end
+ newBuf := b.buf
+ if b.off+free < MinRead {
+ // not enough space using beginning of buffer;
+ // double buffer capacity
+ newBuf = makeSlice(2*cap(b.buf) + MinRead)
+ }
+ copy(newBuf, b.buf[b.off:])
+ b.buf = newBuf[:len(b.buf)-b.off]
+ b.off = 0
+ }
+ m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
+ b.buf = b.buf[0 : len(b.buf)+m]
+ n += int64(m)
+ if e == io.EOF {
+ break
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+ return n, nil // err is EOF, so return nil explicitly
+}
+
+// makeSlice allocates a slice of size n. If the allocation fails, it panics
+// with ErrTooLarge.
+func makeSlice(n int) []byte {
+ // If the make fails, give a known error.
+ defer func() {
+ if recover() != nil {
+ panic(ErrTooLarge)
+ }
+ }()
+ return make([]byte, n)
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+ b.lastRead = opInvalid
+ if b.off < len(b.buf) {
+ nBytes := b.Len()
+ m, e := w.Write(b.buf[b.off:])
+ if m > nBytes {
+ panic("bytes.Buffer.WriteTo: invalid Write count")
+ }
+ b.off += m
+ n = int64(m)
+ if e != nil {
+ return n, e
+ }
+ // all bytes should have been written, by definition of
+ // Write method in io.Writer
+ if m != nBytes {
+ return n, io.ErrShortWrite
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Truncate(0)
+ return
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+ b.lastRead = opInvalid
+ m := b.grow(1)
+ b.buf[m] = c
+ return nil
+}
+
+// WriteRune appends the UTF-8 encoding of Unicode code point r to the
+// buffer, returning its length and an error, which is always nil but is
+// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
+// if it becomes too large, WriteRune will panic with ErrTooLarge.
+func (b *Buffer) WriteRune(r rune) (n int, err error) {
+ if r < utf8.RuneSelf {
+ b.WriteByte(byte(r))
+ return 1, nil
+ }
+ n = utf8.EncodeRune(b.runeBytes[0:], r)
+ b.Write(b.runeBytes[0:n])
+ return n, nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+ b.lastRead = opInvalid
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ if len(p) == 0 {
+ return
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+ b.lastRead = opInvalid
+ m := b.Len()
+ if n > m {
+ n = m
+ }
+ data := b.buf[b.off : b.off+n]
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (c byte, err error) {
+ b.lastRead = opInvalid
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, io.EOF
+ }
+ c = b.buf[b.off]
+ b.off++
+ b.lastRead = opRead
+ return c, nil
+}
+
+// ReadRune reads and returns the next UTF-8-encoded
+// Unicode code point from the buffer.
+// If no bytes are available, the error returned is io.EOF.
+// If the bytes are an erroneous UTF-8 encoding, it
+// consumes one byte and returns U+FFFD, 1.
+func (b *Buffer) ReadRune() (r rune, size int, err error) {
+ b.lastRead = opInvalid
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, 0, io.EOF
+ }
+ b.lastRead = opReadRune
+ c := b.buf[b.off]
+ if c < utf8.RuneSelf {
+ b.off++
+ return rune(c), 1, nil
+ }
+ r, n := utf8.DecodeRune(b.buf[b.off:])
+ b.off += n
+ return r, n, nil
+}
+
+// UnreadRune unreads the last rune returned by ReadRune.
+// If the most recent read or write operation on the buffer was
+// not a ReadRune, UnreadRune returns an error. (In this regard
+// it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Buffer) UnreadRune() error {
+ if b.lastRead != opReadRune {
+ return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
+ }
+ b.lastRead = opInvalid
+ if b.off > 0 {
+ _, n := utf8.DecodeLastRune(b.buf[0:b.off])
+ b.off -= n
+ }
+ return nil
+}
+
+// UnreadByte unreads the last byte returned by the most recent
+// read operation. If write has happened since the last read, UnreadByte
+// returns an error.
+func (b *Buffer) UnreadByte() error {
+ if b.lastRead != opReadRune && b.lastRead != opRead {
+ return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read")
+ }
+ b.lastRead = opInvalid
+ if b.off > 0 {
+ b.off--
+ }
+ return nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+ slice, err := b.readSlice(delim)
+ // return a copy of slice. The buffer's backing array may
+ // be overwritten by later calls.
+ line = append(line, slice...)
+ return
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+ i := bytes.IndexByte(b.buf[b.off:], delim)
+ end := b.off + i + 1
+ if i < 0 {
+ end = len(b.buf)
+ err = io.EOF
+ }
+ line = b.buf[b.off:end]
+ b.off = end
+ b.lastRead = opRead
+ return line, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end
+// in delim.
+func (b *Buffer) ReadString(delim byte) (line string, err error) {
+ slice, err := b.readSlice(delim)
+ return string(slice), err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial
+// contents. It is intended to prepare a Buffer to read existing data. It
+// can also be used to size the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
+
+// NewBufferString creates and initializes a new Buffer using string s as its
+// initial contents. It is intended to prepare a buffer to read an existing
+// string.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBufferString(s string) *Buffer {
+ return &Buffer{buf: []byte(s)}
+}
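
The buffer.go file above is a vendored copy of the standard library's bytes.Buffer, renamed into this package so the buffered Reader and Writer added later in this diff can share it. As a point of reference only (not part of the patch), here is a minimal usage sketch, assuming the package is imported from gopkg.in/bufio.v1 as the vendored Makefile and .travis.yml suggest:

package main

import (
	"fmt"

	bufio "gopkg.in/bufio.v1"
)

func main() {
	// NewBufferString prepares a Buffer for reading existing data.
	b := bufio.NewBufferString("alpha\nbeta\n")

	// ReadString consumes up to and including the delimiter.
	line, err := b.ReadString('\n')
	fmt.Printf("%q %v\n", line, err) // "alpha\n" <nil>

	// Writes append after the unread portion; Len reports what is left to read.
	b.WriteString("gamma\n")
	fmt.Println(b.Len()) // unread bytes: "beta\ngamma\n" -> 11
}
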
diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go
new file mode 100644
index 000000000..ca1ac2105
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go
@@ -0,0 +1,527 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bufio
+
+import (
+ "bytes"
+ "io"
+ "math/rand"
+ "runtime"
+ "testing"
+ "unicode/utf8"
+)
+
+const N = 10000 // make this bigger for a larger (and slower) test
+var data string // test data for write tests
+var testBytes []byte // test data; same as data but as a slice.
+
+func init() {
+ testBytes = make([]byte, N)
+ for i := 0; i < N; i++ {
+ testBytes[i] = 'a' + byte(i%26)
+ }
+ data = string(testBytes)
+}
+
+// Verify that contents of buf match the string s.
+func check(t *testing.T, testname string, buf *Buffer, s string) {
+ bytes := buf.Bytes()
+ str := buf.String()
+ if buf.Len() != len(bytes) {
+ t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
+ }
+
+ if buf.Len() != len(str) {
+ t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
+ }
+
+ if buf.Len() != len(s) {
+ t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
+ }
+
+ if string(bytes) != s {
+ t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
+ }
+}
+
+// Fill buf through n writes of string fus.
+// The initial contents of buf corresponds to the string s;
+// the result is the final contents of buf returned as a string.
+func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string {
+ check(t, testname+" (fill 1)", buf, s)
+ for ; n > 0; n-- {
+ m, err := buf.WriteString(fus)
+ if m != len(fus) {
+ t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus))
+ }
+ if err != nil {
+ t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
+ }
+ s += fus
+ check(t, testname+" (fill 4)", buf, s)
+ }
+ return s
+}
+
+// Fill buf through n writes of byte slice fub.
+// The initial contents of buf corresponds to the string s;
+// the result is the final contents of buf returned as a string.
+func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
+ check(t, testname+" (fill 1)", buf, s)
+ for ; n > 0; n-- {
+ m, err := buf.Write(fub)
+ if m != len(fub) {
+ t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
+ }
+ if err != nil {
+ t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
+ }
+ s += string(fub)
+ check(t, testname+" (fill 4)", buf, s)
+ }
+ return s
+}
+
+func TestNewBuffer(t *testing.T) {
+ buf := NewBuffer(testBytes)
+ check(t, "NewBuffer", buf, data)
+}
+
+func TestNewBufferString(t *testing.T) {
+ buf := NewBufferString(data)
+ check(t, "NewBufferString", buf, data)
+}
+
+// Empty buf through repeated reads into fub.
+// The initial contents of buf corresponds to the string s.
+func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
+ check(t, testname+" (empty 1)", buf, s)
+
+ for {
+ n, err := buf.Read(fub)
+ if n == 0 {
+ break
+ }
+ if err != nil {
+ t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
+ }
+ s = s[n:]
+ check(t, testname+" (empty 3)", buf, s)
+ }
+
+ check(t, testname+" (empty 4)", buf, "")
+}
+
+func TestBasicOperations(t *testing.T) {
+ var buf Buffer
+
+ for i := 0; i < 5; i++ {
+ check(t, "TestBasicOperations (1)", &buf, "")
+
+ buf.Reset()
+ check(t, "TestBasicOperations (2)", &buf, "")
+
+ buf.Truncate(0)
+ check(t, "TestBasicOperations (3)", &buf, "")
+
+ n, err := buf.Write([]byte(data[0:1]))
+ if n != 1 {
+ t.Errorf("wrote 1 byte, but n == %d", n)
+ }
+ if err != nil {
+ t.Errorf("err should always be nil, but err == %s", err)
+ }
+ check(t, "TestBasicOperations (4)", &buf, "a")
+
+ buf.WriteByte(data[1])
+ check(t, "TestBasicOperations (5)", &buf, "ab")
+
+ n, err = buf.Write([]byte(data[2:26]))
+ if n != 24 {
+ t.Errorf("wrote 25 bytes, but n == %d", n)
+ }
+ check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
+
+ buf.Truncate(26)
+ check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
+
+ buf.Truncate(20)
+ check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
+
+ empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
+ empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
+
+ buf.WriteByte(data[1])
+ c, err := buf.ReadByte()
+ if err != nil {
+ t.Error("ReadByte unexpected eof")
+ }
+ if c != data[1] {
+ t.Errorf("ReadByte wrong value c=%v", c)
+ }
+ c, err = buf.ReadByte()
+ if err == nil {
+ t.Error("ReadByte unexpected not eof")
+ }
+ }
+}
+
+func TestLargeStringWrites(t *testing.T) {
+ var buf Buffer
+ limit := 30
+ if testing.Short() {
+ limit = 9
+ }
+ for i := 3; i < limit; i += 3 {
+ s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data)
+ empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i))
+ }
+ check(t, "TestLargeStringWrites (3)", &buf, "")
+}
+
+func TestLargeByteWrites(t *testing.T) {
+ var buf Buffer
+ limit := 30
+ if testing.Short() {
+ limit = 9
+ }
+ for i := 3; i < limit; i += 3 {
+ s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
+ empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
+ }
+ check(t, "TestLargeByteWrites (3)", &buf, "")
+}
+
+func TestLargeStringReads(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i])
+ empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
+ }
+ check(t, "TestLargeStringReads (3)", &buf, "")
+}
+
+func TestLargeByteReads(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+ empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
+ }
+ check(t, "TestLargeByteReads (3)", &buf, "")
+}
+
+func TestMixedReadsAndWrites(t *testing.T) {
+ var buf Buffer
+ s := ""
+ for i := 0; i < 50; i++ {
+ wlen := rand.Intn(len(data))
+ if i%2 == 0 {
+ s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen])
+ } else {
+ s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
+ }
+
+ rlen := rand.Intn(len(data))
+ fub := make([]byte, rlen)
+ n, _ := buf.Read(fub)
+ s = s[n:]
+ }
+ empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
+}
+
+func TestNil(t *testing.T) {
+ var b *Buffer
+ if b.String() != "<nil>" {
+ t.Errorf("expected <nil>; got %q", b.String())
+ }
+}
+
+func TestReadFrom(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+ var b Buffer
+ b.ReadFrom(&buf)
+ empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
+ }
+}
+
+func TestWriteTo(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+ var b Buffer
+ buf.WriteTo(&b)
+ empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
+ }
+}
+
+func TestRuneIO(t *testing.T) {
+ const NRune = 1000
+ // Built a test slice while we write the data
+ b := make([]byte, utf8.UTFMax*NRune)
+ var buf Buffer
+ n := 0
+ for r := rune(0); r < NRune; r++ {
+ size := utf8.EncodeRune(b[n:], r)
+ nbytes, err := buf.WriteRune(r)
+ if err != nil {
+ t.Fatalf("WriteRune(%U) error: %s", r, err)
+ }
+ if nbytes != size {
+ t.Fatalf("WriteRune(%U) expected %d, got %d", r, size, nbytes)
+ }
+ n += size
+ }
+ b = b[0:n]
+
+ // Check the resulting bytes
+ if !bytes.Equal(buf.Bytes(), b) {
+ t.Fatalf("incorrect result from WriteRune: %q not %q", buf.Bytes(), b)
+ }
+
+ p := make([]byte, utf8.UTFMax)
+ // Read it back with ReadRune
+ for r := rune(0); r < NRune; r++ {
+ size := utf8.EncodeRune(p, r)
+ nr, nbytes, err := buf.ReadRune()
+ if nr != r || nbytes != size || err != nil {
+ t.Fatalf("ReadRune(%U) got %U,%d not %U,%d (err=%s)", r, nr, nbytes, r, size, err)
+ }
+ }
+
+ // Check that UnreadRune works
+ buf.Reset()
+ buf.Write(b)
+ for r := rune(0); r < NRune; r++ {
+ r1, size, _ := buf.ReadRune()
+ if err := buf.UnreadRune(); err != nil {
+ t.Fatalf("UnreadRune(%U) got error %q", r, err)
+ }
+ r2, nbytes, err := buf.ReadRune()
+ if r1 != r2 || r1 != r || nbytes != size || err != nil {
+ t.Fatalf("ReadRune(%U) after UnreadRune got %U,%d not %U,%d (err=%s)", r, r2, nbytes, r, size, err)
+ }
+ }
+}
+
+func TestNext(t *testing.T) {
+ b := []byte{0, 1, 2, 3, 4}
+ tmp := make([]byte, 5)
+ for i := 0; i <= 5; i++ {
+ for j := i; j <= 5; j++ {
+ for k := 0; k <= 6; k++ {
+ // 0 <= i <= j <= 5; 0 <= k <= 6
+ // Check that if we start with a buffer
+ // of length j at offset i and ask for
+ // Next(k), we get the right bytes.
+ buf := NewBuffer(b[0:j])
+ n, _ := buf.Read(tmp[0:i])
+ if n != i {
+ t.Fatalf("Read %d returned %d", i, n)
+ }
+ bb := buf.Next(k)
+ want := k
+ if want > j-i {
+ want = j - i
+ }
+ if len(bb) != want {
+ t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
+ }
+ for l, v := range bb {
+ if v != byte(l+i) {
+ t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
+ }
+ }
+ }
+ }
+ }
+}
+
+var readBytesTests = []struct {
+ buffer string
+ delim byte
+ expected []string
+ err error
+}{
+ {"", 0, []string{""}, io.EOF},
+ {"a\x00", 0, []string{"a\x00"}, nil},
+ {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
+ {"hello\x01world", 1, []string{"hello\x01"}, nil},
+ {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
+ {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
+ {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
+}
+
+func TestReadBytes(t *testing.T) {
+ for _, test := range readBytesTests {
+ buf := NewBufferString(test.buffer)
+ var err error
+ for _, expected := range test.expected {
+ var bytes []byte
+ bytes, err = buf.ReadBytes(test.delim)
+ if string(bytes) != expected {
+ t.Errorf("expected %q, got %q", expected, bytes)
+ }
+ if err != nil {
+ break
+ }
+ }
+ if err != test.err {
+ t.Errorf("expected error %v, got %v", test.err, err)
+ }
+ }
+}
+
+func TestReadString(t *testing.T) {
+ for _, test := range readBytesTests {
+ buf := NewBufferString(test.buffer)
+ var err error
+ for _, expected := range test.expected {
+ var s string
+ s, err = buf.ReadString(test.delim)
+ if s != expected {
+ t.Errorf("expected %q, got %q", expected, s)
+ }
+ if err != nil {
+ break
+ }
+ }
+ if err != test.err {
+ t.Errorf("expected error %v, got %v", test.err, err)
+ }
+ }
+}
+
+func BenchmarkReadString(b *testing.B) {
+ const n = 32 << 10
+
+ data := make([]byte, n)
+ data[n-1] = 'x'
+ b.SetBytes(int64(n))
+ for i := 0; i < b.N; i++ {
+ buf := NewBuffer(data)
+ _, err := buf.ReadString('x')
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestGrow(t *testing.T) {
+ x := []byte{'x'}
+ y := []byte{'y'}
+ tmp := make([]byte, 72)
+ for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
+ xBytes := bytes.Repeat(x, startLen)
+ for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
+ buf := NewBuffer(xBytes)
+ // If we read, this affects buf.off, which is good to test.
+ readBytes, _ := buf.Read(tmp)
+ buf.Grow(growLen)
+ yBytes := bytes.Repeat(y, growLen)
+ // Check no allocation occurs in write, as long as we're single-threaded.
+ var m1, m2 runtime.MemStats
+ runtime.ReadMemStats(&m1)
+ buf.Write(yBytes)
+ runtime.ReadMemStats(&m2)
+ if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
+ t.Errorf("allocation occurred during write")
+ }
+ // Check that buffer has correct data.
+ if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
+ t.Errorf("bad initial data at %d %d", startLen, growLen)
+ }
+ if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
+ t.Errorf("bad written data at %d %d", startLen, growLen)
+ }
+ }
+ }
+}
+
+// Was a bug: used to give EOF reading empty slice at EOF.
+func TestReadEmptyAtEOF(t *testing.T) {
+ b := new(Buffer)
+ slice := make([]byte, 0)
+ n, err := b.Read(slice)
+ if err != nil {
+ t.Errorf("read error: %v", err)
+ }
+ if n != 0 {
+ t.Errorf("wrong count; got %d want 0", n)
+ }
+}
+
+func TestBufferUnreadByte(t *testing.T) {
+ b := new(Buffer)
+ b.WriteString("abcdefghijklmnopqrstuvwxyz")
+
+ _, err := b.ReadBytes('m')
+ if err != nil {
+ t.Fatalf("ReadBytes: %v", err)
+ }
+
+ err = b.UnreadByte()
+ if err != nil {
+ t.Fatalf("UnreadByte: %v", err)
+ }
+ c, err := b.ReadByte()
+ if err != nil {
+ t.Fatalf("ReadByte: %v", err)
+ }
+ if c != 'm' {
+ t.Errorf("ReadByte = %q; want %q", c, 'm')
+ }
+}
+
+// Tests that we occasionally compact. Issue 5154.
+func TestBufferGrowth(t *testing.T) {
+ var b Buffer
+ buf := make([]byte, 1024)
+ b.Write(buf[0:1])
+ var cap0 int
+ for i := 0; i < 5<<10; i++ {
+ b.Write(buf)
+ b.Read(buf)
+ if i == 0 {
+ cap0 = b.Cap()
+ }
+ }
+ cap1 := b.Cap()
+ // (*Buffer).grow allows for 2x capacity slop before sliding,
+ // so set our error threshold at 3x.
+ if cap1 > cap0*3 {
+ t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
+ }
+}
+
+// From Issue 5154.
+func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
+ buf := make([]byte, 1024)
+ for i := 0; i < b.N; i++ {
+ var b Buffer
+ b.Write(buf[0:1])
+ for i := 0; i < 5<<10; i++ {
+ b.Write(buf)
+ b.Read(buf)
+ }
+ }
+}
+
+// Check that we don't compact too often. From Issue 5154.
+func BenchmarkBufferFullSmallReads(b *testing.B) {
+ buf := make([]byte, 1024)
+ for i := 0; i < b.N; i++ {
+ var b Buffer
+ b.Write(buf)
+ for b.Len()+20 < b.Cap() {
+ b.Write(buf[:10])
+ }
+ for i := 0; i < 5<<10; i++ {
+ b.Read(buf[:1])
+ b.Write(buf[:1])
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go
new file mode 100644
index 000000000..8f5cdc084
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go
@@ -0,0 +1,728 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
+// object, creating another object (Reader or Writer) that also implements
+// the interface but provides buffering and some help for textual I/O.
+package bufio
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+const (
+ defaultBufSize = 4096
+)
+
+var (
+ ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
+ ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
+ ErrBufferFull = errors.New("bufio: buffer full")
+ ErrNegativeCount = errors.New("bufio: negative count")
+)
+
+// Buffered input.
+
+// Reader implements buffering for an io.Reader object.
+type Reader struct {
+ buf []byte
+ rd io.Reader
+ r, w int
+ err error
+ lastByte int
+ lastRuneSize int
+}
+
+const minReadBufferSize = 16
+const maxConsecutiveEmptyReads = 100
+
+// NewReaderSize returns a new Reader whose buffer has at least the specified
+// size. If the argument io.Reader is already a Reader with large enough
+// size, it returns the underlying Reader.
+func NewReaderSize(rd io.Reader, size int) *Reader {
+ // Is it already a Reader?
+ b, ok := rd.(*Reader)
+ if ok && len(b.buf) >= size {
+ return b
+ }
+ if size < minReadBufferSize {
+ size = minReadBufferSize
+ }
+ r := new(Reader)
+ r.reset(make([]byte, size), rd)
+ return r
+}
+
+// NewReader returns a new Reader whose buffer has the default size.
+func NewReader(rd io.Reader) *Reader {
+ return NewReaderSize(rd, defaultBufSize)
+}
+
+// Reset discards any buffered data, resets all state, and switches
+// the buffered reader to read from r.
+func (b *Reader) Reset(r io.Reader) {
+ b.reset(b.buf, r)
+}
+
+func (b *Reader) reset(buf []byte, r io.Reader) {
+ *b = Reader{
+ buf: buf,
+ rd: r,
+ lastByte: -1,
+ lastRuneSize: -1,
+ }
+}
+
+var errNegativeRead = errors.New("bufio: reader returned negative count from Read")
+
+// fill reads a new chunk into the buffer.
+func (b *Reader) fill() {
+ // Slide existing data to beginning.
+ if b.r > 0 {
+ copy(b.buf, b.buf[b.r:b.w])
+ b.w -= b.r
+ b.r = 0
+ }
+
+ if b.w >= len(b.buf) {
+ panic("bufio: tried to fill full buffer")
+ }
+
+ // Read new data: try a limited number of times.
+ for i := maxConsecutiveEmptyReads; i > 0; i-- {
+ n, err := b.rd.Read(b.buf[b.w:])
+ if n < 0 {
+ panic(errNegativeRead)
+ }
+ b.w += n
+ if err != nil {
+ b.err = err
+ return
+ }
+ if n > 0 {
+ return
+ }
+ }
+ b.err = io.ErrNoProgress
+}
+
+func (b *Reader) readErr() error {
+ err := b.err
+ b.err = nil
+ return err
+}
+
+// Peek returns the next n bytes without advancing the reader. The bytes stop
+// being valid at the next read call. If Peek returns fewer than n bytes, it
+// also returns an error explaining why the read is short. The error is
+// ErrBufferFull if n is larger than b's buffer size.
+func (b *Reader) Peek(n int) ([]byte, error) {
+ if n < 0 {
+ return nil, ErrNegativeCount
+ }
+ if n > len(b.buf) {
+ return nil, ErrBufferFull
+ }
+ // 0 <= n <= len(b.buf)
+ for b.w-b.r < n && b.err == nil {
+ b.fill() // b.w-b.r < len(b.buf) => buffer is not full
+ }
+ m := b.w - b.r
+ if m > n {
+ m = n
+ }
+ var err error
+ if m < n {
+ err = b.readErr()
+ if err == nil {
+ err = ErrBufferFull
+ }
+ }
+ return b.buf[b.r : b.r+m], err
+}
+
+// Read reads data into p.
+// It returns the number of bytes read into p.
+// It calls Read at most once on the underlying Reader,
+// hence n may be less than len(p).
+// At EOF, the count will be zero and err will be io.EOF.
+func (b *Reader) Read(p []byte) (n int, err error) {
+ n = len(p)
+ if n == 0 {
+ return 0, b.readErr()
+ }
+ if b.r == b.w {
+ if b.err != nil {
+ return 0, b.readErr()
+ }
+ if len(p) >= len(b.buf) {
+ // Large read, empty buffer.
+ // Read directly into p to avoid copy.
+ n, b.err = b.rd.Read(p)
+ if n < 0 {
+ panic(errNegativeRead)
+ }
+ if n > 0 {
+ b.lastByte = int(p[n-1])
+ b.lastRuneSize = -1
+ }
+ return n, b.readErr()
+ }
+ b.fill() // buffer is empty
+ if b.w == b.r {
+ return 0, b.readErr()
+ }
+ }
+
+ if n > b.w-b.r {
+ n = b.w - b.r
+ }
+ copy(p[0:n], b.buf[b.r:])
+ b.r += n
+ b.lastByte = int(b.buf[b.r-1])
+ b.lastRuneSize = -1
+ return n, nil
+}
+
+// ReadByte reads and returns a single byte.
+// If no byte is available, returns an error.
+func (b *Reader) ReadByte() (c byte, err error) {
+ b.lastRuneSize = -1
+ for b.r == b.w {
+ if b.err != nil {
+ return 0, b.readErr()
+ }
+ b.fill() // buffer is empty
+ }
+ c = b.buf[b.r]
+ b.r++
+ b.lastByte = int(c)
+ return c, nil
+}
+
+// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
+func (b *Reader) UnreadByte() error {
+ if b.lastByte < 0 || b.r == 0 && b.w > 0 {
+ return ErrInvalidUnreadByte
+ }
+ // b.r > 0 || b.w == 0
+ if b.r > 0 {
+ b.r--
+ } else {
+ // b.r == 0 && b.w == 0
+ b.w = 1
+ }
+ b.buf[b.r] = byte(b.lastByte)
+ b.lastByte = -1
+ b.lastRuneSize = -1
+ return nil
+}
+
+// ReadRune reads a single UTF-8 encoded Unicode character and returns the
+// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
+// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
+func (b *Reader) ReadRune() (r rune, size int, err error) {
+ for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) {
+ b.fill() // b.w-b.r < len(buf) => buffer is not full
+ }
+ b.lastRuneSize = -1
+ if b.r == b.w {
+ return 0, 0, b.readErr()
+ }
+ r, size = rune(b.buf[b.r]), 1
+ if r >= 0x80 {
+ r, size = utf8.DecodeRune(b.buf[b.r:b.w])
+ }
+ b.r += size
+ b.lastByte = int(b.buf[b.r-1])
+ b.lastRuneSize = size
+ return r, size, nil
+}
+
+// UnreadRune unreads the last rune. If the most recent read operation on
+// the buffer was not a ReadRune, UnreadRune returns an error. (In this
+// regard it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Reader) UnreadRune() error {
+ if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
+ return ErrInvalidUnreadRune
+ }
+ b.r -= b.lastRuneSize
+ b.lastByte = -1
+ b.lastRuneSize = -1
+ return nil
+}
+
+// Buffered returns the number of bytes that can be read from the current buffer.
+func (b *Reader) Buffered() int { return b.w - b.r }
+
+// ReadSlice reads until the first occurrence of delim in the input,
+// returning a slice pointing at the bytes in the buffer.
+// The bytes stop being valid at the next read.
+// If ReadSlice encounters an error before finding a delimiter,
+// it returns all the data in the buffer and the error itself (often io.EOF).
+// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
+// Because the data returned from ReadSlice will be overwritten
+// by the next I/O operation, most clients should use
+// ReadBytes or ReadString instead.
+// ReadSlice returns err != nil if and only if line does not end in delim.
+func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
+ for {
+ // Search buffer.
+ if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 {
+ line = b.buf[b.r : b.r+i+1]
+ b.r += i + 1
+ break
+ }
+
+ // Pending error?
+ if b.err != nil {
+ line = b.buf[b.r:b.w]
+ b.r = b.w
+ err = b.readErr()
+ break
+ }
+
+ // Buffer full?
+ if n := b.Buffered(); n >= len(b.buf) {
+ b.r = b.w
+ line = b.buf
+ err = ErrBufferFull
+ break
+ }
+
+ b.fill() // buffer is not full
+ }
+
+ // Handle last byte, if any.
+ if i := len(line) - 1; i >= 0 {
+ b.lastByte = int(line[i])
+ }
+
+ return
+}
+
+// ReadN tries to read exactly n bytes.
+// The bytes stop being valid at the next read call.
+// If ReadN encounters an error before reading n bytes,
+// it returns all the data in the buffer and the error itself (often io.EOF).
+// ReadN fails with error ErrBufferFull if the buffer fills
+// without reading N bytes.
+// Because the data returned from ReadN will be overwritten
+// by the next I/O operation, most clients should use
+// ReadBytes or ReadString instead.
+func (b *Reader) ReadN(n int) ([]byte, error) {
+ for b.Buffered() < n {
+ if b.err != nil {
+ buf := b.buf[b.r:b.w]
+ b.r = b.w
+ return buf, b.readErr()
+ }
+
+ // Buffer is full?
+ if b.Buffered() >= len(b.buf) {
+ b.r = b.w
+ return b.buf, ErrBufferFull
+ }
+
+ b.fill()
+ }
+ buf := b.buf[b.r : b.r+n]
+ b.r += n
+ return buf, nil
+}
+
+// ReadLine is a low-level line-reading primitive. Most callers should use
+// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
+//
+// ReadLine tries to return a single line, not including the end-of-line bytes.
+// If the line was too long for the buffer then isPrefix is set and the
+// beginning of the line is returned. The rest of the line will be returned
+// from future calls. isPrefix will be false when returning the last fragment
+// of the line. The returned buffer is only valid until the next call to
+// ReadLine. ReadLine either returns a non-nil line or it returns an error,
+// never both.
+//
+// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
+// No indication or error is given if the input ends without a final line end.
+// Calling UnreadByte after ReadLine will always unread the last byte read
+// (possibly a character belonging to the line end) even if that byte is not
+// part of the line returned by ReadLine.
+func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
+ line, err = b.ReadSlice('\n')
+ if err == ErrBufferFull {
+ // Handle the case where "\r\n" straddles the buffer.
+ if len(line) > 0 && line[len(line)-1] == '\r' {
+ // Put the '\r' back on buf and drop it from line.
+ // Let the next call to ReadLine check for "\r\n".
+ if b.r == 0 {
+ // should be unreachable
+ panic("bufio: tried to rewind past start of buffer")
+ }
+ b.r--
+ line = line[:len(line)-1]
+ }
+ return line, true, nil
+ }
+
+ if len(line) == 0 {
+ if err != nil {
+ line = nil
+ }
+ return
+ }
+ err = nil
+
+ if line[len(line)-1] == '\n' {
+ drop := 1
+ if len(line) > 1 && line[len(line)-2] == '\r' {
+ drop = 2
+ }
+ line = line[:len(line)-drop]
+ }
+ return
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+// For simple uses, a Scanner may be more convenient.
+func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
+ // Use ReadSlice to look for array,
+ // accumulating full buffers.
+ var frag []byte
+ var full [][]byte
+ err = nil
+
+ for {
+ var e error
+ frag, e = b.ReadSlice(delim)
+ if e == nil { // got final fragment
+ break
+ }
+ if e != ErrBufferFull { // unexpected error
+ err = e
+ break
+ }
+
+ // Make a copy of the buffer.
+ buf := make([]byte, len(frag))
+ copy(buf, frag)
+ full = append(full, buf)
+ }
+
+ // Allocate new buffer to hold the full pieces and the fragment.
+ n := 0
+ for i := range full {
+ n += len(full[i])
+ }
+ n += len(frag)
+
+ // Copy full pieces and fragment in.
+ buf := make([]byte, n)
+ n = 0
+ for i := range full {
+ n += copy(buf[n:], full[i])
+ }
+ copy(buf[n:], frag)
+ return buf, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end in
+// delim.
+// For simple uses, a Scanner may be more convenient.
+func (b *Reader) ReadString(delim byte) (line string, err error) {
+ bytes, err := b.ReadBytes(delim)
+ line = string(bytes)
+ return line, err
+}
+
+// WriteTo implements io.WriterTo.
+func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ n, err = b.writeBuf(w)
+ if err != nil {
+ return
+ }
+
+ if r, ok := b.rd.(io.WriterTo); ok {
+ m, err := r.WriteTo(w)
+ n += m
+ return n, err
+ }
+
+ if w, ok := w.(io.ReaderFrom); ok {
+ m, err := w.ReadFrom(b.rd)
+ n += m
+ return n, err
+ }
+
+ if b.w-b.r < len(b.buf) {
+ b.fill() // buffer not full
+ }
+
+ for b.r < b.w {
+ // b.r < b.w => buffer is not empty
+ m, err := b.writeBuf(w)
+ n += m
+ if err != nil {
+ return n, err
+ }
+ b.fill() // buffer is empty
+ }
+
+ if b.err == io.EOF {
+ b.err = nil
+ }
+
+ return n, b.readErr()
+}
+
+// writeBuf writes the Reader's buffer to the writer.
+func (b *Reader) writeBuf(w io.Writer) (int64, error) {
+ n, err := w.Write(b.buf[b.r:b.w])
+ if n < b.r-b.w {
+ panic(errors.New("bufio: writer did not write all data"))
+ }
+ b.r += n
+ return int64(n), err
+}
+
+// buffered output
+
+// Writer implements buffering for an io.Writer object.
+// If an error occurs writing to a Writer, no more data will be
+// accepted and all subsequent writes will return the error.
+// After all data has been written, the client should call the
+// Flush method to guarantee all data has been forwarded to
+// the underlying io.Writer.
+type Writer struct {
+ err error
+ buf []byte
+ n int
+ wr io.Writer
+}
+
+// NewWriterSize returns a new Writer whose buffer has at least the specified
+// size. If the argument io.Writer is already a Writer with large enough
+// size, it returns the underlying Writer.
+func NewWriterSize(w io.Writer, size int) *Writer {
+ // Is it already a Writer?
+ b, ok := w.(*Writer)
+ if ok && len(b.buf) >= size {
+ return b
+ }
+ if size <= 0 {
+ size = defaultBufSize
+ }
+ return &Writer{
+ buf: make([]byte, size),
+ wr: w,
+ }
+}
+
+// NewWriter returns a new Writer whose buffer has the default size.
+func NewWriter(w io.Writer) *Writer {
+ return NewWriterSize(w, defaultBufSize)
+}
+
+// Reset discards any unflushed buffered data, clears any error, and
+// resets b to write its output to w.
+func (b *Writer) Reset(w io.Writer) {
+ b.err = nil
+ b.n = 0
+ b.wr = w
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (b *Writer) Flush() error {
+ err := b.flush()
+ return err
+}
+
+func (b *Writer) flush() error {
+ if b.err != nil {
+ return b.err
+ }
+ if b.n == 0 {
+ return nil
+ }
+ n, err := b.wr.Write(b.buf[0:b.n])
+ if n < b.n && err == nil {
+ err = io.ErrShortWrite
+ }
+ if err != nil {
+ if n > 0 && n < b.n {
+ copy(b.buf[0:b.n-n], b.buf[n:b.n])
+ }
+ b.n -= n
+ b.err = err
+ return err
+ }
+ b.n = 0
+ return nil
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (b *Writer) Available() int { return len(b.buf) - b.n }
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (b *Writer) Buffered() int { return b.n }
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (b *Writer) Write(p []byte) (nn int, err error) {
+ for len(p) > b.Available() && b.err == nil {
+ var n int
+ if b.Buffered() == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, b.err = b.wr.Write(p)
+ } else {
+ n = copy(b.buf[b.n:], p)
+ b.n += n
+ b.flush()
+ }
+ nn += n
+ p = p[n:]
+ }
+ if b.err != nil {
+ return nn, b.err
+ }
+ n := copy(b.buf[b.n:], p)
+ b.n += n
+ nn += n
+ return nn, nil
+}
+
+// WriteByte writes a single byte.
+func (b *Writer) WriteByte(c byte) error {
+ if b.err != nil {
+ return b.err
+ }
+ if b.Available() <= 0 && b.flush() != nil {
+ return b.err
+ }
+ b.buf[b.n] = c
+ b.n++
+ return nil
+}
+
+// WriteRune writes a single Unicode code point, returning
+// the number of bytes written and any error.
+func (b *Writer) WriteRune(r rune) (size int, err error) {
+ if r < utf8.RuneSelf {
+ err = b.WriteByte(byte(r))
+ if err != nil {
+ return 0, err
+ }
+ return 1, nil
+ }
+ if b.err != nil {
+ return 0, b.err
+ }
+ n := b.Available()
+ if n < utf8.UTFMax {
+ if b.flush(); b.err != nil {
+ return 0, b.err
+ }
+ n = b.Available()
+ if n < utf8.UTFMax {
+ // Can only happen if buffer is silly small.
+ return b.WriteString(string(r))
+ }
+ }
+ size = utf8.EncodeRune(b.buf[b.n:], r)
+ b.n += size
+ return size, nil
+}
+
+// WriteString writes a string.
+// It returns the number of bytes written.
+// If the count is less than len(s), it also returns an error explaining
+// why the write is short.
+func (b *Writer) WriteString(s string) (int, error) {
+ nn := 0
+ for len(s) > b.Available() && b.err == nil {
+ n := copy(b.buf[b.n:], s)
+ b.n += n
+ nn += n
+ s = s[n:]
+ b.flush()
+ }
+ if b.err != nil {
+ return nn, b.err
+ }
+ n := copy(b.buf[b.n:], s)
+ b.n += n
+ nn += n
+ return nn, nil
+}
+
+// ReadFrom implements io.ReaderFrom.
+func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ if b.Buffered() == 0 {
+ if w, ok := b.wr.(io.ReaderFrom); ok {
+ return w.ReadFrom(r)
+ }
+ }
+ var m int
+ for {
+ if b.Available() == 0 {
+ if err1 := b.flush(); err1 != nil {
+ return n, err1
+ }
+ }
+ nr := 0
+ for nr < maxConsecutiveEmptyReads {
+ m, err = r.Read(b.buf[b.n:])
+ if m != 0 || err != nil {
+ break
+ }
+ nr++
+ }
+ if nr == maxConsecutiveEmptyReads {
+ return n, io.ErrNoProgress
+ }
+ b.n += m
+ n += int64(m)
+ if err != nil {
+ break
+ }
+ }
+ if err == io.EOF {
+ // If we filled the buffer exactly, flush pre-emptively.
+ if b.Available() == 0 {
+ err = b.flush()
+ } else {
+ err = nil
+ }
+ }
+ return n, err
+}
+
+// buffered input and output
+
+// ReadWriter stores pointers to a Reader and a Writer.
+// It implements io.ReadWriter.
+type ReadWriter struct {
+ *Reader
+ *Writer
+}
+
+// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
+func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
+ return &ReadWriter{r, w}
+}
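
The bufio.go file above tracks the standard library's bufio package but adds Reader.ReadN, which returns a slice of exactly n buffered bytes; that is convenient for length-prefixed wire formats, and presumably why the redis.v2 client vendored in this same commit uses this fork. A rough sketch of how ReadN might be used (illustration only, not part of the patch; it assumes the gopkg.in/bufio.v1 import path used elsewhere in this diff):

package main

import (
	"fmt"
	"strings"

	bufio "gopkg.in/bufio.v1"
)

func main() {
	// A tiny length-prefixed payload: "$5\r\n" announces 5 bytes of data.
	src := strings.NewReader("$5\r\nhello\r\n")
	r := bufio.NewReader(src)

	// ReadSlice consumes the header line, including the '\n'.
	header, err := r.ReadSlice('\n')
	if err != nil {
		panic(err)
	}
	fmt.Printf("header: %q\n", header) // "$5\r\n"

	// ReadN returns exactly n bytes from the buffer; here the 5-byte body
	// plus its trailing CRLF.
	body, err := r.ReadN(5 + 2)
	if err != nil {
		panic(err)
	}
	fmt.Printf("body: %q\n", body[:5]) // "hello"
}
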
diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go
new file mode 100644
index 000000000..f19d9bd28
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go
@@ -0,0 +1,1418 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bufio_test
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "testing"
+ "testing/iotest"
+ "time"
+ "unicode/utf8"
+
+ . "gopkg.in/bufio.v1"
+)
+
+// Reads from a reader and rot13s the result.
+type rot13Reader struct {
+ r io.Reader
+}
+
+func newRot13Reader(r io.Reader) *rot13Reader {
+ r13 := new(rot13Reader)
+ r13.r = r
+ return r13
+}
+
+func (r13 *rot13Reader) Read(p []byte) (int, error) {
+ n, err := r13.r.Read(p)
+ if err != nil {
+ return n, err
+ }
+ for i := 0; i < n; i++ {
+ c := p[i] | 0x20 // lowercase byte
+ if 'a' <= c && c <= 'm' {
+ p[i] += 13
+ } else if 'n' <= c && c <= 'z' {
+ p[i] -= 13
+ }
+ }
+ return n, nil
+}
+
+// Call ReadByte to accumulate the text of a file
+func readBytes(buf *Reader) string {
+ var b [1000]byte
+ nb := 0
+ for {
+ c, err := buf.ReadByte()
+ if err == io.EOF {
+ break
+ }
+ if err == nil {
+ b[nb] = c
+ nb++
+ } else if err != iotest.ErrTimeout {
+ panic("Data: " + err.Error())
+ }
+ }
+ return string(b[0:nb])
+}
+
+func TestReaderSimple(t *testing.T) {
+ data := "hello world"
+ b := NewReader(strings.NewReader(data))
+ if s := readBytes(b); s != "hello world" {
+ t.Errorf("simple hello world test failed: got %q", s)
+ }
+
+ b = NewReader(newRot13Reader(strings.NewReader(data)))
+ if s := readBytes(b); s != "uryyb jbeyq" {
+ t.Errorf("rot13 hello world test failed: got %q", s)
+ }
+}
+
+type readMaker struct {
+ name string
+ fn func(io.Reader) io.Reader
+}
+
+var readMakers = []readMaker{
+ {"full", func(r io.Reader) io.Reader { return r }},
+ {"byte", iotest.OneByteReader},
+ {"half", iotest.HalfReader},
+ {"data+err", iotest.DataErrReader},
+ {"timeout", iotest.TimeoutReader},
+}
+
+// Call ReadString (which ends up calling everything else)
+// to accumulate the text of a file.
+func readLines(b *Reader) string {
+ s := ""
+ for {
+ s1, err := b.ReadString('\n')
+ if err == io.EOF {
+ break
+ }
+ if err != nil && err != iotest.ErrTimeout {
+ panic("GetLines: " + err.Error())
+ }
+ s += s1
+ }
+ return s
+}
+
+// Call Read to accumulate the text of a file
+func reads(buf *Reader, m int) string {
+ var b [1000]byte
+ nb := 0
+ for {
+ n, err := buf.Read(b[nb : nb+m])
+ nb += n
+ if err == io.EOF {
+ break
+ }
+ }
+ return string(b[0:nb])
+}
+
+type bufReader struct {
+ name string
+ fn func(*Reader) string
+}
+
+var bufreaders = []bufReader{
+ {"1", func(b *Reader) string { return reads(b, 1) }},
+ {"2", func(b *Reader) string { return reads(b, 2) }},
+ {"3", func(b *Reader) string { return reads(b, 3) }},
+ {"4", func(b *Reader) string { return reads(b, 4) }},
+ {"5", func(b *Reader) string { return reads(b, 5) }},
+ {"7", func(b *Reader) string { return reads(b, 7) }},
+ {"bytes", readBytes},
+ {"lines", readLines},
+}
+
+const minReadBufferSize = 16
+
+var bufsizes = []int{
+ 0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096,
+}
+
+func TestReader(t *testing.T) {
+ var texts [31]string
+ str := ""
+ all := ""
+ for i := 0; i < len(texts)-1; i++ {
+ texts[i] = str + "\n"
+ all += texts[i]
+ str += string(i%26 + 'a')
+ }
+ texts[len(texts)-1] = all
+
+ for h := 0; h < len(texts); h++ {
+ text := texts[h]
+ for i := 0; i < len(readMakers); i++ {
+ for j := 0; j < len(bufreaders); j++ {
+ for k := 0; k < len(bufsizes); k++ {
+ readmaker := readMakers[i]
+ bufreader := bufreaders[j]
+ bufsize := bufsizes[k]
+ read := readmaker.fn(strings.NewReader(text))
+ buf := NewReaderSize(read, bufsize)
+ s := bufreader.fn(buf)
+ if s != text {
+ t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
+ readmaker.name, bufreader.name, bufsize, text, s)
+ }
+ }
+ }
+ }
+ }
+}
+
+type zeroReader struct{}
+
+func (zeroReader) Read(p []byte) (int, error) {
+ return 0, nil
+}
+
+func TestZeroReader(t *testing.T) {
+ var z zeroReader
+ r := NewReader(z)
+
+ c := make(chan error)
+ go func() {
+ _, err := r.ReadByte()
+ c <- err
+ }()
+
+ select {
+ case err := <-c:
+ if err == nil {
+ t.Error("error expected")
+ } else if err != io.ErrNoProgress {
+ t.Error("unexpected error:", err)
+ }
+ case <-time.After(time.Second):
+ t.Error("test timed out (endless loop in ReadByte?)")
+ }
+}
+
+// A StringReader delivers its data one string segment at a time via Read.
+type StringReader struct {
+ data []string
+ step int
+}
+
+func (r *StringReader) Read(p []byte) (n int, err error) {
+ if r.step < len(r.data) {
+ s := r.data[r.step]
+ n = copy(p, s)
+ r.step++
+ } else {
+ err = io.EOF
+ }
+ return
+}
+
+func readRuneSegments(t *testing.T, segments []string) {
+ got := ""
+ want := strings.Join(segments, "")
+ r := NewReader(&StringReader{data: segments})
+ for {
+ r, _, err := r.ReadRune()
+ if err != nil {
+ if err != io.EOF {
+ return
+ }
+ break
+ }
+ got += string(r)
+ }
+ if got != want {
+ t.Errorf("segments=%v got=%s want=%s", segments, got, want)
+ }
+}
+
+var segmentList = [][]string{
+ {},
+ {""},
+ {"日", "本語"},
+ {"\u65e5", "\u672c", "\u8a9e"},
+ {"\U000065e5", "\U0000672c", "\U00008a9e"},
+ {"\xe6", "\x97\xa5\xe6", "\x9c\xac\xe8\xaa\x9e"},
+ {"Hello", ", ", "World", "!"},
+ {"Hello", ", ", "", "World", "!"},
+}
+
+func TestReadRune(t *testing.T) {
+ for _, s := range segmentList {
+ readRuneSegments(t, s)
+ }
+}
+
+func TestUnreadRune(t *testing.T) {
+ segments := []string{"Hello, world:", "日本語"}
+ r := NewReader(&StringReader{data: segments})
+ got := ""
+ want := strings.Join(segments, "")
+ // Normal execution.
+ for {
+ r1, _, err := r.ReadRune()
+ if err != nil {
+ if err != io.EOF {
+ t.Error("unexpected error on ReadRune:", err)
+ }
+ break
+ }
+ got += string(r1)
+ // Put it back and read it again.
+ if err = r.UnreadRune(); err != nil {
+ t.Fatal("unexpected error on UnreadRune:", err)
+ }
+ r2, _, err := r.ReadRune()
+ if err != nil {
+ t.Fatal("unexpected error reading after unreading:", err)
+ }
+ if r1 != r2 {
+ t.Fatalf("incorrect rune after unread: got %c, want %c", r2, r1)
+ }
+ }
+ if got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestReaderUnreadByte(t *testing.T) {
+ segments := []string{"Hello, ", "world"}
+ r := NewReader(&StringReader{data: segments})
+ got := ""
+ want := strings.Join(segments, "")
+ // Normal execution.
+ for {
+ b1, err := r.ReadByte()
+ if err != nil {
+ if err != io.EOF {
+ t.Error("unexpected error on ReadByte:", err)
+ }
+ break
+ }
+ got += string(b1)
+ // Put it back and read it again.
+ if err = r.UnreadByte(); err != nil {
+ t.Fatal("unexpected error on UnreadByte:", err)
+ }
+ b2, err := r.ReadByte()
+ if err != nil {
+ t.Fatal("unexpected error reading after unreading:", err)
+ }
+ if b1 != b2 {
+ t.Fatalf("incorrect byte after unread: got %q, want %q", b2, b1)
+ }
+ }
+ if got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestUnreadByteMultiple(t *testing.T) {
+ segments := []string{"Hello, ", "world"}
+ data := strings.Join(segments, "")
+ for n := 0; n <= len(data); n++ {
+ r := NewReader(&StringReader{data: segments})
+ // Read n bytes.
+ for i := 0; i < n; i++ {
+ b, err := r.ReadByte()
+ if err != nil {
+ t.Fatalf("n = %d: unexpected error on ReadByte: %v", n, err)
+ }
+ if b != data[i] {
+ t.Fatalf("n = %d: incorrect byte returned from ReadByte: got %q, want %q", n, b, data[i])
+ }
+ }
+ // Unread one byte if there is one.
+ if n > 0 {
+ if err := r.UnreadByte(); err != nil {
+ t.Errorf("n = %d: unexpected error on UnreadByte: %v", n, err)
+ }
+ }
+ // Test that we cannot unread any further.
+ if err := r.UnreadByte(); err == nil {
+ t.Errorf("n = %d: expected error on UnreadByte", n)
+ }
+ }
+}
+
+func TestUnreadByteOthers(t *testing.T) {
+ // A list of readers to use in conjunction with UnreadByte.
+ var readers = []func(*Reader, byte) ([]byte, error){
+ (*Reader).ReadBytes,
+ (*Reader).ReadSlice,
+ func(r *Reader, delim byte) ([]byte, error) {
+ data, err := r.ReadString(delim)
+ return []byte(data), err
+ },
+ // ReadLine doesn't fit the data/pattern easily
+ // so we leave it out. It should be covered via
+ // the ReadSlice test since ReadLine simply calls
+ // ReadSlice, and it's that function that handles
+ // the last byte.
+ }
+
+ // Try all readers with UnreadByte.
+ for rno, read := range readers {
+ // Some input data that is longer than the minimum reader buffer size.
+ const n = 10
+ var buf bytes.Buffer
+ for i := 0; i < n; i++ {
+ buf.WriteString("abcdefg")
+ }
+
+ r := NewReaderSize(&buf, minReadBufferSize)
+ readTo := func(delim byte, want string) {
+ data, err := read(r, delim)
+ if err != nil {
+ t.Fatalf("#%d: unexpected error reading to %c: %v", rno, delim, err)
+ }
+ if got := string(data); got != want {
+ t.Fatalf("#%d: got %q, want %q", rno, got, want)
+ }
+ }
+
+ // Read the data with occasional UnreadByte calls.
+ for i := 0; i < n; i++ {
+ readTo('d', "abcd")
+ for j := 0; j < 3; j++ {
+ if err := r.UnreadByte(); err != nil {
+ t.Fatalf("#%d: unexpected error on UnreadByte: %v", rno, err)
+ }
+ readTo('d', "d")
+ }
+ readTo('g', "efg")
+ }
+
+ // All data should have been read.
+ _, err := r.ReadByte()
+ if err != io.EOF {
+ t.Errorf("#%d: got error %v; want EOF", rno, err)
+ }
+ }
+}
+
+// Test that UnreadRune fails if the preceding operation was not a ReadRune.
+func TestUnreadRuneError(t *testing.T) {
+ buf := make([]byte, 3) // All runes in this test are 3 bytes long
+ r := NewReader(&StringReader{data: []string{"日本語日本語日本語"}})
+ if r.UnreadRune() == nil {
+ t.Error("expected error on UnreadRune from fresh buffer")
+ }
+ _, _, err := r.ReadRune()
+ if err != nil {
+ t.Error("unexpected error on ReadRune (1):", err)
+ }
+ if err = r.UnreadRune(); err != nil {
+ t.Error("unexpected error on UnreadRune (1):", err)
+ }
+ if r.UnreadRune() == nil {
+ t.Error("expected error after UnreadRune (1)")
+ }
+ // Test error after Read.
+ _, _, err = r.ReadRune() // reset state
+ if err != nil {
+ t.Error("unexpected error on ReadRune (2):", err)
+ }
+ _, err = r.Read(buf)
+ if err != nil {
+ t.Error("unexpected error on Read (2):", err)
+ }
+ if r.UnreadRune() == nil {
+ t.Error("expected error after Read (2)")
+ }
+ // Test error after ReadByte.
+ _, _, err = r.ReadRune() // reset state
+ if err != nil {
+ t.Error("unexpected error on ReadRune (2):", err)
+ }
+ for _ = range buf {
+ _, err = r.ReadByte()
+ if err != nil {
+ t.Error("unexpected error on ReadByte (2):", err)
+ }
+ }
+ if r.UnreadRune() == nil {
+ t.Error("expected error after ReadByte")
+ }
+ // Test error after UnreadByte.
+ _, _, err = r.ReadRune() // reset state
+ if err != nil {
+ t.Error("unexpected error on ReadRune (3):", err)
+ }
+ _, err = r.ReadByte()
+ if err != nil {
+ t.Error("unexpected error on ReadByte (3):", err)
+ }
+ err = r.UnreadByte()
+ if err != nil {
+ t.Error("unexpected error on UnreadByte (3):", err)
+ }
+ if r.UnreadRune() == nil {
+ t.Error("expected error after UnreadByte (3)")
+ }
+}
+
+func TestUnreadRuneAtEOF(t *testing.T) {
+ // UnreadRune/ReadRune should error at EOF (was a bug; used to panic)
+ r := NewReader(strings.NewReader("x"))
+ r.ReadRune()
+ r.ReadRune()
+ r.UnreadRune()
+ _, _, err := r.ReadRune()
+ if err == nil {
+ t.Error("expected error at EOF")
+ } else if err != io.EOF {
+ t.Error("expected EOF; got", err)
+ }
+}
+
+func TestReadWriteRune(t *testing.T) {
+ const NRune = 1000
+ byteBuf := new(bytes.Buffer)
+ w := NewWriter(byteBuf)
+ // Write the runes out using WriteRune
+ buf := make([]byte, utf8.UTFMax)
+ for r := rune(0); r < NRune; r++ {
+ size := utf8.EncodeRune(buf, r)
+ nbytes, err := w.WriteRune(r)
+ if err != nil {
+ t.Fatalf("WriteRune(0x%x) error: %s", r, err)
+ }
+ if nbytes != size {
+ t.Fatalf("WriteRune(0x%x) expected %d, got %d", r, size, nbytes)
+ }
+ }
+ w.Flush()
+
+ r := NewReader(byteBuf)
+ // Read them back with ReadRune
+ for r1 := rune(0); r1 < NRune; r1++ {
+ size := utf8.EncodeRune(buf, r1)
+ nr, nbytes, err := r.ReadRune()
+ if nr != r1 || nbytes != size || err != nil {
+ t.Fatalf("ReadRune(0x%x) got 0x%x,%d not 0x%x,%d (err=%s)", r1, nr, nbytes, r1, size, err)
+ }
+ }
+}
+
+func TestWriter(t *testing.T) {
+ var data [8192]byte
+
+ for i := 0; i < len(data); i++ {
+ data[i] = byte(' ' + i%('~'-' '))
+ }
+ w := new(bytes.Buffer)
+ for i := 0; i < len(bufsizes); i++ {
+ for j := 0; j < len(bufsizes); j++ {
+ nwrite := bufsizes[i]
+ bs := bufsizes[j]
+
+ // Write nwrite bytes using buffer size bs.
+ // Check that the right amount makes it out
+ // and that the data is correct.
+
+ w.Reset()
+ buf := NewWriterSize(w, bs)
+ context := fmt.Sprintf("nwrite=%d bufsize=%d", nwrite, bs)
+ n, e1 := buf.Write(data[0:nwrite])
+ if e1 != nil || n != nwrite {
+ t.Errorf("%s: buf.Write %d = %d, %v", context, nwrite, n, e1)
+ continue
+ }
+ if e := buf.Flush(); e != nil {
+ t.Errorf("%s: buf.Flush = %v", context, e)
+ }
+
+ written := w.Bytes()
+ if len(written) != nwrite {
+ t.Errorf("%s: %d bytes written", context, len(written))
+ }
+ for l := 0; l < len(written); l++ {
+ if written[l] != data[l] {
+ t.Errorf("wrong bytes written")
+ t.Errorf("want=%q", data[0:len(written)])
+ t.Errorf("have=%q", written)
+ }
+ }
+ }
+ }
+}
+
+// Check that write errors are returned properly.
+
+type errorWriterTest struct {
+ n, m int
+ err error
+ expect error
+}
+
+func (w errorWriterTest) Write(p []byte) (int, error) {
+ return len(p) * w.n / w.m, w.err
+}
+
+var errorWriterTests = []errorWriterTest{
+ {0, 1, nil, io.ErrShortWrite},
+ {1, 2, nil, io.ErrShortWrite},
+ {1, 1, nil, nil},
+ {0, 1, io.ErrClosedPipe, io.ErrClosedPipe},
+ {1, 2, io.ErrClosedPipe, io.ErrClosedPipe},
+ {1, 1, io.ErrClosedPipe, io.ErrClosedPipe},
+}
+
+func TestWriteErrors(t *testing.T) {
+ for _, w := range errorWriterTests {
+ buf := NewWriter(w)
+ _, e := buf.Write([]byte("hello world"))
+ if e != nil {
+ t.Errorf("Write hello to %v: %v", w, e)
+ continue
+ }
+ // Two flushes, to verify the error is sticky.
+ for i := 0; i < 2; i++ {
+ e = buf.Flush()
+ if e != w.expect {
+ t.Errorf("Flush %d/2 %v: got %v, wanted %v", i+1, w, e, w.expect)
+ }
+ }
+ }
+}
+
+func TestNewReaderSizeIdempotent(t *testing.T) {
+ const BufSize = 1000
+ b := NewReaderSize(strings.NewReader("hello world"), BufSize)
+ // Does it recognize itself?
+ b1 := NewReaderSize(b, BufSize)
+ if b1 != b {
+ t.Error("NewReaderSize did not detect underlying Reader")
+ }
+ // Does it wrap if existing buffer is too small?
+ b2 := NewReaderSize(b, 2*BufSize)
+ if b2 == b {
+ t.Error("NewReaderSize did not enlarge buffer")
+ }
+}
+
+func TestNewWriterSizeIdempotent(t *testing.T) {
+ const BufSize = 1000
+ b := NewWriterSize(new(bytes.Buffer), BufSize)
+ // Does it recognize itself?
+ b1 := NewWriterSize(b, BufSize)
+ if b1 != b {
+ t.Error("NewWriterSize did not detect underlying Writer")
+ }
+ // Does it wrap if existing buffer is too small?
+ b2 := NewWriterSize(b, 2*BufSize)
+ if b2 == b {
+ t.Error("NewWriterSize did not enlarge buffer")
+ }
+}
+
+func TestWriteString(t *testing.T) {
+ const BufSize = 8
+ buf := new(bytes.Buffer)
+ b := NewWriterSize(buf, BufSize)
+ b.WriteString("0") // easy
+ b.WriteString("123456") // still easy
+ b.WriteString("7890") // easy after flush
+ b.WriteString("abcdefghijklmnopqrstuvwxy") // hard
+ b.WriteString("z")
+ if err := b.Flush(); err != nil {
+ t.Error("WriteString", err)
+ }
+ s := "01234567890abcdefghijklmnopqrstuvwxyz"
+ if string(buf.Bytes()) != s {
+ t.Errorf("WriteString wants %q gets %q", s, string(buf.Bytes()))
+ }
+}
+
+func TestBufferFull(t *testing.T) {
+ const longString = "And now, hello, world! It is the time for all good men to come to the aid of their party"
+ buf := NewReaderSize(strings.NewReader(longString), minReadBufferSize)
+ line, err := buf.ReadSlice('!')
+ if string(line) != "And now, hello, " || err != ErrBufferFull {
+ t.Errorf("first ReadSlice(,) = %q, %v", line, err)
+ }
+ line, err = buf.ReadSlice('!')
+ if string(line) != "world!" || err != nil {
+ t.Errorf("second ReadSlice(,) = %q, %v", line, err)
+ }
+}
+
+func TestPeek(t *testing.T) {
+ p := make([]byte, 10)
+ // string is 16 (minReadBufferSize) bytes long.
+ buf := NewReaderSize(strings.NewReader("abcdefghijklmnop"), minReadBufferSize)
+ if s, err := buf.Peek(1); string(s) != "a" || err != nil {
+ t.Fatalf("want %q got %q, err=%v", "a", string(s), err)
+ }
+ if s, err := buf.Peek(4); string(s) != "abcd" || err != nil {
+ t.Fatalf("want %q got %q, err=%v", "abcd", string(s), err)
+ }
+ if _, err := buf.Peek(-1); err != ErrNegativeCount {
+ t.Fatalf("want ErrNegativeCount got %v", err)
+ }
+ if _, err := buf.Peek(32); err != ErrBufferFull {
+ t.Fatalf("want ErrBufferFull got %v", err)
+ }
+ if _, err := buf.Read(p[0:3]); string(p[0:3]) != "abc" || err != nil {
+ t.Fatalf("want %q got %q, err=%v", "abc", string(p[0:3]), err)
+ }
+ if s, err := buf.Peek(1); string(s) != "d" || err != nil {
+ t.Fatalf("want %q got %q, err=%v", "d", string(s), err)
+ }
+ if s, err := buf.Peek(2); string(s) != "de" || err != nil {
+ t.Fatalf("want %q got %q, err=%v", "de", string(s), err)
+ }
+ if _, err := buf.Read(p[0:3]); string(p[0:3]) != "def" || err != nil {
+ t.Fatalf("want %q got %q, err=%v", "def", string(p[0:3]), err)
+ }
+ if s, err := buf.Peek(4); string(s) != "ghij" || err != nil {
+ t.Fatalf("want %q got %q, err=%v", "ghij", string(s), err)
+ }
+ if _, err := buf.Read(p[0:]); string(p[0:]) != "ghijklmnop" || err != nil {
+ t.Fatalf("want %q got %q, err=%v", "ghijklmnop", string(p[0:]), err)
+ }
+ if s, err := buf.Peek(0); string(s) != "" || err != nil {
+ t.Fatalf("want %q got %q, err=%v", "", string(s), err)
+ }
+ if _, err := buf.Peek(1); err != io.EOF {
+ t.Fatalf("want EOF got %v", err)
+ }
+
+ // Test for issue 3022, not exposing a reader's error on a successful Peek.
+ buf = NewReaderSize(dataAndEOFReader("abcd"), 32)
+ if s, err := buf.Peek(2); string(s) != "ab" || err != nil {
+ t.Errorf(`Peek(2) on "abcd", EOF = %q, %v; want "ab", nil`, string(s), err)
+ }
+ if s, err := buf.Peek(4); string(s) != "abcd" || err != nil {
+ t.Errorf(`Peek(4) on "abcd", EOF = %q, %v; want "abcd", nil`, string(s), err)
+ }
+ if n, err := buf.Read(p[0:5]); string(p[0:n]) != "abcd" || err != nil {
+ t.Fatalf("Read after peek = %q, %v; want abcd, nil", p[0:n], err)
+ }
+ if n, err := buf.Read(p[0:1]); string(p[0:n]) != "" || err != io.EOF {
+ t.Fatalf(`second Read after peek = %q, %v; want "", EOF`, p[0:n], err)
+ }
+}
+
+type dataAndEOFReader string
+
+func (r dataAndEOFReader) Read(p []byte) (int, error) {
+ return copy(p, r), io.EOF
+}
+
+func TestPeekThenUnreadRune(t *testing.T) {
+ // This sequence used to cause a crash.
+ r := NewReader(strings.NewReader("x"))
+ r.ReadRune()
+ r.Peek(1)
+ r.UnreadRune()
+ r.ReadRune() // Used to panic here
+}
+
+var testOutput = []byte("0123456789abcdefghijklmnopqrstuvwxy")
+var testInput = []byte("012\n345\n678\n9ab\ncde\nfgh\nijk\nlmn\nopq\nrst\nuvw\nxy")
+var testInputrn = []byte("012\r\n345\r\n678\r\n9ab\r\ncde\r\nfgh\r\nijk\r\nlmn\r\nopq\r\nrst\r\nuvw\r\nxy\r\n\n\r\n")
+
+// testReader wraps a []byte and returns reads of a specific length.
+type testReader struct {
+ data []byte
+ stride int
+}
+
+func (t *testReader) Read(buf []byte) (n int, err error) {
+ n = t.stride
+ if n > len(t.data) {
+ n = len(t.data)
+ }
+ if n > len(buf) {
+ n = len(buf)
+ }
+ copy(buf, t.data)
+ t.data = t.data[n:]
+ if len(t.data) == 0 {
+ err = io.EOF
+ }
+ return
+}
+
+func testReadLine(t *testing.T, input []byte) {
+ //for stride := 1; stride < len(input); stride++ {
+ for stride := 1; stride < 2; stride++ {
+ done := 0
+ reader := testReader{input, stride}
+ l := NewReaderSize(&reader, len(input)+1)
+ for {
+ line, isPrefix, err := l.ReadLine()
+ if len(line) > 0 && err != nil {
+ t.Errorf("ReadLine returned both data and error: %s", err)
+ }
+ if isPrefix {
+ t.Errorf("ReadLine returned prefix")
+ }
+ if err != nil {
+ if err != io.EOF {
+ t.Fatalf("Got unknown error: %s", err)
+ }
+ break
+ }
+ if want := testOutput[done : done+len(line)]; !bytes.Equal(want, line) {
+ t.Errorf("Bad line at stride %d: want: %x got: %x", stride, want, line)
+ }
+ done += len(line)
+ }
+ if done != len(testOutput) {
+ t.Errorf("ReadLine didn't return everything: got: %d, want: %d (stride: %d)", done, len(testOutput), stride)
+ }
+ }
+}
+
+func TestReadLine(t *testing.T) {
+ testReadLine(t, testInput)
+ testReadLine(t, testInputrn)
+}
+
+func TestLineTooLong(t *testing.T) {
+ data := make([]byte, 0)
+ for i := 0; i < minReadBufferSize*5/2; i++ {
+ data = append(data, '0'+byte(i%10))
+ }
+ buf := bytes.NewReader(data)
+ l := NewReaderSize(buf, minReadBufferSize)
+ line, isPrefix, err := l.ReadLine()
+ if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil {
+ t.Errorf("bad result for first line: got %q want %q %v", line, data[:minReadBufferSize], err)
+ }
+ data = data[len(line):]
+ line, isPrefix, err = l.ReadLine()
+ if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil {
+ t.Errorf("bad result for second line: got %q want %q %v", line, data[:minReadBufferSize], err)
+ }
+ data = data[len(line):]
+ line, isPrefix, err = l.ReadLine()
+ if isPrefix || !bytes.Equal(line, data[:minReadBufferSize/2]) || err != nil {
+ t.Errorf("bad result for third line: got %q want %q %v", line, data[:minReadBufferSize/2], err)
+ }
+ line, isPrefix, err = l.ReadLine()
+ if isPrefix || err == nil {
+ t.Errorf("expected no more lines: %x %s", line, err)
+ }
+}
+
+func TestReadAfterLines(t *testing.T) {
+ line1 := "this is line1"
+ restData := "this is line2\nthis is line 3\n"
+ inbuf := bytes.NewReader([]byte(line1 + "\n" + restData))
+ outbuf := new(bytes.Buffer)
+ maxLineLength := len(line1) + len(restData)/2
+ l := NewReaderSize(inbuf, maxLineLength)
+ line, isPrefix, err := l.ReadLine()
+ if isPrefix || err != nil || string(line) != line1 {
+ t.Errorf("bad result for first line: isPrefix=%v err=%v line=%q", isPrefix, err, string(line))
+ }
+ n, err := io.Copy(outbuf, l)
+ if int(n) != len(restData) || err != nil {
+ t.Errorf("bad result for Read: n=%d err=%v", n, err)
+ }
+ if outbuf.String() != restData {
+ t.Errorf("bad result for Read: got %q; expected %q", outbuf.String(), restData)
+ }
+}
+
+func TestReadEmptyBuffer(t *testing.T) {
+ l := NewReaderSize(new(bytes.Buffer), minReadBufferSize)
+ line, isPrefix, err := l.ReadLine()
+ if err != io.EOF {
+ t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err)
+ }
+}
+
+func TestLinesAfterRead(t *testing.T) {
+ l := NewReaderSize(bytes.NewReader([]byte("foo")), minReadBufferSize)
+ _, err := ioutil.ReadAll(l)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ line, isPrefix, err := l.ReadLine()
+ if err != io.EOF {
+ t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err)
+ }
+}
+
+func TestReadLineNonNilLineOrError(t *testing.T) {
+ r := NewReader(strings.NewReader("line 1\n"))
+ for i := 0; i < 2; i++ {
+ l, _, err := r.ReadLine()
+ if l != nil && err != nil {
+ t.Fatalf("on line %d/2; ReadLine=%#v, %v; want non-nil line or Error, but not both",
+ i+1, l, err)
+ }
+ }
+}
+
+type readLineResult struct {
+ line []byte
+ isPrefix bool
+ err error
+}
+
+var readLineNewlinesTests = []struct {
+ input string
+ expect []readLineResult
+}{
+ {"012345678901234\r\n012345678901234\r\n", []readLineResult{
+ {[]byte("012345678901234"), true, nil},
+ {nil, false, nil},
+ {[]byte("012345678901234"), true, nil},
+ {nil, false, nil},
+ {nil, false, io.EOF},
+ }},
+ {"0123456789012345\r012345678901234\r", []readLineResult{
+ {[]byte("0123456789012345"), true, nil},
+ {[]byte("\r012345678901234"), true, nil},
+ {[]byte("\r"), false, nil},
+ {nil, false, io.EOF},
+ }},
+}
+
+func TestReadLineNewlines(t *testing.T) {
+ for _, e := range readLineNewlinesTests {
+ testReadLineNewlines(t, e.input, e.expect)
+ }
+}
+
+func testReadLineNewlines(t *testing.T, input string, expect []readLineResult) {
+ b := NewReaderSize(strings.NewReader(input), minReadBufferSize)
+ for i, e := range expect {
+ line, isPrefix, err := b.ReadLine()
+ if !bytes.Equal(line, e.line) {
+ t.Errorf("%q call %d, line == %q, want %q", input, i, line, e.line)
+ return
+ }
+ if isPrefix != e.isPrefix {
+ t.Errorf("%q call %d, isPrefix == %v, want %v", input, i, isPrefix, e.isPrefix)
+ return
+ }
+ if err != e.err {
+ t.Errorf("%q call %d, err == %v, want %v", input, i, err, e.err)
+ return
+ }
+ }
+}
+
+func createTestInput(n int) []byte {
+ input := make([]byte, n)
+ for i := range input {
+ // 101 and 251 are arbitrary prime numbers.
+ // The idea is to create an input sequence
+ // which doesn't repeat too frequently.
+ input[i] = byte(i % 251)
+ if i%101 == 0 {
+ input[i] ^= byte(i / 101)
+ }
+ }
+ return input
+}
+
+func TestReaderWriteTo(t *testing.T) {
+ input := createTestInput(8192)
+ r := NewReader(onlyReader{bytes.NewReader(input)})
+ w := new(bytes.Buffer)
+ if n, err := r.WriteTo(w); err != nil || n != int64(len(input)) {
+ t.Fatalf("r.WriteTo(w) = %d, %v, want %d, nil", n, err, len(input))
+ }
+
+ for i, val := range w.Bytes() {
+ if val != input[i] {
+ t.Errorf("after write: out[%d] = %#x, want %#x", i, val, input[i])
+ }
+ }
+}
+
+type errorWriterToTest struct {
+ rn, wn int
+ rerr, werr error
+ expected error
+}
+
+func (r errorWriterToTest) Read(p []byte) (int, error) {
+ return len(p) * r.rn, r.rerr
+}
+
+func (w errorWriterToTest) Write(p []byte) (int, error) {
+ return len(p) * w.wn, w.werr
+}
+
+var errorWriterToTests = []errorWriterToTest{
+ {1, 0, nil, io.ErrClosedPipe, io.ErrClosedPipe},
+ {0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe},
+ {0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrClosedPipe},
+ {0, 1, io.EOF, nil, nil},
+}
+
+func TestReaderWriteToErrors(t *testing.T) {
+ for i, rw := range errorWriterToTests {
+ r := NewReader(rw)
+ if _, err := r.WriteTo(rw); err != rw.expected {
+ t.Errorf("r.WriteTo(errorWriterToTests[%d]) = _, %v, want _,%v", i, err, rw.expected)
+ }
+ }
+}
+
+func TestWriterReadFrom(t *testing.T) {
+ ws := []func(io.Writer) io.Writer{
+ func(w io.Writer) io.Writer { return onlyWriter{w} },
+ func(w io.Writer) io.Writer { return w },
+ }
+
+ rs := []func(io.Reader) io.Reader{
+ iotest.DataErrReader,
+ func(r io.Reader) io.Reader { return r },
+ }
+
+ for ri, rfunc := range rs {
+ for wi, wfunc := range ws {
+ input := createTestInput(8192)
+ b := new(bytes.Buffer)
+ w := NewWriter(wfunc(b))
+ r := rfunc(bytes.NewReader(input))
+ if n, err := w.ReadFrom(r); err != nil || n != int64(len(input)) {
+ t.Errorf("ws[%d],rs[%d]: w.ReadFrom(r) = %d, %v, want %d, nil", wi, ri, n, err, len(input))
+ continue
+ }
+ if err := w.Flush(); err != nil {
+ t.Errorf("Flush returned %v", err)
+ continue
+ }
+ if got, want := b.String(), string(input); got != want {
+ t.Errorf("ws[%d], rs[%d]:\ngot %q\nwant %q\n", wi, ri, got, want)
+ }
+ }
+ }
+}
+
+type errorReaderFromTest struct {
+ rn, wn int
+ rerr, werr error
+ expected error
+}
+
+func (r errorReaderFromTest) Read(p []byte) (int, error) {
+ return len(p) * r.rn, r.rerr
+}
+
+func (w errorReaderFromTest) Write(p []byte) (int, error) {
+ return len(p) * w.wn, w.werr
+}
+
+var errorReaderFromTests = []errorReaderFromTest{
+ {0, 1, io.EOF, nil, nil},
+ {1, 1, io.EOF, nil, nil},
+ {0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe},
+ {0, 0, io.ErrClosedPipe, io.ErrShortWrite, io.ErrClosedPipe},
+ {1, 0, nil, io.ErrShortWrite, io.ErrShortWrite},
+}
+
+func TestWriterReadFromErrors(t *testing.T) {
+ for i, rw := range errorReaderFromTests {
+ w := NewWriter(rw)
+ if _, err := w.ReadFrom(rw); err != rw.expected {
+ t.Errorf("w.ReadFrom(errorReaderFromTests[%d]) = _, %v, want _,%v", i, err, rw.expected)
+ }
+ }
+}
+
+// TestWriterReadFromCounts tests that using io.Copy to copy into a
+// bufio.Writer does not prematurely flush the buffer. For example, when
+// buffering writes to a network socket, excessive network writes should be
+// avoided.
+func TestWriterReadFromCounts(t *testing.T) {
+ var w0 writeCountingDiscard
+ b0 := NewWriterSize(&w0, 1234)
+ b0.WriteString(strings.Repeat("x", 1000))
+ if w0 != 0 {
+ t.Fatalf("write 1000 'x's: got %d writes, want 0", w0)
+ }
+ b0.WriteString(strings.Repeat("x", 200))
+ if w0 != 0 {
+ t.Fatalf("write 1200 'x's: got %d writes, want 0", w0)
+ }
+ io.Copy(b0, onlyReader{strings.NewReader(strings.Repeat("x", 30))})
+ if w0 != 0 {
+ t.Fatalf("write 1230 'x's: got %d writes, want 0", w0)
+ }
+ io.Copy(b0, onlyReader{strings.NewReader(strings.Repeat("x", 9))})
+ if w0 != 1 {
+ t.Fatalf("write 1239 'x's: got %d writes, want 1", w0)
+ }
+
+ var w1 writeCountingDiscard
+ b1 := NewWriterSize(&w1, 1234)
+ b1.WriteString(strings.Repeat("x", 1200))
+ b1.Flush()
+ if w1 != 1 {
+ t.Fatalf("flush 1200 'x's: got %d writes, want 1", w1)
+ }
+ b1.WriteString(strings.Repeat("x", 89))
+ if w1 != 1 {
+ t.Fatalf("write 1200 + 89 'x's: got %d writes, want 1", w1)
+ }
+ io.Copy(b1, onlyReader{strings.NewReader(strings.Repeat("x", 700))})
+ if w1 != 1 {
+ t.Fatalf("write 1200 + 789 'x's: got %d writes, want 1", w1)
+ }
+ io.Copy(b1, onlyReader{strings.NewReader(strings.Repeat("x", 600))})
+ if w1 != 2 {
+ t.Fatalf("write 1200 + 1389 'x's: got %d writes, want 2", w1)
+ }
+ b1.Flush()
+ if w1 != 3 {
+ t.Fatalf("flush 1200 + 1389 'x's: got %d writes, want 3", w1)
+ }
+}
+
+// A writeCountingDiscard is like ioutil.Discard and counts the number of times
+// Write is called on it.
+type writeCountingDiscard int
+
+func (w *writeCountingDiscard) Write(p []byte) (int, error) {
+ *w++
+ return len(p), nil
+}
+
+type negativeReader int
+
+func (r *negativeReader) Read([]byte) (int, error) { return -1, nil }
+
+func TestNegativeRead(t *testing.T) {
+ // should panic with a description pointing at the reader, not at itself.
+ // (should NOT panic with slice index error, for example.)
+ b := NewReader(new(negativeReader))
+ defer func() {
+ switch err := recover().(type) {
+ case nil:
+ t.Fatal("read did not panic")
+ case error:
+ if !strings.Contains(err.Error(), "reader returned negative count from Read") {
+ t.Fatalf("wrong panic: %v", err)
+ }
+ default:
+ t.Fatalf("unexpected panic value: %T(%v)", err, err)
+ }
+ }()
+ b.Read(make([]byte, 100))
+}
+
+var errFake = errors.New("fake error")
+
+type errorThenGoodReader struct {
+ didErr bool
+ nread int
+}
+
+func (r *errorThenGoodReader) Read(p []byte) (int, error) {
+ r.nread++
+ if !r.didErr {
+ r.didErr = true
+ return 0, errFake
+ }
+ return len(p), nil
+}
+
+func TestReaderClearError(t *testing.T) {
+ r := &errorThenGoodReader{}
+ b := NewReader(r)
+ buf := make([]byte, 1)
+ if _, err := b.Read(nil); err != nil {
+ t.Fatalf("1st nil Read = %v; want nil", err)
+ }
+ if _, err := b.Read(buf); err != errFake {
+ t.Fatalf("1st Read = %v; want errFake", err)
+ }
+ if _, err := b.Read(nil); err != nil {
+ t.Fatalf("2nd nil Read = %v; want nil", err)
+ }
+ if _, err := b.Read(buf); err != nil {
+ t.Fatalf("3rd Read with buffer = %v; want nil", err)
+ }
+ if r.nread != 2 {
+ t.Errorf("num reads = %d; want 2", r.nread)
+ }
+}
+
+// Test for golang.org/issue/5947
+func TestWriterReadFromWhileFull(t *testing.T) {
+ buf := new(bytes.Buffer)
+ w := NewWriterSize(buf, 10)
+
+ // Fill buffer exactly.
+ n, err := w.Write([]byte("0123456789"))
+ if n != 10 || err != nil {
+ t.Fatalf("Write returned (%v, %v), want (10, nil)", n, err)
+ }
+
+ // Use ReadFrom to read in some data.
+ n2, err := w.ReadFrom(strings.NewReader("abcdef"))
+ if n2 != 6 || err != nil {
+ t.Fatalf("ReadFrom returned (%v, %v), want (6, nil)", n2, err)
+ }
+}
+
+type emptyThenNonEmptyReader struct {
+ r io.Reader
+ n int
+}
+
+func (r *emptyThenNonEmptyReader) Read(p []byte) (int, error) {
+ if r.n <= 0 {
+ return r.r.Read(p)
+ }
+ r.n--
+ return 0, nil
+}
+
+// Test for golang.org/issue/7611
+func TestWriterReadFromUntilEOF(t *testing.T) {
+ buf := new(bytes.Buffer)
+ w := NewWriterSize(buf, 5)
+
+ // Partially fill buffer
+ n, err := w.Write([]byte("0123"))
+ if n != 4 || err != nil {
+ t.Fatalf("Write returned (%v, %v), want (4, nil)", n, err)
+ }
+
+ // Use ReadFrom to read in some data.
+ r := &emptyThenNonEmptyReader{r: strings.NewReader("abcd"), n: 3}
+ n2, err := w.ReadFrom(r)
+ if n2 != 4 || err != nil {
+ t.Fatalf("ReadFrom returned (%v, %v), want (4, nil)", n2, err)
+ }
+ w.Flush()
+ if got, want := string(buf.Bytes()), "0123abcd"; got != want {
+ t.Fatalf("buf.Bytes() returned %q, want %q", got, want)
+ }
+}
+
+func TestWriterReadFromErrNoProgress(t *testing.T) {
+ buf := new(bytes.Buffer)
+ w := NewWriterSize(buf, 5)
+
+ // Partially fill buffer
+ n, err := w.Write([]byte("0123"))
+ if n != 4 || err != nil {
+ t.Fatalf("Write returned (%v, %v), want (4, nil)", n, err)
+ }
+
+ // Use ReadFrom to read in some data.
+ r := &emptyThenNonEmptyReader{r: strings.NewReader("abcd"), n: 100}
+ n2, err := w.ReadFrom(r)
+ if n2 != 0 || err != io.ErrNoProgress {
+ t.Fatalf("ReadFrom returned (%v, %v), want (0, io.ErrNoProgress)", n2, err)
+ }
+}
+
+func TestReaderReset(t *testing.T) {
+ r := NewReader(strings.NewReader("foo foo"))
+ buf := make([]byte, 3)
+ r.Read(buf)
+ if string(buf) != "foo" {
+ t.Errorf("buf = %q; want foo", buf)
+ }
+ r.Reset(strings.NewReader("bar bar"))
+ all, err := ioutil.ReadAll(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(all) != "bar bar" {
+ t.Errorf("ReadAll = %q; want bar bar", all)
+ }
+}
+
+func TestWriterReset(t *testing.T) {
+ var buf1, buf2 bytes.Buffer
+ w := NewWriter(&buf1)
+ w.WriteString("foo")
+ w.Reset(&buf2) // and not flushed
+ w.WriteString("bar")
+ w.Flush()
+ if buf1.String() != "" {
+ t.Errorf("buf1 = %q; want empty", buf1.String())
+ }
+ if buf2.String() != "bar" {
+ t.Errorf("buf2 = %q; want bar", buf2.String())
+ }
+}
+
+// An onlyReader only implements io.Reader, no matter what other methods the underlying implementation may have.
+type onlyReader struct {
+ io.Reader
+}
+
+// An onlyWriter only implements io.Writer, no matter what other methods the underlying implementation may have.
+type onlyWriter struct {
+ io.Writer
+}
+
+func BenchmarkReaderCopyOptimal(b *testing.B) {
+ // Optimal case is where the underlying reader implements io.WriterTo
+ srcBuf := bytes.NewBuffer(make([]byte, 8192))
+ src := NewReader(srcBuf)
+ dstBuf := new(bytes.Buffer)
+ dst := onlyWriter{dstBuf}
+ for i := 0; i < b.N; i++ {
+ srcBuf.Reset()
+ src.Reset(srcBuf)
+ dstBuf.Reset()
+ io.Copy(dst, src)
+ }
+}
+
+func BenchmarkReaderCopyUnoptimal(b *testing.B) {
+ // Unoptimal case is where the underlying reader doesn't implement io.WriterTo
+ srcBuf := bytes.NewBuffer(make([]byte, 8192))
+ src := NewReader(onlyReader{srcBuf})
+ dstBuf := new(bytes.Buffer)
+ dst := onlyWriter{dstBuf}
+ for i := 0; i < b.N; i++ {
+ srcBuf.Reset()
+ src.Reset(onlyReader{srcBuf})
+ dstBuf.Reset()
+ io.Copy(dst, src)
+ }
+}
+
+func BenchmarkReaderCopyNoWriteTo(b *testing.B) {
+ srcBuf := bytes.NewBuffer(make([]byte, 8192))
+ srcReader := NewReader(srcBuf)
+ src := onlyReader{srcReader}
+ dstBuf := new(bytes.Buffer)
+ dst := onlyWriter{dstBuf}
+ for i := 0; i < b.N; i++ {
+ srcBuf.Reset()
+ srcReader.Reset(srcBuf)
+ dstBuf.Reset()
+ io.Copy(dst, src)
+ }
+}
+
+func BenchmarkReaderWriteToOptimal(b *testing.B) {
+ const bufSize = 16 << 10
+ buf := make([]byte, bufSize)
+ r := bytes.NewReader(buf)
+ srcReader := NewReaderSize(onlyReader{r}, 1<<10)
+ if _, ok := ioutil.Discard.(io.ReaderFrom); !ok {
+ b.Fatal("ioutil.Discard doesn't support ReaderFrom")
+ }
+ for i := 0; i < b.N; i++ {
+ r.Seek(0, 0)
+ srcReader.Reset(onlyReader{r})
+ n, err := srcReader.WriteTo(ioutil.Discard)
+ if err != nil {
+ b.Fatal(err)
+ }
+ if n != bufSize {
+ b.Fatalf("n = %d; want %d", n, bufSize)
+ }
+ }
+}
+
+func BenchmarkWriterCopyOptimal(b *testing.B) {
+ // Optimal case is where the underlying writer implements io.ReaderFrom
+ srcBuf := bytes.NewBuffer(make([]byte, 8192))
+ src := onlyReader{srcBuf}
+ dstBuf := new(bytes.Buffer)
+ dst := NewWriter(dstBuf)
+ for i := 0; i < b.N; i++ {
+ srcBuf.Reset()
+ dstBuf.Reset()
+ dst.Reset(dstBuf)
+ io.Copy(dst, src)
+ }
+}
+
+func BenchmarkWriterCopyUnoptimal(b *testing.B) {
+ srcBuf := bytes.NewBuffer(make([]byte, 8192))
+ src := onlyReader{srcBuf}
+ dstBuf := new(bytes.Buffer)
+ dst := NewWriter(onlyWriter{dstBuf})
+ for i := 0; i < b.N; i++ {
+ srcBuf.Reset()
+ dstBuf.Reset()
+ dst.Reset(onlyWriter{dstBuf})
+ io.Copy(dst, src)
+ }
+}
+
+func BenchmarkWriterCopyNoReadFrom(b *testing.B) {
+ srcBuf := bytes.NewBuffer(make([]byte, 8192))
+ src := onlyReader{srcBuf}
+ dstBuf := new(bytes.Buffer)
+ dstWriter := NewWriter(dstBuf)
+ dst := onlyWriter{dstWriter}
+ for i := 0; i < b.N; i++ {
+ srcBuf.Reset()
+ dstBuf.Reset()
+ dstWriter.Reset(dstBuf)
+ io.Copy(dst, src)
+ }
+}
+
+func BenchmarkReaderEmpty(b *testing.B) {
+ b.ReportAllocs()
+ str := strings.Repeat("x", 16<<10)
+ for i := 0; i < b.N; i++ {
+ br := NewReader(strings.NewReader(str))
+ n, err := io.Copy(ioutil.Discard, br)
+ if err != nil {
+ b.Fatal(err)
+ }
+ if n != int64(len(str)) {
+ b.Fatal("wrong length")
+ }
+ }
+}
+
+func BenchmarkWriterEmpty(b *testing.B) {
+ b.ReportAllocs()
+ str := strings.Repeat("x", 1<<10)
+ bs := []byte(str)
+ for i := 0; i < b.N; i++ {
+ bw := NewWriter(ioutil.Discard)
+ bw.Flush()
+ bw.WriteByte('a')
+ bw.Flush()
+ bw.WriteRune('B')
+ bw.Flush()
+ bw.Write(bs)
+ bw.Flush()
+ bw.WriteString(str)
+ bw.Flush()
+ }
+}
+
+func BenchmarkWriterFlush(b *testing.B) {
+ b.ReportAllocs()
+ bw := NewWriter(ioutil.Discard)
+ str := strings.Repeat("x", 50)
+ for i := 0; i < b.N; i++ {
+ bw.WriteString(str)
+ bw.Flush()
+ }
+}
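
The copy benchmarks above hinge on whether io.Copy can see a WriterTo or ReaderFrom method on the concrete value: onlyReader and onlyWriter exist purely to hide those methods. A minimal standalone sketch of that technique (not taken from the vendored file; the names below are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// onlyReader embeds just io.Reader, so any WriteTo method on the wrapped
// value is hidden from io.Copy and the generic copy loop is used instead.
type onlyReader struct{ io.Reader }

func main() {
	// Fast path: *bytes.Buffer implements io.WriterTo, so io.Copy delegates to it.
	var dst bytes.Buffer
	io.Copy(&dst, bytes.NewBufferString("hello"))

	// Generic path: the wrapper exposes only Read, forcing io.Copy to
	// allocate its own intermediate buffer and loop over Read/Write.
	dst.Reset()
	n, err := io.Copy(&dst, onlyReader{bytes.NewBufferString("hello")})
	fmt.Println(n, err, dst.String()) // 5 <nil> hello
}
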
diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go
new file mode 100644
index 000000000..16629d022
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go
@@ -0,0 +1,9 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bufio
+
+func (b *Buffer) Cap() int {
+ return cap(b.buf)
+}
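
The nine-line export_test.go above is the standard Go trick for giving a package's tests access to internals: a file whose name ends in _test.go is compiled only by go test, so Cap can reach the unexported buf field without widening the public API. A collapsed, hypothetical sketch of the same pattern (invented names, folded into one snippet so it compiles on its own):

package counter

// In a real layout this type would live in counter.go.
type Counter struct{ n int } // n is unexported state

func (c *Counter) Inc() { c.n++ }

// In a real layout this accessor would live in export_test.go, so it is
// compiled only while running "go test" and never ships with the package;
// tests can then assert on c.N() just as bufio.v1's tests use Buffer.Cap().
func (c *Counter) N() int { return c.n }
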
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore
new file mode 100644
index 000000000..4cd0cbaf4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore
@@ -0,0 +1,6 @@
+# Set up a global .gitignore for OS- and editor-generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml
new file mode 100644
index 000000000..67467e140
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml
@@ -0,0 +1,15 @@
+sudo: false
+language: go
+
+go:
+ - 1.4.1
+
+before_script:
+ - FIXED=$(go fmt ./... | wc -l); if [ $FIXED -gt 0 ]; then echo "gofmt - $FIXED file(s) not formatted correctly, please run gofmt to fix this." && exit 1; fi
+
+os:
+ - linux
+ - osx
+
+notifications:
+ email: false
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS
new file mode 100644
index 000000000..4e0e8284e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS
@@ -0,0 +1,34 @@
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Adrien Bustany <adrien@bustany.org>
+Caleb Spare <cespare@gmail.com>
+Case Nelson <case@teammating.com>
+Chris Howey <howeyc@gmail.com> <chris@howey.me>
+Christoffer Buchholz <christoffer.buchholz@gmail.com>
+Dave Cheney <dave@cheney.net>
+Francisco Souza <f@souza.cc>
+Hari haran <hariharan.uno@gmail.com>
+John C Barstow
+Kelvin Fo <vmirage@gmail.com>
+Matt Layher <mdlayher@gmail.com>
+Nathan Youngman <git@nathany.com>
+Paul Hammond <paul@paulhammond.org>
+Pieter Droogendijk <pieter@binky.org.uk>
+Pursuit92 <JoshChase@techpursuit.net>
+Rob Figueiredo <robfig@gmail.com>
+Soge Zhang <zhssoge@gmail.com>
+Tilak Sharma <tilaks@google.com>
+Travis Cline <travis.cline@gmail.com>
+Tudor Golubenco <tudor.g@gmail.com>
+Yukang <moorekang@gmail.com>
+bronze1man <bronze1man@gmail.com>
+debrando <denis.brandolini@gmail.com>
+henrikedwards <henrik.edwards@gmail.com>
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md
new file mode 100644
index 000000000..ea9428a2a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md
@@ -0,0 +1,263 @@
+# Changelog
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
+
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md
new file mode 100644
index 000000000..0f377f341
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, OS X and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Set up [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also set up just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password).
+* Once set up, you can run the test suite on a given OS with a single command: `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
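
The Testing section above leans on Go build tags to select platform-specific code; a hypothetical single-file illustration of the mechanism (not part of the fsnotify sources) looks like this:

// This file would be compiled only on Linux because of the constraint
// below; the rest of the package can then call linuxOnlyHint without any
// runtime OS checks. (Hypothetical example, not from fsnotify.)

// +build linux

package fsnotify

func linuxOnlyHint() string { return "compiled only into Linux builds" }
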
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE
new file mode 100644
index 000000000..f21e54080
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml
new file mode 100644
index 000000000..204217fb0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml
@@ -0,0 +1,26 @@
+## OS X build (CircleCI iOS beta)
+
+# Pretend like it's an Xcode project, at least to get it running.
+machine:
+ environment:
+ XCODE_WORKSPACE: NotUsed.xcworkspace
+ XCODE_SCHEME: NotUsed
+ # This is where the go project is actually checked out to:
+ CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify
+
+dependencies:
+ pre:
+ - brew upgrade go
+
+test:
+ override:
+ - go test ./...
+
+# Idealized future config, eventually with cross-platform build matrix :-)
+
+# machine:
+# go:
+# version: 1.4
+# os:
+# - osx
+# - linux
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go
new file mode 100644
index 000000000..306379660
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go
@@ -0,0 +1,42 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+package fsnotify_test
+
+import (
+ "log"
+
+ "github.com/go-fsnotify/fsnotify"
+)
+
+func ExampleNewWatcher() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event := <-watcher.Events:
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err := <-watcher.Errors:
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go
new file mode 100644
index 000000000..c899ee008
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go
@@ -0,0 +1,62 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if e.Op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if e.Op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if e.Op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if e.Op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if e.Op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+
+ // If buffer remains empty, return no event names
+ if buffer.Len() == 0 {
+ return fmt.Sprintf("%q: ", e.Name)
+ }
+
+ // Return a list of event names, with leading pipe character stripped
+ return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:])
+}
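
Because Op is declared as a bit set, a consumer can match several operations with one mask. A short sketch using the API defined above (assuming the gopkg.in/fsnotify.v1 import path this vendor directory uses; the example file earlier imports github.com/go-fsnotify/fsnotify instead):

package main

import (
	"log"

	"gopkg.in/fsnotify.v1"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Op is a bitmask, so one test covers both Create and Rename events.
	for event := range watcher.Events {
		if event.Op&(fsnotify.Create|fsnotify.Rename) != 0 {
			log.Println("created or renamed:", event.Name)
		}
	}
}
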
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go
new file mode 100644
index 000000000..d7759ec8c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go
@@ -0,0 +1,306 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ mu sync.Mutex // Map access
+ fd int
+ poller *fdPoller
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ fd, errno := syscall.InotifyInit()
+ if fd == -1 {
+ return nil, errno
+ }
+ // Create epoll
+ poller, err := newFdPoller(fd)
+ if err != nil {
+ syscall.Close(fd)
+ return nil, err
+ }
+ w := &Watcher{
+ fd: fd,
+ poller: poller,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed() {
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+
+ // Wake up goroutine
+ w.poller.wake()
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM |
+ syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY |
+ syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF
+
+ var flags uint32 = agnosticEvents
+
+ w.mu.Lock()
+ watchEntry, found := w.watches[name]
+ w.mu.Unlock()
+ if found {
+ watchEntry.flags |= flags
+ flags |= syscall.IN_MASK_ADD
+ }
+ wd, errno := syscall.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ w.mu.Lock()
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ w.mu.Unlock()
+
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+ }
+ // inotify_rm_watch will return EINVAL if the file has been deleted;
+ // the inotify will already have been removed.
+ // That means we can safely delete it from our watches, whatever inotify_rm_watch does.
+ delete(w.watches, name)
+ success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case.
+ // the only two possible errors are:
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ return errno
+ }
+ return nil
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ var (
+ buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ n int // Number of bytes read with read()
+ errno error // Syscall errno
+ ok bool // For poller.wait
+ )
+
+ defer close(w.doneResp)
+ defer close(w.Errors)
+ defer close(w.Events)
+ defer syscall.Close(w.fd)
+ defer w.poller.close()
+
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ ok, errno = w.poller.wait()
+ if errno != nil {
+ select {
+ case w.Errors <- errno:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ n, errno = syscall.Read(w.fd, buf[:])
+ // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html :
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+ if errno == syscall.EINTR {
+ continue
+ }
+
+ // syscall.Read might have been woken up by Close. If so, we're done.
+ if w.isClosed() {
+ return
+ }
+
+ if n < syscall.SizeofInotifyEvent {
+ var err error
+ if n == 0 {
+ // EOF was received. This should really never happen.
+ err = io.EOF
+ } else if n < 0 {
+ // An error occurred while reading.
+ err = errno
+ } else {
+ // Read was too short.
+ err = errors.New("notify: short read in readEvents()")
+ }
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-syscall.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ mask := uint32(raw.Mask)
+ nameLen := uint32(raw.Len)
+ // If the event happened to the watched directory or the watched file, the kernel
+ // doesn't append the filename to the event, but we would like to always fill
+ // the "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
+ w.mu.Lock()
+ name := w.paths[int(raw.Wd)]
+ w.mu.Unlock()
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if !event.ignoreLinux(mask) {
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += syscall.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel, such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+ // Ignore anything the inotify API says to ignore
+ if mask&syscall.IN_IGNORED == syscall.IN_IGNORED {
+ return true
+ }
+
+ // If the event is not a DELETE or RENAME, the file must exist.
+ // Otherwise the event is ignored.
+ // *Note*: this was put in place because it was seen that a MODIFY
+ // event was sent after the DELETE. This ignores that MODIFY and
+ // assumes a DELETE will come or has come if the file doesn't exist.
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+ _, statErr := os.Lstat(e.Name)
+ return os.IsNotExist(statErr)
+ }
+ return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&syscall.IN_MODIFY == syscall.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
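
The mask-to-Op mapping above (IN_MOVED_TO counted as Create, IN_MOVED_FROM and IN_MOVE_SELF as Rename, IN_ATTRIB as Chmod) can be pinned down with a small in-package test. This is a hypothetical sketch, not part of the vendored diff:

// +build linux

package fsnotify

import (
	"syscall"
	"testing"
)

// TestNewEventMaskMapping is a hypothetical check of newEvent's mask mapping.
func TestNewEventMaskMapping(t *testing.T) {
	e := newEvent("/tmp/foo", syscall.IN_MOVED_TO|syscall.IN_ATTRIB)
	if e.Op&Create != Create {
		t.Fatalf("IN_MOVED_TO should map to Create, got %v", e.Op)
	}
	if e.Op&Chmod != Chmod {
		t.Fatalf("IN_ATTRIB should map to Chmod, got %v", e.Op)
	}
	if e.Op&Remove == Remove {
		t.Fatalf("Remove should not be set, got %v", e.Op)
	}
}
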
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go
new file mode 100644
index 000000000..3b4178404
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go
@@ -0,0 +1,186 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "syscall"
+)
+
+type fdPoller struct {
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ epfd int // Epoll file descriptor
+ pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+ poller := new(fdPoller)
+ poller.fd = fd
+ poller.epfd = -1
+ poller.pipe[0] = -1
+ poller.pipe[1] = -1
+ return poller
+}
+
+// newFdPoller wraps an existing inotify file descriptor in a poller.
+// It creates an epoll instance and a pipe used to wake up epoll_wait.
+func newFdPoller(fd int) (*fdPoller, error) {
+ var errno error
+ poller := emptyPoller(fd)
+ defer func() {
+ if errno != nil {
+ poller.close()
+ }
+ }()
+ poller.fd = fd
+
+ // Create epoll fd
+ poller.epfd, errno = syscall.EpollCreate(1)
+ if poller.epfd == -1 {
+ return nil, errno
+ }
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end.
+ errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register inotify fd with epoll
+ event := syscall.EpollEvent{
+ Fd: int32(poller.fd),
+ Events: syscall.EPOLLIN,
+ }
+ errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register pipe fd with epoll
+ event = syscall.EpollEvent{
+ Fd: int32(poller.pipe[0]),
+ Events: syscall.EPOLLIN,
+ }
+ errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+ // I don't know whether epoll_wait returns the number of events returned,
+ // or the total number of events ready.
+ // I decided to catch both by making the buffer one larger than the maximum.
+ events := make([]syscall.EpollEvent, 7)
+ for {
+ n, errno := syscall.EpollWait(poller.epfd, events, -1)
+ if n == -1 {
+ if errno == syscall.EINTR {
+ continue
+ }
+ return false, errno
+ }
+ if n == 0 {
+ // If there are no events, try again.
+ continue
+ }
+ if n > 6 {
+ // This should never happen. More events were returned than should be possible.
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
+ }
+ ready := events[:n]
+ epollhup := false
+ epollerr := false
+ epollin := false
+ for _, event := range ready {
+ if event.Fd == int32(poller.fd) {
+ if event.Events&syscall.EPOLLHUP != 0 {
+ // This should not happen, but if it does, treat it as a wakeup.
+ epollhup = true
+ }
+ if event.Events&syscall.EPOLLERR != 0 {
+ // If an error is waiting on the file descriptor, we should pretend
+ // something is ready to read, and let syscall.Read pick up the error.
+ epollerr = true
+ }
+ if event.Events&syscall.EPOLLIN != 0 {
+ // There is data to read.
+ epollin = true
+ }
+ }
+ if event.Fd == int32(poller.pipe[0]) {
+ if event.Events&syscall.EPOLLHUP != 0 {
+ // Write pipe descriptor was closed, by us. This means we're closing down the
+ // watcher, and we should wake up.
+ }
+ if event.Events&syscall.EPOLLERR != 0 {
+ // If an error is waiting on the pipe file descriptor.
+ // This is an absolute mystery, and should never ever happen.
+ return false, errors.New("Error on the pipe descriptor.")
+ }
+ if event.Events&syscall.EPOLLIN != 0 {
+ // This is a regular wakeup, so we have to clear the buffer.
+ err := poller.clearWake()
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ }
+
+ if epollhup || epollerr || epollin {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// wake wakes up the poller by writing a byte to the wake-up pipe.
+func (poller *fdPoller) wake() error {
+ buf := make([]byte, 1)
+ n, errno := syscall.Write(poller.pipe[1], buf)
+ if n == -1 {
+ if errno == syscall.EAGAIN {
+ // Buffer is full, poller will wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+ // You have to be woken up a LOT in order to get to 100!
+ buf := make([]byte, 100)
+ n, errno := syscall.Read(poller.pipe[0], buf)
+ if n == -1 {
+ if errno == syscall.EAGAIN {
+ // Buffer is empty, someone else cleared our wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+ if poller.pipe[1] != -1 {
+ syscall.Close(poller.pipe[1])
+ }
+ if poller.pipe[0] != -1 {
+ syscall.Close(poller.pipe[0])
+ }
+ if poller.epfd != -1 {
+ syscall.Close(poller.epfd)
+ }
+}
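
The poller above combines inotify with the classic self-pipe trick: epoll waits on both the inotify fd and the read end of a pipe, so writing one byte to the pipe (wake) unblocks epoll_wait without any inotify activity. A stripped-down, Linux-only sketch of just that wake-up mechanism (illustrative only; it panics instead of doing real error handling):

package main

import (
	"fmt"
	"syscall"
	"time"
)

func main() {
	// Non-blocking pipe: pipe[0] is the read end, pipe[1] the write end.
	var pipe [2]int
	if err := syscall.Pipe2(pipe[:], syscall.O_NONBLOCK); err != nil {
		panic(err)
	}
	epfd, err := syscall.EpollCreate(1)
	if err != nil {
		panic(err)
	}
	ev := syscall.EpollEvent{Fd: int32(pipe[0]), Events: syscall.EPOLLIN}
	if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, pipe[0], &ev); err != nil {
		panic(err)
	}

	// Another goroutine "wakes" the epoll_wait below by writing one byte.
	go func() {
		time.Sleep(100 * time.Millisecond)
		syscall.Write(pipe[1], []byte{0})
	}()

	events := make([]syscall.EpollEvent, 1)
	n, err := syscall.EpollWait(epfd, events, -1) // blocks until the write above
	if err != nil {
		panic(err)
	}
	fmt.Println("woken up, ready fds:", n)
}
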
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go
new file mode 100644
index 000000000..af9f407f8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go
@@ -0,0 +1,228 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "syscall"
+ "testing"
+ "time"
+)
+
+type testFd [2]int
+
+func makeTestFd(t *testing.T) testFd {
+ var tfd testFd
+ errno := syscall.Pipe(tfd[:])
+ if errno != nil {
+ t.Fatalf("Failed to create pipe: %v", errno)
+ }
+ return tfd
+}
+
+func (tfd testFd) fd() int {
+ return tfd[0]
+}
+
+func (tfd testFd) closeWrite(t *testing.T) {
+ errno := syscall.Close(tfd[1])
+ if errno != nil {
+ t.Fatalf("Failed to close write end of pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) put(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := syscall.Write(tfd[1], buf)
+ if errno != nil {
+ t.Fatalf("Failed to write to pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) get(t *testing.T) {
+ buf := make([]byte, 10)
+ _, errno := syscall.Read(tfd[0], buf)
+ if errno != nil {
+ t.Fatalf("Failed to read from pipe: %v", errno)
+ }
+}
+
+func (tfd testFd) close() {
+ syscall.Close(tfd[1])
+ syscall.Close(tfd[0])
+}
+
+func makePoller(t *testing.T) (testFd, *fdPoller) {
+ tfd := makeTestFd(t)
+ poller, err := newFdPoller(tfd.fd())
+ if err != nil {
+ t.Fatalf("Failed to create poller: %v", err)
+ }
+ return tfd, poller
+}
+
+func TestPollerWithBadFd(t *testing.T) {
+ _, err := newFdPoller(-1)
+ if err != syscall.EBADF {
+ t.Fatalf("Expected EBADF, got: %v", err)
+ }
+}
+
+func TestPollerWithData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+ tfd.get(t)
+}
+
+func TestPollerWithWakeup(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerWithClose(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.closeWrite(t)
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+}
+
+func TestPollerWithWakeupAndData(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ tfd.put(t)
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+
+ // both data and wakeup
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ // data is still in the buffer, wakeup is cleared
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if !ok {
+ t.Fatalf("expected poller to return true")
+ }
+
+ tfd.get(t)
+ // data is gone, only wakeup now
+ err = poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ ok, err = poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ if ok {
+ t.Fatalf("expected poller to return false")
+ }
+}
+
+func TestPollerConcurrent(t *testing.T) {
+ tfd, poller := makePoller(t)
+ defer tfd.close()
+ defer poller.close()
+
+ oks := make(chan bool)
+ live := make(chan bool)
+ defer close(live)
+ go func() {
+ defer close(oks)
+ for {
+ ok, err := poller.wait()
+ if err != nil {
+ t.Fatalf("poller failed: %v", err)
+ }
+ oks <- ok
+ if !<-live {
+ return
+ }
+ }
+ }()
+
+ // Try a write
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.put(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+ live <- true
+
+ // Try a wakeup
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ err := poller.wake()
+ if err != nil {
+ t.Fatalf("wake failed: %v", err)
+ }
+ if <-oks {
+ t.Fatalf("expected false")
+ }
+ live <- true
+
+ // Try a close
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-oks:
+ t.Fatalf("poller did not wait")
+ }
+ tfd.closeWrite(t)
+ if !<-oks {
+ t.Fatalf("expected true")
+ }
+ tfd.get(t)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go
new file mode 100644
index 000000000..035ee8f95
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go
@@ -0,0 +1,292 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+ "time"
+)
+
+func TestInotifyCloseRightAway(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Close immediately; it won't even reach the first syscall.Read.
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLater(t *testing.T) {
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ // Wait until readEvents has reached syscall.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+ w.Add(testDir)
+
+ // Wait until readEvents has reached syscall.Read, and Close.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func TestInotifyCloseAfterRead(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher")
+ }
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add .")
+ }
+
+ // Generate an event.
+ os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
+
+ // Wait for readEvents to read the event, then close the watcher.
+ <-time.After(50 * time.Millisecond)
+ w.Close()
+
+ // Wait for the close to complete.
+ <-time.After(50 * time.Millisecond)
+ isWatcherReallyClosed(t, w)
+}
+
+func isWatcherReallyClosed(t *testing.T, w *Watcher) {
+ select {
+ case err, ok := <-w.Errors:
+ if ok {
+ t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
+ }
+ default:
+ t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
+ }
+
+ select {
+ case _, ok := <-w.Events:
+ if ok {
+ t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
+ }
+ default:
+ t.Fatalf("w.Events would have blocked; readEvents is still alive!")
+ }
+}
+
+func TestInotifyCloseCreate(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+ h, err := os.Create(filepath.Join(testDir, "testfile"))
+ if err != nil {
+ t.Fatalf("Failed to create file in testdir: %v", err)
+ }
+ h.Close()
+ select {
+ case <-w.Events:
+ case err := <-w.Errors:
+ t.Fatalf("Error from watcher: %v", err)
+ case <-time.After(50 * time.Millisecond):
+ t.Fatalf("Took too long to wait for event")
+ }
+
+ // At this point, we've received one event, so the goroutine is ready.
+ // It's also blocking on syscall.Read.
+ // Now we try to swap the file descriptor under its nose.
+ w.Close()
+ w, err = NewWatcher()
+ defer w.Close()
+ if err != nil {
+ t.Fatalf("Failed to create second watcher: %v", err)
+ }
+
+ <-time.After(50 * time.Millisecond)
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Error adding testDir again: %v", err)
+ }
+}
+
+func TestInotifyStress(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ killchan := make(chan struct{})
+ defer close(killchan)
+
+ err = w.Add(testDir)
+ if err != nil {
+ t.Fatalf("Failed to add testDir: %v", err)
+ }
+
+ proc, err := os.FindProcess(os.Getpid())
+ if err != nil {
+ t.Fatalf("Error finding process: %v", err)
+ }
+
+ go func() {
+ for {
+ select {
+ case <-time.After(5 * time.Millisecond):
+ err := proc.Signal(syscall.SIGUSR1)
+ if err != nil {
+ t.Fatalf("Signal failed: %v", err)
+ }
+ case <-killchan:
+ return
+ }
+ }
+ }()
+
+ go func() {
+ for {
+ select {
+ case <-time.After(11 * time.Millisecond):
+ err := w.poller.wake()
+ if err != nil {
+ t.Fatalf("Wake failed: %v", err)
+ }
+ case <-killchan:
+ return
+ }
+ }
+ }()
+
+ go func() {
+ for {
+ select {
+ case <-killchan:
+ return
+ default:
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+ time.Sleep(time.Millisecond)
+ err = os.Remove(testFile)
+ if err != nil {
+ t.Fatalf("Remove failed: %v", err)
+ }
+ }
+ }
+ }()
+
+ creates := 0
+ removes := 0
+ after := time.After(5 * time.Second)
+ for {
+ select {
+ case <-after:
+ if creates-removes > 1 || creates-removes < -1 {
+ t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
+ }
+ if creates < 50 {
+ t.Fatalf("Expected at least 50 creates, got %d", creates)
+ }
+ return
+ case err := <-w.Errors:
+ t.Fatalf("Got an error from watcher: %v", err)
+ case evt := <-w.Events:
+ if evt.Name != testFile {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ if evt.Op == Remove {
+ removes++
+ }
+ }
+ }
+}
+
+func TestInotifyRemoveTwice(t *testing.T) {
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+ testFile := filepath.Join(testDir, "testfile")
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ t.Fatalf("Create failed: %v", err)
+ }
+ handle.Close()
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ err = w.Add(testFile)
+ if err != nil {
+ t.Fatalf("Failed to add testFile: %v", err)
+ }
+
+ err = os.Remove(testFile)
+ if err != nil {
+ t.Fatalf("Failed to remove testFile: %v", err)
+ }
+
+ err = w.Remove(testFile)
+ if err != syscall.EINVAL {
+ t.Fatalf("Expected EINVAL from Remove, got: %v", err)
+ }
+
+ err = w.Remove(testFile)
+ if err == syscall.EINVAL {
+ t.Fatalf("Got EINVAL again, watch was not removed")
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go
new file mode 100644
index 000000000..59169c6af
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go
@@ -0,0 +1,1135 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+package fsnotify
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+// An atomic counter
+type counter struct {
+ val int32
+}
+
+func (c *counter) increment() {
+ atomic.AddInt32(&c.val, 1)
+}
+
+func (c *counter) value() int32 {
+ return atomic.LoadInt32(&c.val)
+}
+
+func (c *counter) reset() {
+ atomic.StoreInt32(&c.val, 0)
+}
+
+// tempMkdir makes a temporary directory
+func tempMkdir(t *testing.T) string {
+ dir, err := ioutil.TempDir("", "fsnotify")
+ if err != nil {
+ t.Fatalf("failed to create test directory: %s", err)
+ }
+ return dir
+}
+
+// newWatcher initializes an fsnotify Watcher instance.
+func newWatcher(t *testing.T) *Watcher {
+ watcher, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("NewWatcher() failed: %s", err)
+ }
+ return watcher
+}
+
+// addWatch adds a watch for a directory
+func addWatch(t *testing.T, watcher *Watcher, dir string) {
+ if err := watcher.Add(dir); err != nil {
+ t.Fatalf("watcher.Add(%q) failed: %s", dir, err)
+ }
+}
+
+func TestFsnotifyMultipleOperations(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory that's not watched
+ testDirToMoveFiles := tempMkdir(t)
+ defer os.RemoveAll(testDirToMoveFiles)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+ testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived, renameReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Rename == Rename {
+ renameReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // Modify the file outside of the watched dir
+ f, err = os.Open(testFileRenamed)
+ if err != nil {
+ t.Fatalf("open test renamed file failed: %s", err)
+ }
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file that was moved
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ rReceived := renameReceived.value()
+ if dReceived+rReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyMultipleCreates(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+
+ addWatch(t, watcher, testDir)
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Recreate the file
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Close()
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // Modify
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived < 3 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 1 {
+ t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDirOnly(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ // This should NOT add any events to the fsnotify event queue
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, modifyReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ time.Sleep(time.Millisecond)
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+ os.Remove(testFile)
+ os.Remove(testFileAlreadyExists)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 1 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
+ }
+ mReceived := modifyReceived.value()
+ if mReceived != 1 {
+ t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyDeleteWatchedDir(t *testing.T) {
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ addWatch(t, watcher, testDir)
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFileAlreadyExists)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var deleteReceived counter
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) {
+ t.Logf("event received: %s", event)
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ }()
+
+ os.RemoveAll(testDir)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ dReceived := deleteReceived.value()
+ if dReceived < 2 {
+ t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived)
+ }
+}
+
+func TestFsnotifySubDir(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile")
+ testSubDir := filepath.Join(testDir, "sub")
+ testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile")
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived, deleteReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) {
+ t.Logf("event received: %s", event)
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ if event.Op&Remove == Remove {
+ deleteReceived.increment()
+ }
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ addWatch(t, watcher, testDir)
+
+ // Create sub-directory
+ if err := os.Mkdir(testSubDir, 0777); err != nil {
+ t.Fatalf("failed to create test sub-directory: %s", err)
+ }
+
+ // Create a file
+ var f *os.File
+ f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ // Create a file (Should not see this! we are not watching subdir)
+ var fs *os.File
+ fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ fs.Sync()
+ fs.Close()
+
+ time.Sleep(200 * time.Millisecond)
+
+ // Make sure we receive deletes for both the file and the sub-directory
+ os.RemoveAll(testSubDir)
+ os.Remove(testFile1)
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ cReceived := createReceived.value()
+ if cReceived != 2 {
+ t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+ }
+ dReceived := deleteReceived.value()
+ if dReceived != 2 {
+ t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+}
+
+func TestFsnotifyRename(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var renameReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+ if event.Op&Rename == Rename {
+ renameReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFile)
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if renameReceived.value() == 0 {
+ t.Fatal("fsnotify rename events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestFsnotifyRenameToCreate(t *testing.T) {
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory to get file
+ testDirFrom := tempMkdir(t)
+ defer os.RemoveAll(testDirFrom)
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var createReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+ if event.Op&Create == Create {
+ createReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if createReceived.value() == 0 {
+ t.Fatal("fsnotify create events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestFsnotifyRenameToOverwrite(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9", "windows":
+ t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS)
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create directory to get file
+ testDirFrom := tempMkdir(t)
+ defer os.RemoveAll(testDirFrom)
+
+ testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+ testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+ // Create a file
+ var fr *os.File
+ fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ fr.Sync()
+ fr.Close()
+
+ addWatch(t, watcher, testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ var eventReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testFileRenamed) {
+ eventReceived.increment()
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+
+ if err := testRename(testFile, testFileRenamed); err != nil {
+ t.Fatalf("rename failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+ if eventReceived.value() == 0 {
+ t.Fatal("fsnotify events have not been received after 500 ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(2 * time.Second):
+ t.Fatal("event stream was not closed after 2 seconds")
+ }
+
+ os.Remove(testFileRenamed)
+}
+
+func TestRemovalOfWatch(t *testing.T) {
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ addWatch(t, watcher, testDir)
+ if err := watcher.Remove(testDir); err != nil {
+ t.Fatalf("Could not remove the watch: %v\n", err)
+ }
+
+ go func() {
+ select {
+ case ev := <-watcher.Events:
+ t.Fatalf("We received event: %v\n", ev)
+ case <-time.After(500 * time.Millisecond):
+ t.Log("No event received, as expected.")
+ }
+ }()
+
+ time.Sleep(200 * time.Millisecond)
+ // Modify the file outside of the watched dir
+ f, err := os.Open(testFileAlreadyExists)
+ if err != nil {
+ t.Fatalf("Open test file failed: %s", err)
+ }
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+ if err := os.Chmod(testFileAlreadyExists, 0700); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+ time.Sleep(400 * time.Millisecond)
+}
+
+func TestFsnotifyAttrib(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("attributes don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for err := range watcher.Errors {
+ t.Fatalf("error received: %s", err)
+ }
+ }()
+
+ testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile")
+
+ // Receive events on the event channel on a separate goroutine
+ eventstream := watcher.Events
+ // The modifyReceived counter counts IsModify events that are not IsAttrib,
+ // and the attribReceived counts IsAttrib events (which are also IsModify as
+ // a consequence).
+ var modifyReceived counter
+ var attribReceived counter
+ done := make(chan bool)
+ go func() {
+ for event := range eventstream {
+ // Only count relevant events
+ if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+ if event.Op&Write == Write {
+ modifyReceived.increment()
+ }
+ if event.Op&Chmod == Chmod {
+ attribReceived.increment()
+ }
+ t.Logf("event received: %s", event)
+ } else {
+ t.Logf("unexpected event received: %s", event)
+ }
+ }
+ done <- true
+ }()
+
+ // Create a file
+ // This should add at least one event to the fsnotify event queue
+ var f *os.File
+ f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+
+ f.WriteString("data")
+ f.Sync()
+ f.Close()
+
+ // Add a watch for testFile
+ addWatch(t, watcher, testFile)
+
+ if err := os.Chmod(testFile, 0700); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ // Creating/writing a file changes also the mtime, so IsAttrib should be set to true here
+ time.Sleep(500 * time.Millisecond)
+ if modifyReceived.value() != 0 {
+ t.Fatal("received an unexpected modify event when creating a test file")
+ }
+ if attribReceived.value() == 0 {
+ t.Fatal("fsnotify attribute events have not received after 500 ms")
+ }
+
+ // Modifying the contents of the file does not set the attrib flag (although e.g. the mtime
+ // might have been modified).
+ modifyReceived.reset()
+ attribReceived.reset()
+
+ f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
+ if err != nil {
+ t.Fatalf("reopening test file failed: %s", err)
+ }
+
+ f.WriteString("more data")
+ f.Sync()
+ f.Close()
+
+ time.Sleep(500 * time.Millisecond)
+
+ if modifyReceived.value() != 1 {
+ t.Fatal("didn't receive a modify event after changing test file contents")
+ }
+
+ if attribReceived.value() != 0 {
+ t.Fatal("did receive an unexpected attrib event after changing test file contents")
+ }
+
+ modifyReceived.reset()
+ attribReceived.reset()
+
+ // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
+ // of the file are not changed though)
+ if err := os.Chmod(testFile, 0600); err != nil {
+ t.Fatalf("chmod failed: %s", err)
+ }
+
+ time.Sleep(500 * time.Millisecond)
+
+ if attribReceived.value() != 1 {
+ t.Fatal("didn't receive an attribute change after 500ms")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+ t.Log("waiting for the event channel to become closed...")
+ select {
+ case <-done:
+ t.Log("event channel closed")
+ case <-time.After(1 * time.Second):
+ t.Fatal("event stream was not closed after 1 second")
+ }
+
+ os.Remove(testFile)
+}
+
+func TestFsnotifyClose(t *testing.T) {
+ watcher := newWatcher(t)
+ watcher.Close()
+
+ var done int32
+ go func() {
+ watcher.Close()
+ atomic.StoreInt32(&done, 1)
+ }()
+
+ time.Sleep(50 * time.Millisecond)
+ if atomic.LoadInt32(&done) == 0 {
+ t.Fatal("double Close() test failed: second Close() call didn't return")
+ }
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ if err := watcher.Add(testDir); err == nil {
+ t.Fatal("expected error on Watch() after Close(), got nil")
+ }
+}
+
+func TestFsnotifyFakeSymlink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks don't work on Windows.")
+ }
+
+ watcher := newWatcher(t)
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ var errorsReceived counter
+ // Receive errors on the error channel on a separate goroutine
+ go func() {
+ for errors := range watcher.Errors {
+ t.Logf("Received error: %s", errors)
+ errorsReceived.increment()
+ }
+ }()
+
+ // Count the CREATE events received
+ var createEventsReceived, otherEventsReceived counter
+ go func() {
+ for ev := range watcher.Events {
+ t.Logf("event received: %s", ev)
+ if ev.Op&Create == Create {
+ createEventsReceived.increment()
+ } else {
+ otherEventsReceived.increment()
+ }
+ }
+ }()
+
+ addWatch(t, watcher, testDir)
+
+ if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil {
+ t.Fatalf("Failed to create bogus symlink: %s", err)
+ }
+ t.Logf("Created bogus symlink")
+
+ // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+ time.Sleep(500 * time.Millisecond)
+
+ // Should not be an error, just no events for broken links (watching nothing)
+ if errorsReceived.value() > 0 {
+ t.Fatal("fsnotify errors have been received.")
+ }
+ if otherEventsReceived.value() > 0 {
+ t.Fatal("fsnotify other events received on the broken link")
+ }
+
+ // Except for 1 create event (for the link itself)
+ if createEventsReceived.value() == 0 {
+ t.Fatal("fsnotify create events were not received after 500 ms")
+ }
+ if createEventsReceived.value() > 1 {
+ t.Fatal("fsnotify more create events received than expected")
+ }
+
+ // Try closing the fsnotify instance
+ t.Log("calling Close()")
+ watcher.Close()
+}
+
+// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race.
+// See https://codereview.appspot.com/103300045/
+// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race
+func TestConcurrentRemovalOfWatch(t *testing.T) {
+ if runtime.GOOS != "darwin" {
+ t.Skip("regression test for race only present on darwin")
+ }
+
+ // Create directory to watch
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ // Create a file before watching directory
+ testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+ {
+ var f *os.File
+ f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ t.Fatalf("creating test file failed: %s", err)
+ }
+ f.Sync()
+ f.Close()
+ }
+
+ watcher := newWatcher(t)
+ defer watcher.Close()
+
+ addWatch(t, watcher, testDir)
+
+ // Test that RemoveWatch can be invoked concurrently, with no data races.
+ removed1 := make(chan struct{})
+ go func() {
+ defer close(removed1)
+ watcher.Remove(testDir)
+ }()
+ removed2 := make(chan struct{})
+ go func() {
+ close(removed2)
+ watcher.Remove(testDir)
+ }()
+ <-removed1
+ <-removed2
+}
+
+func TestClose(t *testing.T) {
+ // Regression test for #59 bad file descriptor from Close
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ watcher := newWatcher(t)
+ if err := watcher.Add(testDir); err != nil {
+ t.Fatalf("Expected no error on Add, got %v", err)
+ }
+ err := watcher.Close()
+ if err != nil {
+ t.Fatalf("Expected no error on Close, got %v.", err)
+ }
+}
+
+func testRename(file1, file2 string) error {
+ switch runtime.GOOS {
+ case "windows", "plan9":
+ return os.Rename(file1, file2)
+ default:
+ cmd := exec.Command("mv", file1, file2)
+ return cmd.Run()
+ }
+}
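
These integration tests all follow the same shape: consume Events on a goroutine, bump an atomic counter for the ops of interest, sleep a grace period, then assert on the count. A condensed, hypothetical in-package helper capturing that shape (countOps is not part of the diff; it relies on the counter type defined above):

// countOps counts events on path whose Op includes op, for the given grace
// period, using the atomic counter type defined earlier in this test file.
func countOps(w *Watcher, path string, op Op, grace time.Duration) int32 {
	var c counter
	go func() {
		for ev := range w.Events {
			if ev.Name == filepath.Clean(path) && ev.Op&op == op {
				c.increment()
			}
		}
	}()
	time.Sleep(grace)
	return c.value()
}
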
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go
new file mode 100644
index 000000000..265622d20
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go
@@ -0,0 +1,463 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "syscall"
+ "time"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ done chan bool // Channel for sending a "quit message" to the reader goroutine
+
+ kq int // File descriptor (as returned by the kqueue() syscall).
+
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Map of watched file descriptors (key: path).
+ externalWatches map[string]bool // Map of watches added by user of the library.
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan bool),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+ w.mu.Unlock()
+
+ w.mu.Lock()
+ ws := w.watches
+ w.mu.Unlock()
+
+ var err error
+ for name := range ws {
+ if e := w.Remove(name); e != nil && err == nil {
+ err = e
+ }
+ }
+
+ // Send "quit" message to the reader goroutine:
+ w.done <- true
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ return w.addWatch(name, noteAllEvents)
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = syscall.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ syscall.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME
+
+// keventWaitTime is the maximum time to block on each read from kevent.
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+func (w *Watcher) addWatch(name string, flags uint32) error {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return err
+ }
+
+ // Don't watch sockets.
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
+ return nil
+ }
+
+ // Follow Symlinks
+	// Unfortunately, Linux can add bogus symlinks to the watch list without
+	// issue, and Windows can't do symlinks at all (AFAIK). To maintain
+	// consistency, we will act like everything is fine. There will simply
+	// be no file events for broken symlinks.
+	// Hence the returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return nil
+ }
+ }
+
+ watchfd, err = syscall.Open(name, openMode, 0700)
+ if watchfd == -1 {
+ return err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+ syscall.Close(watchfd)
+ return err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ w.watches[name] = watchfd
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+		// Watch the directory if it has not been watched before, or if it was
+		// watched before but only with NOTE_DELETE (as added by watchDirectoryFiles).
+ w.mu.Lock()
+ watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ eventBuffer := make([]syscall.Kevent_t, 10)
+
+ for {
+ // See if there is a message on the "done" channel
+ select {
+ case <-w.done:
+ err := syscall.Close(w.kq)
+ if err != nil {
+ w.Errors <- err
+ }
+ close(w.Events)
+ close(w.Errors)
+ return
+ default:
+ }
+
+ // Get new events
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != syscall.EINTR {
+ w.Errors <- err
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for len(kevents) > 0 {
+ kevent := &kevents[0]
+ watchfd := int(kevent.Ident)
+ mask := uint32(kevent.Fflags)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+ event := newEvent(path.name, mask)
+
+ if path.isDir && !(event.Op&Remove == Remove) {
+				// Double-check that the directory still exists. This can happen when
+				// we do an rm -fr on a recursively watched folder and receive a
+				// modification event first, even though the folder has been deleted;
+				// the delete event arrives later.
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel
+ w.Events <- event
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ fileDir, _ := filepath.Split(event.Name)
+ fileDir = filepath.Clean(fileDir)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+					// Make sure the directory still exists before we watch for changes.
+					// When we do a recursive watch and perform rm -fr, the parent
+					// directory might have gone missing; ignore the missing directory
+					// and let the upcoming delete event remove the watch from it.
+					if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ // FIXME: should this be for events on files or just isDir?
+ }
+ }
+ }
+
+ // Move to next event
+ kevents = kevents[1:]
+ }
+ }
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles watches each file in a directory, to mimic how inotify behaves when adding a watch on a directory.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ if err := w.internalWatch(filePath, fileInfo); err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This is needed so that the BSD
+// version of fsnotify matches Linux inotify, which provides a create event
+// for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ w.Errors <- err
+ }
+
+ // Search for new files
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event
+ w.Events <- newCreateEvent(filePath)
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ if err := w.internalWatch(filePath, fileInfo); err != nil {
+ return
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error {
+ if fileInfo.IsDir() {
+ // mimic Linux providing delete events for subdirectories
+ // but preserve the flags used if currently watching subdirectory
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= syscall.NOTE_DELETE
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = syscall.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]syscall.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // register the events
+ success, err := syscall.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) {
+ n, err := syscall.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) syscall.Timespec {
+ return syscall.NsecToTimespec(d.Nanoseconds())
+}
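
The kqueue backend above exposes the same public surface as the other backends: NewWatcher, Add, Remove, Close, and the Events/Errors channels. A minimal usage sketch, assuming the vendored import path; the watched path is a placeholder:

    package main

    import (
        "log"

        "gopkg.in/fsnotify.v1"
    )

    func main() {
        // Backed by kqueue on BSD/Darwin; other platforms use their own backend.
        watcher, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer watcher.Close()

        // Non-recursive watch; "/tmp/watched" is a placeholder path.
        if err := watcher.Add("/tmp/watched"); err != nil {
            log.Fatal(err)
        }

        // Events and errors arrive on separate channels.
        for {
            select {
            case ev := <-watcher.Events:
                log.Println("event:", ev)
            case err := <-watcher.Errors:
                log.Println("error:", err)
            }
        }
    }
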
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go
new file mode 100644
index 000000000..c57ccb427
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "syscall"
+
+const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go
new file mode 100644
index 000000000..174b2c331
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "syscall"
+
+// note: this constant is not defined on BSD
+const openMode = syscall.O_EVTONLY
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go
new file mode 100644
index 000000000..811585227
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sys_FS_ALL_EVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+const (
+ // Options for AddWatch
+ sys_FS_ONESHOT = 0x80000000
+ sys_FS_ONLYDIR = 0x1000000
+
+ // Events
+ sys_FS_ACCESS = 0x1
+ sys_FS_ALL_EVENTS = 0xfff
+ sys_FS_ATTRIB = 0x4
+ sys_FS_CLOSE = 0x18
+ sys_FS_CREATE = 0x100
+ sys_FS_DELETE = 0x200
+ sys_FS_DELETE_SELF = 0x400
+ sys_FS_MODIFY = 0x2
+ sys_FS_MOVE = 0xc0
+ sys_FS_MOVED_FROM = 0x40
+ sys_FS_MOVED_TO = 0x80
+ sys_FS_MOVE_SELF = 0x800
+
+ // Special events
+ sys_FS_IGNORED = 0x8000
+ sys_FS_Q_OVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF {
+ e.Op |= Remove
+ }
+ if mask&sys_FS_MODIFY == sys_FS_MODIFY {
+ e.Op |= Write
+ }
+ if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&sys_FS_ATTRIB == sys_FS_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle syscall.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov syscall.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if e != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
+ }
+ return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+ if e != nil {
+ return "", os.NewSyscallError("GetFileAttributes", e)
+ }
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname)
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func getIno(path string) (ino *inode, err error) {
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateFile", e)
+ }
+ var fi syscall.ByHandleFileInformation
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+ syscall.CloseHandle(h)
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
+ return nil
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+ syscall.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ syscall.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+ if err = w.startRead(watchEntry); err != nil {
+ return err
+ }
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+ if watch == nil {
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
+ delete(watch.names, name)
+ }
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CancelIo", e)
+ w.deleteWatch(watch)
+ }
+ mask := toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= toWindowsFlags(m)
+ }
+ if mask == 0 {
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if e != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
+ if watch.mask&sys_FS_ONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n, key uint32
+ ov *syscall.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+ watch := (*watch)(unsafe.Pointer(ov))
+
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+ var err error
+ if e := syscall.CloseHandle(w.port); e != nil {
+ err = os.NewSyscallError("CloseHandle", e)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch e {
+ case syscall.ERROR_MORE_DATA:
+ if watch == nil {
+ w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ fullname := watch.path + "\\" + name
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sys_FS_DELETE_SELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sys_FS_MODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sys_FS_MOVE_SELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sys_FS_ONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sys_FS_ONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = watch.path + "\\" + watch.rename
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Error!
+ if offset >= n {
+				w.Errors <- errors.New("Windows system assumed the buffer was larger than it is; events have likely been missed.")
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.Errors <- err
+ }
+ }
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+ if mask == 0 {
+ return false
+ }
+ event := newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit:
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sys_FS_ACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sys_FS_MODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sys_FS_ATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sys_FS_CREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sys_FS_DELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sys_FS_MODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sys_FS_MOVED_FROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sys_FS_MOVED_TO
+ }
+ return 0
+}
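
Both the kqueue and Windows backends translate platform flags into the shared Op bitmask through their newEvent functions, so consumers only test Op bits. A hedged sketch of doing that on a received event; the Op constants come from fsnotify.go (not part of this hunk), and imports of "log" and the vendored package are assumed:

    // handleEvent inspects the Op bitmask produced by newEvent. One event may
    // carry several bits at once, so each flag is tested independently rather
    // than with a switch.
    func handleEvent(ev fsnotify.Event) {
        if ev.Op&fsnotify.Create == fsnotify.Create {
            log.Println("created:", ev.Name)
        }
        if ev.Op&fsnotify.Write == fsnotify.Write {
            log.Println("written:", ev.Name)
        }
        if ev.Op&fsnotify.Remove == fsnotify.Remove {
            log.Println("removed:", ev.Name)
        }
        if ev.Op&fsnotify.Rename == fsnotify.Rename {
            log.Println("renamed:", ev.Name)
        }
        if ev.Op&fsnotify.Chmod == fsnotify.Chmod {
            log.Println("chmod:", ev.Name)
        }
    }
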
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml b/Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml
new file mode 100644
index 000000000..c3cf4b8a6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+
+services:
+- redis-server
+
+go:
+ - 1.1
+ - 1.2
+ - 1.3
+ - tip
+
+install:
+ - go get gopkg.in/bufio.v1
+ - go get gopkg.in/check.v1
+ - mkdir -p $HOME/gopath/src/gopkg.in
+ - ln -s `pwd` $HOME/gopath/src/gopkg.in/redis.v2
+
+before_script:
+ - redis-server testdata/sentinel.conf --sentinel &
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE
new file mode 100644
index 000000000..6855a95fe
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Redis Go Client Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/Makefile b/Godeps/_workspace/src/gopkg.in/redis.v2/Makefile
new file mode 100644
index 000000000..b250d9bfa
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/Makefile
@@ -0,0 +1,3 @@
+all:
+ go test gopkg.in/redis.v2 -cpu=1,2,4
+ go test gopkg.in/redis.v2 -short -race
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/command.go b/Godeps/_workspace/src/gopkg.in/redis.v2/command.go
new file mode 100644
index 000000000..d7c76cf92
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/command.go
@@ -0,0 +1,597 @@
+package redis
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "gopkg.in/bufio.v1"
+)
+
+var (
+ _ Cmder = (*Cmd)(nil)
+ _ Cmder = (*SliceCmd)(nil)
+ _ Cmder = (*StatusCmd)(nil)
+ _ Cmder = (*IntCmd)(nil)
+ _ Cmder = (*DurationCmd)(nil)
+ _ Cmder = (*BoolCmd)(nil)
+ _ Cmder = (*StringCmd)(nil)
+ _ Cmder = (*FloatCmd)(nil)
+ _ Cmder = (*StringSliceCmd)(nil)
+ _ Cmder = (*BoolSliceCmd)(nil)
+ _ Cmder = (*StringStringMapCmd)(nil)
+ _ Cmder = (*ZSliceCmd)(nil)
+ _ Cmder = (*ScanCmd)(nil)
+)
+
+type Cmder interface {
+ args() []string
+ parseReply(*bufio.Reader) error
+ setErr(error)
+
+ writeTimeout() *time.Duration
+ readTimeout() *time.Duration
+
+ Err() error
+ String() string
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ cmd.setErr(e)
+ }
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ s := strings.Join(cmd.args(), " ")
+ if err := cmd.Err(); err != nil {
+ return s + ": " + err.Error()
+ }
+ if val != nil {
+ return s + ": " + fmt.Sprint(val)
+ }
+	return s
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ _args []string
+
+ err error
+
+ _writeTimeout, _readTimeout *time.Duration
+}
+
+func newBaseCmd(args ...string) *baseCmd {
+ return &baseCmd{
+ _args: args,
+ }
+}
+
+func (cmd *baseCmd) Err() error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return nil
+}
+
+func (cmd *baseCmd) args() []string {
+ return cmd._args
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+func (cmd *baseCmd) writeTimeout() *time.Duration {
+ return cmd._writeTimeout
+}
+
+func (cmd *baseCmd) setWriteTimeout(d time.Duration) {
+ cmd._writeTimeout = &d
+}
+
+func (cmd *baseCmd) setErr(e error) {
+ cmd.err = e
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ *baseCmd
+
+ val interface{}
+}
+
+func NewCmd(args ...string) *Cmd {
+ return &Cmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) parseReply(rd *bufio.Reader) error {
+ cmd.val, cmd.err = parseReply(rd, parseSlice)
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ *baseCmd
+
+ val []interface{}
+}
+
+func NewSliceCmd(args ...string) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SliceCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseSlice)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.([]interface{})
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ *baseCmd
+
+ val string
+}
+
+func NewStatusCmd(args ...string) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ *baseCmd
+
+ val int64
+}
+
+func NewIntCmd(args ...string) *IntCmd {
+ return &IntCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(int64)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ *baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+func NewDurationCmd(precision time.Duration, args ...string) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: newBaseCmd(args...),
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = time.Duration(v.(int64)) * cmd.precision
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ *baseCmd
+
+ val bool
+}
+
+func NewBoolCmd(args ...string) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(int64) == 1
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ *baseCmd
+
+ val string
+}
+
+func NewStringCmd(args ...string) *StringCmd {
+ return &StringCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.val, 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.val, 10, 64)
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.val, 64)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ *baseCmd
+
+ val float64
+}
+
+func NewFloatCmd(args ...string) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val, cmd.err = strconv.ParseFloat(v.(string), 64)
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ *baseCmd
+
+ val []string
+}
+
+func NewStringSliceCmd(args ...string) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseStringSlice)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.([]string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ *baseCmd
+
+ val []bool
+}
+
+func NewBoolSliceCmd(args ...string) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseBoolSlice)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.([]bool)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+ *baseCmd
+
+ val map[string]string
+}
+
+func NewStringStringMapCmd(args ...string) *StringStringMapCmd {
+ return &StringStringMapCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStringMapCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseStringStringMap)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(map[string]string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ *baseCmd
+
+ val []Z
+}
+
+func NewZSliceCmd(args ...string) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseZSlice)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.([]Z)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ *baseCmd
+
+ cursor int64
+ keys []string
+}
+
+func NewScanCmd(args ...string) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *ScanCmd) Val() (int64, []string) {
+ return cmd.cursor, cmd.keys
+}
+
+func (cmd *ScanCmd) Result() (int64, []string, error) {
+ return cmd.cursor, cmd.keys, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.keys)
+}
+
+func (cmd *ScanCmd) parseReply(rd *bufio.Reader) error {
+ vi, err := parseReply(rd, parseSlice)
+ if err != nil {
+ cmd.err = err
+ return cmd.err
+ }
+ v := vi.([]interface{})
+
+ cmd.cursor, cmd.err = strconv.ParseInt(v[0].(string), 10, 64)
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ keys := v[1].([]interface{})
+ for _, keyi := range keys {
+ cmd.keys = append(cmd.keys, keyi.(string))
+ }
+
+ return nil
+}
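
Each command type above follows the same pattern: the client builds the command, Process sends it, and Val/Result (plus typed helpers such as StringCmd.Int64) expose the parsed reply. A hedged usage sketch; NewTCPClient, Options, and Close come from redis.go later in this diff, and the address is a placeholder:

    package main

    import (
        "fmt"
        "log"

        "gopkg.in/redis.v2"
    )

    func main() {
        client := redis.NewTCPClient(&redis.Options{Addr: "localhost:6379"})
        defer client.Close()

        // StatusCmd: Err() reports protocol or network failures.
        if err := client.Set("counter", "10").Err(); err != nil {
            log.Fatal(err)
        }

        // StringCmd adds typed accessors on top of the raw string reply.
        n, err := client.Get("counter").Int64()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("counter =", n)

        // IntCmd: Result() returns the parsed value and error together.
        total, err := client.Incr("counter").Result()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("after INCR:", total)
    }
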
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/commands.go b/Godeps/_workspace/src/gopkg.in/redis.v2/commands.go
new file mode 100644
index 000000000..6068bab17
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/commands.go
@@ -0,0 +1,1246 @@
+package redis
+
+import (
+ "io"
+ "strconv"
+ "time"
+)
+
+func formatFloat(f float64) string {
+ return strconv.FormatFloat(f, 'f', -1, 64)
+}
+
+func readTimeout(sec int64) time.Duration {
+ if sec == 0 {
+ return 0
+ }
+ return time.Duration(sec+1) * time.Second
+}
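
readTimeout above pads the timeout used by the blocking commands further down (BLPop, BRPop, BRPopLPush, Migrate) by one second, so the client's read deadline does not fire before the server-side timeout; a zero argument means block indefinitely, i.e. no deadline. A small in-package illustration; the example name is hypothetical and it assumes a _test.go file with "fmt" and "time" imported:

    func ExampleReadTimeout() {
        fmt.Println(readTimeout(5) == 6*time.Second) // one second of padding
        fmt.Println(readTimeout(0))                  // block forever: no deadline
        // Output:
        // true
        // 0s
    }
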
+
+//------------------------------------------------------------------------------
+
+func (c *Client) Auth(password string) *StatusCmd {
+ cmd := NewStatusCmd("AUTH", password)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Echo(message string) *StringCmd {
+ cmd := NewStringCmd("ECHO", message)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Ping() *StatusCmd {
+ cmd := NewStatusCmd("PING")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Quit() *StatusCmd {
+ panic("not implemented")
+}
+
+func (c *Client) Select(index int64) *StatusCmd {
+ cmd := NewStatusCmd("SELECT", strconv.FormatInt(index, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) Del(keys ...string) *IntCmd {
+ args := append([]string{"DEL"}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Dump(key string) *StringCmd {
+ cmd := NewStringCmd("DUMP", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Exists(key string) *BoolCmd {
+ cmd := NewBoolCmd("EXISTS", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Expire(key string, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("EXPIRE", key, strconv.FormatInt(int64(dur/time.Second), 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd("EXPIREAT", key, strconv.FormatInt(tm.Unix(), 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Keys(pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("KEYS", pattern)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Migrate(host, port, key string, db, timeout int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ "MIGRATE",
+ host,
+ port,
+ key,
+ strconv.FormatInt(db, 10),
+ strconv.FormatInt(timeout, 10),
+ )
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Move(key string, db int64) *BoolCmd {
+ cmd := NewBoolCmd("MOVE", key, strconv.FormatInt(db, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ObjectRefCount(keys ...string) *IntCmd {
+ args := append([]string{"OBJECT", "REFCOUNT"}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ObjectEncoding(keys ...string) *StringCmd {
+ args := append([]string{"OBJECT", "ENCODING"}, keys...)
+ cmd := NewStringCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ObjectIdleTime(keys ...string) *DurationCmd {
+ args := append([]string{"OBJECT", "IDLETIME"}, keys...)
+ cmd := NewDurationCmd(time.Second, args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Persist(key string) *BoolCmd {
+ cmd := NewBoolCmd("PERSIST", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PExpire(key string, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("PEXPIRE", key, strconv.FormatInt(int64(dur/time.Millisecond), 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ "PEXPIREAT",
+ key,
+ strconv.FormatInt(tm.UnixNano()/int64(time.Millisecond), 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PTTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Millisecond, "PTTL", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RandomKey() *StringCmd {
+ cmd := NewStringCmd("RANDOMKEY")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Rename(key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd("RENAME", key, newkey)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RenameNX(key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd("RENAMENX", key, newkey)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Restore(key string, ttl int64, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "RESTORE",
+ key,
+ strconv.FormatInt(ttl, 10),
+ value,
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count float64
+ Get []string
+ Order string
+ IsAlpha bool
+ Store string
+}
+
+func (c *Client) Sort(key string, sort Sort) *StringSliceCmd {
+ args := []string{"SORT", key}
+ if sort.By != "" {
+ args = append(args, "BY", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "LIMIT", formatFloat(sort.Offset), formatFloat(sort.Count))
+ }
+ for _, get := range sort.Get {
+ args = append(args, "GET", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.IsAlpha {
+ args = append(args, "ALPHA")
+ }
+ if sort.Store != "" {
+ args = append(args, "STORE", sort.Store)
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) TTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Second, "TTL", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Type(key string) *StatusCmd {
+ cmd := NewStatusCmd("TYPE", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Scan(cursor int64, match string, count int64) *ScanCmd {
+ args := []string{"SCAN", strconv.FormatInt(cursor, 10)}
+ if match != "" {
+ args = append(args, "MATCH", match)
+ }
+ if count > 0 {
+ args = append(args, "COUNT", strconv.FormatInt(count, 10))
+ }
+ cmd := NewScanCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SScan(key string, cursor int64, match string, count int64) *ScanCmd {
+ args := []string{"SSCAN", key, strconv.FormatInt(cursor, 10)}
+ if match != "" {
+ args = append(args, "MATCH", match)
+ }
+ if count > 0 {
+ args = append(args, "COUNT", strconv.FormatInt(count, 10))
+ }
+ cmd := NewScanCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HScan(key string, cursor int64, match string, count int64) *ScanCmd {
+ args := []string{"HSCAN", key, strconv.FormatInt(cursor, 10)}
+ if match != "" {
+ args = append(args, "MATCH", match)
+ }
+ if count > 0 {
+ args = append(args, "COUNT", strconv.FormatInt(count, 10))
+ }
+ cmd := NewScanCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZScan(key string, cursor int64, match string, count int64) *ScanCmd {
+ args := []string{"ZSCAN", key, strconv.FormatInt(cursor, 10)}
+ if match != "" {
+ args = append(args, "MATCH", match)
+ }
+ if count > 0 {
+ args = append(args, "COUNT", strconv.FormatInt(count, 10))
+ }
+ cmd := NewScanCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
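
Scan, SScan, HScan and ZScan above all return a ScanCmd; Result() yields the next cursor and one batch of keys, and iteration ends when the server returns cursor 0. A hedged sketch of draining a full SCAN; the redis.v2 import and client construction are assumed as in the earlier sketch:

    // scanAll follows the SCAN cursor until the server reports 0, collecting
    // every key that matches pattern. The COUNT argument is only a hint.
    func scanAll(client *redis.Client, pattern string) ([]string, error) {
        var all []string
        var cursor int64
        for {
            next, keys, err := client.Scan(cursor, pattern, 100).Result()
            if err != nil {
                return nil, err
            }
            all = append(all, keys...)
            if next == 0 {
                return all, nil
            }
            cursor = next
        }
    }
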
+
+//------------------------------------------------------------------------------
+
+func (c *Client) Append(key, value string) *IntCmd {
+ cmd := NewIntCmd("APPEND", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+}
+
+func (c *Client) BitCount(key string, bitCount *BitCount) *IntCmd {
+ args := []string{"BITCOUNT", key}
+ if bitCount != nil {
+ args = append(
+ args,
+ strconv.FormatInt(bitCount.Start, 10),
+ strconv.FormatInt(bitCount.End, 10),
+ )
+ }
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) bitOp(op, destKey string, keys ...string) *IntCmd {
+ args := []string{"BITOP", op, destKey}
+ args = append(args, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) BitOpAnd(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("AND", destKey, keys...)
+}
+
+func (c *Client) BitOpOr(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("OR", destKey, keys...)
+}
+
+func (c *Client) BitOpXor(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("XOR", destKey, keys...)
+}
+
+func (c *Client) BitOpNot(destKey string, key string) *IntCmd {
+ return c.bitOp("NOT", destKey, key)
+}
+
+func (c *Client) Decr(key string) *IntCmd {
+ cmd := NewIntCmd("DECR", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) DecrBy(key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd("DECRBY", key, strconv.FormatInt(decrement, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Get(key string) *StringCmd {
+ cmd := NewStringCmd("GET", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) GetBit(key string, offset int64) *IntCmd {
+ cmd := NewIntCmd("GETBIT", key, strconv.FormatInt(offset, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) GetRange(key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(
+ "GETRANGE",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(end, 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) GetSet(key, value string) *StringCmd {
+ cmd := NewStringCmd("GETSET", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Incr(key string) *IntCmd {
+ cmd := NewIntCmd("INCR", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) IncrBy(key string, value int64) *IntCmd {
+ cmd := NewIntCmd("INCRBY", key, strconv.FormatInt(value, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) IncrByFloat(key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd("INCRBYFLOAT", key, formatFloat(value))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) MGet(keys ...string) *SliceCmd {
+ args := append([]string{"MGET"}, keys...)
+ cmd := NewSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) MSet(pairs ...string) *StatusCmd {
+ args := append([]string{"MSET"}, pairs...)
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) MSetNX(pairs ...string) *BoolCmd {
+ args := append([]string{"MSETNX"}, pairs...)
+ cmd := NewBoolCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PSetEx(key string, dur time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "PSETEX",
+ key,
+ strconv.FormatInt(int64(dur/time.Millisecond), 10),
+ value,
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Set(key, value string) *StatusCmd {
+ cmd := NewStatusCmd("SET", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SetBit(key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ "SETBIT",
+ key,
+ strconv.FormatInt(offset, 10),
+ strconv.FormatInt(int64(value), 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SetEx(key string, dur time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd("SETEX", key, strconv.FormatInt(int64(dur/time.Second), 10), value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SetNX(key, value string) *BoolCmd {
+ cmd := NewBoolCmd("SETNX", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SetRange(key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd("SETRANGE", key, strconv.FormatInt(offset, 10), value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) StrLen(key string) *IntCmd {
+ cmd := NewIntCmd("STRLEN", key)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) HDel(key string, fields ...string) *IntCmd {
+ args := append([]string{"HDEL", key}, fields...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HExists(key, field string) *BoolCmd {
+ cmd := NewBoolCmd("HEXISTS", key, field)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HGet(key, field string) *StringCmd {
+ cmd := NewStringCmd("HGET", key, field)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HGetAll(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("HGETALL", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HGetAllMap(key string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd("HGETALL", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HIncrBy(key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd("HINCRBY", key, field, strconv.FormatInt(incr, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HIncrByFloat(key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd("HINCRBYFLOAT", key, field, formatFloat(incr))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HKeys(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("HKEYS", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HLen(key string) *IntCmd {
+ cmd := NewIntCmd("HLEN", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HMGet(key string, fields ...string) *SliceCmd {
+ args := append([]string{"HMGET", key}, fields...)
+ cmd := NewSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HMSet(key, field, value string, pairs ...string) *StatusCmd {
+ args := append([]string{"HMSET", key, field, value}, pairs...)
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HSet(key, field, value string) *BoolCmd {
+ cmd := NewBoolCmd("HSET", key, field, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HSetNX(key, field, value string) *BoolCmd {
+ cmd := NewBoolCmd("HSETNX", key, field, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HVals(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("HVALS", key)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) BLPop(timeout int64, keys ...string) *StringSliceCmd {
+ args := append([]string{"BLPOP"}, keys...)
+ args = append(args, strconv.FormatInt(timeout, 10))
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) BRPop(timeout int64, keys ...string) *StringSliceCmd {
+ args := append([]string{"BRPOP"}, keys...)
+ args = append(args, strconv.FormatInt(timeout, 10))
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) BRPopLPush(source, destination string, timeout int64) *StringCmd {
+ cmd := NewStringCmd(
+ "BRPOPLPUSH",
+ source,
+ destination,
+ strconv.FormatInt(timeout, 10),
+ )
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LIndex(key string, index int64) *StringCmd {
+ cmd := NewStringCmd("LINDEX", key, strconv.FormatInt(index, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LInsert(key, op, pivot, value string) *IntCmd {
+ cmd := NewIntCmd("LINSERT", key, op, pivot, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LLen(key string) *IntCmd {
+ cmd := NewIntCmd("LLEN", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LPop(key string) *StringCmd {
+ cmd := NewStringCmd("LPOP", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LPush(key string, values ...string) *IntCmd {
+ args := append([]string{"LPUSH", key}, values...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LPushX(key, value string) *IntCmd {
+ cmd := NewIntCmd("LPUSHX", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LRange(key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ "LRANGE",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LRem(key string, count int64, value string) *IntCmd {
+ cmd := NewIntCmd("LREM", key, strconv.FormatInt(count, 10), value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LSet(key string, index int64, value string) *StatusCmd {
+ cmd := NewStatusCmd("LSET", key, strconv.FormatInt(index, 10), value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LTrim(key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ "LTRIM",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RPop(key string) *StringCmd {
+ cmd := NewStringCmd("RPOP", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RPopLPush(source, destination string) *StringCmd {
+ cmd := NewStringCmd("RPOPLPUSH", source, destination)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RPush(key string, values ...string) *IntCmd {
+ args := append([]string{"RPUSH", key}, values...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RPushX(key string, value string) *IntCmd {
+ cmd := NewIntCmd("RPUSHX", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) SAdd(key string, members ...string) *IntCmd {
+ args := append([]string{"SADD", key}, members...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SCard(key string) *IntCmd {
+ cmd := NewIntCmd("SCARD", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SDiff(keys ...string) *StringSliceCmd {
+ args := append([]string{"SDIFF"}, keys...)
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SDiffStore(destination string, keys ...string) *IntCmd {
+ args := append([]string{"SDIFFSTORE", destination}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SInter(keys ...string) *StringSliceCmd {
+ args := append([]string{"SINTER"}, keys...)
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SInterStore(destination string, keys ...string) *IntCmd {
+ args := append([]string{"SINTERSTORE", destination}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SIsMember(key, member string) *BoolCmd {
+ cmd := NewBoolCmd("SISMEMBER", key, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SMembers(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("SMEMBERS", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SMove(source, destination, member string) *BoolCmd {
+ cmd := NewBoolCmd("SMOVE", source, destination, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SPop(key string) *StringCmd {
+ cmd := NewStringCmd("SPOP", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SRandMember(key string) *StringCmd {
+ cmd := NewStringCmd("SRANDMEMBER", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SRem(key string, members ...string) *IntCmd {
+ args := append([]string{"SREM", key}, members...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SUnion(keys ...string) *StringSliceCmd {
+ args := append([]string{"SUNION"}, keys...)
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SUnionStore(destination string, keys ...string) *IntCmd {
+ args := append([]string{"SUNIONSTORE", destination}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type Z struct {
+ Score float64
+ Member string
+}
+
+type ZStore struct {
+ Weights []int64
+ Aggregate string
+}
+
+func (c *Client) ZAdd(key string, members ...Z) *IntCmd {
+ args := []string{"ZADD", key}
+ for _, m := range members {
+ args = append(args, formatFloat(m.Score), m.Member)
+ }
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZCard(key string) *IntCmd {
+ cmd := NewIntCmd("ZCARD", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZCount(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("ZCOUNT", key, min, max)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZIncrBy(key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd("ZINCRBY", key, formatFloat(increment), member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZInterStore(
+ destination string,
+ store ZStore,
+ keys ...string,
+) *IntCmd {
+ args := []string{"ZINTERSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)}
+ args = append(args, keys...)
+ if len(store.Weights) > 0 {
+ args = append(args, "WEIGHTS")
+ for _, weight := range store.Weights {
+ args = append(args, strconv.FormatInt(weight, 10))
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "AGGREGATE", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
+ args := []string{
+ "ZRANGE",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ }
+ if withScores {
+ args = append(args, "WITHSCORES")
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRange(key string, start, stop int64) *StringSliceCmd {
+ return c.zRange(key, start, stop, false)
+}
+
+func (c *Client) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+ args := []string{
+ "ZRANGE",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ "WITHSCORES",
+ }
+ cmd := NewZSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+type ZRangeByScore struct {
+ Min, Max string
+
+ Offset, Count int64
+}
+
+func (c *Client) zRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd {
+ args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "WITHSCORES")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "LIMIT",
+ strconv.FormatInt(opt.Offset, 10),
+ strconv.FormatInt(opt.Count, 10),
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd {
+ return c.zRangeByScore(key, opt, false)
+}
+
+func (c *Client) ZRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd {
+ args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max, "WITHSCORES"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "LIMIT",
+ strconv.FormatInt(opt.Offset, 10),
+ strconv.FormatInt(opt.Count, 10),
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("ZRANK", key, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRem(key string, members ...string) *IntCmd {
+ args := append([]string{"ZREM", key}, members...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ "ZREMRANGEBYRANK",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRemRangeByScore(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("ZREMRANGEBYSCORE", key, min, max)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) zRevRange(key, start, stop string, withScores bool) *StringSliceCmd {
+ args := []string{"ZREVRANGE", key, start, stop}
+ if withScores {
+ args = append(args, "WITHSCORES")
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRevRange(key, start, stop string) *StringSliceCmd {
+ return c.zRevRange(key, start, stop, false)
+}
+
+func (c *Client) ZRevRangeWithScores(key, start, stop string) *ZSliceCmd {
+ args := []string{"ZREVRANGE", key, start, stop, "WITHSCORES"}
+ cmd := NewZSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) zRevRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd {
+ args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min}
+ if withScores {
+ args = append(args, "WITHSCORES")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "LIMIT",
+ strconv.FormatInt(opt.Offset, 10),
+ strconv.FormatInt(opt.Count, 10),
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRevRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd {
+ return c.zRevRangeByScore(key, opt, false)
+}
+
+func (c *Client) ZRevRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd {
+ args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min, "WITHSCORES"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "LIMIT",
+ strconv.FormatInt(opt.Offset, 10),
+ strconv.FormatInt(opt.Count, 10),
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRevRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("ZREVRANK", key, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZScore(key, member string) *FloatCmd {
+ cmd := NewFloatCmd("ZSCORE", key, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZUnionStore(
+ destination string,
+ store ZStore,
+ keys ...string,
+) *IntCmd {
+ args := []string{"ZUNIONSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)}
+ args = append(args, keys...)
+ if len(store.Weights) > 0 {
+ args = append(args, "WEIGHTS")
+ for _, weight := range store.Weights {
+ args = append(args, strconv.FormatInt(weight, 10))
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "AGGREGATE", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) BgRewriteAOF() *StatusCmd {
+ cmd := NewStatusCmd("BGREWRITEAOF")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) BgSave() *StatusCmd {
+ cmd := NewStatusCmd("BGSAVE")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ClientKill(ipPort string) *StatusCmd {
+ cmd := NewStatusCmd("CLIENT", "KILL", ipPort)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ClientList() *StringCmd {
+ cmd := NewStringCmd("CLIENT", "LIST")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ConfigGet(parameter string) *SliceCmd {
+ cmd := NewSliceCmd("CONFIG", "GET", parameter)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ConfigResetStat() *StatusCmd {
+ cmd := NewStatusCmd("CONFIG", "RESETSTAT")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ConfigSet(parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd("CONFIG", "SET", parameter, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) DbSize() *IntCmd {
+ cmd := NewIntCmd("DBSIZE")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) FlushAll() *StatusCmd {
+ cmd := NewStatusCmd("FLUSHALL")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) FlushDb() *StatusCmd {
+ cmd := NewStatusCmd("FLUSHDB")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Info() *StringCmd {
+ cmd := NewStringCmd("INFO")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LastSave() *IntCmd {
+ cmd := NewIntCmd("LASTSAVE")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Save() *StatusCmd {
+ cmd := NewStatusCmd("SAVE")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) shutdown(modifier string) *StatusCmd {
+ var args []string
+ if modifier == "" {
+ args = []string{"SHUTDOWN"}
+ } else {
+ args = []string{"SHUTDOWN", modifier}
+ }
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errorf(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c *Client) Shutdown() *StatusCmd {
+ return c.shutdown("")
+}
+
+func (c *Client) ShutdownSave() *StatusCmd {
+ return c.shutdown("SAVE")
+}
+
+func (c *Client) ShutdownNoSave() *StatusCmd {
+ return c.shutdown("NOSAVE")
+}
+
+func (c *Client) SlaveOf(host, port string) *StatusCmd {
+ cmd := NewStatusCmd("SLAVEOF", host, port)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SlowLog() {
+ panic("not implemented")
+}
+
+func (c *Client) Sync() {
+ panic("not implemented")
+}
+
+func (c *Client) Time() *StringSliceCmd {
+ cmd := NewStringSliceCmd("TIME")
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) Eval(script string, keys []string, args []string) *Cmd {
+ cmdArgs := []string{"EVAL", script, strconv.FormatInt(int64(len(keys)), 10)}
+ cmdArgs = append(cmdArgs, keys...)
+ cmdArgs = append(cmdArgs, args...)
+ cmd := NewCmd(cmdArgs...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) EvalSha(sha1 string, keys []string, args []string) *Cmd {
+ cmdArgs := []string{"EVALSHA", sha1, strconv.FormatInt(int64(len(keys)), 10)}
+ cmdArgs = append(cmdArgs, keys...)
+ cmdArgs = append(cmdArgs, args...)
+ cmd := NewCmd(cmdArgs...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ScriptExists(scripts ...string) *BoolSliceCmd {
+ args := append([]string{"SCRIPT", "EXISTS"}, scripts...)
+ cmd := NewBoolSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ScriptFlush() *StatusCmd {
+ cmd := NewStatusCmd("SCRIPT", "FLUSH")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ScriptKill() *StatusCmd {
+ cmd := NewStatusCmd("SCRIPT", "KILL")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ScriptLoad(script string) *StringCmd {
+ cmd := NewStringCmd("SCRIPT", "LOAD", script)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) DebugObject(key string) *StringCmd {
+ cmd := NewStringCmd("DEBUG", "OBJECT", key)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) PubSubChannels(pattern string) *StringSliceCmd {
+ args := []string{"PUBSUB", "CHANNELS"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PubSubNumSub(channels ...string) *SliceCmd {
+ args := []string{"PUBSUB", "NUMSUB"}
+ args = append(args, channels...)
+ cmd := NewSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PubSubNumPat() *IntCmd {
+ cmd := NewIntCmd("PUBSUB", "NUMPAT")
+ c.Process(cmd)
+ return cmd
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/doc.go b/Godeps/_workspace/src/gopkg.in/redis.v2/doc.go
new file mode 100644
index 000000000..55262533a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/doc.go
@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/error.go b/Godeps/_workspace/src/gopkg.in/redis.v2/error.go
new file mode 100644
index 000000000..667fffdc6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/error.go
@@ -0,0 +1,23 @@
+package redis
+
+import (
+ "fmt"
+)
+
+// Redis nil reply.
+var Nil = errorf("redis: nil")
+
+// Redis transaction failed.
+var TxFailedErr = errorf("redis: transaction failed")
+
+type redisError struct {
+ s string
+}
+
+func errorf(s string, args ...interface{}) redisError {
+ return redisError{s: fmt.Sprintf(s, args...)}
+}
+
+func (err redisError) Error() string {
+ return err.s
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go
new file mode 100644
index 000000000..dbc951310
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go
@@ -0,0 +1,180 @@
+package redis_test
+
+import (
+ "fmt"
+ "strconv"
+
+ "gopkg.in/redis.v2"
+)
+
+var client *redis.Client
+
+func init() {
+ client = redis.NewTCPClient(&redis.Options{
+ Addr: ":6379",
+ })
+ client.FlushDb()
+}
+
+func ExampleNewTCPClient() {
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ pong, err := client.Ping().Result()
+ fmt.Println(pong, err)
+ // Output: PONG <nil>
+}
+
+func ExampleNewFailoverClient() {
+ client := redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: "master",
+ SentinelAddrs: []string{":26379"},
+ })
+
+ pong, err := client.Ping().Result()
+ fmt.Println(pong, err)
+ // Output: PONG <nil>
+}
+
+func ExampleClient() {
+ if err := client.Set("foo", "bar").Err(); err != nil {
+ panic(err)
+ }
+
+ v, err := client.Get("hello").Result()
+ fmt.Printf("%q %q %v", v, err, err == redis.Nil)
+ // Output: "" "redis: nil" true
+}
+
+func ExampleClient_Incr() {
+ if err := client.Incr("counter").Err(); err != nil {
+ panic(err)
+ }
+
+ n, err := client.Get("counter").Int64()
+ fmt.Println(n, err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_Pipelined() {
+ cmds, err := client.Pipelined(func(c *redis.Pipeline) error {
+ c.Set("key1", "hello1")
+ c.Get("key1")
+ return nil
+ })
+ fmt.Println(err)
+ set := cmds[0].(*redis.StatusCmd)
+ fmt.Println(set)
+ get := cmds[1].(*redis.StringCmd)
+ fmt.Println(get)
+ // Output: <nil>
+ // SET key1 hello1: OK
+ // GET key1: hello1
+}
+
+func ExamplePipeline() {
+ pipeline := client.Pipeline()
+ set := pipeline.Set("key1", "hello1")
+ get := pipeline.Get("key1")
+ cmds, err := pipeline.Exec()
+ fmt.Println(cmds, err)
+ fmt.Println(set)
+ fmt.Println(get)
+ // Output: [SET key1 hello1: OK GET key1: hello1] <nil>
+ // SET key1 hello1: OK
+ // GET key1: hello1
+}
+
+func ExampleMulti() {
+ incr := func(tx *redis.Multi) ([]redis.Cmder, error) {
+ s, err := tx.Get("key").Result()
+ if err != nil && err != redis.Nil {
+ return nil, err
+ }
+ n, _ := strconv.ParseInt(s, 10, 64)
+
+ return tx.Exec(func() error {
+ tx.Set("key", strconv.FormatInt(n+1, 10))
+ return nil
+ })
+ }
+
+ client.Del("key")
+
+ tx := client.Multi()
+ defer tx.Close()
+
+ watch := tx.Watch("key")
+ _ = watch.Err()
+
+ for {
+ cmds, err := incr(tx)
+ if err == redis.TxFailedErr {
+ continue
+ } else if err != nil {
+ panic(err)
+ }
+ fmt.Println(cmds, err)
+ break
+ }
+
+ // Output: [SET key 1: OK] <nil>
+}
+
+func ExamplePubSub() {
+ pubsub := client.PubSub()
+ defer pubsub.Close()
+
+ err := pubsub.Subscribe("mychannel")
+ _ = err
+
+ msg, err := pubsub.Receive()
+ fmt.Println(msg, err)
+
+ pub := client.Publish("mychannel", "hello")
+ _ = pub.Err()
+
+ msg, err = pubsub.Receive()
+ fmt.Println(msg, err)
+
+ // Output: subscribe: mychannel <nil>
+ // Message<mychannel: hello> <nil>
+}
+
+func ExampleScript() {
+ setnx := redis.NewScript(`
+ if redis.call("get", KEYS[1]) == false then
+ redis.call("set", KEYS[1], ARGV[1])
+ return 1
+ end
+ return 0
+ `)
+
+ v1, err := setnx.Run(client, []string{"keynx"}, []string{"foo"}).Result()
+ fmt.Println(v1.(int64), err)
+
+ v2, err := setnx.Run(client, []string{"keynx"}, []string{"bar"}).Result()
+ fmt.Println(v2.(int64), err)
+
+ get := client.Get("keynx")
+ fmt.Println(get)
+
+ // Output: 1 <nil>
+ // 0 <nil>
+ // GET keynx: foo
+}
+
+func Example_customCommand() {
+ Get := func(client *redis.Client, key string) *redis.StringCmd {
+ cmd := redis.NewStringCmd("GET", key)
+ client.Process(cmd)
+ return cmd
+ }
+
+ v, err := Get(client, "key_does_not_exist").Result()
+ fmt.Printf("%q %s", v, err)
+ // Output: "" redis: nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go
new file mode 100644
index 000000000..7f7fa6797
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go
@@ -0,0 +1,5 @@
+package redis
+
+func (c *baseClient) Pool() pool {
+ return c.connPool
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/multi.go b/Godeps/_workspace/src/gopkg.in/redis.v2/multi.go
new file mode 100644
index 000000000..bff38dfaa
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/multi.go
@@ -0,0 +1,138 @@
+package redis
+
+import (
+ "errors"
+ "fmt"
+)
+
+var errDiscard = errors.New("redis: Discard can be used only inside Exec")
+
+// Not thread-safe.
+type Multi struct {
+ *Client
+}
+
+func (c *Client) Multi() *Multi {
+ return &Multi{
+ Client: &Client{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: newSingleConnPool(c.connPool, true),
+ },
+ },
+ }
+}
+
+func (c *Multi) Close() error {
+ if err := c.Unwatch().Err(); err != nil {
+ return err
+ }
+ return c.Client.Close()
+}
+
+func (c *Multi) Watch(keys ...string) *StatusCmd {
+ args := append([]string{"WATCH"}, keys...)
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Multi) Unwatch(keys ...string) *StatusCmd {
+ args := append([]string{"UNWATCH"}, keys...)
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Multi) Discard() error {
+ if c.cmds == nil {
+ return errDiscard
+ }
+ c.cmds = c.cmds[:1]
+ return nil
+}
+
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the
+// first failed command, or nil.
+func (c *Multi) Exec(f func() error) ([]Cmder, error) {
+ c.cmds = []Cmder{NewStatusCmd("MULTI")}
+ if err := f(); err != nil {
+ return nil, err
+ }
+ c.cmds = append(c.cmds, NewSliceCmd("EXEC"))
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ if len(cmds) == 2 {
+ return []Cmder{}, nil
+ }
+
+ cn, err := c.conn()
+ if err != nil {
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return cmds[1 : len(cmds)-1], err
+ }
+
+ err = c.execCmds(cn, cmds)
+ if err != nil {
+ c.freeConn(cn, err)
+ return cmds[1 : len(cmds)-1], err
+ }
+
+ c.putConn(cn)
+ return cmds[1 : len(cmds)-1], nil
+}
+
+func (c *Multi) execCmds(cn *conn, cmds []Cmder) error {
+ err := c.writeCmd(cn, cmds...)
+ if err != nil {
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return err
+ }
+
+ statusCmd := NewStatusCmd()
+
+ // Omit last command (EXEC).
+ cmdsLen := len(cmds) - 1
+
+ // Parse queued replies.
+ for i := 0; i < cmdsLen; i++ {
+ if err := statusCmd.parseReply(cn.rd); err != nil {
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := readLine(cn.rd)
+ if err != nil {
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return err
+ }
+ if line[0] != '*' {
+ err := fmt.Errorf("redis: expected '*', but got line %q", line)
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return err
+ }
+ if len(line) == 3 && line[1] == '-' && line[2] == '1' {
+ setCmdsErr(cmds[1:len(cmds)-1], TxFailedErr)
+ return TxFailedErr
+ }
+
+ var firstCmdErr error
+
+ // Parse replies.
+ // Loop starts from 1 to omit MULTI cmd.
+ for i := 1; i < cmdsLen; i++ {
+ cmd := cmds[i]
+ if err := cmd.parseReply(cn.rd); err != nil {
+ if firstCmdErr == nil {
+ firstCmdErr = err
+ }
+ }
+ }
+
+ return firstCmdErr
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/parser.go b/Godeps/_workspace/src/gopkg.in/redis.v2/parser.go
new file mode 100644
index 000000000..b4c380c76
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/parser.go
@@ -0,0 +1,262 @@
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+
+ "gopkg.in/bufio.v1"
+)
+
+type multiBulkParser func(rd *bufio.Reader, n int64) (interface{}, error)
+
+var (
+ errReaderTooSmall = errors.New("redis: reader is too small")
+)
+
+//------------------------------------------------------------------------------
+
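+// appendArgs encodes args as a Redis multi-bulk request: a "*<count>\r\n"
+// header followed by "$<len>\r\n<arg>\r\n" for every argument.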
+func appendArgs(buf []byte, args []string) []byte {
+ buf = append(buf, '*')
+ buf = strconv.AppendUint(buf, uint64(len(args)), 10)
+ buf = append(buf, '\r', '\n')
+ for _, arg := range args {
+ buf = append(buf, '$')
+ buf = strconv.AppendUint(buf, uint64(len(arg)), 10)
+ buf = append(buf, '\r', '\n')
+ buf = append(buf, arg...)
+ buf = append(buf, '\r', '\n')
+ }
+ return buf
+}
+
+//------------------------------------------------------------------------------
+
+func readLine(rd *bufio.Reader) ([]byte, error) {
+ line, isPrefix, err := rd.ReadLine()
+ if err != nil {
+ return line, err
+ }
+ if isPrefix {
+ return line, errReaderTooSmall
+ }
+ return line, nil
+}
+
+func readN(rd *bufio.Reader, n int) ([]byte, error) {
+ b, err := rd.ReadN(n)
+ if err == bufio.ErrBufferFull {
+ tmp := make([]byte, n)
+ r := copy(tmp, b)
+ b = tmp
+
+ for {
+ nn, err := rd.Read(b[r:])
+ r += nn
+ if r >= n {
+ // Ignore error if we read enough.
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ } else if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+//------------------------------------------------------------------------------
+
+func parseReq(rd *bufio.Reader) ([]string, error) {
+ line, err := readLine(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ if line[0] != '*' {
+ return []string{string(line)}, nil
+ }
+ numReplies, err := strconv.ParseInt(string(line[1:]), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ args := make([]string, 0, numReplies)
+ for i := int64(0); i < numReplies; i++ {
+ line, err = readLine(rd)
+ if err != nil {
+ return nil, err
+ }
+ if line[0] != '$' {
+ return nil, fmt.Errorf("redis: expected '$', but got %q", line)
+ }
+
+ argLen, err := strconv.ParseInt(string(line[1:]), 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ arg, err := readN(rd, int(argLen)+2)
+ if err != nil {
+ return nil, err
+ }
+ args = append(args, string(arg[:argLen]))
+ }
+ return args, nil
+}
+
+//------------------------------------------------------------------------------
+
+func parseReply(rd *bufio.Reader, p multiBulkParser) (interface{}, error) {
+ line, err := readLine(rd)
+ if err != nil {
+ return nil, err
+ }
+
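+ // The first byte of a Redis reply identifies its type: '-' error,
+ // '+' status, ':' integer, '$' bulk string, '*' multi-bulk.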
+ switch line[0] {
+ case '-':
+ return nil, errorf(string(line[1:]))
+ case '+':
+ return string(line[1:]), nil
+ case ':':
+ v, err := strconv.ParseInt(string(line[1:]), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
+ case '$':
+ if len(line) == 3 && line[1] == '-' && line[2] == '1' {
+ return nil, Nil
+ }
+
+ replyLen, err := strconv.Atoi(string(line[1:]))
+ if err != nil {
+ return nil, err
+ }
+
+ b, err := readN(rd, replyLen+2)
+ if err != nil {
+ return nil, err
+ }
+ return string(b[:replyLen]), nil
+ case '*':
+ if len(line) == 3 && line[1] == '-' && line[2] == '1' {
+ return nil, Nil
+ }
+
+ repliesNum, err := strconv.ParseInt(string(line[1:]), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return p(rd, repliesNum)
+ }
+ return nil, fmt.Errorf("redis: can't parse %q", line)
+}
+
+func parseSlice(rd *bufio.Reader, n int64) (interface{}, error) {
+ vals := make([]interface{}, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := parseReply(rd, parseSlice)
+ if err == Nil {
+ vals = append(vals, nil)
+ } else if err != nil {
+ return nil, err
+ } else {
+ vals = append(vals, v)
+ }
+ }
+ return vals, nil
+}
+
+func parseStringSlice(rd *bufio.Reader, n int64) (interface{}, error) {
+ vals := make([]string, 0, n)
+ for i := int64(0); i < n; i++ {
+ viface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ v, ok := viface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", viface)
+ }
+ vals = append(vals, v)
+ }
+ return vals, nil
+}
+
+func parseBoolSlice(rd *bufio.Reader, n int64) (interface{}, error) {
+ vals := make([]bool, 0, n)
+ for i := int64(0); i < n; i++ {
+ viface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ v, ok := viface.(int64)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected int64", viface)
+ }
+ vals = append(vals, v == 1)
+ }
+ return vals, nil
+}
+
+func parseStringStringMap(rd *bufio.Reader, n int64) (interface{}, error) {
+ m := make(map[string]string, n/2)
+ for i := int64(0); i < n; i += 2 {
+ keyiface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ key, ok := keyiface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", keyiface)
+ }
+
+ valueiface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ value, ok := valueiface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", valueiface)
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+func parseZSlice(rd *bufio.Reader, n int64) (interface{}, error) {
+ zz := make([]Z, n/2)
+ for i := int64(0); i < n; i += 2 {
+ z := &zz[i/2]
+
+ memberiface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ member, ok := memberiface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", memberiface)
+ }
+ z.Member = member
+
+ scoreiface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ scorestr, ok := scoreiface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", scoreiface)
+ }
+ score, err := strconv.ParseFloat(scorestr, 64)
+ if err != nil {
+ return nil, err
+ }
+ z.Score = score
+ }
+ return zz, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go
new file mode 100644
index 000000000..1b9e15810
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go
@@ -0,0 +1,54 @@
+package redis
+
+import (
+ "testing"
+
+ "gopkg.in/bufio.v1"
+)
+
+func BenchmarkParseReplyStatus(b *testing.B) {
+ benchmarkParseReply(b, "+OK\r\n", nil, false)
+}
+
+func BenchmarkParseReplyInt(b *testing.B) {
+ benchmarkParseReply(b, ":1\r\n", nil, false)
+}
+
+func BenchmarkParseReplyError(b *testing.B) {
+ benchmarkParseReply(b, "-Error message\r\n", nil, true)
+}
+
+func BenchmarkParseReplyString(b *testing.B) {
+ benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false)
+}
+
+func BenchmarkParseReplySlice(b *testing.B) {
+ benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", parseSlice, false)
+}
+
+func benchmarkParseReply(b *testing.B, reply string, p multiBulkParser, wanterr bool) {
+ b.StopTimer()
+
+ buf := &bufio.Buffer{}
+ rd := bufio.NewReader(buf)
+ for i := 0; i < b.N; i++ {
+ buf.WriteString(reply)
+ }
+
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, err := parseReply(rd, p)
+ if !wanterr && err != nil {
+ panic(err)
+ }
+ }
+}
+
+func BenchmarkAppendArgs(b *testing.B) {
+ buf := make([]byte, 0, 64)
+ args := []string{"hello", "world", "foo", "bar"}
+ for i := 0; i < b.N; i++ {
+ appendArgs(buf, args)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go b/Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go
new file mode 100644
index 000000000..540d6c51d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go
@@ -0,0 +1,91 @@
+package redis
+
+// Not thread-safe.
+type Pipeline struct {
+ *Client
+
+ closed bool
+}
+
+func (c *Client) Pipeline() *Pipeline {
+ return &Pipeline{
+ Client: &Client{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: c.connPool,
+
+ cmds: make([]Cmder, 0),
+ },
+ },
+ }
+}
+
+func (c *Client) Pipelined(f func(*Pipeline) error) ([]Cmder, error) {
+ pc := c.Pipeline()
+ if err := f(pc); err != nil {
+ return nil, err
+ }
+ cmds, err := pc.Exec()
+ pc.Close()
+ return cmds, err
+}
+
+func (c *Pipeline) Close() error {
+ c.closed = true
+ return nil
+}
+
+func (c *Pipeline) Discard() error {
+ if c.closed {
+ return errClosed
+ }
+ c.cmds = c.cmds[:0]
+ return nil
+}
+
+// Exec always returns a list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec() ([]Cmder, error) {
+ if c.closed {
+ return nil, errClosed
+ }
+
+ cmds := c.cmds
+ c.cmds = make([]Cmder, 0)
+
+ if len(cmds) == 0 {
+ return []Cmder{}, nil
+ }
+
+ cn, err := c.conn()
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return cmds, err
+ }
+
+ if err := c.execCmds(cn, cmds); err != nil {
+ c.freeConn(cn, err)
+ return cmds, err
+ }
+
+ c.putConn(cn)
+ return cmds, nil
+}
+
+func (c *Pipeline) execCmds(cn *conn, cmds []Cmder) error {
+ if err := c.writeCmd(cn, cmds...); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var firstCmdErr error
+ for _, cmd := range cmds {
+ if err := cmd.parseReply(cn.rd); err != nil {
+ if firstCmdErr == nil {
+ firstCmdErr = err
+ }
+ }
+ }
+
+ return firstCmdErr
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/pool.go b/Godeps/_workspace/src/gopkg.in/redis.v2/pool.go
new file mode 100644
index 000000000..bca4d1963
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/pool.go
@@ -0,0 +1,405 @@
+package redis
+
+import (
+ "container/list"
+ "errors"
+ "log"
+ "net"
+ "sync"
+ "time"
+
+ "gopkg.in/bufio.v1"
+)
+
+var (
+ errClosed = errors.New("redis: client is closed")
+ errRateLimited = errors.New("redis: you open connections too fast")
+)
+
+var (
+ zeroTime = time.Time{}
+)
+
+type pool interface {
+ Get() (*conn, bool, error)
+ Put(*conn) error
+ Remove(*conn) error
+ Len() int
+ Size() int
+ Close() error
+ Filter(func(*conn) bool)
+}
+
+//------------------------------------------------------------------------------
+
+type conn struct {
+ netcn net.Conn
+ rd *bufio.Reader
+ buf []byte
+
+ inUse bool
+ usedAt time.Time
+
+ readTimeout time.Duration
+ writeTimeout time.Duration
+
+ elem *list.Element
+}
+
+func newConnFunc(dial func() (net.Conn, error)) func() (*conn, error) {
+ return func() (*conn, error) {
+ netcn, err := dial()
+ if err != nil {
+ return nil, err
+ }
+ cn := &conn{
+ netcn: netcn,
+ buf: make([]byte, 0, 64),
+ }
+ cn.rd = bufio.NewReader(cn)
+ return cn, nil
+ }
+}
+
+func (cn *conn) Read(b []byte) (int, error) {
+ if cn.readTimeout != 0 {
+ cn.netcn.SetReadDeadline(time.Now().Add(cn.readTimeout))
+ } else {
+ cn.netcn.SetReadDeadline(zeroTime)
+ }
+ return cn.netcn.Read(b)
+}
+
+func (cn *conn) Write(b []byte) (int, error) {
+ if cn.writeTimeout != 0 {
+ cn.netcn.SetWriteDeadline(time.Now().Add(cn.writeTimeout))
+ } else {
+ cn.netcn.SetWriteDeadline(zeroTime)
+ }
+ return cn.netcn.Write(b)
+}
+
+func (cn *conn) RemoteAddr() net.Addr {
+ return cn.netcn.RemoteAddr()
+}
+
+func (cn *conn) Close() error {
+ return cn.netcn.Close()
+}
+
+//------------------------------------------------------------------------------
+
+type connPool struct {
+ dial func() (*conn, error)
+ rl *rateLimiter
+
+ opt *options
+
+ cond *sync.Cond
+ conns *list.List
+
+ idleNum int
+ closed bool
+}
+
+func newConnPool(dial func() (*conn, error), opt *options) *connPool {
+ return &connPool{
+ dial: dial,
+ rl: newRateLimiter(time.Second, 2*opt.PoolSize),
+
+ opt: opt,
+
+ cond: sync.NewCond(&sync.Mutex{}),
+ conns: list.New(),
+ }
+}
+
+func (p *connPool) new() (*conn, error) {
+ if !p.rl.Check() {
+ return nil, errRateLimited
+ }
+ return p.dial()
+}
+
+func (p *connPool) Get() (*conn, bool, error) {
+ p.cond.L.Lock()
+
+ if p.closed {
+ p.cond.L.Unlock()
+ return nil, false, errClosed
+ }
+
+ if p.opt.IdleTimeout > 0 {
+ for el := p.conns.Front(); el != nil; el = el.Next() {
+ cn := el.Value.(*conn)
+ if cn.inUse {
+ break
+ }
+ if time.Since(cn.usedAt) > p.opt.IdleTimeout {
+ if err := p.remove(cn); err != nil {
+ log.Printf("remove failed: %s", err)
+ }
+ }
+ }
+ }
+
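+ // Block until a connection becomes idle or there is room to dial a new one.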
+ for p.conns.Len() >= p.opt.PoolSize && p.idleNum == 0 {
+ p.cond.Wait()
+ }
+
+ if p.idleNum > 0 {
+ elem := p.conns.Front()
+ cn := elem.Value.(*conn)
+ if cn.inUse {
+ panic("pool: precondition failed")
+ }
+ cn.inUse = true
+ p.conns.MoveToBack(elem)
+ p.idleNum--
+
+ p.cond.L.Unlock()
+ return cn, false, nil
+ }
+
+ if p.conns.Len() < p.opt.PoolSize {
+ cn, err := p.new()
+ if err != nil {
+ p.cond.L.Unlock()
+ return nil, false, err
+ }
+
+ cn.inUse = true
+ cn.elem = p.conns.PushBack(cn)
+
+ p.cond.L.Unlock()
+ return cn, true, nil
+ }
+
+ panic("not reached")
+}
+
+func (p *connPool) Put(cn *conn) error {
+ if cn.rd.Buffered() != 0 {
+ b, _ := cn.rd.ReadN(cn.rd.Buffered())
+ log.Printf("redis: connection has unread data: %q", b)
+ return p.Remove(cn)
+ }
+
+ if p.opt.IdleTimeout > 0 {
+ cn.usedAt = time.Now()
+ }
+
+ p.cond.L.Lock()
+ if p.closed {
+ p.cond.L.Unlock()
+ return errClosed
+ }
+ cn.inUse = false
+ p.conns.MoveToFront(cn.elem)
+ p.idleNum++
+ p.cond.Signal()
+ p.cond.L.Unlock()
+
+ return nil
+}
+
+func (p *connPool) Remove(cn *conn) error {
+ p.cond.L.Lock()
+ if p.closed {
+ // Noop, connection is already closed.
+ p.cond.L.Unlock()
+ return nil
+ }
+ err := p.remove(cn)
+ p.cond.Signal()
+ p.cond.L.Unlock()
+ return err
+}
+
+func (p *connPool) remove(cn *conn) error {
+ p.conns.Remove(cn.elem)
+ cn.elem = nil
+ if !cn.inUse {
+ p.idleNum--
+ }
+ return cn.Close()
+}
+
+// Len returns the number of idle connections.
+func (p *connPool) Len() int {
+ defer p.cond.L.Unlock()
+ p.cond.L.Lock()
+ return p.idleNum
+}
+
+// Size returns the number of connections in the pool.
+func (p *connPool) Size() int {
+ defer p.cond.L.Unlock()
+ p.cond.L.Lock()
+ return p.conns.Len()
+}
+
+func (p *connPool) Filter(f func(*conn) bool) {
+ p.cond.L.Lock()
+ for el, next := p.conns.Front(), p.conns.Front(); el != nil; el = next {
+ next = el.Next()
+ cn := el.Value.(*conn)
+ if !f(cn) {
+ p.remove(cn)
+ }
+ }
+ p.cond.L.Unlock()
+}
+
+func (p *connPool) Close() error {
+ defer p.cond.L.Unlock()
+ p.cond.L.Lock()
+ if p.closed {
+ return nil
+ }
+ p.closed = true
+ p.rl.Close()
+ var retErr error
+ for {
+ e := p.conns.Front()
+ if e == nil {
+ break
+ }
+ if err := p.remove(e.Value.(*conn)); err != nil {
+ log.Printf("cn.Close failed: %s", err)
+ retErr = err
+ }
+ }
+ return retErr
+}
+
+//------------------------------------------------------------------------------
+
+type singleConnPool struct {
+ pool pool
+
+ cnMtx sync.Mutex
+ cn *conn
+
+ reusable bool
+
+ closed bool
+}
+
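+// newSingleConnPool serves a single connection obtained from the wrapped
+// pool. When reusable is true the connection is returned to the wrapped
+// pool on Close; otherwise it is removed.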
+func newSingleConnPool(pool pool, reusable bool) *singleConnPool {
+ return &singleConnPool{
+ pool: pool,
+ reusable: reusable,
+ }
+}
+
+func (p *singleConnPool) SetConn(cn *conn) {
+ p.cnMtx.Lock()
+ p.cn = cn
+ p.cnMtx.Unlock()
+}
+
+func (p *singleConnPool) Get() (*conn, bool, error) {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+
+ if p.closed {
+ return nil, false, errClosed
+ }
+ if p.cn != nil {
+ return p.cn, false, nil
+ }
+
+ cn, isNew, err := p.pool.Get()
+ if err != nil {
+ return nil, false, err
+ }
+ p.cn = cn
+
+ return p.cn, isNew, nil
+}
+
+func (p *singleConnPool) Put(cn *conn) error {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.cn != cn {
+ panic("p.cn != cn")
+ }
+ if p.closed {
+ return errClosed
+ }
+ return nil
+}
+
+func (p *singleConnPool) put() error {
+ err := p.pool.Put(p.cn)
+ p.cn = nil
+ return err
+}
+
+func (p *singleConnPool) Remove(cn *conn) error {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.cn == nil {
+ panic("p.cn == nil")
+ }
+ if p.cn != cn {
+ panic("p.cn != cn")
+ }
+ if p.closed {
+ return errClosed
+ }
+ return p.remove()
+}
+
+func (p *singleConnPool) remove() error {
+ err := p.pool.Remove(p.cn)
+ p.cn = nil
+ return err
+}
+
+func (p *singleConnPool) Len() int {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.cn == nil {
+ return 0
+ }
+ return 1
+}
+
+func (p *singleConnPool) Size() int {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.cn == nil {
+ return 0
+ }
+ return 1
+}
+
+func (p *singleConnPool) Filter(f func(*conn) bool) {
+ p.cnMtx.Lock()
+ if p.cn != nil {
+ if !f(p.cn) {
+ p.remove()
+ }
+ }
+ p.cnMtx.Unlock()
+}
+
+func (p *singleConnPool) Close() error {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.closed {
+ return nil
+ }
+ p.closed = true
+ var err error
+ if p.cn != nil {
+ if p.reusable {
+ err = p.put()
+ } else {
+ err = p.remove()
+ }
+ }
+ return err
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go b/Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go
new file mode 100644
index 000000000..6ac130bac
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go
@@ -0,0 +1,134 @@
+package redis
+
+import (
+ "fmt"
+ "time"
+)
+
+// Not thread-safe.
+type PubSub struct {
+ *baseClient
+}
+
+func (c *Client) PubSub() *PubSub {
+ return &PubSub{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: newSingleConnPool(c.connPool, false),
+ },
+ }
+}
+
+func (c *Client) Publish(channel, message string) *IntCmd {
+ req := NewIntCmd("PUBLISH", channel, message)
+ c.Process(req)
+ return req
+}
+
+type Message struct {
+ Channel string
+ Payload string
+}
+
+func (m *Message) String() string {
+ return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+type PMessage struct {
+ Channel string
+ Pattern string
+ Payload string
+}
+
+func (m *PMessage) String() string {
+ return fmt.Sprintf("PMessage<%s: %s>", m.Channel, m.Payload)
+}
+
+type Subscription struct {
+ Kind string
+ Channel string
+ Count int
+}
+
+func (m *Subscription) String() string {
+ return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+func (c *PubSub) Receive() (interface{}, error) {
+ return c.ReceiveTimeout(0)
+}
+
+func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
+ cn, err := c.conn()
+ if err != nil {
+ return nil, err
+ }
+ cn.readTimeout = timeout
+
+ cmd := NewSliceCmd()
+ if err := cmd.parseReply(cn.rd); err != nil {
+ return nil, err
+ }
+
+ reply := cmd.Val()
+
+ msgName := reply[0].(string)
+ switch msgName {
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ return &Subscription{
+ Kind: msgName,
+ Channel: reply[1].(string),
+ Count: int(reply[2].(int64)),
+ }, nil
+ case "message":
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: reply[2].(string),
+ }, nil
+ case "pmessage":
+ return &PMessage{
+ Pattern: reply[1].(string),
+ Channel: reply[2].(string),
+ Payload: reply[3].(string),
+ }, nil
+ }
+ return nil, fmt.Errorf("redis: unsupported message name: %q", msgName)
+}
+
+func (c *PubSub) subscribe(cmd string, channels ...string) error {
+ cn, err := c.conn()
+ if err != nil {
+ return err
+ }
+
+ args := append([]string{cmd}, channels...)
+ req := NewSliceCmd(args...)
+ return c.writeCmd(cn, req)
+}
+
+func (c *PubSub) Subscribe(channels ...string) error {
+ return c.subscribe("SUBSCRIBE", channels...)
+}
+
+func (c *PubSub) PSubscribe(patterns ...string) error {
+ return c.subscribe("PSUBSCRIBE", patterns...)
+}
+
+func (c *PubSub) unsubscribe(cmd string, channels ...string) error {
+ cn, err := c.conn()
+ if err != nil {
+ return err
+ }
+
+ args := append([]string{cmd}, channels...)
+ req := NewSliceCmd(args...)
+ return c.writeCmd(cn, req)
+}
+
+func (c *PubSub) Unsubscribe(channels ...string) error {
+ return c.unsubscribe("UNSUBSCRIBE", channels...)
+}
+
+func (c *PubSub) PUnsubscribe(patterns ...string) error {
+ return c.unsubscribe("PUNSUBSCRIBE", patterns...)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go
new file mode 100644
index 000000000..20d851270
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go
@@ -0,0 +1,53 @@
+package redis
+
+import (
+ "sync/atomic"
+ "time"
+)
+
+type rateLimiter struct {
+ v int64
+
+ _closed int64
+}
+
+func newRateLimiter(limit time.Duration, bucketSize int) *rateLimiter {
+ rl := &rateLimiter{
+ v: int64(bucketSize),
+ }
+ go rl.loop(limit, int64(bucketSize))
+ return rl
+}
+
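+// loop refills the bucket with one token per limit interval until the
+// limiter is closed.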
+func (rl *rateLimiter) loop(limit time.Duration, bucketSize int64) {
+ for {
+ if rl.closed() {
+ break
+ }
+ if v := atomic.LoadInt64(&rl.v); v < bucketSize {
+ atomic.AddInt64(&rl.v, 1)
+ }
+ time.Sleep(limit)
+ }
+}
+
+func (rl *rateLimiter) Check() bool {
+ for {
+ if v := atomic.LoadInt64(&rl.v); v > 0 {
+ if atomic.CompareAndSwapInt64(&rl.v, v, v-1) {
+ return true
+ }
+ } else {
+ return false
+ }
+ }
+}
+
+func (rl *rateLimiter) Close() error {
+ atomic.StoreInt64(&rl._closed, 1)
+ return nil
+}
+
+func (rl *rateLimiter) closed() bool {
+ return atomic.LoadInt64(&rl._closed) == 1
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go
new file mode 100644
index 000000000..2f0d41a2e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go
@@ -0,0 +1,31 @@
+package redis
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestRateLimiter(t *testing.T) {
+ var n = 100000
+ if testing.Short() {
+ n = 1000
+ }
+ rl := newRateLimiter(time.Minute, n)
+
+ wg := &sync.WaitGroup{}
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ go func() {
+ if !rl.Check() {
+ panic("check failed")
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ if rl.Check() && rl.Check() {
+ t.Fatal("check passed")
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/redis.go b/Godeps/_workspace/src/gopkg.in/redis.v2/redis.go
new file mode 100644
index 000000000..0d15dc8f8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/redis.go
@@ -0,0 +1,231 @@
+package redis
+
+import (
+ "log"
+ "net"
+ "time"
+)
+
+type baseClient struct {
+ connPool pool
+ opt *options
+ cmds []Cmder
+}
+
+func (c *baseClient) writeCmd(cn *conn, cmds ...Cmder) error {
+ buf := cn.buf[:0]
+ for _, cmd := range cmds {
+ buf = appendArgs(buf, cmd.args())
+ }
+
+ _, err := cn.Write(buf)
+ return err
+}
+
+func (c *baseClient) conn() (*conn, error) {
+ cn, isNew, err := c.connPool.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ if isNew {
+ if err := c.initConn(cn); err != nil {
+ c.removeConn(cn)
+ return nil, err
+ }
+ }
+
+ return cn, nil
+}
+
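+// initConn authenticates and selects the configured database on a newly
+// established connection, if a password or non-zero DB is set.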
+func (c *baseClient) initConn(cn *conn) error {
+ if c.opt.Password == "" && c.opt.DB == 0 {
+ return nil
+ }
+
+ pool := newSingleConnPool(c.connPool, false)
+ pool.SetConn(cn)
+
+ // Client is not closed because we want to reuse the underlying connection.
+ client := &Client{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: pool,
+ },
+ }
+
+ if c.opt.Password != "" {
+ if err := client.Auth(c.opt.Password).Err(); err != nil {
+ return err
+ }
+ }
+
+ if c.opt.DB > 0 {
+ if err := client.Select(c.opt.DB).Err(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
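+// freeConn puts the connection back into the pool when the error is a
+// Redis-level error; on network or protocol errors, or when unread data
+// remains, the connection is removed instead.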
+func (c *baseClient) freeConn(cn *conn, ei error) error {
+ if cn.rd.Buffered() > 0 {
+ return c.connPool.Remove(cn)
+ }
+ if _, ok := ei.(redisError); ok {
+ return c.connPool.Put(cn)
+ }
+ return c.connPool.Remove(cn)
+}
+
+func (c *baseClient) removeConn(cn *conn) {
+ if err := c.connPool.Remove(cn); err != nil {
+ log.Printf("pool.Remove failed: %s", err)
+ }
+}
+
+func (c *baseClient) putConn(cn *conn) {
+ if err := c.connPool.Put(cn); err != nil {
+ log.Printf("pool.Put failed: %s", err)
+ }
+}
+
+func (c *baseClient) Process(cmd Cmder) {
+ if c.cmds == nil {
+ c.run(cmd)
+ } else {
+ c.cmds = append(c.cmds, cmd)
+ }
+}
+
+func (c *baseClient) run(cmd Cmder) {
+ cn, err := c.conn()
+ if err != nil {
+ cmd.setErr(err)
+ return
+ }
+
+ if timeout := cmd.writeTimeout(); timeout != nil {
+ cn.writeTimeout = *timeout
+ } else {
+ cn.writeTimeout = c.opt.WriteTimeout
+ }
+
+ if timeout := cmd.readTimeout(); timeout != nil {
+ cn.readTimeout = *timeout
+ } else {
+ cn.readTimeout = c.opt.ReadTimeout
+ }
+
+ if err := c.writeCmd(cn, cmd); err != nil {
+ c.freeConn(cn, err)
+ cmd.setErr(err)
+ return
+ }
+
+ if err := cmd.parseReply(cn.rd); err != nil {
+ c.freeConn(cn, err)
+ return
+ }
+
+ c.putConn(cn)
+}
+
+// Close closes the client, releasing any open resources.
+func (c *baseClient) Close() error {
+ return c.connPool.Close()
+}
+
+//------------------------------------------------------------------------------
+
+type options struct {
+ Password string
+ DB int64
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ IdleTimeout time.Duration
+}
+
+type Options struct {
+ Network string
+ Addr string
+
+ // Dialer creates a new network connection and takes priority over
+ // the Network and Addr options.
+ Dialer func() (net.Conn, error)
+
+ Password string
+ DB int64
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ IdleTimeout time.Duration
+}
+
+func (opt *Options) getPoolSize() int {
+ if opt.PoolSize == 0 {
+ return 10
+ }
+ return opt.PoolSize
+}
+
+func (opt *Options) getDialTimeout() time.Duration {
+ if opt.DialTimeout == 0 {
+ return 5 * time.Second
+ }
+ return opt.DialTimeout
+}
+
+func (opt *Options) options() *options {
+ return &options{
+ DB: opt.DB,
+ Password: opt.Password,
+
+ DialTimeout: opt.getDialTimeout(),
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.getPoolSize(),
+ IdleTimeout: opt.IdleTimeout,
+ }
+}
+
+type Client struct {
+ *baseClient
+}
+
+func NewClient(clOpt *Options) *Client {
+ opt := clOpt.options()
+ dialer := clOpt.Dialer
+ if dialer == nil {
+ dialer = func() (net.Conn, error) {
+ return net.DialTimeout(clOpt.Network, clOpt.Addr, opt.DialTimeout)
+ }
+ }
+ return &Client{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: newConnPool(newConnFunc(dialer), opt),
+ },
+ }
+}
+
+// Deprecated. Use NewClient instead.
+func NewTCPClient(opt *Options) *Client {
+ opt.Network = "tcp"
+ return NewClient(opt)
+}
+
+// Deprecated. Use NewClient instead.
+func NewUnixClient(opt *Options) *Client {
+ opt.Network = "unix"
+ return NewClient(opt)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go
new file mode 100644
index 000000000..49f84d0e1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go
@@ -0,0 +1,3333 @@
+package redis_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "sort"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "gopkg.in/redis.v2"
+
+ . "gopkg.in/check.v1"
+)
+
+const redisAddr = ":6379"
+
+//------------------------------------------------------------------------------
+
+func sortStrings(slice []string) []string {
+ sort.Strings(slice)
+ return slice
+}
+
+//------------------------------------------------------------------------------
+
+type RedisConnectorTest struct{}
+
+var _ = Suite(&RedisConnectorTest{})
+
+func (t *RedisConnectorTest) TestShutdown(c *C) {
+ c.Skip("shuts down the server")
+
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+
+ shutdown := client.Shutdown()
+ c.Check(shutdown.Err(), Equals, io.EOF)
+ c.Check(shutdown.Val(), Equals, "")
+
+ ping := client.Ping()
+ c.Check(ping.Err(), ErrorMatches, "dial tcp <nil>:[0-9]+: connection refused")
+ c.Check(ping.Val(), Equals, "")
+}
+
+func (t *RedisConnectorTest) TestNewTCPClient(c *C) {
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+ ping := client.Ping()
+ c.Check(ping.Err(), IsNil)
+ c.Check(ping.Val(), Equals, "PONG")
+ c.Assert(client.Close(), IsNil)
+}
+
+func (t *RedisConnectorTest) TestNewUnixClient(c *C) {
+ c.Skip("not available on Travis CI")
+
+ client := redis.NewUnixClient(&redis.Options{
+ Addr: "/tmp/redis.sock",
+ })
+ ping := client.Ping()
+ c.Check(ping.Err(), IsNil)
+ c.Check(ping.Val(), Equals, "PONG")
+ c.Assert(client.Close(), IsNil)
+}
+
+func (t *RedisConnectorTest) TestDialer(c *C) {
+ client := redis.NewClient(&redis.Options{
+ Dialer: func() (net.Conn, error) {
+ return net.Dial("tcp", redisAddr)
+ },
+ })
+ ping := client.Ping()
+ c.Check(ping.Err(), IsNil)
+ c.Check(ping.Val(), Equals, "PONG")
+ c.Assert(client.Close(), IsNil)
+}
+
+func (t *RedisConnectorTest) TestClose(c *C) {
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+ c.Assert(client.Close(), IsNil)
+
+ ping := client.Ping()
+ c.Assert(ping.Err(), Not(IsNil))
+ c.Assert(ping.Err().Error(), Equals, "redis: client is closed")
+
+ c.Assert(client.Close(), IsNil)
+}
+
+func (t *RedisConnectorTest) TestPubSubClose(c *C) {
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+
+ pubsub := client.PubSub()
+ c.Assert(pubsub.Close(), IsNil)
+
+ _, err := pubsub.Receive()
+ c.Assert(err, Not(IsNil))
+ c.Assert(err.Error(), Equals, "redis: client is closed")
+
+ ping := client.Ping()
+ c.Assert(ping.Err(), IsNil)
+
+ c.Assert(client.Close(), IsNil)
+}
+
+func (t *RedisConnectorTest) TestMultiClose(c *C) {
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+
+ multi := client.Multi()
+ c.Assert(multi.Close(), IsNil)
+
+ _, err := multi.Exec(func() error {
+ multi.Ping()
+ return nil
+ })
+ c.Assert(err, Not(IsNil))
+ c.Assert(err.Error(), Equals, "redis: client is closed")
+
+ ping := client.Ping()
+ c.Assert(ping.Err(), IsNil)
+
+ c.Assert(client.Close(), IsNil)
+}
+
+func (t *RedisConnectorTest) TestPipelineClose(c *C) {
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+
+ _, err := client.Pipelined(func(pipeline *redis.Pipeline) error {
+ c.Assert(pipeline.Close(), IsNil)
+ pipeline.Ping()
+ return nil
+ })
+ c.Assert(err, Not(IsNil))
+ c.Assert(err.Error(), Equals, "redis: client is closed")
+
+ ping := client.Ping()
+ c.Assert(ping.Err(), IsNil)
+
+ c.Assert(client.Close(), IsNil)
+}
+
+func (t *RedisConnectorTest) TestIdleTimeout(c *C) {
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ IdleTimeout: time.Nanosecond,
+ })
+ for i := 0; i < 10; i++ {
+ c.Assert(client.Ping().Err(), IsNil)
+ }
+}
+
+func (t *RedisConnectorTest) TestSelectDb(c *C) {
+ client1 := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ DB: 1,
+ })
+ c.Assert(client1.Set("key", "db1").Err(), IsNil)
+
+ client2 := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ DB: 2,
+ })
+ c.Assert(client2.Get("key").Err(), Equals, redis.Nil)
+}
+
+//------------------------------------------------------------------------------
+
+type RedisConnPoolTest struct {
+ client *redis.Client
+}
+
+var _ = Suite(&RedisConnPoolTest{})
+
+func (t *RedisConnPoolTest) SetUpTest(c *C) {
+ t.client = redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+}
+
+func (t *RedisConnPoolTest) TearDownTest(c *C) {
+ c.Assert(t.client.FlushDb().Err(), IsNil)
+ c.Assert(t.client.Close(), IsNil)
+}
+
+func (t *RedisConnPoolTest) TestConnPoolMaxSize(c *C) {
+ wg := &sync.WaitGroup{}
+ for i := 0; i < 1000; i++ {
+ wg.Add(1)
+ go func() {
+ ping := t.client.Ping()
+ c.Assert(ping.Err(), IsNil)
+ c.Assert(ping.Val(), Equals, "PONG")
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ c.Assert(t.client.Pool().Size(), Equals, 10)
+ c.Assert(t.client.Pool().Len(), Equals, 10)
+}
+
+func (t *RedisConnPoolTest) TestConnPoolMaxSizeOnPipelineClient(c *C) {
+ const N = 1000
+
+ wg := &sync.WaitGroup{}
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go func() {
+ pipeline := t.client.Pipeline()
+ ping := pipeline.Ping()
+ cmds, err := pipeline.Exec()
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 1)
+ c.Assert(ping.Err(), IsNil)
+ c.Assert(ping.Val(), Equals, "PONG")
+
+ c.Assert(pipeline.Close(), IsNil)
+
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ c.Assert(t.client.Pool().Size(), Equals, 10)
+ c.Assert(t.client.Pool().Len(), Equals, 10)
+}
+
+func (t *RedisConnPoolTest) TestConnPoolMaxSizeOnMultiClient(c *C) {
+ const N = 1000
+
+ wg := &sync.WaitGroup{}
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go func() {
+ multi := t.client.Multi()
+ var ping *redis.StatusCmd
+ cmds, err := multi.Exec(func() error {
+ ping = multi.Ping()
+ return nil
+ })
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 1)
+ c.Assert(ping.Err(), IsNil)
+ c.Assert(ping.Val(), Equals, "PONG")
+
+ c.Assert(multi.Close(), IsNil)
+
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ c.Assert(t.client.Pool().Size(), Equals, 10)
+ c.Assert(t.client.Pool().Len(), Equals, 10)
+}
+
+func (t *RedisConnPoolTest) TestConnPoolMaxSizeOnPubSub(c *C) {
+ const N = 10
+
+ wg := &sync.WaitGroup{}
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go func() {
+ defer wg.Done()
+ pubsub := t.client.PubSub()
+ c.Assert(pubsub.Subscribe(), IsNil)
+ c.Assert(pubsub.Close(), IsNil)
+ }()
+ }
+ wg.Wait()
+
+ c.Assert(t.client.Pool().Size(), Equals, 0)
+ c.Assert(t.client.Pool().Len(), Equals, 0)
+}
+
+func (t *RedisConnPoolTest) TestConnPoolRemovesBrokenConn(c *C) {
+ cn, _, err := t.client.Pool().Get()
+ c.Assert(err, IsNil)
+ c.Assert(cn.Close(), IsNil)
+ c.Assert(t.client.Pool().Put(cn), IsNil)
+
+ ping := t.client.Ping()
+ c.Assert(ping.Err().Error(), Equals, "use of closed network connection")
+ c.Assert(ping.Val(), Equals, "")
+
+ ping = t.client.Ping()
+ c.Assert(ping.Err(), IsNil)
+ c.Assert(ping.Val(), Equals, "PONG")
+
+ c.Assert(t.client.Pool().Size(), Equals, 1)
+ c.Assert(t.client.Pool().Len(), Equals, 1)
+}
+
+func (t *RedisConnPoolTest) TestConnPoolReusesConn(c *C) {
+ for i := 0; i < 1000; i++ {
+ ping := t.client.Ping()
+ c.Assert(ping.Err(), IsNil)
+ c.Assert(ping.Val(), Equals, "PONG")
+ }
+
+ c.Assert(t.client.Pool().Size(), Equals, 1)
+ c.Assert(t.client.Pool().Len(), Equals, 1)
+}
+
+//------------------------------------------------------------------------------
+
+type RedisTest struct {
+ client *redis.Client
+}
+
+var _ = Suite(&RedisTest{})
+
+func Test(t *testing.T) { TestingT(t) }
+
+func (t *RedisTest) SetUpTest(c *C) {
+ t.client = redis.NewTCPClient(&redis.Options{
+ Addr: ":6379",
+ })
+
+ // This is much faster than FlushAll.
+ c.Assert(t.client.Select(1).Err(), IsNil)
+ c.Assert(t.client.FlushDb().Err(), IsNil)
+ c.Assert(t.client.Select(0).Err(), IsNil)
+ c.Assert(t.client.FlushDb().Err(), IsNil)
+}
+
+func (t *RedisTest) TearDownTest(c *C) {
+ c.Assert(t.client.Close(), IsNil)
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestCmdStringMethod(c *C) {
+ set := t.client.Set("foo", "bar")
+ c.Assert(set.String(), Equals, "SET foo bar: OK")
+
+ get := t.client.Get("foo")
+ c.Assert(get.String(), Equals, "GET foo: bar")
+}
+
+func (t *RedisTest) TestCmdStringMethodError(c *C) {
+ get2 := t.client.Get("key_does_not_exists")
+ c.Assert(get2.String(), Equals, "GET key_does_not_exists: redis: nil")
+}
+
+func (t *RedisTest) TestRunWithouthCheckingErrVal(c *C) {
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+}
+
+func (t *RedisTest) TestGetSpecChars(c *C) {
+ set := t.client.Set("key", "hello1\r\nhello2\r\n")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello1\r\nhello2\r\n")
+}
+
+func (t *RedisTest) TestGetBigVal(c *C) {
+ val := string(bytes.Repeat([]byte{'*'}, 1<<16))
+
+ set := t.client.Set("key", val)
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, val)
+}
+
+func (t *RedisTest) TestManyKeys(c *C) {
+ var n = 100000
+
+ for i := 0; i < n; i++ {
+ t.client.Set("keys.key"+strconv.Itoa(i), "hello"+strconv.Itoa(i))
+ }
+ keys := t.client.Keys("keys.*")
+ c.Assert(keys.Err(), IsNil)
+ c.Assert(len(keys.Val()), Equals, n)
+}
+
+func (t *RedisTest) TestManyKeys2(c *C) {
+ var n = 100000
+
+ keys := []string{"non-existent-key"}
+ for i := 0; i < n; i++ {
+ key := "keys.key" + strconv.Itoa(i)
+ t.client.Set(key, "hello"+strconv.Itoa(i))
+ keys = append(keys, key)
+ }
+ keys = append(keys, "non-existent-key")
+
+ mget := t.client.MGet(keys...)
+ c.Assert(mget.Err(), IsNil)
+ c.Assert(len(mget.Val()), Equals, n+2)
+ vals := mget.Val()
+ for i := 0; i < n; i++ {
+ c.Assert(vals[i+1], Equals, "hello"+strconv.Itoa(i))
+ }
+ c.Assert(vals[0], Equals, nil)
+ c.Assert(vals[n+1], Equals, nil)
+}
+
+func (t *RedisTest) TestStringCmdHelpers(c *C) {
+ set := t.client.Set("key", "10")
+ c.Assert(set.Err(), IsNil)
+
+ n, err := t.client.Get("key").Int64()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, int64(10))
+
+ un, err := t.client.Get("key").Uint64()
+ c.Assert(err, IsNil)
+ c.Assert(un, Equals, uint64(10))
+
+ f, err := t.client.Get("key").Float64()
+ c.Assert(err, IsNil)
+ c.Assert(f, Equals, float64(10))
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestAuth(c *C) {
+ auth := t.client.Auth("password")
+ c.Assert(auth.Err(), ErrorMatches, "ERR Client sent AUTH, but no password is set")
+ c.Assert(auth.Val(), Equals, "")
+}
+
+func (t *RedisTest) TestEcho(c *C) {
+ echo := t.client.Echo("hello")
+ c.Assert(echo.Err(), IsNil)
+ c.Assert(echo.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestPing(c *C) {
+ ping := t.client.Ping()
+ c.Assert(ping.Err(), IsNil)
+ c.Assert(ping.Val(), Equals, "PONG")
+}
+
+func (t *RedisTest) TestSelect(c *C) {
+ sel := t.client.Select(1)
+ c.Assert(sel.Err(), IsNil)
+ c.Assert(sel.Val(), Equals, "OK")
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestCmdKeysDel(c *C) {
+ set := t.client.Set("key1", "Hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+ set = t.client.Set("key2", "World")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ del := t.client.Del("key1", "key2", "key3")
+ c.Assert(del.Err(), IsNil)
+ c.Assert(del.Val(), Equals, int64(2))
+}
+
+func (t *RedisTest) TestCmdKeysDump(c *C) {
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ dump := t.client.Dump("key")
+ c.Assert(dump.Err(), IsNil)
+ c.Assert(dump.Val(), Equals, "\x00\x05hello\x06\x00\xf5\x9f\xb7\xf6\x90a\x1c\x99")
+}
+
+func (t *RedisTest) TestCmdKeysExists(c *C) {
+ set := t.client.Set("key1", "Hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ exists := t.client.Exists("key1")
+ c.Assert(exists.Err(), IsNil)
+ c.Assert(exists.Val(), Equals, true)
+
+ exists = t.client.Exists("key2")
+ c.Assert(exists.Err(), IsNil)
+ c.Assert(exists.Val(), Equals, false)
+}
+
+func (t *RedisTest) TestCmdKeysExpire(c *C) {
+ set := t.client.Set("key", "Hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ expire := t.client.Expire("key", 10*time.Second)
+ c.Assert(expire.Err(), IsNil)
+ c.Assert(expire.Val(), Equals, true)
+
+ ttl := t.client.TTL("key")
+ c.Assert(ttl.Err(), IsNil)
+ c.Assert(ttl.Val(), Equals, 10*time.Second)
+
+ set = t.client.Set("key", "Hello World")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ ttl = t.client.TTL("key")
+ c.Assert(ttl.Err(), IsNil)
+ c.Assert(ttl.Val() < 0, Equals, true)
+}
+
+func (t *RedisTest) TestCmdKeysExpireAt(c *C) {
+ set := t.client.Set("key", "Hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ exists := t.client.Exists("key")
+ c.Assert(exists.Err(), IsNil)
+ c.Assert(exists.Val(), Equals, true)
+
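+ // Expiring the key at a timestamp in the past deletes it immediately.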
+ expireAt := t.client.ExpireAt("key", time.Now().Add(-time.Hour))
+ c.Assert(expireAt.Err(), IsNil)
+ c.Assert(expireAt.Val(), Equals, true)
+
+ exists = t.client.Exists("key")
+ c.Assert(exists.Err(), IsNil)
+ c.Assert(exists.Val(), Equals, false)
+}
+
+func (t *RedisTest) TestCmdKeysKeys(c *C) {
+ mset := t.client.MSet("one", "1", "two", "2", "three", "3", "four", "4")
+ c.Assert(mset.Err(), IsNil)
+ c.Assert(mset.Val(), Equals, "OK")
+
+ keys := t.client.Keys("*o*")
+ c.Assert(keys.Err(), IsNil)
+ c.Assert(sortStrings(keys.Val()), DeepEquals, []string{"four", "one", "two"})
+
+ keys = t.client.Keys("t??")
+ c.Assert(keys.Err(), IsNil)
+ c.Assert(keys.Val(), DeepEquals, []string{"two"})
+
+ keys = t.client.Keys("*")
+ c.Assert(keys.Err(), IsNil)
+ c.Assert(
+ sortStrings(keys.Val()),
+ DeepEquals,
+ []string{"four", "one", "three", "two"},
+ )
+}
+
+func (t *RedisTest) TestCmdKeysMigrate(c *C) {
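+ // MIGRATE reports NOKEY while the key is missing; once the key exists the call is
+ // expected to fail with IOERR, since presumably nothing listens on port 6380 here.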
+ migrate := t.client.Migrate("localhost", "6380", "key", 0, 0)
+ c.Assert(migrate.Err(), IsNil)
+ c.Assert(migrate.Val(), Equals, "NOKEY")
+
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ migrate = t.client.Migrate("localhost", "6380", "key", 0, 0)
+ c.Assert(migrate.Err(), ErrorMatches, "IOERR error or timeout writing to target instance")
+ c.Assert(migrate.Val(), Equals, "")
+}
+
+func (t *RedisTest) TestCmdKeysMove(c *C) {
+ move := t.client.Move("key", 1)
+ c.Assert(move.Err(), IsNil)
+ c.Assert(move.Val(), Equals, false)
+
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ move = t.client.Move("key", 1)
+ c.Assert(move.Err(), IsNil)
+ c.Assert(move.Val(), Equals, true)
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), Equals, redis.Nil)
+ c.Assert(get.Val(), Equals, "")
+
+ sel := t.client.Select(1)
+ c.Assert(sel.Err(), IsNil)
+ c.Assert(sel.Val(), Equals, "OK")
+
+ get = t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestCmdKeysObject(c *C) {
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ refCount := t.client.ObjectRefCount("key")
+ c.Assert(refCount.Err(), IsNil)
+ c.Assert(refCount.Val(), Equals, int64(1))
+
+ enc := t.client.ObjectEncoding("key")
+ c.Assert(enc.Err(), IsNil)
+ c.Assert(enc.Val(), Equals, "raw")
+
+ idleTime := t.client.ObjectIdleTime("key")
+ c.Assert(idleTime.Err(), IsNil)
+ c.Assert(idleTime.Val(), Equals, time.Duration(0))
+}
+
+func (t *RedisTest) TestCmdKeysPersist(c *C) {
+ set := t.client.Set("key", "Hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ expire := t.client.Expire("key", 10*time.Second)
+ c.Assert(expire.Err(), IsNil)
+ c.Assert(expire.Val(), Equals, true)
+
+ ttl := t.client.TTL("key")
+ c.Assert(ttl.Err(), IsNil)
+ c.Assert(ttl.Val(), Equals, 10*time.Second)
+
+ persist := t.client.Persist("key")
+ c.Assert(persist.Err(), IsNil)
+ c.Assert(persist.Val(), Equals, true)
+
+ ttl = t.client.TTL("key")
+ c.Assert(ttl.Err(), IsNil)
+ c.Assert(ttl.Val() < 0, Equals, true)
+}
+
+func (t *RedisTest) TestCmdKeysPExpire(c *C) {
+ set := t.client.Set("key", "Hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ expiration := 900 * time.Millisecond
+ pexpire := t.client.PExpire("key", expiration)
+ c.Assert(pexpire.Err(), IsNil)
+ c.Assert(pexpire.Val(), Equals, true)
+
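+ // TTL is reported in whole seconds, so the 900ms expiration shows up as 1s;
+ // PTTL below keeps millisecond precision.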
+ ttl := t.client.TTL("key")
+ c.Assert(ttl.Err(), IsNil)
+ c.Assert(ttl.Val(), Equals, time.Second)
+
+ pttl := t.client.PTTL("key")
+ c.Assert(pttl.Err(), IsNil)
+ c.Assert(pttl.Val() <= expiration, Equals, true)
+ c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true)
+}
+
+func (t *RedisTest) TestCmdKeysPExpireAt(c *C) {
+ set := t.client.Set("key", "Hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ expiration := 900 * time.Millisecond
+ pexpireat := t.client.PExpireAt("key", time.Now().Add(expiration))
+ c.Assert(pexpireat.Err(), IsNil)
+ c.Assert(pexpireat.Val(), Equals, true)
+
+ ttl := t.client.TTL("key")
+ c.Assert(ttl.Err(), IsNil)
+ c.Assert(ttl.Val(), Equals, time.Second)
+
+ pttl := t.client.PTTL("key")
+ c.Assert(pttl.Err(), IsNil)
+ c.Assert(pttl.Val() <= expiration, Equals, true)
+ c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true)
+}
+
+func (t *RedisTest) TestCmdKeysPTTL(c *C) {
+ set := t.client.Set("key", "Hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ expiration := time.Second
+ expire := t.client.Expire("key", expiration)
+ c.Assert(expire.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ pttl := t.client.PTTL("key")
+ c.Assert(pttl.Err(), IsNil)
+ c.Assert(pttl.Val() <= expiration, Equals, true)
+ c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true)
+}
+
+func (t *RedisTest) TestCmdKeysRandomKey(c *C) {
+ randomKey := t.client.RandomKey()
+ c.Assert(randomKey.Err(), Equals, redis.Nil)
+ c.Assert(randomKey.Val(), Equals, "")
+
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ randomKey = t.client.RandomKey()
+ c.Assert(randomKey.Err(), IsNil)
+ c.Assert(randomKey.Val(), Equals, "key")
+}
+
+func (t *RedisTest) TestCmdKeysRename(c *C) {
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ status := t.client.Rename("key", "key1")
+ c.Assert(status.Err(), IsNil)
+ c.Assert(status.Val(), Equals, "OK")
+
+ get := t.client.Get("key1")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestCmdKeysRenameNX(c *C) {
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ renameNX := t.client.RenameNX("key", "key1")
+ c.Assert(renameNX.Err(), IsNil)
+ c.Assert(renameNX.Val(), Equals, true)
+
+ get := t.client.Get("key1")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestCmdKeysRestore(c *C) {
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ dump := t.client.Dump("key")
+ c.Assert(dump.Err(), IsNil)
+
+ del := t.client.Del("key")
+ c.Assert(del.Err(), IsNil)
+
+ restore := t.client.Restore("key", 0, dump.Val())
+ c.Assert(restore.Err(), IsNil)
+ c.Assert(restore.Val(), Equals, "OK")
+
+ type_ := t.client.Type("key")
+ c.Assert(type_.Err(), IsNil)
+ c.Assert(type_.Val(), Equals, "string")
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestCmdKeysSort(c *C) {
+ lPush := t.client.LPush("list", "1")
+ c.Assert(lPush.Err(), IsNil)
+ c.Assert(lPush.Val(), Equals, int64(1))
+ lPush = t.client.LPush("list", "3")
+ c.Assert(lPush.Err(), IsNil)
+ c.Assert(lPush.Val(), Equals, int64(2))
+ lPush = t.client.LPush("list", "2")
+ c.Assert(lPush.Err(), IsNil)
+ c.Assert(lPush.Val(), Equals, int64(3))
+
+ sort := t.client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"})
+ c.Assert(sort.Err(), IsNil)
+ c.Assert(sort.Val(), DeepEquals, []string{"1", "2"})
+}
+
+func (t *RedisTest) TestCmdKeysSortBy(c *C) {
+ lPush := t.client.LPush("list", "1")
+ c.Assert(lPush.Err(), IsNil)
+ c.Assert(lPush.Val(), Equals, int64(1))
+ lPush = t.client.LPush("list", "3")
+ c.Assert(lPush.Err(), IsNil)
+ c.Assert(lPush.Val(), Equals, int64(2))
+ lPush = t.client.LPush("list", "2")
+ c.Assert(lPush.Err(), IsNil)
+ c.Assert(lPush.Val(), Equals, int64(3))
+
+ set := t.client.Set("weight_1", "5")
+ c.Assert(set.Err(), IsNil)
+ set = t.client.Set("weight_2", "2")
+ c.Assert(set.Err(), IsNil)
+ set = t.client.Set("weight_3", "8")
+ c.Assert(set.Err(), IsNil)
+
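+ // SORT BY weight_* orders the members by their external weights (5, 2, 8),
+ // so the two smallest weights yield "2" and "1".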
+ sort := t.client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC", By: "weight_*"})
+ c.Assert(sort.Err(), IsNil)
+ c.Assert(sort.Val(), DeepEquals, []string{"2", "1"})
+}
+
+func (t *RedisTest) TestCmdKeysTTL(c *C) {
+ ttl := t.client.TTL("key")
+ c.Assert(ttl.Err(), IsNil)
+ c.Assert(ttl.Val() < 0, Equals, true)
+
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ expire := t.client.Expire("key", 60*time.Second)
+ c.Assert(expire.Err(), IsNil)
+ c.Assert(expire.Val(), Equals, true)
+
+ ttl = t.client.TTL("key")
+ c.Assert(ttl.Err(), IsNil)
+ c.Assert(ttl.Val(), Equals, 60*time.Second)
+}
+
+func (t *RedisTest) TestCmdKeysType(c *C) {
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ type_ := t.client.Type("key")
+ c.Assert(type_.Err(), IsNil)
+ c.Assert(type_.Val(), Equals, "string")
+}
+
+func (t *RedisTest) TestCmdScan(c *C) {
+ for i := 0; i < 1000; i++ {
+ set := t.client.Set(fmt.Sprintf("key%d", i), "hello")
+ c.Assert(set.Err(), IsNil)
+ }
+
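+ // With 1000 keys a single SCAN page should not return everything, so the
+ // returned cursor is expected to be non-zero.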
+ cursor, keys, err := t.client.Scan(0, "", 0).Result()
+ c.Assert(err, IsNil)
+ c.Assert(cursor > 0, Equals, true)
+ c.Assert(len(keys) > 0, Equals, true)
+}
+
+func (t *RedisTest) TestCmdSScan(c *C) {
+ for i := 0; i < 1000; i++ {
+ sadd := t.client.SAdd("myset", fmt.Sprintf("member%d", i))
+ c.Assert(sadd.Err(), IsNil)
+ }
+
+ cursor, keys, err := t.client.SScan("myset", 0, "", 0).Result()
+ c.Assert(err, IsNil)
+ c.Assert(cursor > 0, Equals, true)
+ c.Assert(len(keys) > 0, Equals, true)
+}
+
+func (t *RedisTest) TestCmdHScan(c *C) {
+ for i := 0; i < 1000; i++ {
+ hset := t.client.HSet("myhash", fmt.Sprintf("key%d", i), "hello")
+ c.Assert(hset.Err(), IsNil)
+ }
+
+ cursor, keys, err := t.client.HScan("myhash", 0, "", 0).Result()
+ c.Assert(err, IsNil)
+ c.Assert(cursor > 0, Equals, true)
+ c.Assert(len(keys) > 0, Equals, true)
+}
+
+func (t *RedisTest) TestCmdZScan(c *C) {
+ for i := 0; i < 1000; i++ {
+ zadd := t.client.ZAdd("myset", redis.Z{float64(i), fmt.Sprintf("member%d", i)})
+ c.Assert(zadd.Err(), IsNil)
+ }
+
+ cursor, keys, err := t.client.ZScan("myset", 0, "", 0).Result()
+ c.Assert(err, IsNil)
+ c.Assert(cursor > 0, Equals, true)
+ c.Assert(len(keys) > 0, Equals, true)
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestStringsAppend(c *C) {
+ exists := t.client.Exists("key")
+ c.Assert(exists.Err(), IsNil)
+ c.Assert(exists.Val(), Equals, false)
+
+ appendCmd := t.client.Append("key", "Hello")
+ c.Assert(appendCmd.Err(), IsNil)
+ c.Assert(appendCmd.Val(), Equals, int64(5))
+
+ appendCmd = t.client.Append("key", " World")
+ c.Assert(appendCmd.Err(), IsNil)
+ c.Assert(appendCmd.Val(), Equals, int64(11))
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "Hello World")
+}
+
+func (t *RedisTest) TestStringsBitCount(c *C) {
+ set := t.client.Set("key", "foobar")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
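+ // "foobar" has 26 set bits in total; byte 0 ('f') contributes 4 and byte 1 ('o') 6.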
+ bitCount := t.client.BitCount("key", nil)
+ c.Assert(bitCount.Err(), IsNil)
+ c.Assert(bitCount.Val(), Equals, int64(26))
+
+ bitCount = t.client.BitCount("key", &redis.BitCount{0, 0})
+ c.Assert(bitCount.Err(), IsNil)
+ c.Assert(bitCount.Val(), Equals, int64(4))
+
+ bitCount = t.client.BitCount("key", &redis.BitCount{1, 1})
+ c.Assert(bitCount.Err(), IsNil)
+ c.Assert(bitCount.Val(), Equals, int64(6))
+}
+
+func (t *RedisTest) TestStringsBitOpAnd(c *C) {
+ set := t.client.Set("key1", "1")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ set = t.client.Set("key2", "0")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ bitOpAnd := t.client.BitOpAnd("dest", "key1", "key2")
+ c.Assert(bitOpAnd.Err(), IsNil)
+ c.Assert(bitOpAnd.Val(), Equals, int64(1))
+
+ get := t.client.Get("dest")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "0")
+}
+
+func (t *RedisTest) TestStringsBitOpOr(c *C) {
+ set := t.client.Set("key1", "1")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ set = t.client.Set("key2", "0")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ bitOpOr := t.client.BitOpOr("dest", "key1", "key2")
+ c.Assert(bitOpOr.Err(), IsNil)
+ c.Assert(bitOpOr.Val(), Equals, int64(1))
+
+ get := t.client.Get("dest")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "1")
+}
+
+func (t *RedisTest) TestStringsBitOpXor(c *C) {
+ set := t.client.Set("key1", "\xff")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ set = t.client.Set("key2", "\x0f")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
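+ // 0xff XOR 0x0f = 0xf0, which is what "dest" should hold.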
+ bitOpXor := t.client.BitOpXor("dest", "key1", "key2")
+ c.Assert(bitOpXor.Err(), IsNil)
+ c.Assert(bitOpXor.Val(), Equals, int64(1))
+
+ get := t.client.Get("dest")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "\xf0")
+}
+
+func (t *RedisTest) TestStringsBitOpNot(c *C) {
+ set := t.client.Set("key1", "\x00")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ bitOpNot := t.client.BitOpNot("dest", "key1")
+ c.Assert(bitOpNot.Err(), IsNil)
+ c.Assert(bitOpNot.Val(), Equals, int64(1))
+
+ get := t.client.Get("dest")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "\xff")
+}
+
+func (t *RedisTest) TestStringsDecr(c *C) {
+ set := t.client.Set("key", "10")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ decr := t.client.Decr("key")
+ c.Assert(decr.Err(), IsNil)
+ c.Assert(decr.Val(), Equals, int64(9))
+
+ set = t.client.Set("key", "234293482390480948029348230948")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ decr = t.client.Decr("key")
+ c.Assert(decr.Err(), ErrorMatches, "ERR value is not an integer or out of range")
+ c.Assert(decr.Val(), Equals, int64(0))
+}
+
+func (t *RedisTest) TestStringsDecrBy(c *C) {
+ set := t.client.Set("key", "10")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ decrBy := t.client.DecrBy("key", 5)
+ c.Assert(decrBy.Err(), IsNil)
+ c.Assert(decrBy.Val(), Equals, int64(5))
+}
+
+func (t *RedisTest) TestStringsGet(c *C) {
+ get := t.client.Get("_")
+ c.Assert(get.Err(), Equals, redis.Nil)
+ c.Assert(get.Val(), Equals, "")
+
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ get = t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestStringsGetBit(c *C) {
+ setBit := t.client.SetBit("key", 7, 1)
+ c.Assert(setBit.Err(), IsNil)
+ c.Assert(setBit.Val(), Equals, int64(0))
+
+ getBit := t.client.GetBit("key", 0)
+ c.Assert(getBit.Err(), IsNil)
+ c.Assert(getBit.Val(), Equals, int64(0))
+
+ getBit = t.client.GetBit("key", 7)
+ c.Assert(getBit.Err(), IsNil)
+ c.Assert(getBit.Val(), Equals, int64(1))
+
+ getBit = t.client.GetBit("key", 100)
+ c.Assert(getBit.Err(), IsNil)
+ c.Assert(getBit.Val(), Equals, int64(0))
+}
+
+func (t *RedisTest) TestStringsGetRange(c *C) {
+ set := t.client.Set("key", "This is a string")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ getRange := t.client.GetRange("key", 0, 3)
+ c.Assert(getRange.Err(), IsNil)
+ c.Assert(getRange.Val(), Equals, "This")
+
+ getRange = t.client.GetRange("key", -3, -1)
+ c.Assert(getRange.Err(), IsNil)
+ c.Assert(getRange.Val(), Equals, "ing")
+
+ getRange = t.client.GetRange("key", 0, -1)
+ c.Assert(getRange.Err(), IsNil)
+ c.Assert(getRange.Val(), Equals, "This is a string")
+
+ getRange = t.client.GetRange("key", 10, 100)
+ c.Assert(getRange.Err(), IsNil)
+ c.Assert(getRange.Val(), Equals, "string")
+}
+
+func (t *RedisTest) TestStringsGetSet(c *C) {
+ incr := t.client.Incr("key")
+ c.Assert(incr.Err(), IsNil)
+ c.Assert(incr.Val(), Equals, int64(1))
+
+ getSet := t.client.GetSet("key", "0")
+ c.Assert(getSet.Err(), IsNil)
+ c.Assert(getSet.Val(), Equals, "1")
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "0")
+}
+
+func (t *RedisTest) TestStringsIncr(c *C) {
+ set := t.client.Set("key", "10")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ incr := t.client.Incr("key")
+ c.Assert(incr.Err(), IsNil)
+ c.Assert(incr.Val(), Equals, int64(11))
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "11")
+}
+
+func (t *RedisTest) TestStringsIncrBy(c *C) {
+ set := t.client.Set("key", "10")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ incrBy := t.client.IncrBy("key", 5)
+ c.Assert(incrBy.Err(), IsNil)
+ c.Assert(incrBy.Val(), Equals, int64(15))
+}
+
+func (t *RedisTest) TestIncrByFloat(c *C) {
+ set := t.client.Set("key", "10.50")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ incrByFloat := t.client.IncrByFloat("key", 0.1)
+ c.Assert(incrByFloat.Err(), IsNil)
+ c.Assert(incrByFloat.Val(), Equals, 10.6)
+
+ set = t.client.Set("key", "5.0e3")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ incrByFloat = t.client.IncrByFloat("key", 2.0e2)
+ c.Assert(incrByFloat.Err(), IsNil)
+ c.Assert(incrByFloat.Val(), Equals, float64(5200))
+}
+
+func (t *RedisTest) TestIncrByFloatOverflow(c *C) {
+ incrByFloat := t.client.IncrByFloat("key", 996945661)
+ c.Assert(incrByFloat.Err(), IsNil)
+ c.Assert(incrByFloat.Val(), Equals, float64(996945661))
+}
+
+func (t *RedisTest) TestStringsMSetMGet(c *C) {
+ mSet := t.client.MSet("key1", "hello1", "key2", "hello2")
+ c.Assert(mSet.Err(), IsNil)
+ c.Assert(mSet.Val(), Equals, "OK")
+
+ mGet := t.client.MGet("key1", "key2", "_")
+ c.Assert(mGet.Err(), IsNil)
+ c.Assert(mGet.Val(), DeepEquals, []interface{}{"hello1", "hello2", nil})
+}
+
+func (t *RedisTest) TestStringsMSetNX(c *C) {
+ mSetNX := t.client.MSetNX("key1", "hello1", "key2", "hello2")
+ c.Assert(mSetNX.Err(), IsNil)
+ c.Assert(mSetNX.Val(), Equals, true)
+
+ mSetNX = t.client.MSetNX("key2", "hello1", "key3", "hello2")
+ c.Assert(mSetNX.Err(), IsNil)
+ c.Assert(mSetNX.Val(), Equals, false)
+}
+
+func (t *RedisTest) TestStringsPSetEx(c *C) {
+ expiration := 50 * time.Millisecond
+ psetex := t.client.PSetEx("key", expiration, "hello")
+ c.Assert(psetex.Err(), IsNil)
+ c.Assert(psetex.Val(), Equals, "OK")
+
+ pttl := t.client.PTTL("key")
+ c.Assert(pttl.Err(), IsNil)
+ c.Assert(pttl.Val() <= expiration, Equals, true)
+ c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true)
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestStringsSetGet(c *C) {
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestStringsSetEx(c *C) {
+ setEx := t.client.SetEx("key", 10*time.Second, "hello")
+ c.Assert(setEx.Err(), IsNil)
+ c.Assert(setEx.Val(), Equals, "OK")
+
+ ttl := t.client.TTL("key")
+ c.Assert(ttl.Err(), IsNil)
+ c.Assert(ttl.Val(), Equals, 10*time.Second)
+}
+
+func (t *RedisTest) TestStringsSetNX(c *C) {
+ setNX := t.client.SetNX("key", "hello")
+ c.Assert(setNX.Err(), IsNil)
+ c.Assert(setNX.Val(), Equals, true)
+
+ setNX = t.client.SetNX("key", "hello2")
+ c.Assert(setNX.Err(), IsNil)
+ c.Assert(setNX.Val(), Equals, false)
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestStringsSetRange(c *C) {
+ set := t.client.Set("key", "Hello World")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ range_ := t.client.SetRange("key", 6, "Redis")
+ c.Assert(range_.Err(), IsNil)
+ c.Assert(range_.Val(), Equals, int64(11))
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "Hello Redis")
+}
+
+func (t *RedisTest) TestStringsStrLen(c *C) {
+ set := t.client.Set("key", "hello")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ strLen := t.client.StrLen("key")
+ c.Assert(strLen.Err(), IsNil)
+ c.Assert(strLen.Val(), Equals, int64(5))
+
+ strLen = t.client.StrLen("_")
+ c.Assert(strLen.Err(), IsNil)
+ c.Assert(strLen.Val(), Equals, int64(0))
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestCmdHDel(c *C) {
+ hSet := t.client.HSet("hash", "key", "hello")
+ c.Assert(hSet.Err(), IsNil)
+
+ hDel := t.client.HDel("hash", "key")
+ c.Assert(hDel.Err(), IsNil)
+ c.Assert(hDel.Val(), Equals, int64(1))
+
+ hDel = t.client.HDel("hash", "key")
+ c.Assert(hDel.Err(), IsNil)
+ c.Assert(hDel.Val(), Equals, int64(0))
+}
+
+func (t *RedisTest) TestCmdHExists(c *C) {
+ hSet := t.client.HSet("hash", "key", "hello")
+ c.Assert(hSet.Err(), IsNil)
+
+ hExists := t.client.HExists("hash", "key")
+ c.Assert(hExists.Err(), IsNil)
+ c.Assert(hExists.Val(), Equals, true)
+
+ hExists = t.client.HExists("hash", "key1")
+ c.Assert(hExists.Err(), IsNil)
+ c.Assert(hExists.Val(), Equals, false)
+}
+
+func (t *RedisTest) TestCmdHGet(c *C) {
+ hSet := t.client.HSet("hash", "key", "hello")
+ c.Assert(hSet.Err(), IsNil)
+
+ hGet := t.client.HGet("hash", "key")
+ c.Assert(hGet.Err(), IsNil)
+ c.Assert(hGet.Val(), Equals, "hello")
+
+ hGet = t.client.HGet("hash", "key1")
+ c.Assert(hGet.Err(), Equals, redis.Nil)
+ c.Assert(hGet.Val(), Equals, "")
+}
+
+func (t *RedisTest) TestCmdHGetAll(c *C) {
+ hSet := t.client.HSet("hash", "key1", "hello1")
+ c.Assert(hSet.Err(), IsNil)
+ hSet = t.client.HSet("hash", "key2", "hello2")
+ c.Assert(hSet.Err(), IsNil)
+
+ hGetAll := t.client.HGetAll("hash")
+ c.Assert(hGetAll.Err(), IsNil)
+ c.Assert(hGetAll.Val(), DeepEquals, []string{"key1", "hello1", "key2", "hello2"})
+}
+
+func (t *RedisTest) TestCmdHGetAllMap(c *C) {
+ hSet := t.client.HSet("hash", "key1", "hello1")
+ c.Assert(hSet.Err(), IsNil)
+ hSet = t.client.HSet("hash", "key2", "hello2")
+ c.Assert(hSet.Err(), IsNil)
+
+ hGetAll := t.client.HGetAllMap("hash")
+ c.Assert(hGetAll.Err(), IsNil)
+ c.Assert(hGetAll.Val(), DeepEquals, map[string]string{"key1": "hello1", "key2": "hello2"})
+}
+
+func (t *RedisTest) TestCmdHIncrBy(c *C) {
+ hSet := t.client.HSet("hash", "key", "5")
+ c.Assert(hSet.Err(), IsNil)
+
+ hIncrBy := t.client.HIncrBy("hash", "key", 1)
+ c.Assert(hIncrBy.Err(), IsNil)
+ c.Assert(hIncrBy.Val(), Equals, int64(6))
+
+ hIncrBy = t.client.HIncrBy("hash", "key", -1)
+ c.Assert(hIncrBy.Err(), IsNil)
+ c.Assert(hIncrBy.Val(), Equals, int64(5))
+
+ hIncrBy = t.client.HIncrBy("hash", "key", -10)
+ c.Assert(hIncrBy.Err(), IsNil)
+ c.Assert(hIncrBy.Val(), Equals, int64(-5))
+}
+
+func (t *RedisTest) TestCmdHIncrByFloat(c *C) {
+ hSet := t.client.HSet("hash", "field", "10.50")
+ c.Assert(hSet.Err(), IsNil)
+ c.Assert(hSet.Val(), Equals, true)
+
+ hIncrByFloat := t.client.HIncrByFloat("hash", "field", 0.1)
+ c.Assert(hIncrByFloat.Err(), IsNil)
+ c.Assert(hIncrByFloat.Val(), Equals, 10.6)
+
+ hSet = t.client.HSet("hash", "field", "5.0e3")
+ c.Assert(hSet.Err(), IsNil)
+ c.Assert(hSet.Val(), Equals, false)
+
+ hIncrByFloat = t.client.HIncrByFloat("hash", "field", 2.0e2)
+ c.Assert(hIncrByFloat.Err(), IsNil)
+ c.Assert(hIncrByFloat.Val(), Equals, float64(5200))
+}
+
+func (t *RedisTest) TestCmdHKeys(c *C) {
+ hkeys := t.client.HKeys("hash")
+ c.Assert(hkeys.Err(), IsNil)
+ c.Assert(hkeys.Val(), DeepEquals, []string{})
+
+ hset := t.client.HSet("hash", "key1", "hello1")
+ c.Assert(hset.Err(), IsNil)
+ hset = t.client.HSet("hash", "key2", "hello2")
+ c.Assert(hset.Err(), IsNil)
+
+ hkeys = t.client.HKeys("hash")
+ c.Assert(hkeys.Err(), IsNil)
+ c.Assert(hkeys.Val(), DeepEquals, []string{"key1", "key2"})
+}
+
+func (t *RedisTest) TestCmdHLen(c *C) {
+ hSet := t.client.HSet("hash", "key1", "hello1")
+ c.Assert(hSet.Err(), IsNil)
+ hSet = t.client.HSet("hash", "key2", "hello2")
+ c.Assert(hSet.Err(), IsNil)
+
+ hLen := t.client.HLen("hash")
+ c.Assert(hLen.Err(), IsNil)
+ c.Assert(hLen.Val(), Equals, int64(2))
+}
+
+func (t *RedisTest) TestCmdHMGet(c *C) {
+ hSet := t.client.HSet("hash", "key1", "hello1")
+ c.Assert(hSet.Err(), IsNil)
+ hSet = t.client.HSet("hash", "key2", "hello2")
+ c.Assert(hSet.Err(), IsNil)
+
+ hMGet := t.client.HMGet("hash", "key1", "key2", "_")
+ c.Assert(hMGet.Err(), IsNil)
+ c.Assert(hMGet.Val(), DeepEquals, []interface{}{"hello1", "hello2", nil})
+}
+
+func (t *RedisTest) TestCmdHMSet(c *C) {
+ hMSet := t.client.HMSet("hash", "key1", "hello1", "key2", "hello2")
+ c.Assert(hMSet.Err(), IsNil)
+ c.Assert(hMSet.Val(), Equals, "OK")
+
+ hGet := t.client.HGet("hash", "key1")
+ c.Assert(hGet.Err(), IsNil)
+ c.Assert(hGet.Val(), Equals, "hello1")
+
+ hGet = t.client.HGet("hash", "key2")
+ c.Assert(hGet.Err(), IsNil)
+ c.Assert(hGet.Val(), Equals, "hello2")
+}
+
+func (t *RedisTest) TestCmdHSet(c *C) {
+ hSet := t.client.HSet("hash", "key", "hello")
+ c.Assert(hSet.Err(), IsNil)
+ c.Assert(hSet.Val(), Equals, true)
+
+ hGet := t.client.HGet("hash", "key")
+ c.Assert(hGet.Err(), IsNil)
+ c.Assert(hGet.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestCmdHSetNX(c *C) {
+ hSetNX := t.client.HSetNX("hash", "key", "hello")
+ c.Assert(hSetNX.Err(), IsNil)
+ c.Assert(hSetNX.Val(), Equals, true)
+
+ hSetNX = t.client.HSetNX("hash", "key", "hello")
+ c.Assert(hSetNX.Err(), IsNil)
+ c.Assert(hSetNX.Val(), Equals, false)
+
+ hGet := t.client.HGet("hash", "key")
+ c.Assert(hGet.Err(), IsNil)
+ c.Assert(hGet.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestCmdHVals(c *C) {
+ hSet := t.client.HSet("hash", "key1", "hello1")
+ c.Assert(hSet.Err(), IsNil)
+ hSet = t.client.HSet("hash", "key2", "hello2")
+ c.Assert(hSet.Err(), IsNil)
+
+ hVals := t.client.HVals("hash")
+ c.Assert(hVals.Err(), IsNil)
+ c.Assert(hVals.Val(), DeepEquals, []string{"hello1", "hello2"})
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestCmdListsBLPop(c *C) {
+ rPush := t.client.RPush("list1", "a", "b", "c")
+ c.Assert(rPush.Err(), IsNil)
+
+ bLPop := t.client.BLPop(0, "list1", "list2")
+ c.Assert(bLPop.Err(), IsNil)
+ c.Assert(bLPop.Val(), DeepEquals, []string{"list1", "a"})
+}
+
+func (t *RedisTest) TestCmdListsBLPopBlocks(c *C) {
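+ // The goroutine blocks on BLPop until the RPush below supplies a value;
+ // the two selects check that it first blocks and then unblocks.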
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ started <- true
+ bLPop := t.client.BLPop(0, "list")
+ c.Assert(bLPop.Err(), IsNil)
+ c.Assert(bLPop.Val(), DeepEquals, []string{"list", "a"})
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ c.Error("BLPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ rPush := t.client.RPush("list", "a")
+ c.Assert(rPush.Err(), IsNil)
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ c.Error("BLPop is still blocked")
+ // ok
+ }
+}
+
+func (t *RedisTest) TestCmdListsBLPopTimeout(c *C) {
+ bLPop := t.client.BLPop(1, "list1")
+ c.Assert(bLPop.Err(), Equals, redis.Nil)
+ c.Assert(bLPop.Val(), IsNil)
+}
+
+func (t *RedisTest) TestCmdListsBRPop(c *C) {
+ rPush := t.client.RPush("list1", "a", "b", "c")
+ c.Assert(rPush.Err(), IsNil)
+
+ bRPop := t.client.BRPop(0, "list1", "list2")
+ c.Assert(bRPop.Err(), IsNil)
+ c.Assert(bRPop.Val(), DeepEquals, []string{"list1", "c"})
+}
+
+func (t *RedisTest) TestCmdListsBRPopBlocks(c *C) {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ started <- true
+ brpop := t.client.BRPop(0, "list")
+ c.Assert(brpop.Err(), IsNil)
+ c.Assert(brpop.Val(), DeepEquals, []string{"list", "a"})
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ c.Error("BRPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ rPush := t.client.RPush("list", "a")
+ c.Assert(rPush.Err(), IsNil)
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ c.Error("BRPop is still blocked")
+ // ok
+ }
+}
+
+func (t *RedisTest) TestCmdListsBRPopLPush(c *C) {
+ rPush := t.client.RPush("list1", "a", "b", "c")
+ c.Assert(rPush.Err(), IsNil)
+
+ bRPopLPush := t.client.BRPopLPush("list1", "list2", 0)
+ c.Assert(bRPopLPush.Err(), IsNil)
+ c.Assert(bRPopLPush.Val(), Equals, "c")
+}
+
+func (t *RedisTest) TestCmdListsLIndex(c *C) {
+ lPush := t.client.LPush("list", "World")
+ c.Assert(lPush.Err(), IsNil)
+ lPush = t.client.LPush("list", "Hello")
+ c.Assert(lPush.Err(), IsNil)
+
+ lIndex := t.client.LIndex("list", 0)
+ c.Assert(lIndex.Err(), IsNil)
+ c.Assert(lIndex.Val(), Equals, "Hello")
+
+ lIndex = t.client.LIndex("list", -1)
+ c.Assert(lIndex.Err(), IsNil)
+ c.Assert(lIndex.Val(), Equals, "World")
+
+ lIndex = t.client.LIndex("list", 3)
+ c.Assert(lIndex.Err(), Equals, redis.Nil)
+ c.Assert(lIndex.Val(), Equals, "")
+}
+
+func (t *RedisTest) TestCmdListsLInsert(c *C) {
+ rPush := t.client.RPush("list", "Hello")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "World")
+ c.Assert(rPush.Err(), IsNil)
+
+ lInsert := t.client.LInsert("list", "BEFORE", "World", "There")
+ c.Assert(lInsert.Err(), IsNil)
+ c.Assert(lInsert.Val(), Equals, int64(3))
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "There", "World"})
+}
+
+func (t *RedisTest) TestCmdListsLLen(c *C) {
+ lPush := t.client.LPush("list", "World")
+ c.Assert(lPush.Err(), IsNil)
+ lPush = t.client.LPush("list", "Hello")
+ c.Assert(lPush.Err(), IsNil)
+
+ lLen := t.client.LLen("list")
+ c.Assert(lLen.Err(), IsNil)
+ c.Assert(lLen.Val(), Equals, int64(2))
+}
+
+func (t *RedisTest) TestCmdListsLPop(c *C) {
+ rPush := t.client.RPush("list", "one")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "two")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "three")
+ c.Assert(rPush.Err(), IsNil)
+
+ lPop := t.client.LPop("list")
+ c.Assert(lPop.Err(), IsNil)
+ c.Assert(lPop.Val(), Equals, "one")
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"two", "three"})
+}
+
+func (t *RedisTest) TestCmdListsLPush(c *C) {
+ lPush := t.client.LPush("list", "World")
+ c.Assert(lPush.Err(), IsNil)
+ lPush = t.client.LPush("list", "Hello")
+ c.Assert(lPush.Err(), IsNil)
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"})
+}
+
+func (t *RedisTest) TestCmdListsLPushX(c *C) {
+ lPush := t.client.LPush("list", "World")
+ c.Assert(lPush.Err(), IsNil)
+
+ lPushX := t.client.LPushX("list", "Hello")
+ c.Assert(lPushX.Err(), IsNil)
+ c.Assert(lPushX.Val(), Equals, int64(2))
+
+ lPushX = t.client.LPushX("list2", "Hello")
+ c.Assert(lPushX.Err(), IsNil)
+ c.Assert(lPushX.Val(), Equals, int64(0))
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"})
+
+ lRange = t.client.LRange("list2", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{})
+}
+
+func (t *RedisTest) TestCmdListsLRange(c *C) {
+ rPush := t.client.RPush("list", "one")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "two")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "three")
+ c.Assert(rPush.Err(), IsNil)
+
+ lRange := t.client.LRange("list", 0, 0)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"one"})
+
+ lRange = t.client.LRange("list", -3, 2)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"one", "two", "three"})
+
+ lRange = t.client.LRange("list", -100, 100)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"one", "two", "three"})
+
+ lRange = t.client.LRange("list", 5, 10)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{})
+}
+
+func (t *RedisTest) TestCmdListsLRem(c *C) {
+ rPush := t.client.RPush("list", "hello")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "hello")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "key")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "hello")
+ c.Assert(rPush.Err(), IsNil)
+
+ lRem := t.client.LRem("list", -2, "hello")
+ c.Assert(lRem.Err(), IsNil)
+ c.Assert(lRem.Val(), Equals, int64(2))
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"hello", "key"})
+}
+
+func (t *RedisTest) TestCmdListsLSet(c *C) {
+ rPush := t.client.RPush("list", "one")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "two")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "three")
+ c.Assert(rPush.Err(), IsNil)
+
+ lSet := t.client.LSet("list", 0, "four")
+ c.Assert(lSet.Err(), IsNil)
+ c.Assert(lSet.Val(), Equals, "OK")
+
+ lSet = t.client.LSet("list", -2, "five")
+ c.Assert(lSet.Err(), IsNil)
+ c.Assert(lSet.Val(), Equals, "OK")
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"four", "five", "three"})
+}
+
+func (t *RedisTest) TestCmdListsLTrim(c *C) {
+ rPush := t.client.RPush("list", "one")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "two")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "three")
+ c.Assert(rPush.Err(), IsNil)
+
+ lTrim := t.client.LTrim("list", 1, -1)
+ c.Assert(lTrim.Err(), IsNil)
+ c.Assert(lTrim.Val(), Equals, "OK")
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"two", "three"})
+}
+
+func (t *RedisTest) TestCmdListsRPop(c *C) {
+ rPush := t.client.RPush("list", "one")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "two")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "three")
+ c.Assert(rPush.Err(), IsNil)
+
+ rPop := t.client.RPop("list")
+ c.Assert(rPop.Err(), IsNil)
+ c.Assert(rPop.Val(), Equals, "three")
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"one", "two"})
+}
+
+func (t *RedisTest) TestCmdListsRPopLPush(c *C) {
+ rPush := t.client.RPush("list", "one")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "two")
+ c.Assert(rPush.Err(), IsNil)
+ rPush = t.client.RPush("list", "three")
+ c.Assert(rPush.Err(), IsNil)
+
+ rPopLPush := t.client.RPopLPush("list", "list2")
+ c.Assert(rPopLPush.Err(), IsNil)
+ c.Assert(rPopLPush.Val(), Equals, "three")
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"one", "two"})
+
+ lRange = t.client.LRange("list2", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"three"})
+}
+
+func (t *RedisTest) TestCmdListsRPush(c *C) {
+ rPush := t.client.RPush("list", "Hello")
+ c.Assert(rPush.Err(), IsNil)
+ c.Assert(rPush.Val(), Equals, int64(1))
+
+ rPush = t.client.RPush("list", "World")
+ c.Assert(rPush.Err(), IsNil)
+ c.Assert(rPush.Val(), Equals, int64(2))
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"})
+}
+
+func (t *RedisTest) TestCmdListsRPushX(c *C) {
+ rPush := t.client.RPush("list", "Hello")
+ c.Assert(rPush.Err(), IsNil)
+ c.Assert(rPush.Val(), Equals, int64(1))
+
+ rPushX := t.client.RPushX("list", "World")
+ c.Assert(rPushX.Err(), IsNil)
+ c.Assert(rPushX.Val(), Equals, int64(2))
+
+ rPushX = t.client.RPushX("list2", "World")
+ c.Assert(rPushX.Err(), IsNil)
+ c.Assert(rPushX.Val(), Equals, int64(0))
+
+ lRange := t.client.LRange("list", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"})
+
+ lRange = t.client.LRange("list2", 0, -1)
+ c.Assert(lRange.Err(), IsNil)
+ c.Assert(lRange.Val(), DeepEquals, []string{})
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestSAdd(c *C) {
+ sAdd := t.client.SAdd("set", "Hello")
+ c.Assert(sAdd.Err(), IsNil)
+ c.Assert(sAdd.Val(), Equals, int64(1))
+
+ sAdd = t.client.SAdd("set", "World")
+ c.Assert(sAdd.Err(), IsNil)
+ c.Assert(sAdd.Val(), Equals, int64(1))
+
+ sAdd = t.client.SAdd("set", "World")
+ c.Assert(sAdd.Err(), IsNil)
+ c.Assert(sAdd.Val(), Equals, int64(0))
+
+ sMembers := t.client.SMembers("set")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"Hello", "World"})
+}
+
+func (t *RedisTest) TestSCard(c *C) {
+ sAdd := t.client.SAdd("set", "Hello")
+ c.Assert(sAdd.Err(), IsNil)
+ c.Assert(sAdd.Val(), Equals, int64(1))
+
+ sAdd = t.client.SAdd("set", "World")
+ c.Assert(sAdd.Err(), IsNil)
+ c.Assert(sAdd.Val(), Equals, int64(1))
+
+ sCard := t.client.SCard("set")
+ c.Assert(sCard.Err(), IsNil)
+ c.Assert(sCard.Val(), Equals, int64(2))
+}
+
+func (t *RedisTest) TestSDiff(c *C) {
+ sAdd := t.client.SAdd("set1", "a")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "b")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "c")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sAdd = t.client.SAdd("set2", "c")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "d")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "e")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sDiff := t.client.SDiff("set1", "set2")
+ c.Assert(sDiff.Err(), IsNil)
+ c.Assert(sortStrings(sDiff.Val()), DeepEquals, []string{"a", "b"})
+}
+
+func (t *RedisTest) TestSDiffStore(c *C) {
+ sAdd := t.client.SAdd("set1", "a")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "b")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "c")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sAdd = t.client.SAdd("set2", "c")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "d")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "e")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sDiffStore := t.client.SDiffStore("set", "set1", "set2")
+ c.Assert(sDiffStore.Err(), IsNil)
+ c.Assert(sDiffStore.Val(), Equals, int64(2))
+
+ sMembers := t.client.SMembers("set")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"a", "b"})
+}
+
+func (t *RedisTest) TestSInter(c *C) {
+ sAdd := t.client.SAdd("set1", "a")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "b")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "c")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sAdd = t.client.SAdd("set2", "c")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "d")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "e")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sInter := t.client.SInter("set1", "set2")
+ c.Assert(sInter.Err(), IsNil)
+ c.Assert(sInter.Val(), DeepEquals, []string{"c"})
+}
+
+func (t *RedisTest) TestSInterStore(c *C) {
+ sAdd := t.client.SAdd("set1", "a")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "b")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "c")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sAdd = t.client.SAdd("set2", "c")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "d")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "e")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sInterStore := t.client.SInterStore("set", "set1", "set2")
+ c.Assert(sInterStore.Err(), IsNil)
+ c.Assert(sInterStore.Val(), Equals, int64(1))
+
+ sMembers := t.client.SMembers("set")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(sMembers.Val(), DeepEquals, []string{"c"})
+}
+
+func (t *RedisTest) TestIsMember(c *C) {
+ sAdd := t.client.SAdd("set", "one")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sIsMember := t.client.SIsMember("set", "one")
+ c.Assert(sIsMember.Err(), IsNil)
+ c.Assert(sIsMember.Val(), Equals, true)
+
+ sIsMember = t.client.SIsMember("set", "two")
+ c.Assert(sIsMember.Err(), IsNil)
+ c.Assert(sIsMember.Val(), Equals, false)
+}
+
+func (t *RedisTest) TestSMembers(c *C) {
+ sAdd := t.client.SAdd("set", "Hello")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set", "World")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sMembers := t.client.SMembers("set")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"Hello", "World"})
+}
+
+func (t *RedisTest) TestSMove(c *C) {
+ sAdd := t.client.SAdd("set1", "one")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "two")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sAdd = t.client.SAdd("set2", "three")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sMove := t.client.SMove("set1", "set2", "two")
+ c.Assert(sMove.Err(), IsNil)
+ c.Assert(sMove.Val(), Equals, true)
+
+ sMembers := t.client.SMembers("set1")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(sMembers.Val(), DeepEquals, []string{"one"})
+
+ sMembers = t.client.SMembers("set2")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"three", "two"})
+}
+
+func (t *RedisTest) TestSPop(c *C) {
+ sAdd := t.client.SAdd("set", "one")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set", "two")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set", "three")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sPop := t.client.SPop("set")
+ c.Assert(sPop.Err(), IsNil)
+ c.Assert(sPop.Val(), Not(Equals), "")
+
+ sMembers := t.client.SMembers("set")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(sMembers.Val(), HasLen, 2)
+}
+
+func (t *RedisTest) TestSRandMember(c *C) {
+ sAdd := t.client.SAdd("set", "one")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set", "two")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set", "three")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sRandMember := t.client.SRandMember("set")
+ c.Assert(sRandMember.Err(), IsNil)
+ c.Assert(sRandMember.Val(), Not(Equals), "")
+
+ sMembers := t.client.SMembers("set")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(sMembers.Val(), HasLen, 3)
+}
+
+func (t *RedisTest) TestSRem(c *C) {
+ sAdd := t.client.SAdd("set", "one")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set", "two")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set", "three")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sRem := t.client.SRem("set", "one")
+ c.Assert(sRem.Err(), IsNil)
+ c.Assert(sRem.Val(), Equals, int64(1))
+
+ sRem = t.client.SRem("set", "four")
+ c.Assert(sRem.Err(), IsNil)
+ c.Assert(sRem.Val(), Equals, int64(0))
+
+ sMembers := t.client.SMembers("set")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(
+ sortStrings(sMembers.Val()),
+ DeepEquals,
+ []string{"three", "two"},
+ )
+}
+
+func (t *RedisTest) TestSUnion(c *C) {
+ sAdd := t.client.SAdd("set1", "a")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "b")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "c")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sAdd = t.client.SAdd("set2", "c")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "d")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "e")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sUnion := t.client.SUnion("set1", "set2")
+ c.Assert(sUnion.Err(), IsNil)
+ c.Assert(sUnion.Val(), HasLen, 5)
+}
+
+func (t *RedisTest) TestSUnionStore(c *C) {
+ sAdd := t.client.SAdd("set1", "a")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "b")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set1", "c")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sAdd = t.client.SAdd("set2", "c")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "d")
+ c.Assert(sAdd.Err(), IsNil)
+ sAdd = t.client.SAdd("set2", "e")
+ c.Assert(sAdd.Err(), IsNil)
+
+ sUnionStore := t.client.SUnionStore("set", "set1", "set2")
+ c.Assert(sUnionStore.Err(), IsNil)
+ c.Assert(sUnionStore.Val(), Equals, int64(5))
+
+ sMembers := t.client.SMembers("set")
+ c.Assert(sMembers.Err(), IsNil)
+ c.Assert(sMembers.Val(), HasLen, 5)
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestZAdd(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ c.Assert(zAdd.Val(), Equals, int64(1))
+
+ zAdd = t.client.ZAdd("zset", redis.Z{1, "uno"})
+ c.Assert(zAdd.Err(), IsNil)
+ c.Assert(zAdd.Val(), Equals, int64(1))
+
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ c.Assert(zAdd.Val(), Equals, int64(1))
+
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ c.Assert(zAdd.Val(), Equals, int64(0))
+
+ val, err := t.client.ZRangeWithScores("zset", 0, -1).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {1, "uno"}, {3, "two"}})
+}
+
+func (t *RedisTest) TestZCard(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zCard := t.client.ZCard("zset")
+ c.Assert(zCard.Err(), IsNil)
+ c.Assert(zCard.Val(), Equals, int64(2))
+}
+
+func (t *RedisTest) TestZCount(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zCount := t.client.ZCount("zset", "-inf", "+inf")
+ c.Assert(zCount.Err(), IsNil)
+ c.Assert(zCount.Val(), Equals, int64(3))
+
+ zCount = t.client.ZCount("zset", "(1", "3")
+ c.Assert(zCount.Err(), IsNil)
+ c.Assert(zCount.Val(), Equals, int64(2))
+}
+
+func (t *RedisTest) TestZIncrBy(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zIncrBy := t.client.ZIncrBy("zset", 2, "one")
+ c.Assert(zIncrBy.Err(), IsNil)
+ c.Assert(zIncrBy.Val(), Equals, float64(3))
+
+ val, err := t.client.ZRangeWithScores("zset", 0, -1).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {3, "one"}})
+}
+
+func (t *RedisTest) TestZInterStore(c *C) {
+ zAdd := t.client.ZAdd("zset1", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset1", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zAdd = t.client.ZAdd("zset2", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset2", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset3", redis.Z{3, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+
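+ // With weights 2 and 3, the intersection scores are 1*2+1*3=5 for "one" and 2*2+2*3=10 for "two".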
+ zInterStore := t.client.ZInterStore(
+ "out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2")
+ c.Assert(zInterStore.Err(), IsNil)
+ c.Assert(zInterStore.Val(), Equals, int64(2))
+
+ val, err := t.client.ZRangeWithScores("out", 0, -1).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{5, "one"}, {10, "two"}})
+}
+
+func (t *RedisTest) TestZRange(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zRange := t.client.ZRange("zset", 0, -1)
+ c.Assert(zRange.Err(), IsNil)
+ c.Assert(zRange.Val(), DeepEquals, []string{"one", "two", "three"})
+
+ zRange = t.client.ZRange("zset", 2, 3)
+ c.Assert(zRange.Err(), IsNil)
+ c.Assert(zRange.Val(), DeepEquals, []string{"three"})
+
+ zRange = t.client.ZRange("zset", -2, -1)
+ c.Assert(zRange.Err(), IsNil)
+ c.Assert(zRange.Val(), DeepEquals, []string{"two", "three"})
+}
+
+func (t *RedisTest) TestZRangeWithScores(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ val, err := t.client.ZRangeWithScores("zset", 0, -1).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {2, "two"}, {3, "three"}})
+
+ val, err = t.client.ZRangeWithScores("zset", 2, 3).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{3, "three"}})
+
+ val, err = t.client.ZRangeWithScores("zset", -2, -1).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {3, "three"}})
+}
+
+func (t *RedisTest) TestZRangeByScore(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zRangeByScore := t.client.ZRangeByScore("zset", redis.ZRangeByScore{
+ Min: "-inf",
+ Max: "+inf",
+ })
+ c.Assert(zRangeByScore.Err(), IsNil)
+ c.Assert(zRangeByScore.Val(), DeepEquals, []string{"one", "two", "three"})
+
+ zRangeByScore = t.client.ZRangeByScore("zset", redis.ZRangeByScore{
+ Min: "1",
+ Max: "2",
+ })
+ c.Assert(zRangeByScore.Err(), IsNil)
+ c.Assert(zRangeByScore.Val(), DeepEquals, []string{"one", "two"})
+
+ zRangeByScore = t.client.ZRangeByScore("zset", redis.ZRangeByScore{
+ Min: "(1",
+ Max: "2",
+ })
+ c.Assert(zRangeByScore.Err(), IsNil)
+ c.Assert(zRangeByScore.Val(), DeepEquals, []string{"two"})
+
+ zRangeByScore = t.client.ZRangeByScore("zset", redis.ZRangeByScore{
+ Min: "(1",
+ Max: "(2",
+ })
+ c.Assert(zRangeByScore.Err(), IsNil)
+ c.Assert(zRangeByScore.Val(), DeepEquals, []string{})
+}
+
+func (t *RedisTest) TestZRangeByScoreWithScoresMap(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ val, err := t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{
+ Min: "-inf",
+ Max: "+inf",
+ }).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {2, "two"}, {3, "three"}})
+
+ val, err = t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{
+ Min: "1",
+ Max: "2",
+ }).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {2, "two"}})
+
+ val, err = t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{
+ Min: "(1",
+ Max: "2",
+ }).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{2, "two"}})
+
+ val, err = t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{
+ Min: "(1",
+ Max: "(2",
+ }).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{})
+}
+
+func (t *RedisTest) TestZRank(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zRank := t.client.ZRank("zset", "three")
+ c.Assert(zRank.Err(), IsNil)
+ c.Assert(zRank.Val(), Equals, int64(2))
+
+ zRank = t.client.ZRank("zset", "four")
+ c.Assert(zRank.Err(), Equals, redis.Nil)
+ c.Assert(zRank.Val(), Equals, int64(0))
+}
+
+func (t *RedisTest) TestZRem(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zRem := t.client.ZRem("zset", "two")
+ c.Assert(zRem.Err(), IsNil)
+ c.Assert(zRem.Val(), Equals, int64(1))
+
+ val, err := t.client.ZRangeWithScores("zset", 0, -1).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {3, "three"}})
+}
+
+func (t *RedisTest) TestZRemRangeByRank(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zRemRangeByRank := t.client.ZRemRangeByRank("zset", 0, 1)
+ c.Assert(zRemRangeByRank.Err(), IsNil)
+ c.Assert(zRemRangeByRank.Val(), Equals, int64(2))
+
+ val, err := t.client.ZRangeWithScores("zset", 0, -1).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{3, "three"}})
+}
+
+func (t *RedisTest) TestZRemRangeByScore(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zRemRangeByScore := t.client.ZRemRangeByScore("zset", "-inf", "(2")
+ c.Assert(zRemRangeByScore.Err(), IsNil)
+ c.Assert(zRemRangeByScore.Val(), Equals, int64(1))
+
+ val, err := t.client.ZRangeWithScores("zset", 0, -1).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {3, "three"}})
+}
+
+func (t *RedisTest) TestZRevRange(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zRevRange := t.client.ZRevRange("zset", "0", "-1")
+ c.Assert(zRevRange.Err(), IsNil)
+ c.Assert(zRevRange.Val(), DeepEquals, []string{"three", "two", "one"})
+
+ zRevRange = t.client.ZRevRange("zset", "2", "3")
+ c.Assert(zRevRange.Err(), IsNil)
+ c.Assert(zRevRange.Val(), DeepEquals, []string{"one"})
+
+ zRevRange = t.client.ZRevRange("zset", "-2", "-1")
+ c.Assert(zRevRange.Err(), IsNil)
+ c.Assert(zRevRange.Val(), DeepEquals, []string{"two", "one"})
+}
+
+func (t *RedisTest) TestZRevRangeWithScoresMap(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ val, err := t.client.ZRevRangeWithScores("zset", "0", "-1").Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{3, "three"}, {2, "two"}, {1, "one"}})
+
+ val, err = t.client.ZRevRangeWithScores("zset", "2", "3").Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{1, "one"}})
+
+ val, err = t.client.ZRevRangeWithScores("zset", "-2", "-1").Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {1, "one"}})
+}
+
+func (t *RedisTest) TestZRevRangeByScore(c *C) {
+ zadd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zadd.Err(), IsNil)
+ zadd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zadd.Err(), IsNil)
+ zadd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zadd.Err(), IsNil)
+
+ vals, err := t.client.ZRevRangeByScore(
+ "zset", redis.ZRangeByScore{Max: "+inf", Min: "-inf"}).Result()
+ c.Assert(err, IsNil)
+ c.Assert(vals, DeepEquals, []string{"three", "two", "one"})
+
+ vals, err = t.client.ZRevRangeByScore(
+ "zset", redis.ZRangeByScore{Max: "2", Min: "(1"}).Result()
+ c.Assert(err, IsNil)
+ c.Assert(vals, DeepEquals, []string{"two"})
+
+ vals, err = t.client.ZRevRangeByScore(
+ "zset", redis.ZRangeByScore{Max: "(2", Min: "(1"}).Result()
+ c.Assert(err, IsNil)
+ c.Assert(vals, DeepEquals, []string{})
+}
+
+func (t *RedisTest) TestZRevRangeByScoreWithScores(c *C) {
+ zadd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zadd.Err(), IsNil)
+ zadd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zadd.Err(), IsNil)
+ zadd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zadd.Err(), IsNil)
+
+ vals, err := t.client.ZRevRangeByScoreWithScores(
+ "zset", redis.ZRangeByScore{Max: "+inf", Min: "-inf"}).Result()
+ c.Assert(err, IsNil)
+ c.Assert(vals, DeepEquals, []redis.Z{{3, "three"}, {2, "two"}, {1, "one"}})
+}
+
+func (t *RedisTest) TestZRevRangeByScoreWithScoresMap(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ val, err := t.client.ZRevRangeByScoreWithScores(
+ "zset", redis.ZRangeByScore{Max: "+inf", Min: "-inf"}).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{3, "three"}, {2, "two"}, {1, "one"}})
+
+ val, err = t.client.ZRevRangeByScoreWithScores(
+ "zset", redis.ZRangeByScore{Max: "2", Min: "(1"}).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{2, "two"}})
+
+ val, err = t.client.ZRevRangeByScoreWithScores(
+ "zset", redis.ZRangeByScore{Max: "(2", Min: "(1"}).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{})
+}
+
+func (t *RedisTest) TestZRevRank(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zRevRank := t.client.ZRevRank("zset", "one")
+ c.Assert(zRevRank.Err(), IsNil)
+ c.Assert(zRevRank.Val(), Equals, int64(2))
+
+ zRevRank = t.client.ZRevRank("zset", "four")
+ c.Assert(zRevRank.Err(), Equals, redis.Nil)
+ c.Assert(zRevRank.Val(), Equals, int64(0))
+}
+
+func (t *RedisTest) TestZScore(c *C) {
+ zAdd := t.client.ZAdd("zset", redis.Z{1.001, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zScore := t.client.ZScore("zset", "one")
+ c.Assert(zScore.Err(), IsNil)
+ c.Assert(zScore.Val(), Equals, float64(1.001))
+}
+
+func (t *RedisTest) TestZUnionStore(c *C) {
+ zAdd := t.client.ZAdd("zset1", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset1", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+
+ zAdd = t.client.ZAdd("zset2", redis.Z{1, "one"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset2", redis.Z{2, "two"})
+ c.Assert(zAdd.Err(), IsNil)
+ zAdd = t.client.ZAdd("zset2", redis.Z{3, "three"})
+ c.Assert(zAdd.Err(), IsNil)
+
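+	// With weights 2 (zset1) and 3 (zset2): one = 1*2 + 1*3 = 5, two = 2*2 + 2*3 = 10, three = 3*3 = 9.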
+ zUnionStore := t.client.ZUnionStore(
+ "out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2")
+ c.Assert(zUnionStore.Err(), IsNil)
+ c.Assert(zUnionStore.Val(), Equals, int64(3))
+
+ val, err := t.client.ZRangeWithScores("out", 0, -1).Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, DeepEquals, []redis.Z{{5, "one"}, {9, "three"}, {10, "two"}})
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestPatternPubSub(c *C) {
+ pubsub := t.client.PubSub()
+ defer func() {
+ c.Assert(pubsub.Close(), IsNil)
+ }()
+
+ c.Assert(pubsub.PSubscribe("mychannel*"), IsNil)
+
+ pub := t.client.Publish("mychannel1", "hello")
+ c.Assert(pub.Err(), IsNil)
+ c.Assert(pub.Val(), Equals, int64(1))
+
+ c.Assert(pubsub.PUnsubscribe("mychannel*"), IsNil)
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err, IsNil)
+ subscr := msgi.(*redis.Subscription)
+ c.Assert(subscr.Kind, Equals, "psubscribe")
+ c.Assert(subscr.Channel, Equals, "mychannel*")
+ c.Assert(subscr.Count, Equals, 1)
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err, IsNil)
+ subscr := msgi.(*redis.PMessage)
+ c.Assert(subscr.Channel, Equals, "mychannel1")
+ c.Assert(subscr.Pattern, Equals, "mychannel*")
+ c.Assert(subscr.Payload, Equals, "hello")
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err, IsNil)
+ subscr := msgi.(*redis.Subscription)
+ c.Assert(subscr.Kind, Equals, "punsubscribe")
+ c.Assert(subscr.Channel, Equals, "mychannel*")
+ c.Assert(subscr.Count, Equals, 0)
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err.(net.Error).Timeout(), Equals, true)
+ c.Assert(msgi, IsNil)
+ }
+}
+
+func (t *RedisTest) TestPubSub(c *C) {
+ pubsub := t.client.PubSub()
+ defer func() {
+ c.Assert(pubsub.Close(), IsNil)
+ }()
+
+ c.Assert(pubsub.Subscribe("mychannel", "mychannel2"), IsNil)
+
+ pub := t.client.Publish("mychannel", "hello")
+ c.Assert(pub.Err(), IsNil)
+ c.Assert(pub.Val(), Equals, int64(1))
+
+ pub = t.client.Publish("mychannel2", "hello2")
+ c.Assert(pub.Err(), IsNil)
+ c.Assert(pub.Val(), Equals, int64(1))
+
+ c.Assert(pubsub.Unsubscribe("mychannel", "mychannel2"), IsNil)
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err, IsNil)
+ subscr := msgi.(*redis.Subscription)
+ c.Assert(subscr.Kind, Equals, "subscribe")
+ c.Assert(subscr.Channel, Equals, "mychannel")
+ c.Assert(subscr.Count, Equals, 1)
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err, IsNil)
+ subscr := msgi.(*redis.Subscription)
+ c.Assert(subscr.Kind, Equals, "subscribe")
+ c.Assert(subscr.Channel, Equals, "mychannel2")
+ c.Assert(subscr.Count, Equals, 2)
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err, IsNil)
+ subscr := msgi.(*redis.Message)
+ c.Assert(subscr.Channel, Equals, "mychannel")
+ c.Assert(subscr.Payload, Equals, "hello")
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err, IsNil)
+ msg := msgi.(*redis.Message)
+ c.Assert(msg.Channel, Equals, "mychannel2")
+ c.Assert(msg.Payload, Equals, "hello2")
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err, IsNil)
+ subscr := msgi.(*redis.Subscription)
+ c.Assert(subscr.Kind, Equals, "unsubscribe")
+ c.Assert(subscr.Channel, Equals, "mychannel")
+ c.Assert(subscr.Count, Equals, 1)
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err, IsNil)
+ subscr := msgi.(*redis.Subscription)
+ c.Assert(subscr.Kind, Equals, "unsubscribe")
+ c.Assert(subscr.Channel, Equals, "mychannel2")
+ c.Assert(subscr.Count, Equals, 0)
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ c.Assert(err.(net.Error).Timeout(), Equals, true)
+ c.Assert(msgi, IsNil)
+ }
+}
+
+func (t *RedisTest) TestPubSubChannels(c *C) {
+ channels, err := t.client.PubSubChannels("mychannel*").Result()
+ c.Assert(err, IsNil)
+ c.Assert(channels, HasLen, 0)
+ c.Assert(channels, Not(IsNil))
+
+ pubsub := t.client.PubSub()
+ defer pubsub.Close()
+
+ c.Assert(pubsub.Subscribe("mychannel", "mychannel2"), IsNil)
+
+ channels, err = t.client.PubSubChannels("mychannel*").Result()
+ c.Assert(err, IsNil)
+ c.Assert(sortStrings(channels), DeepEquals, []string{"mychannel", "mychannel2"})
+
+ channels, err = t.client.PubSubChannels("").Result()
+ c.Assert(err, IsNil)
+ c.Assert(channels, HasLen, 0)
+
+ channels, err = t.client.PubSubChannels("*").Result()
+ c.Assert(err, IsNil)
+ c.Assert(len(channels) >= 2, Equals, true)
+}
+
+func (t *RedisTest) TestPubSubNumSub(c *C) {
+ pubsub := t.client.PubSub()
+ defer pubsub.Close()
+
+ c.Assert(pubsub.Subscribe("mychannel", "mychannel2"), IsNil)
+
+ channels, err := t.client.PubSubNumSub("mychannel", "mychannel2", "mychannel3").Result()
+ c.Assert(err, IsNil)
+ c.Assert(
+ channels,
+ DeepEquals,
+ []interface{}{"mychannel", int64(1), "mychannel2", int64(1), "mychannel3", int64(0)},
+ )
+}
+
+func (t *RedisTest) TestPubSubNumPat(c *C) {
+ num, err := t.client.PubSubNumPat().Result()
+ c.Assert(err, IsNil)
+ c.Assert(num, Equals, int64(0))
+
+ pubsub := t.client.PubSub()
+ defer pubsub.Close()
+
+ c.Assert(pubsub.PSubscribe("mychannel*"), IsNil)
+
+ num, err = t.client.PubSubNumPat().Result()
+ c.Assert(err, IsNil)
+ c.Assert(num, Equals, int64(1))
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestPipeline(c *C) {
+ set := t.client.Set("key2", "hello2")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ pipeline := t.client.Pipeline()
+ defer func() {
+ c.Assert(pipeline.Close(), IsNil)
+ }()
+
+ set = pipeline.Set("key1", "hello1")
+ get := pipeline.Get("key2")
+ incr := pipeline.Incr("key3")
+ getNil := pipeline.Get("key4")
+
+ cmds, err := pipeline.Exec()
+ c.Assert(err, Equals, redis.Nil)
+ c.Assert(cmds, HasLen, 4)
+
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello2")
+
+ c.Assert(incr.Err(), IsNil)
+ c.Assert(incr.Val(), Equals, int64(1))
+
+ c.Assert(getNil.Err(), Equals, redis.Nil)
+ c.Assert(getNil.Val(), Equals, "")
+}
+
+func (t *RedisTest) TestPipelineDiscardQueued(c *C) {
+ pipeline := t.client.Pipeline()
+
+ pipeline.Get("key")
+ pipeline.Discard()
+ cmds, err := pipeline.Exec()
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 0)
+
+ c.Assert(pipeline.Close(), IsNil)
+}
+
+func (t *RedisTest) TestPipelined(c *C) {
+ var get *redis.StringCmd
+ cmds, err := t.client.Pipelined(func(pipe *redis.Pipeline) error {
+ get = pipe.Get("foo")
+ return nil
+ })
+ c.Assert(err, Equals, redis.Nil)
+ c.Assert(cmds, HasLen, 1)
+ c.Assert(cmds[0], Equals, get)
+ c.Assert(get.Err(), Equals, redis.Nil)
+ c.Assert(get.Val(), Equals, "")
+}
+
+func (t *RedisTest) TestPipelineErrValNotSet(c *C) {
+ pipeline := t.client.Pipeline()
+ defer func() {
+ c.Assert(pipeline.Close(), IsNil)
+ }()
+
+ get := pipeline.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "")
+}
+
+func (t *RedisTest) TestPipelineRunQueuedOnEmptyQueue(c *C) {
+ pipeline := t.client.Pipeline()
+ defer func() {
+ c.Assert(pipeline.Close(), IsNil)
+ }()
+
+ cmds, err := pipeline.Exec()
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 0)
+}
+
+// TODO: make thread safe?
+func (t *RedisTest) TestPipelineIncr(c *C) {
+ const N = 20000
+ key := "TestPipelineIncr"
+
+ pipeline := t.client.Pipeline()
+
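+	// Commands are only queued client side; Exec below sends them in a single round trip.
+	// Nothing here runs in a goroutine, so the WaitGroup adds no concurrency.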
+ wg := &sync.WaitGroup{}
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ pipeline.Incr(key)
+ wg.Done()
+ }
+ wg.Wait()
+
+ cmds, err := pipeline.Exec()
+ c.Assert(err, IsNil)
+	c.Assert(len(cmds), Equals, N)
+ for _, cmd := range cmds {
+ if cmd.Err() != nil {
+ c.Errorf("got %v, expected nil", cmd.Err())
+ }
+ }
+
+ get := t.client.Get(key)
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, strconv.Itoa(N))
+
+ c.Assert(pipeline.Close(), IsNil)
+}
+
+func (t *RedisTest) TestPipelineEcho(c *C) {
+ const N = 1000
+
+ wg := &sync.WaitGroup{}
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ go func(i int) {
+ pipeline := t.client.Pipeline()
+
+ msg1 := "echo" + strconv.Itoa(i)
+ msg2 := "echo" + strconv.Itoa(i+1)
+
+ echo1 := pipeline.Echo(msg1)
+ echo2 := pipeline.Echo(msg2)
+
+ cmds, err := pipeline.Exec()
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 2)
+
+ c.Assert(echo1.Err(), IsNil)
+ c.Assert(echo1.Val(), Equals, msg1)
+
+ c.Assert(echo2.Err(), IsNil)
+ c.Assert(echo2.Val(), Equals, msg2)
+
+ c.Assert(pipeline.Close(), IsNil)
+
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestMultiExec(c *C) {
+ multi := t.client.Multi()
+ defer func() {
+ c.Assert(multi.Close(), IsNil)
+ }()
+
+ var (
+ set *redis.StatusCmd
+ get *redis.StringCmd
+ )
+ cmds, err := multi.Exec(func() error {
+ set = multi.Set("key", "hello")
+ get = multi.Get("key")
+ return nil
+ })
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 2)
+
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello")
+}
+
+func (t *RedisTest) TestMultiExecDiscard(c *C) {
+ multi := t.client.Multi()
+ defer func() {
+ c.Assert(multi.Close(), IsNil)
+ }()
+
+ cmds, err := multi.Exec(func() error {
+ multi.Set("key1", "hello1")
+ multi.Discard()
+ multi.Set("key2", "hello2")
+ return nil
+ })
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 1)
+
+ get := t.client.Get("key1")
+ c.Assert(get.Err(), Equals, redis.Nil)
+ c.Assert(get.Val(), Equals, "")
+
+ get = t.client.Get("key2")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "hello2")
+}
+
+func (t *RedisTest) TestMultiExecEmpty(c *C) {
+ multi := t.client.Multi()
+ defer func() {
+ c.Assert(multi.Close(), IsNil)
+ }()
+
+ cmds, err := multi.Exec(func() error { return nil })
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 0)
+
+ ping := multi.Ping()
+ c.Check(ping.Err(), IsNil)
+ c.Check(ping.Val(), Equals, "PONG")
+}
+
+func (t *RedisTest) TestMultiExecOnEmptyQueue(c *C) {
+ multi := t.client.Multi()
+ defer func() {
+ c.Assert(multi.Close(), IsNil)
+ }()
+
+ cmds, err := multi.Exec(func() error { return nil })
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 0)
+}
+
+func (t *RedisTest) TestMultiExecIncr(c *C) {
+ multi := t.client.Multi()
+ defer func() {
+ c.Assert(multi.Close(), IsNil)
+ }()
+
+ cmds, err := multi.Exec(func() error {
+ for i := int64(0); i < 20000; i++ {
+ multi.Incr("key")
+ }
+ return nil
+ })
+ c.Assert(err, IsNil)
+ c.Assert(len(cmds), Equals, 20000)
+ for _, cmd := range cmds {
+ if cmd.Err() != nil {
+ c.Errorf("got %v, expected nil", cmd.Err())
+ }
+ }
+
+ get := t.client.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Equals, "20000")
+}
+
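+// transactionalIncr increments "key" inside a WATCH/MULTI/EXEC transaction; callers
+// retry on redis.TxFailedErr when a concurrent write invalidates the watched key.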
+func (t *RedisTest) transactionalIncr(c *C) ([]redis.Cmder, error) {
+ multi := t.client.Multi()
+ defer func() {
+ c.Assert(multi.Close(), IsNil)
+ }()
+
+ watch := multi.Watch("key")
+ c.Assert(watch.Err(), IsNil)
+ c.Assert(watch.Val(), Equals, "OK")
+
+ get := multi.Get("key")
+ c.Assert(get.Err(), IsNil)
+ c.Assert(get.Val(), Not(Equals), redis.Nil)
+
+ v, err := strconv.ParseInt(get.Val(), 10, 64)
+ c.Assert(err, IsNil)
+
+ return multi.Exec(func() error {
+ multi.Set("key", strconv.FormatInt(v+1, 10))
+ return nil
+ })
+}
+
+func (t *RedisTest) TestWatchUnwatch(c *C) {
+ var n = 10000
+ if testing.Short() {
+ n = 1000
+ }
+
+ set := t.client.Set("key", "0")
+ c.Assert(set.Err(), IsNil)
+
+ wg := &sync.WaitGroup{}
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ cmds, err := t.transactionalIncr(c)
+ if err == redis.TxFailedErr {
+ continue
+ }
+ c.Assert(err, IsNil)
+ c.Assert(cmds, HasLen, 1)
+ c.Assert(cmds[0].Err(), IsNil)
+ break
+ }
+ }()
+ }
+ wg.Wait()
+
+ val, err := t.client.Get("key").Int64()
+ c.Assert(err, IsNil)
+ c.Assert(val, Equals, int64(n))
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestRaceEcho(c *C) {
+ var n = 10000
+ if testing.Short() {
+ n = 1000
+ }
+
+ wg := &sync.WaitGroup{}
+ wg.Add(n)
+ for i := 0; i < n; i++ {
+ go func(i int) {
+ msg := "echo" + strconv.Itoa(i)
+ echo := t.client.Echo(msg)
+ c.Assert(echo.Err(), IsNil)
+ c.Assert(echo.Val(), Equals, msg)
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+}
+
+func (t *RedisTest) TestRaceIncr(c *C) {
+ var n = 10000
+ if testing.Short() {
+ n = 1000
+ }
+
+ wg := &sync.WaitGroup{}
+ wg.Add(n)
+ for i := 0; i < n; i++ {
+ go func() {
+ incr := t.client.Incr("TestRaceIncr")
+ if err := incr.Err(); err != nil {
+ panic(err)
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ val, err := t.client.Get("TestRaceIncr").Result()
+ c.Assert(err, IsNil)
+ c.Assert(val, Equals, strconv.Itoa(n))
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestCmdBgRewriteAOF(c *C) {
+ r := t.client.BgRewriteAOF()
+ c.Assert(r.Err(), IsNil)
+ c.Assert(r.Val(), Equals, "Background append only file rewriting started")
+}
+
+func (t *RedisTest) TestCmdBgSave(c *C) {
+ // workaround for "ERR Can't BGSAVE while AOF log rewriting is in progress"
+ time.Sleep(time.Second)
+
+ r := t.client.BgSave()
+ c.Assert(r.Err(), IsNil)
+ c.Assert(r.Val(), Equals, "Background saving started")
+}
+
+func (t *RedisTest) TestCmdClientKill(c *C) {
+ r := t.client.ClientKill("1.1.1.1:1111")
+ c.Assert(r.Err(), ErrorMatches, "ERR No such client")
+ c.Assert(r.Val(), Equals, "")
+}
+
+func (t *RedisTest) TestCmdConfigGet(c *C) {
+ r := t.client.ConfigGet("*")
+ c.Assert(r.Err(), IsNil)
+ c.Assert(len(r.Val()) > 0, Equals, true)
+}
+
+func (t *RedisTest) TestCmdConfigResetStat(c *C) {
+ r := t.client.ConfigResetStat()
+ c.Assert(r.Err(), IsNil)
+ c.Assert(r.Val(), Equals, "OK")
+}
+
+func (t *RedisTest) TestCmdConfigSet(c *C) {
+ configGet := t.client.ConfigGet("maxmemory")
+ c.Assert(configGet.Err(), IsNil)
+ c.Assert(configGet.Val(), HasLen, 2)
+ c.Assert(configGet.Val()[0], Equals, "maxmemory")
+
+ configSet := t.client.ConfigSet("maxmemory", configGet.Val()[1].(string))
+ c.Assert(configSet.Err(), IsNil)
+ c.Assert(configSet.Val(), Equals, "OK")
+}
+
+func (t *RedisTest) TestCmdDbSize(c *C) {
+ dbSize := t.client.DbSize()
+ c.Assert(dbSize.Err(), IsNil)
+ c.Assert(dbSize.Val(), Equals, int64(0))
+}
+
+func (t *RedisTest) TestCmdFlushAll(c *C) {
+ // TODO
+}
+
+func (t *RedisTest) TestCmdFlushDb(c *C) {
+ // TODO
+}
+
+func (t *RedisTest) TestCmdInfo(c *C) {
+ info := t.client.Info()
+ c.Assert(info.Err(), IsNil)
+ c.Assert(info.Val(), Not(Equals), "")
+}
+
+func (t *RedisTest) TestCmdLastSave(c *C) {
+ lastSave := t.client.LastSave()
+ c.Assert(lastSave.Err(), IsNil)
+ c.Assert(lastSave.Val(), Not(Equals), 0)
+}
+
+func (t *RedisTest) TestCmdSave(c *C) {
+ save := t.client.Save()
+ c.Assert(save.Err(), IsNil)
+ c.Assert(save.Val(), Equals, "OK")
+}
+
+func (t *RedisTest) TestSlaveOf(c *C) {
+ slaveOf := t.client.SlaveOf("localhost", "8888")
+ c.Assert(slaveOf.Err(), IsNil)
+ c.Assert(slaveOf.Val(), Equals, "OK")
+
+ slaveOf = t.client.SlaveOf("NO", "ONE")
+ c.Assert(slaveOf.Err(), IsNil)
+ c.Assert(slaveOf.Val(), Equals, "OK")
+}
+
+func (t *RedisTest) TestTime(c *C) {
+ time := t.client.Time()
+ c.Assert(time.Err(), IsNil)
+ c.Assert(time.Val(), HasLen, 2)
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestScriptingEval(c *C) {
+ eval := t.client.Eval(
+ "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}",
+ []string{"key1", "key2"},
+ []string{"first", "second"},
+ )
+ c.Assert(eval.Err(), IsNil)
+ c.Assert(eval.Val(), DeepEquals, []interface{}{"key1", "key2", "first", "second"})
+
+ eval = t.client.Eval(
+ "return redis.call('set',KEYS[1],'bar')",
+ []string{"foo"},
+ []string{},
+ )
+ c.Assert(eval.Err(), IsNil)
+ c.Assert(eval.Val(), Equals, "OK")
+
+ eval = t.client.Eval("return 10", []string{}, []string{})
+ c.Assert(eval.Err(), IsNil)
+ c.Assert(eval.Val(), Equals, int64(10))
+
+ eval = t.client.Eval("return {1,2,{3,'Hello World!'}}", []string{}, []string{})
+ c.Assert(eval.Err(), IsNil)
+	// Compare the formatted value: the nested reply values come back as int64,
+	// so spelling out the expected []interface{} literal would be clumsy.
+ c.Assert(
+ fmt.Sprintf("%#v", eval.Val()),
+ Equals,
+ `[]interface {}{1, 2, []interface {}{3, "Hello World!"}}`,
+ )
+}
+
+func (t *RedisTest) TestScriptingEvalSha(c *C) {
+ set := t.client.Set("foo", "bar")
+ c.Assert(set.Err(), IsNil)
+ c.Assert(set.Val(), Equals, "OK")
+
+ eval := t.client.Eval("return redis.call('get','foo')", nil, nil)
+ c.Assert(eval.Err(), IsNil)
+ c.Assert(eval.Val(), Equals, "bar")
+
+ evalSha := t.client.EvalSha("6b1bf486c81ceb7edf3c093f4c48582e38c0e791", nil, nil)
+ c.Assert(evalSha.Err(), IsNil)
+ c.Assert(evalSha.Val(), Equals, "bar")
+
+ evalSha = t.client.EvalSha("ffffffffffffffffffffffffffffffffffffffff", nil, nil)
+ c.Assert(evalSha.Err(), ErrorMatches, "NOSCRIPT No matching script. Please use EVAL.")
+ c.Assert(evalSha.Val(), Equals, nil)
+}
+
+func (t *RedisTest) TestScriptingScriptExists(c *C) {
+ scriptLoad := t.client.ScriptLoad("return 1")
+ c.Assert(scriptLoad.Err(), IsNil)
+ c.Assert(scriptLoad.Val(), Equals, "e0e1f9fabfc9d4800c877a703b823ac0578ff8db")
+
+ scriptExists := t.client.ScriptExists(
+ "e0e1f9fabfc9d4800c877a703b823ac0578ff8db",
+ "ffffffffffffffffffffffffffffffffffffffff",
+ )
+ c.Assert(scriptExists.Err(), IsNil)
+ c.Assert(scriptExists.Val(), DeepEquals, []bool{true, false})
+}
+
+func (t *RedisTest) TestScriptingScriptFlush(c *C) {
+ scriptFlush := t.client.ScriptFlush()
+ c.Assert(scriptFlush.Err(), IsNil)
+ c.Assert(scriptFlush.Val(), Equals, "OK")
+}
+
+func (t *RedisTest) TestScriptingScriptKill(c *C) {
+ scriptKill := t.client.ScriptKill()
+ c.Assert(scriptKill.Err(), ErrorMatches, ".*No scripts in execution right now.")
+ c.Assert(scriptKill.Val(), Equals, "")
+}
+
+func (t *RedisTest) TestScriptingScriptLoad(c *C) {
+ scriptLoad := t.client.ScriptLoad("return redis.call('get','foo')")
+ c.Assert(scriptLoad.Err(), IsNil)
+ c.Assert(scriptLoad.Val(), Equals, "6b1bf486c81ceb7edf3c093f4c48582e38c0e791")
+}
+
+func (t *RedisTest) TestScriptingNewScript(c *C) {
+ s := redis.NewScript("return 1")
+ run := s.Run(t.client, nil, nil)
+ c.Assert(run.Err(), IsNil)
+ c.Assert(run.Val(), Equals, int64(1))
+}
+
+func (t *RedisTest) TestScriptingEvalAndPipeline(c *C) {
+ pipeline := t.client.Pipeline()
+ s := redis.NewScript("return 1")
+ run := s.Eval(pipeline, nil, nil)
+ _, err := pipeline.Exec()
+ c.Assert(err, IsNil)
+ c.Assert(run.Err(), IsNil)
+ c.Assert(run.Val(), Equals, int64(1))
+}
+
+func (t *RedisTest) TestScriptingEvalShaAndPipeline(c *C) {
+ s := redis.NewScript("return 1")
+ c.Assert(s.Load(t.client).Err(), IsNil)
+
+ pipeline := t.client.Pipeline()
+ run := s.Eval(pipeline, nil, nil)
+ _, err := pipeline.Exec()
+ c.Assert(err, IsNil)
+ c.Assert(run.Err(), IsNil)
+ c.Assert(run.Val(), Equals, int64(1))
+}
+
+//------------------------------------------------------------------------------
+
+func (t *RedisTest) TestCmdDebugObject(c *C) {
+ {
+ debug := t.client.DebugObject("foo")
+ c.Assert(debug.Err(), Not(IsNil))
+ c.Assert(debug.Err().Error(), Equals, "ERR no such key")
+ }
+
+ {
+ t.client.Set("foo", "bar")
+ debug := t.client.DebugObject("foo")
+ c.Assert(debug.Err(), IsNil)
+ c.Assert(debug.Val(), FitsTypeOf, "")
+ c.Assert(debug.Val(), Not(Equals), "")
+ }
+}
+
+//------------------------------------------------------------------------------
+
+func BenchmarkRedisPing(b *testing.B) {
+ b.StopTimer()
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if err := client.Ping().Err(); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func BenchmarkRedisSet(b *testing.B) {
+ b.StopTimer()
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if err := client.Set("key", "hello").Err(); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func BenchmarkRedisGetNil(b *testing.B) {
+ b.StopTimer()
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+ if err := client.FlushDb().Err(); err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if err := client.Get("key").Err(); err != redis.Nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkRedisGet(b *testing.B) {
+ b.StopTimer()
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+ if err := client.Set("key", "hello").Err(); err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if err := client.Get("key").Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkRedisMGet(b *testing.B) {
+ b.StopTimer()
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+ if err := client.MSet("key1", "hello1", "key2", "hello2").Err(); err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if err := client.MGet("key1", "key2").Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkSetExpire(b *testing.B) {
+ b.StopTimer()
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if err := client.Set("key", "hello").Err(); err != nil {
+ b.Fatal(err)
+ }
+ if err := client.Expire("key", time.Second).Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkPipeline(b *testing.B) {
+ b.StopTimer()
+ client := redis.NewTCPClient(&redis.Options{
+ Addr: redisAddr,
+ })
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, err := client.Pipelined(func(pipe *redis.Pipeline) error {
+ pipe.Set("key", "hello")
+ pipe.Expire("key", time.Second)
+ return nil
+ })
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/script.go b/Godeps/_workspace/src/gopkg.in/redis.v2/script.go
new file mode 100644
index 000000000..96c35f514
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/script.go
@@ -0,0 +1,52 @@
+package redis
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
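+// scripter is the minimal command set needed to load, check for and run Lua
+// scripts. It is satisfied by Client and Pipeline.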
+type scripter interface {
+ Eval(script string, keys []string, args []string) *Cmd
+ EvalSha(sha1 string, keys []string, args []string) *Cmd
+ ScriptExists(scripts ...string) *BoolSliceCmd
+ ScriptLoad(script string) *StringCmd
+}
+
+type Script struct {
+ src, hash string
+}
+
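+// NewScript wraps a Lua script together with its SHA1 digest so it can later
+// be sent with EVALSHA instead of re-transmitting the source.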
+func NewScript(src string) *Script {
+ h := sha1.New()
+ io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Load(c scripter) *StringCmd {
+ return c.ScriptLoad(s.src)
+}
+
+func (s *Script) Exists(c scripter) *BoolSliceCmd {
+	// SCRIPT EXISTS expects SHA1 digests, so check the precomputed hash rather than the source.
+	return c.ScriptExists(s.hash)
+}
+
+func (s *Script) Eval(c scripter, keys []string, args []string) *Cmd {
+ return c.Eval(s.src, keys, args)
+}
+
+func (s *Script) EvalSha(c scripter, keys []string, args []string) *Cmd {
+ return c.EvalSha(s.hash, keys, args)
+}
+
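+// Run first tries EVALSHA and falls back to EVAL when the server replies with
+// NOSCRIPT, i.e. when the script is not yet cached on the server.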
+func (s *Script) Run(c *Client, keys []string, args []string) *Cmd {
+ r := s.EvalSha(c, keys, args)
+ if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ return s.Eval(c, keys, args)
+ }
+ return r
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go
new file mode 100644
index 000000000..d3ffeca9a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go
@@ -0,0 +1,291 @@
+package redis
+
+import (
+ "errors"
+ "log"
+ "net"
+ "strings"
+ "sync"
+ "time"
+)
+
+//------------------------------------------------------------------------------
+
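+// FailoverOptions configure a sentinel-backed client: the monitored master
+// name, the list of sentinel addresses and the usual connection settings.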
+type FailoverOptions struct {
+ MasterName string
+ SentinelAddrs []string
+
+ Password string
+ DB int64
+
+ PoolSize int
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ IdleTimeout time.Duration
+}
+
+func (opt *FailoverOptions) getPoolSize() int {
+ if opt.PoolSize == 0 {
+ return 10
+ }
+ return opt.PoolSize
+}
+
+func (opt *FailoverOptions) getDialTimeout() time.Duration {
+ if opt.DialTimeout == 0 {
+ return 5 * time.Second
+ }
+ return opt.DialTimeout
+}
+
+func (opt *FailoverOptions) options() *options {
+ return &options{
+ DB: opt.DB,
+ Password: opt.Password,
+
+ DialTimeout: opt.getDialTimeout(),
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.getPoolSize(),
+ IdleTimeout: opt.IdleTimeout,
+ }
+}
+
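+// NewFailoverClient returns a Client that discovers the current master through
+// the configured sentinels and reconnects to the new master after a failover.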
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ opt := failoverOpt.options()
+ failover := &sentinelFailover{
+ masterName: failoverOpt.MasterName,
+ sentinelAddrs: failoverOpt.SentinelAddrs,
+
+ opt: opt,
+ }
+ return &Client{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: failover.Pool(),
+ },
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type sentinelClient struct {
+ *baseClient
+}
+
+func newSentinel(clOpt *Options) *sentinelClient {
+ opt := clOpt.options()
+ opt.Password = ""
+ opt.DB = 0
+ dialer := func() (net.Conn, error) {
+ return net.DialTimeout("tcp", clOpt.Addr, opt.DialTimeout)
+ }
+ return &sentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: newConnPool(newConnFunc(dialer), opt),
+ },
+ }
+}
+
+func (c *sentinelClient) PubSub() *PubSub {
+ return &PubSub{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: newSingleConnPool(c.connPool, false),
+ },
+ }
+}
+
+func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *sentinelClient) Sentinels(name string) *SliceCmd {
+ cmd := NewSliceCmd("SENTINEL", "sentinels", name)
+ c.Process(cmd)
+ return cmd
+}
+
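+// sentinelFailover resolves the master address through sentinels and owns the
+// connection pool that dials whichever master is currently known.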
+type sentinelFailover struct {
+ masterName string
+ sentinelAddrs []string
+
+ opt *options
+
+ pool pool
+ poolOnce sync.Once
+
+ lock sync.RWMutex
+ _sentinel *sentinelClient
+}
+
+func (d *sentinelFailover) dial() (net.Conn, error) {
+ addr, err := d.MasterAddr()
+ if err != nil {
+ return nil, err
+ }
+ return net.DialTimeout("tcp", addr, d.opt.DialTimeout)
+}
+
+func (d *sentinelFailover) Pool() pool {
+ d.poolOnce.Do(func() {
+ d.pool = newConnPool(newConnFunc(d.dial), d.opt)
+ })
+ return d.pool
+}
+
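+// MasterAddr asks the last known good sentinel for the master address and,
+// failing that, walks the configured sentinel addresses in order.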
+func (d *sentinelFailover) MasterAddr() (string, error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+ // Try last working sentinel.
+ if d._sentinel != nil {
+ addr, err := d._sentinel.GetMasterAddrByName(d.masterName).Result()
+ if err != nil {
+ log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
+ d.resetSentinel()
+ } else {
+ addr := net.JoinHostPort(addr[0], addr[1])
+ log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
+ return addr, nil
+ }
+ }
+
+ for i, sentinelAddr := range d.sentinelAddrs {
+ sentinel := newSentinel(&Options{
+ Addr: sentinelAddr,
+
+ DB: d.opt.DB,
+ Password: d.opt.Password,
+
+ DialTimeout: d.opt.DialTimeout,
+ ReadTimeout: d.opt.ReadTimeout,
+ WriteTimeout: d.opt.WriteTimeout,
+
+ PoolSize: d.opt.PoolSize,
+ IdleTimeout: d.opt.IdleTimeout,
+ })
+ masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
+ if err != nil {
+ log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
+ sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]
+
+ d.setSentinel(sentinel)
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels are unreachable")
+}
+
+func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) {
+ d.discoverSentinels(sentinel)
+ d._sentinel = sentinel
+ go d.listen()
+}
+
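+// discoverSentinels asks the given sentinel for its peers and records any
+// sentinel addresses not already known.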
+func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) {
+ sentinels, err := sentinel.Sentinels(d.masterName).Result()
+ if err != nil {
+ log.Printf("redis-sentinel: Sentinels %q failed: %s", d.masterName, err)
+ return
+ }
+	for _, sentinelInfo := range sentinels {
+		vals := sentinelInfo.([]interface{})
+ for i := 0; i < len(vals); i += 2 {
+ key := vals[i].(string)
+ if key == "name" {
+ sentinelAddr := vals[i+1].(string)
+ if !contains(d.sentinelAddrs, sentinelAddr) {
+ log.Printf(
+ "redis-sentinel: discovered new %q sentinel: %s",
+ d.masterName, sentinelAddr,
+ )
+ d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+ }
+}
+
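+// listen subscribes to +switch-master notifications and, when the monitored
+// master changes, closes pooled connections that still point at the old address.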
+func (d *sentinelFailover) listen() {
+ var pubsub *PubSub
+ for {
+ if pubsub == nil {
+ pubsub = d._sentinel.PubSub()
+ if err := pubsub.Subscribe("+switch-master"); err != nil {
+ log.Printf("redis-sentinel: Subscribe failed: %s", err)
+ d.lock.Lock()
+ d.resetSentinel()
+ d.lock.Unlock()
+ return
+ }
+ }
+
+ msgIface, err := pubsub.Receive()
+ if err != nil {
+ log.Printf("redis-sentinel: Receive failed: %s", err)
+ pubsub.Close()
+ return
+ }
+
+ switch msg := msgIface.(type) {
+ case *Message:
+ switch msg.Channel {
+ case "+switch-master":
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != d.masterName {
+ log.Printf("redis-sentinel: ignore new %s addr", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ log.Printf(
+ "redis-sentinel: new %q addr is %s",
+ d.masterName, addr,
+ )
+ d.pool.Filter(func(cn *conn) bool {
+ if cn.RemoteAddr().String() != addr {
+ log.Printf(
+ "redis-sentinel: closing connection to old master %s",
+ cn.RemoteAddr(),
+ )
+ return false
+ }
+ return true
+ })
+ default:
+ log.Printf("redis-sentinel: unsupported message: %s", msg)
+ }
+ case *Subscription:
+ // Ignore.
+ default:
+ log.Printf("redis-sentinel: unsupported message: %s", msgIface)
+ }
+ }
+}
+
+func (d *sentinelFailover) resetSentinel() {
+ d._sentinel.Close()
+ d._sentinel = nil
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go
new file mode 100644
index 000000000..ede59bd51
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go
@@ -0,0 +1,185 @@
+package redis_test
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+ "text/template"
+ "time"
+
+ "gopkg.in/redis.v2"
+)
+
+func startRedis(port string) (*exec.Cmd, error) {
+ cmd := exec.Command("redis-server", "--port", port)
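+	// Flip false to true below to stream the redis-server output while debugging.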
+ if false {
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ }
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+ return cmd, nil
+}
+
+func startRedisSlave(port, slave string) (*exec.Cmd, error) {
+ cmd := exec.Command("redis-server", "--port", port, "--slaveof", "127.0.0.1", slave)
+ if false {
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ }
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+ return cmd, nil
+}
+
+func startRedisSentinel(port, masterName, masterPort string) (*exec.Cmd, error) {
+ dir, err := ioutil.TempDir("", "sentinel")
+ if err != nil {
+ return nil, err
+ }
+
+ sentinelConfFilepath := filepath.Join(dir, "sentinel.conf")
+ tpl, err := template.New("sentinel.conf").Parse(sentinelConf)
+ if err != nil {
+ return nil, err
+ }
+
+ data := struct {
+ Port string
+ MasterName string
+ MasterPort string
+ }{
+ Port: port,
+ MasterName: masterName,
+ MasterPort: masterPort,
+ }
+ if err := writeTemplateToFile(sentinelConfFilepath, tpl, data); err != nil {
+ return nil, err
+ }
+
+ cmd := exec.Command("redis-server", sentinelConfFilepath, "--sentinel")
+ if true {
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ }
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+
+ return cmd, nil
+}
+
+func writeTemplateToFile(path string, t *template.Template, data interface{}) error {
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return t.Execute(f, data)
+}
+
+func TestSentinel(t *testing.T) {
+ masterName := "mymaster"
+ masterPort := "8123"
+ slavePort := "8124"
+ sentinelPort := "8125"
+
+ masterCmd, err := startRedis(masterPort)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer masterCmd.Process.Kill()
+
+ // Wait for master to start.
+ time.Sleep(200 * time.Millisecond)
+
+ master := redis.NewTCPClient(&redis.Options{
+ Addr: ":" + masterPort,
+ })
+ if err := master.Ping().Err(); err != nil {
+ t.Fatal(err)
+ }
+
+ slaveCmd, err := startRedisSlave(slavePort, masterPort)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer slaveCmd.Process.Kill()
+
+ // Wait for slave to start.
+ time.Sleep(200 * time.Millisecond)
+
+ slave := redis.NewTCPClient(&redis.Options{
+ Addr: ":" + slavePort,
+ })
+ if err := slave.Ping().Err(); err != nil {
+ t.Fatal(err)
+ }
+
+ sentinelCmd, err := startRedisSentinel(sentinelPort, masterName, masterPort)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sentinelCmd.Process.Kill()
+
+ // Wait for sentinel to start.
+ time.Sleep(200 * time.Millisecond)
+
+ sentinel := redis.NewTCPClient(&redis.Options{
+ Addr: ":" + sentinelPort,
+ })
+ if err := sentinel.Ping().Err(); err != nil {
+ t.Fatal(err)
+ }
+ defer sentinel.Shutdown()
+
+ client := redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: masterName,
+ SentinelAddrs: []string{":" + sentinelPort},
+ })
+
+ if err := client.Set("foo", "master").Err(); err != nil {
+ t.Fatal(err)
+ }
+
+ val, err := master.Get("foo").Result()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if val != "master" {
+ t.Fatalf(`got %q, expected "master"`, val)
+ }
+
+ // Kill Redis master.
+ if err := masterCmd.Process.Kill(); err != nil {
+ t.Fatal(err)
+ }
+ if err := master.Ping().Err(); err == nil {
+ t.Fatalf("master was not killed")
+ }
+
+ // Wait for Redis sentinel to elect new master.
+ time.Sleep(5 * time.Second)
+
+ // Check that client picked up new master.
+ val, err = client.Get("foo").Result()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if val != "master" {
+ t.Fatalf(`got %q, expected "master"`, val)
+ }
+}
+
+var sentinelConf = `
+port {{ .Port }}
+
+sentinel monitor {{ .MasterName }} 127.0.0.1 {{ .MasterPort }} 1
+sentinel down-after-milliseconds {{ .MasterName }} 1000
+sentinel failover-timeout {{ .MasterName }} 2000
+sentinel parallel-syncs {{ .MasterName }} 1
+`
diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf b/Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf
new file mode 100644
index 000000000..3da90b380
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf
@@ -0,0 +1,6 @@
+port 26379
+
+sentinel monitor master 127.0.0.1 6379 1
+sentinel down-after-milliseconds master 2000
+sentinel failover-timeout master 5000
+sentinel parallel-syncs master 4