author     Christopher Speller <crspeller@gmail.com>  2017-07-31 08:15:23 -0700
committer  GitHub <noreply@github.com>                2017-07-31 08:15:23 -0700
commit     09b49c26ddfdb20ced61e7dfd4192e750ce40449 (patch)
tree       1288d069cc8a199b8eb3b858935dffd377ee3d2d
parent     6f4e38d129ffaf469d40fc8596d3957ee94d21e9 (diff)
download   chat-09b49c26ddfdb20ced61e7dfd4192e750ce40449.tar.gz
           chat-09b49c26ddfdb20ced61e7dfd4192e750ce40449.tar.bz2
           chat-09b49c26ddfdb20ced61e7dfd4192e750ce40449.zip
PLT-5308 Caching layer part 2 (#6973)
* Adding Reaction store cache layer example
* Implementing reaction store in new caching system
* Redis for reaction store
* Adding redis library
* Adding invalidation for DeleteAllWithEmojiName and other minor enhancements
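
For orientation, a minimal sketch of the supplier-chain pattern that the store/ changes below introduce: each supplier implements the same interface, holds a pointer to the next layer, and either answers from its own cache or delegates down the chain to the database layer. The types and the single GetForPost method here are simplified stand-ins for illustration, not the actual Mattermost code.

    package main

    import "fmt"

    // Supplier is a simplified stand-in for LayeredStoreSupplier.
    type Supplier interface {
        SetChainNext(Supplier)
        Next() Supplier
        GetForPost(postId string) []string
    }

    // dbSupplier is the end of the chain and always answers.
    type dbSupplier struct{ next Supplier }

    func (s *dbSupplier) SetChainNext(n Supplier) { s.next = n }
    func (s *dbSupplier) Next() Supplier          { return s.next }
    func (s *dbSupplier) GetForPost(postId string) []string {
        return []string{"smile"} // stand-in for the SQL query
    }

    // cacheSupplier answers from its map when possible, otherwise
    // delegates to the next supplier and stores the result.
    type cacheSupplier struct {
        next  Supplier
        cache map[string][]string
    }

    func (s *cacheSupplier) SetChainNext(n Supplier) { s.next = n }
    func (s *cacheSupplier) Next() Supplier          { return s.next }
    func (s *cacheSupplier) GetForPost(postId string) []string {
        if v, ok := s.cache[postId]; ok {
            return v // cache hit: short-circuit the chain
        }
        v := s.Next().GetForPost(postId) // cache miss: fall through to the next layer
        s.cache[postId] = v
        return v
    }

    func main() {
        cache := &cacheSupplier{cache: map[string][]string{}}
        cache.SetChainNext(&dbSupplier{})
        fmt.Println(cache.GetForPost("post1")) // miss, fills the cache
        fmt.Println(cache.GetForPost("post1")) // served from the cache
    }

In the real code below (store/layered_store.go), the chain head is the local cache supplier by default; when ENABLE_EXPERIMENTAL_REDIS is true, the Redis supplier takes its place in front of the SQL supplier.
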
-rw-r--r--  Makefile  8
-rw-r--r--  app/cluster_handlers.go  5
-rw-r--r--  app/reaction.go  4
-rw-r--r--  app/web_hub.go  17
-rw-r--r--  glide.lock  8
-rw-r--r--  glide.yaml  2
-rw-r--r--  store/layered_store.go  70
-rw-r--r--  store/layered_store_hints.go  20
-rw-r--r--  store/layered_store_supplier.go  26
-rw-r--r--  store/local_cache_supplier.go  104
-rw-r--r--  store/local_cache_supplier_reactions.go  47
-rw-r--r--  store/redis_supplier.go  133
-rw-r--r--  store/sql_reaction_store.go  271
-rw-r--r--  store/sql_supplier.go  18
-rw-r--r--  store/sql_supplier_reactions.go  165
-rw-r--r--  store/store.go  2
-rw-r--r--  utils/lru.go  45
-rw-r--r--  vendor/github.com/go-redis/redis/.gitignore  2
-rw-r--r--  vendor/github.com/go-redis/redis/.travis.yml  20
-rw-r--r--  vendor/github.com/go-redis/redis/LICENSE  25
-rw-r--r--  vendor/github.com/go-redis/redis/Makefile  19
-rw-r--r--  vendor/github.com/go-redis/redis/README.md  141
-rw-r--r--  vendor/github.com/go-redis/redis/bench_test.go  216
-rw-r--r--  vendor/github.com/go-redis/redis/cluster.go  981
-rw-r--r--  vendor/github.com/go-redis/redis/cluster_test.go  740
-rw-r--r--  vendor/github.com/go-redis/redis/command.go  946
-rw-r--r--  vendor/github.com/go-redis/redis/command_test.go  60
-rw-r--r--  vendor/github.com/go-redis/redis/commands.go  2109
-rw-r--r--  vendor/github.com/go-redis/redis/commands_test.go  2938
-rw-r--r--  vendor/github.com/go-redis/redis/doc.go  4
-rw-r--r--  vendor/github.com/go-redis/redis/example_instrumentation_test.go  59
-rw-r--r--  vendor/github.com/go-redis/redis/example_test.go  414
-rw-r--r--  vendor/github.com/go-redis/redis/export_test.go  35
-rw-r--r--  vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go  81
-rw-r--r--  vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash_test.go  110
-rw-r--r--  vendor/github.com/go-redis/redis/internal/errors.go  75
-rw-r--r--  vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go  73
-rw-r--r--  vendor/github.com/go-redis/redis/internal/hashtag/hashtag_test.go  74
-rw-r--r--  vendor/github.com/go-redis/redis/internal/internal.go  23
-rw-r--r--  vendor/github.com/go-redis/redis/internal/internal_test.go  17
-rw-r--r--  vendor/github.com/go-redis/redis/internal/log.go  15
-rw-r--r--  vendor/github.com/go-redis/redis/internal/once.go  60
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/bench_test.go  80
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/conn.go  78
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/main_test.go  35
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/pool.go  367
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/pool_single.go  55
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go  123
-rw-r--r--  vendor/github.com/go-redis/redis/internal/pool/pool_test.go  241
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/proto_test.go  13
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/reader.go  334
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/reader_test.go  87
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/scan.go  131
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/scan_test.go  48
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/write_buffer.go  103
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/write_buffer_test.go  63
-rw-r--r--  vendor/github.com/go-redis/redis/internal/safe.go  11
-rw-r--r--  vendor/github.com/go-redis/redis/internal/unsafe.go  27
-rw-r--r--  vendor/github.com/go-redis/redis/internal/util.go  47
-rw-r--r--  vendor/github.com/go-redis/redis/iterator.go  73
-rw-r--r--  vendor/github.com/go-redis/redis/iterator_test.go  136
-rw-r--r--  vendor/github.com/go-redis/redis/main_test.go  355
-rw-r--r--  vendor/github.com/go-redis/redis/options.go  201
-rw-r--r--  vendor/github.com/go-redis/redis/options_test.go  94
-rw-r--r--  vendor/github.com/go-redis/redis/parser.go  374
-rw-r--r--  vendor/github.com/go-redis/redis/pipeline.go  106
-rw-r--r--  vendor/github.com/go-redis/redis/pipeline_test.go  80
-rw-r--r--  vendor/github.com/go-redis/redis/pool_test.go  141
-rw-r--r--  vendor/github.com/go-redis/redis/pubsub.go  396
-rw-r--r--  vendor/github.com/go-redis/redis/pubsub_test.go  406
-rw-r--r--  vendor/github.com/go-redis/redis/race_test.go  247
-rw-r--r--  vendor/github.com/go-redis/redis/redis.go  436
-rw-r--r--  vendor/github.com/go-redis/redis/redis_context.go  35
-rw-r--r--  vendor/github.com/go-redis/redis/redis_no_context.go  15
-rw-r--r--  vendor/github.com/go-redis/redis/redis_test.go  364
-rw-r--r--  vendor/github.com/go-redis/redis/result.go  140
-rw-r--r--  vendor/github.com/go-redis/redis/ring.go  458
-rw-r--r--  vendor/github.com/go-redis/redis/ring_test.go  193
-rw-r--r--  vendor/github.com/go-redis/redis/script.go  62
-rw-r--r--  vendor/github.com/go-redis/redis/sentinel.go  333
-rw-r--r--  vendor/github.com/go-redis/redis/sentinel_test.go  88
-rw-r--r--  vendor/github.com/go-redis/redis/testdata/redis.conf  10
-rw-r--r--  vendor/github.com/go-redis/redis/tx.go  96
-rw-r--r--  vendor/github.com/go-redis/redis/tx_test.go  151
-rw-r--r--  vendor/github.com/go-redis/redis/universal.go  135
-rw-r--r--  vendor/github.com/go-redis/redis/universal_test.go  41
86 files changed, 16829 insertions, 362 deletions
diff --git a/Makefile b/Makefile
index ca44de85a..194ab3eeb 100644
--- a/Makefile
+++ b/Makefile
@@ -131,6 +131,14 @@ ifeq ($(BUILD_ENTERPRISE_READY),true)
echo restarting mattermost-elasticsearch; \
docker start mattermost-elasticsearch> /dev/null; \
fi
+
+ @if [ $(shell docker ps -a | grep -ci mattermost-redis) -eq 0 ]; then \
+ echo starting mattermost-redis; \
+ docker run --name mattermost-redis -p 6379:6379 -d redis > /dev/null; \
+ elif [ $(shell docker ps | grep -ci mattermost-redis) -eq 0 ]; then \
+ echo restarting mattermost-redis; \
+ docker start mattermost-redis > /dev/null; \
+ fi
endif
stop-docker:
diff --git a/app/cluster_handlers.go b/app/cluster_handlers.go
index deb086c22..892bf00d3 100644
--- a/app/cluster_handlers.go
+++ b/app/cluster_handlers.go
@@ -14,7 +14,6 @@ func RegisterAllClusterMessageHandlers() {
einterfaces.GetClusterInterface().RegisterClusterMessageHandler(model.CLUSTER_EVENT_PUBLISH, ClusterPublishHandler)
einterfaces.GetClusterInterface().RegisterClusterMessageHandler(model.CLUSTER_EVENT_UPDATE_STATUS, ClusterUpdateStatusHandler)
einterfaces.GetClusterInterface().RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_ALL_CACHES, ClusterInvalidateAllCachesHandler)
- einterfaces.GetClusterInterface().RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS, ClusterInvalidateCacheForReactionsHandler)
einterfaces.GetClusterInterface().RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_WEBHOOK, ClusterInvalidateCacheForWebhookHandler)
einterfaces.GetClusterInterface().RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_POSTS, ClusterInvalidateCacheForChannelPostsHandler)
einterfaces.GetClusterInterface().RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS_NOTIFY_PROPS, ClusterInvalidateCacheForChannelMembersNotifyPropHandler)
@@ -40,10 +39,6 @@ func ClusterInvalidateAllCachesHandler(msg *model.ClusterMessage) {
InvalidateAllCachesSkipSend()
}
-func ClusterInvalidateCacheForReactionsHandler(msg *model.ClusterMessage) {
- InvalidateCacheForReactionsSkipClusterSend(msg.Data)
-}
-
func ClusterInvalidateCacheForWebhookHandler(msg *model.ClusterMessage) {
InvalidateCacheForWebhookSkipClusterSend(msg.Data)
}
diff --git a/app/reaction.go b/app/reaction.go
index cc57e1c4c..adb92476f 100644
--- a/app/reaction.go
+++ b/app/reaction.go
@@ -20,8 +20,6 @@ func SaveReactionForPost(reaction *model.Reaction) (*model.Reaction, *model.AppE
go sendReactionEvent(model.WEBSOCKET_EVENT_REACTION_ADDED, reaction, post)
- InvalidateCacheForReactions(reaction.PostId)
-
return reaction, nil
}
}
@@ -44,8 +42,6 @@ func DeleteReactionForPost(reaction *model.Reaction) *model.AppError {
return result.Err
} else {
go sendReactionEvent(model.WEBSOCKET_EVENT_REACTION_REMOVED, reaction, post)
-
- InvalidateCacheForReactions(reaction.PostId)
}
return nil
diff --git a/app/web_hub.go b/app/web_hub.go
index cadad0de4..0af73f39f 100644
--- a/app/web_hub.go
+++ b/app/web_hub.go
@@ -314,23 +314,6 @@ func InvalidateWebConnSessionCacheForUser(userId string) {
}
}
-func InvalidateCacheForReactions(postId string) {
- InvalidateCacheForReactionsSkipClusterSend(postId)
-
- if einterfaces.GetClusterInterface() != nil {
- msg := &model.ClusterMessage{
- Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS,
- SendType: model.CLUSTER_SEND_BEST_EFFORT,
- Data: postId,
- }
- einterfaces.GetClusterInterface().SendClusterMessage(msg)
- }
-}
-
-func InvalidateCacheForReactionsSkipClusterSend(postId string) {
- Srv.Store.Reaction().InvalidateCacheForPost(postId)
-}
-
func (h *Hub) Register(webConn *WebConn) {
h.register <- webConn
diff --git a/glide.lock b/glide.lock
index 7c73afa1e..2a59822c9 100644
--- a/glide.lock
+++ b/glide.lock
@@ -28,6 +28,14 @@ imports:
version: 3d73f4b845efdf9989fffd4b4e562727744a34ba
- name: github.com/go-ldap/ldap
version: 8168ee085ee43257585e50c6441aadf54ecb2c9f
+- name: github.com/go-redis/redis
+ version: 564772f045d89bd55843d7dc8042e0a599401e09
+ subpackages:
+ - internal
+ - internal/consistenthash
+ - internal/hashtag
+ - internal/pool
+ - internal/proto
- name: github.com/go-sql-driver/mysql
version: a0583e0143b1624142adab07e0e97fe106d99561
- name: github.com/golang/freetype
diff --git a/glide.yaml b/glide.yaml
index a015823df..4bfd9e468 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -84,3 +84,5 @@ import:
version: v5.0.43
- package: github.com/mattermost/gorp
version: 995ddf2264c4ad45fbaf342f7500e4787ebae84a
+- package: github.com/go-redis/redis
+ version: v6.5.2
diff --git a/store/layered_store.go b/store/layered_store.go
index ab9859c80..3d3f941e8 100644
--- a/store/layered_store.go
+++ b/store/layered_store.go
@@ -6,38 +6,54 @@ package store
import (
"context"
+ l4g "github.com/alecthomas/log4go"
"github.com/mattermost/platform/model"
)
+const (
+ ENABLE_EXPERIMENTAL_REDIS = false
+)
+
type LayeredStore struct {
- TmpContext context.Context
- ReactionStore ReactionStore
- DatabaseLayer *SqlSupplier
+ TmpContext context.Context
+ ReactionStore ReactionStore
+ DatabaseLayer *SqlSupplier
+ LocalCacheLayer *LocalCacheSupplier
+ RedisLayer *RedisSupplier
+ LayerChainHead LayeredStoreSupplier
}
func NewLayeredStore() Store {
- return &LayeredStore{
- TmpContext: context.TODO(),
- ReactionStore: &LayeredReactionStore{},
- DatabaseLayer: NewSqlSupplier(),
+ store := &LayeredStore{
+ TmpContext: context.TODO(),
+ DatabaseLayer: NewSqlSupplier(),
+ LocalCacheLayer: NewLocalCacheSupplier(),
}
+
+ store.ReactionStore = &LayeredReactionStore{store}
+
+ // Setup the chain
+ if ENABLE_EXPERIMENTAL_REDIS {
+ l4g.Debug("Experimental redis enabled.")
+ store.RedisLayer = NewRedisSupplier()
+ store.RedisLayer.SetChainNext(store.DatabaseLayer)
+ store.LayerChainHead = store.RedisLayer
+ } else {
+ store.LocalCacheLayer.SetChainNext(store.DatabaseLayer)
+ store.LayerChainHead = store.LocalCacheLayer
+ }
+
+ return store
}
-type QueryFunction func(LayeredStoreSupplier) LayeredStoreSupplierResult
+type QueryFunction func(LayeredStoreSupplier) *LayeredStoreSupplierResult
func (s *LayeredStore) RunQuery(queryFunction QueryFunction) StoreChannel {
storeChannel := make(StoreChannel)
go func() {
- finalResult := StoreResult{}
- // Logic for determining what layers to run
- if result := queryFunction(s.DatabaseLayer); result.Err == nil {
- finalResult.Data = result.Result
- } else {
- finalResult.Err = result.Err
- }
-
- storeChannel <- finalResult
+ result := queryFunction(s.LayerChainHead)
+ storeChannel <- result.StoreResult
}()
return storeChannel
@@ -116,7 +132,7 @@ func (s *LayeredStore) FileInfo() FileInfoStore {
}
func (s *LayeredStore) Reaction() ReactionStore {
- return s.DatabaseLayer.Reaction()
+ return s.ReactionStore
}
func (s *LayeredStore) Job() JobStore {
@@ -152,35 +168,25 @@ type LayeredReactionStore struct {
}
func (s *LayeredReactionStore) Save(reaction *model.Reaction) StoreChannel {
- return s.RunQuery(func(supplier LayeredStoreSupplier) LayeredStoreSupplierResult {
+ return s.RunQuery(func(supplier LayeredStoreSupplier) *LayeredStoreSupplierResult {
return supplier.ReactionSave(s.TmpContext, reaction)
})
}
func (s *LayeredReactionStore) Delete(reaction *model.Reaction) StoreChannel {
- return s.RunQuery(func(supplier LayeredStoreSupplier) LayeredStoreSupplierResult {
+ return s.RunQuery(func(supplier LayeredStoreSupplier) *LayeredStoreSupplierResult {
return supplier.ReactionDelete(s.TmpContext, reaction)
})
}
-// TODO: DELETE ME
-func (s *LayeredReactionStore) InvalidateCacheForPost(postId string) {
- return
-}
-
-// TODO: DELETE ME
-func (s *LayeredReactionStore) InvalidateCache() {
- return
-}
-
func (s *LayeredReactionStore) GetForPost(postId string, allowFromCache bool) StoreChannel {
- return s.RunQuery(func(supplier LayeredStoreSupplier) LayeredStoreSupplierResult {
+ return s.RunQuery(func(supplier LayeredStoreSupplier) *LayeredStoreSupplierResult {
return supplier.ReactionGetForPost(s.TmpContext, postId)
})
}
func (s *LayeredReactionStore) DeleteAllWithEmojiName(emojiName string) StoreChannel {
- return s.RunQuery(func(supplier LayeredStoreSupplier) LayeredStoreSupplierResult {
+ return s.RunQuery(func(supplier LayeredStoreSupplier) *LayeredStoreSupplierResult {
return supplier.ReactionDeleteAllWithEmojiName(s.TmpContext, emojiName)
})
}
diff --git a/store/layered_store_hints.go b/store/layered_store_hints.go
index 6154af7c9..064f4f326 100644
--- a/store/layered_store_hints.go
+++ b/store/layered_store_hints.go
@@ -9,3 +9,23 @@ const (
LSH_NO_CACHE LayeredStoreHint = iota
LSH_MASTER_ONLY
)
+
+func hintsContains(hints []LayeredStoreHint, contains LayeredStoreHint) bool {
+ for _, hint := range hints {
+ if hint == contains {
+ return true
+ }
+ }
+ return false
+}
+
+func hintsContainsAny(hints []LayeredStoreHint, contains ...LayeredStoreHint) bool {
+ for _, hint := range hints {
+ for _, hint2 := range contains {
+ if hint == hint2 {
+ return true
+ }
+ }
+ }
+ return false
+}
diff --git a/store/layered_store_supplier.go b/store/layered_store_supplier.go
index 7b7da5710..22c90ab17 100644
--- a/store/layered_store_supplier.go
+++ b/store/layered_store_supplier.go
@@ -6,24 +6,28 @@ package store
import "github.com/mattermost/platform/model"
import "context"
+type ResultHandler func(*StoreResult)
+
type LayeredStoreSupplierResult struct {
- Result StoreResult
- Err *model.AppError
+ StoreResult
}
-func NewSupplierResult() LayeredStoreSupplierResult {
- return LayeredStoreSupplierResult{
- Result: StoreResult{},
- Err: nil,
- }
+func NewSupplierResult() *LayeredStoreSupplierResult {
+ return &LayeredStoreSupplierResult{}
}
type LayeredStoreSupplier interface {
//
+ // Control
+ //
+ SetChainNext(LayeredStoreSupplier)
+ Next() LayeredStoreSupplier
+
+ //
// Reactions
//), hints ...LayeredStoreHint)
- ReactionSave(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) LayeredStoreSupplierResult
- ReactionDelete(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) LayeredStoreSupplierResult
- ReactionGetForPost(ctx context.Context, postId string, hints ...LayeredStoreHint) LayeredStoreSupplierResult
- ReactionDeleteAllWithEmojiName(ctx context.Context, emojiName string, hints ...LayeredStoreHint) LayeredStoreSupplierResult
+ ReactionSave(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) *LayeredStoreSupplierResult
+ ReactionDelete(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) *LayeredStoreSupplierResult
+ ReactionGetForPost(ctx context.Context, postId string, hints ...LayeredStoreHint) *LayeredStoreSupplierResult
+ ReactionDeleteAllWithEmojiName(ctx context.Context, emojiName string, hints ...LayeredStoreHint) *LayeredStoreSupplierResult
}
diff --git a/store/local_cache_supplier.go b/store/local_cache_supplier.go
new file mode 100644
index 000000000..63c050485
--- /dev/null
+++ b/store/local_cache_supplier.go
@@ -0,0 +1,104 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package store
+
+import (
+ "context"
+
+ "github.com/mattermost/platform/einterfaces"
+ "github.com/mattermost/platform/model"
+ "github.com/mattermost/platform/utils"
+)
+
+const (
+ REACTION_CACHE_SIZE = 20000
+ REACTION_CACHE_SEC = 1800 // 30 minutes
+
+ CLEAR_CACHE_MESSAGE_DATA = ""
+)
+
+type LocalCacheSupplier struct {
+ next LayeredStoreSupplier
+ reactionCache *utils.Cache
+}
+
+func NewLocalCacheSupplier() *LocalCacheSupplier {
+ supplier := &LocalCacheSupplier{
+ reactionCache: utils.NewLruWithParams(REACTION_CACHE_SIZE, "Reaction", REACTION_CACHE_SEC, model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS),
+ }
+
+ registerClusterHandlers(supplier)
+
+ return supplier
+}
+
+func registerClusterHandlers(supplier *LocalCacheSupplier) {
+ if cluster := einterfaces.GetClusterInterface(); cluster != nil {
+ cluster.RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS, supplier.handleClusterInvalidateReaction)
+ }
+}
+
+func (s *LocalCacheSupplier) SetChainNext(next LayeredStoreSupplier) {
+ s.next = next
+}
+
+func (s *LocalCacheSupplier) Next() LayeredStoreSupplier {
+ return s.next
+}
+
+func doStandardReadCache(ctx context.Context, cache utils.ObjectCache, key string, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ metrics := einterfaces.GetMetricsInterface()
+
+ if hintsContains(hints, LSH_NO_CACHE) {
+ if metrics != nil {
+ metrics.IncrementMemCacheMissCounter(cache.Name())
+ }
+ return nil
+ }
+
+ if cacheItem, ok := cache.Get(key); ok {
+ if metrics != nil {
+ metrics.IncrementMemCacheHitCounter(cache.Name())
+ }
+ result := NewSupplierResult()
+ result.Data = cacheItem
+ return result
+ }
+
+ if metrics != nil {
+ metrics.IncrementMemCacheMissCounter(cache.Name())
+ }
+
+ return nil
+}
+
+func doStandardAddToCache(ctx context.Context, cache utils.ObjectCache, key string, result *LayeredStoreSupplierResult, hints ...LayeredStoreHint) {
+ if result.Err == nil && result.Data != nil {
+ cache.AddWithDefaultExpires(key, result.Data)
+ }
+}
+
+func doInvalidateCacheCluster(cache utils.ObjectCache, key string) {
+ cache.Remove(key)
+ if einterfaces.GetClusterInterface() != nil {
+ msg := &model.ClusterMessage{
+ Event: cache.GetInvalidateClusterEvent(),
+ SendType: model.CLUSTER_SEND_BEST_EFFORT,
+ Data: key,
+ }
+ einterfaces.GetClusterInterface().SendClusterMessage(msg)
+ }
+}
+
+func doClearCacheCluster(cache utils.ObjectCache) {
+ cache.Purge()
+ if einterfaces.GetClusterInterface() != nil {
+ msg := &model.ClusterMessage{
+ Event: cache.GetInvalidateClusterEvent(),
+ SendType: model.CLUSTER_SEND_BEST_EFFORT,
+ Data: CLEAR_CACHE_MESSAGE_DATA,
+ }
+ einterfaces.GetClusterInterface().SendClusterMessage(msg)
+ }
+}
diff --git a/store/local_cache_supplier_reactions.go b/store/local_cache_supplier_reactions.go
new file mode 100644
index 000000000..7d2c9f065
--- /dev/null
+++ b/store/local_cache_supplier_reactions.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package store
+
+import (
+ "context"
+
+ "github.com/mattermost/platform/model"
+)
+
+func (s *LocalCacheSupplier) handleClusterInvalidateReaction(msg *model.ClusterMessage) {
+ if msg.Data == CLEAR_CACHE_MESSAGE_DATA {
+ s.reactionCache.Purge()
+ } else {
+ s.reactionCache.Remove(msg.Data)
+ }
+}
+
+func (s *LocalCacheSupplier) ReactionSave(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ doInvalidateCacheCluster(s.reactionCache, reaction.PostId)
+ return s.Next().ReactionSave(ctx, reaction, hints...)
+}
+
+func (s *LocalCacheSupplier) ReactionDelete(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ doInvalidateCacheCluster(s.reactionCache, reaction.PostId)
+ return s.Next().ReactionDelete(ctx, reaction, hints...)
+}
+
+func (s *LocalCacheSupplier) ReactionGetForPost(ctx context.Context, postId string, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ if result := doStandardReadCache(ctx, s.reactionCache, postId, hints...); result != nil {
+ return result
+ }
+
+ result := s.Next().ReactionGetForPost(ctx, postId, hints...)
+
+ doStandardAddToCache(ctx, s.reactionCache, postId, result, hints...)
+
+ return result
+}
+
+func (s *LocalCacheSupplier) ReactionDeleteAllWithEmojiName(ctx context.Context, emojiName string, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ // This could be improved. Right now we just clear the whole
+ // cache because we don't have a way to find what post Ids have this emoji name.
+ doClearCacheCluster(s.reactionCache)
+ return s.Next().ReactionDeleteAllWithEmojiName(ctx, emojiName, hints...)
+}
diff --git a/store/redis_supplier.go b/store/redis_supplier.go
new file mode 100644
index 000000000..eede36ef2
--- /dev/null
+++ b/store/redis_supplier.go
@@ -0,0 +1,133 @@
+// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package store
+
+import (
+ "bytes"
+ "context"
+ "encoding/gob"
+
+ "time"
+
+ l4g "github.com/alecthomas/log4go"
+ "github.com/go-redis/redis"
+ "github.com/mattermost/platform/model"
+)
+
+const REDIS_EXPIRY_TIME = 30 * time.Minute
+
+type RedisSupplier struct {
+ next LayeredStoreSupplier
+ client *redis.Client
+}
+
+func GetBytes(key interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ err := enc.Encode(key)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func DecodeBytes(input []byte, thing interface{}) error {
+ dec := gob.NewDecoder(bytes.NewReader(input))
+ err := dec.Decode(thing)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func NewRedisSupplier() *RedisSupplier {
+ supplier := &RedisSupplier{}
+
+ supplier.client = redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "",
+ DB: 0,
+ })
+
+ if _, err := supplier.client.Ping().Result(); err != nil {
+ l4g.Error("Unable to ping redis server: " + err.Error())
+ return nil
+ }
+
+ return supplier
+}
+
+func (s *RedisSupplier) save(key string, value interface{}, expiry time.Duration) error {
+ if bytes, err := GetBytes(value); err != nil {
+ return err
+ } else {
+ if err := s.client.Set(key, bytes, expiry).Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *RedisSupplier) load(key string, writeTo interface{}) (bool, error) {
+ if data, err := s.client.Get(key).Bytes(); err != nil {
+ if err == redis.Nil {
+ return false, nil
+ } else {
+ return false, err
+ }
+ } else {
+ if err := DecodeBytes(data, writeTo); err != nil {
+ return false, err
+ }
+ }
+ return true, nil
+}
+
+func (s *RedisSupplier) SetChainNext(next LayeredStoreSupplier) {
+ s.next = next
+}
+
+func (s *RedisSupplier) Next() LayeredStoreSupplier {
+ return s.next
+}
+
+func (s *RedisSupplier) ReactionSave(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ if err := s.client.Del("reactions:" + reaction.PostId).Err(); err != nil {
+ l4g.Error("Redis failed to remove key reactions:" + reaction.PostId + " Error: " + err.Error())
+ }
+ return s.Next().ReactionSave(ctx, reaction, hints...)
+}
+
+func (s *RedisSupplier) ReactionDelete(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ if err := s.client.Del("reactions:" + reaction.PostId).Err(); err != nil {
+ l4g.Error("Redis failed to remove key reactions:" + reaction.PostId + " Error: " + err.Error())
+ }
+ return s.Next().ReactionDelete(ctx, reaction, hints...)
+}
+
+func (s *RedisSupplier) ReactionGetForPost(ctx context.Context, postId string, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ var resultdata []*model.Reaction
+ found, err := s.load("reactions:"+postId, &resultdata)
+ if found {
+ result := NewSupplierResult()
+ result.Data = resultdata
+ return result
+ }
+ if err != nil {
+ l4g.Error("Redis encountered an error on read: " + err.Error())
+ }
+
+ result := s.Next().ReactionGetForPost(ctx, postId, hints...)
+
+ if err := s.save("reactions:"+postId, result.Data, REDIS_EXPIRY_TIME); err != nil {
+ l4g.Error("Redis encountered and error on write: " + err.Error())
+ }
+
+ return result
+}
+
+func (s *RedisSupplier) ReactionDeleteAllWithEmojiName(ctx context.Context, emojiName string, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ // Ignoring this. It's probably OK to have the emoji slowly expire from Redis.
+ return s.Next().ReactionDeleteAllWithEmojiName(ctx, emojiName, hints...)
+}
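
As an aside on the GetBytes/DecodeBytes helpers in the file above: values are stored in Redis as gob-encoded blobs. A self-contained sketch of that round-trip follows; the Reaction struct here is a simplified stand-in for the model.Reaction type, not the real definition.

    package main

    import (
        "bytes"
        "encoding/gob"
        "fmt"
    )

    // Reaction is a simplified stand-in for model.Reaction.
    type Reaction struct {
        UserId, PostId, EmojiName string
    }

    func main() {
        in := []*Reaction{{UserId: "u1", PostId: "p1", EmojiName: "smile"}}

        // Encode to bytes, as GetBytes does before the value is written to Redis.
        var buf bytes.Buffer
        if err := gob.NewEncoder(&buf).Encode(in); err != nil {
            panic(err)
        }

        // Decode back into a slice, as DecodeBytes does after reading from Redis.
        var out []*Reaction
        if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Println(out[0].EmojiName) // smile
    }

Gob serializes only exported fields, which is why the helpers can work directly on the model types.
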
diff --git a/store/sql_reaction_store.go b/store/sql_reaction_store.go
deleted file mode 100644
index 87845421e..000000000
--- a/store/sql_reaction_store.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
-// See License.txt for license information.
-
-package store
-
-import (
- "github.com/mattermost/gorp"
- "github.com/mattermost/platform/einterfaces"
- "github.com/mattermost/platform/model"
- "github.com/mattermost/platform/utils"
-
- l4g "github.com/alecthomas/log4go"
-)
-
-const (
- REACTION_CACHE_SIZE = 20000
- REACTION_CACHE_SEC = 1800 // 30 minutes
-)
-
-var reactionCache *utils.Cache = utils.NewLru(REACTION_CACHE_SIZE)
-
-type SqlReactionStore struct {
- SqlStore
-}
-
-func NewSqlReactionStore(sqlStore SqlStore) ReactionStore {
- s := &SqlReactionStore{sqlStore}
-
- for _, db := range sqlStore.GetAllConns() {
- table := db.AddTableWithName(model.Reaction{}, "Reactions").SetKeys(false, "UserId", "PostId", "EmojiName")
- table.ColMap("UserId").SetMaxSize(26)
- table.ColMap("PostId").SetMaxSize(26)
- table.ColMap("EmojiName").SetMaxSize(64)
- }
-
- return s
-}
-
-func (s SqlReactionStore) CreateIndexesIfNotExists() {
- s.CreateIndexIfNotExists("idx_reactions_post_id", "Reactions", "PostId")
- s.CreateIndexIfNotExists("idx_reactions_user_id", "Reactions", "UserId")
- s.CreateIndexIfNotExists("idx_reactions_emoji_name", "Reactions", "EmojiName")
-}
-
-func (s SqlReactionStore) Save(reaction *model.Reaction) StoreChannel {
- storeChannel := make(StoreChannel)
-
- go func() {
- result := StoreResult{}
-
- reaction.PreSave()
- if result.Err = reaction.IsValid(); result.Err != nil {
- storeChannel <- result
- close(storeChannel)
- return
- }
-
- if transaction, err := s.GetMaster().Begin(); err != nil {
- result.Err = model.NewLocAppError("SqlReactionStore.Save", "store.sql_reaction.save.begin.app_error", nil, err.Error())
- } else {
- err := saveReactionAndUpdatePost(transaction, reaction)
-
- if err != nil {
- transaction.Rollback()
-
- // We don't consider duplicated save calls as an error
- if !IsUniqueConstraintError(err.Error(), []string{"reactions_pkey", "PRIMARY"}) {
- result.Err = model.NewLocAppError("SqlPreferenceStore.Save", "store.sql_reaction.save.save.app_error", nil, err.Error())
- }
- } else {
- if err := transaction.Commit(); err != nil {
- // don't need to rollback here since the transaction is already closed
- result.Err = model.NewLocAppError("SqlPreferenceStore.Save", "store.sql_reaction.save.commit.app_error", nil, err.Error())
- }
- }
-
- if result.Err == nil {
- result.Data = reaction
- }
- }
-
- storeChannel <- result
- close(storeChannel)
- }()
-
- return storeChannel
-}
-
-func (s SqlReactionStore) Delete(reaction *model.Reaction) StoreChannel {
- storeChannel := make(StoreChannel)
-
- go func() {
- result := StoreResult{}
-
- if transaction, err := s.GetMaster().Begin(); err != nil {
- result.Err = model.NewLocAppError("SqlReactionStore.Delete", "store.sql_reaction.delete.begin.app_error", nil, err.Error())
- } else {
- err := deleteReactionAndUpdatePost(transaction, reaction)
-
- if err != nil {
- transaction.Rollback()
-
- result.Err = model.NewLocAppError("SqlPreferenceStore.Delete", "store.sql_reaction.delete.app_error", nil, err.Error())
- } else if err := transaction.Commit(); err != nil {
- // don't need to rollback here since the transaction is already closed
- result.Err = model.NewLocAppError("SqlPreferenceStore.Delete", "store.sql_reaction.delete.commit.app_error", nil, err.Error())
- } else {
- result.Data = reaction
- }
- }
-
- storeChannel <- result
- close(storeChannel)
- }()
-
- return storeChannel
-}
-
-func saveReactionAndUpdatePost(transaction *gorp.Transaction, reaction *model.Reaction) error {
- if err := transaction.Insert(reaction); err != nil {
- return err
- }
-
- return updatePostForReactions(transaction, reaction.PostId)
-}
-
-func deleteReactionAndUpdatePost(transaction *gorp.Transaction, reaction *model.Reaction) error {
- if _, err := transaction.Exec(
- `DELETE FROM
- Reactions
- WHERE
- PostId = :PostId AND
- UserId = :UserId AND
- EmojiName = :EmojiName`,
- map[string]interface{}{"PostId": reaction.PostId, "UserId": reaction.UserId, "EmojiName": reaction.EmojiName}); err != nil {
- return err
- }
-
- return updatePostForReactions(transaction, reaction.PostId)
-}
-
-const (
- // Set HasReactions = true if and only if the post has reactions, update UpdateAt only if HasReactions changes
- UPDATE_POST_HAS_REACTIONS_QUERY = `UPDATE
- Posts
- SET
- UpdateAt = (CASE
- WHEN HasReactions != (SELECT count(0) > 0 FROM Reactions WHERE PostId = :PostId) THEN :UpdateAt
- ELSE UpdateAt
- END),
- HasReactions = (SELECT count(0) > 0 FROM Reactions WHERE PostId = :PostId)
- WHERE
- Id = :PostId`
-)
-
-func updatePostForReactions(transaction *gorp.Transaction, postId string) error {
- _, err := transaction.Exec(UPDATE_POST_HAS_REACTIONS_QUERY, map[string]interface{}{"PostId": postId, "UpdateAt": model.GetMillis()})
-
- return err
-}
-
-func (s SqlReactionStore) InvalidateCacheForPost(postId string) {
- reactionCache.Remove(postId)
-}
-
-func (s SqlReactionStore) InvalidateCache() {
- reactionCache.Purge()
-}
-
-func (s SqlReactionStore) GetForPost(postId string, allowFromCache bool) StoreChannel {
- storeChannel := make(StoreChannel)
-
- go func() {
- result := StoreResult{}
- metrics := einterfaces.GetMetricsInterface()
-
- if allowFromCache {
- if cacheItem, ok := reactionCache.Get(postId); ok {
- if metrics != nil {
- metrics.IncrementMemCacheHitCounter("Reactions")
- }
- result.Data = cacheItem.([]*model.Reaction)
- storeChannel <- result
- close(storeChannel)
- return
- } else {
- if metrics != nil {
- metrics.IncrementMemCacheMissCounter("Reactions")
- }
- }
- } else {
- if metrics != nil {
- metrics.IncrementMemCacheMissCounter("Reactions")
- }
- }
-
- var reactions []*model.Reaction
-
- if _, err := s.GetReplica().Select(&reactions,
- `SELECT
- *
- FROM
- Reactions
- WHERE
- PostId = :PostId
- ORDER BY
- CreateAt`, map[string]interface{}{"PostId": postId}); err != nil {
- result.Err = model.NewLocAppError("SqlReactionStore.GetForPost", "store.sql_reaction.get_for_post.app_error", nil, "")
- } else {
- result.Data = reactions
-
- reactionCache.AddWithExpiresInSecs(postId, reactions, REACTION_CACHE_SEC)
- }
-
- storeChannel <- result
- close(storeChannel)
- }()
-
- return storeChannel
-}
-
-func (s SqlReactionStore) DeleteAllWithEmojiName(emojiName string) StoreChannel {
- storeChannel := make(StoreChannel)
-
- go func() {
- result := StoreResult{}
-
- // doesn't use a transaction since it's better for this to half-finish than to not commit anything
- var reactions []*model.Reaction
-
- if _, err := s.GetReplica().Select(&reactions,
- `SELECT
- *
- FROM
- Reactions
- WHERE
- EmojiName = :EmojiName`, map[string]interface{}{"EmojiName": emojiName}); err != nil {
- result.Err = model.NewLocAppError("SqlReactionStore.DeleteAllWithEmojiName",
- "store.sql_reaction.delete_all_with_emoji_name.get_reactions.app_error", nil,
- "emoji_name="+emojiName+", error="+err.Error())
- storeChannel <- result
- close(storeChannel)
- return
- }
-
- if _, err := s.GetMaster().Exec(
- `DELETE FROM
- Reactions
- WHERE
- EmojiName = :EmojiName`, map[string]interface{}{"EmojiName": emojiName}); err != nil {
- result.Err = model.NewLocAppError("SqlReactionStore.DeleteAllWithEmojiName",
- "store.sql_reaction.delete_all_with_emoji_name.delete_reactions.app_error", nil,
- "emoji_name="+emojiName+", error="+err.Error())
- storeChannel <- result
- close(storeChannel)
- return
- }
-
- for _, reaction := range reactions {
- if _, err := s.GetMaster().Exec(UPDATE_POST_HAS_REACTIONS_QUERY,
- map[string]interface{}{"PostId": reaction.PostId, "UpdateAt": model.GetMillis()}); err != nil {
- l4g.Warn(utils.T("store.sql_reaction.delete_all_with_emoji_name.update_post.warn"), reaction.PostId, err.Error())
- }
- }
-
- storeChannel <- result
- close(storeChannel)
- }()
-
- return storeChannel
-}
diff --git a/store/sql_supplier.go b/store/sql_supplier.go
index 0f4ab8380..df934f2cb 100644
--- a/store/sql_supplier.go
+++ b/store/sql_supplier.go
@@ -57,11 +57,6 @@ const (
EXIT_REMOVE_TABLE = 134
)
-type SqlSupplierResult struct {
- Err model.AppError
- Result interface{}
-}
-
type SqlSupplierOldStores struct {
team TeamStore
channel ChannelStore
@@ -86,6 +81,7 @@ type SqlSupplierOldStores struct {
}
type SqlSupplier struct {
+ next LayeredStoreSupplier
master *gorp.DbMap
replicas []*gorp.DbMap
searchReplicas []*gorp.DbMap
@@ -120,9 +116,10 @@ func NewSqlSupplier() *SqlSupplier {
supplier.oldStores.emoji = NewSqlEmojiStore(supplier)
supplier.oldStores.status = NewSqlStatusStore(supplier)
supplier.oldStores.fileInfo = NewSqlFileInfoStore(supplier)
- supplier.oldStores.reaction = NewSqlReactionStore(supplier)
supplier.oldStores.job = NewSqlJobStore(supplier)
+ initSqlSupplierReactions(supplier)
+
err := supplier.GetMaster().CreateTablesIfNotExists()
if err != nil {
l4g.Critical(utils.T("store.sql.creating_tables.critical"), err)
@@ -149,7 +146,6 @@ func NewSqlSupplier() *SqlSupplier {
supplier.oldStores.emoji.(*SqlEmojiStore).CreateIndexesIfNotExists()
supplier.oldStores.status.(*SqlStatusStore).CreateIndexesIfNotExists()
supplier.oldStores.fileInfo.(*SqlFileInfoStore).CreateIndexesIfNotExists()
- supplier.oldStores.reaction.(*SqlReactionStore).CreateIndexesIfNotExists()
supplier.oldStores.job.(*SqlJobStore).CreateIndexesIfNotExists()
supplier.oldStores.preference.(*SqlPreferenceStore).DeleteUnusedFeatures()
@@ -157,6 +153,14 @@ func NewSqlSupplier() *SqlSupplier {
return supplier
}
+func (s *SqlSupplier) SetChainNext(next LayeredStoreSupplier) {
+ s.next = next
+}
+
+func (s *SqlSupplier) Next() LayeredStoreSupplier {
+ return s.next
+}
+
func setupConnection(con_type string, driver string, dataSource string, maxIdle int, maxOpen int, trace bool) *gorp.DbMap {
db, err := dbsql.Open(driver, dataSource)
if err != nil {
diff --git a/store/sql_supplier_reactions.go b/store/sql_supplier_reactions.go
index 14f13cce6..30ca6beed 100644
--- a/store/sql_supplier_reactions.go
+++ b/store/sql_supplier_reactions.go
@@ -6,7 +6,10 @@ package store
import (
"context"
+ l4g "github.com/alecthomas/log4go"
+ "github.com/mattermost/gorp"
"github.com/mattermost/platform/model"
+ "github.com/mattermost/platform/utils"
)
func initSqlSupplierReactions(sqlStore SqlStore) {
@@ -18,18 +21,164 @@ func initSqlSupplierReactions(sqlStore SqlStore) {
}
}
-func (s *SqlSupplier) ReactionSave(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) LayeredStoreSupplierResult {
- panic("not implemented")
+func (s *SqlSupplier) ReactionSave(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ result := NewSupplierResult()
+
+ reaction.PreSave()
+ if result.Err = reaction.IsValid(); result.Err != nil {
+ return result
+ }
+
+ if transaction, err := s.GetMaster().Begin(); err != nil {
+ result.Err = model.NewLocAppError("SqlReactionStore.Save", "store.sql_reaction.save.begin.app_error", nil, err.Error())
+ } else {
+ err := saveReactionAndUpdatePost(transaction, reaction)
+
+ if err != nil {
+ transaction.Rollback()
+
+ // We don't consider duplicated save calls as an error
+ if !IsUniqueConstraintError(err.Error(), []string{"reactions_pkey", "PRIMARY"}) {
+ result.Err = model.NewLocAppError("SqlPreferenceStore.Save", "store.sql_reaction.save.save.app_error", nil, err.Error())
+ }
+ } else {
+ if err := transaction.Commit(); err != nil {
+ // don't need to rollback here since the transaction is already closed
+ result.Err = model.NewLocAppError("SqlPreferenceStore.Save", "store.sql_reaction.save.commit.app_error", nil, err.Error())
+ }
+ }
+
+ if result.Err == nil {
+ result.Data = reaction
+ }
+ }
+
+ return result
+}
+
+func (s *SqlSupplier) ReactionDelete(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ result := NewSupplierResult()
+
+ if transaction, err := s.GetMaster().Begin(); err != nil {
+ result.Err = model.NewLocAppError("SqlReactionStore.Delete", "store.sql_reaction.delete.begin.app_error", nil, err.Error())
+ } else {
+ err := deleteReactionAndUpdatePost(transaction, reaction)
+
+ if err != nil {
+ transaction.Rollback()
+
+ result.Err = model.NewLocAppError("SqlPreferenceStore.Delete", "store.sql_reaction.delete.app_error", nil, err.Error())
+ } else if err := transaction.Commit(); err != nil {
+ // don't need to rollback here since the transaction is already closed
+ result.Err = model.NewLocAppError("SqlPreferenceStore.Delete", "store.sql_reaction.delete.commit.app_error", nil, err.Error())
+ } else {
+ result.Data = reaction
+ }
+ }
+
+ return result
+}
+
+func (s *SqlSupplier) ReactionGetForPost(ctx context.Context, postId string, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ result := NewSupplierResult()
+
+ var reactions []*model.Reaction
+
+ if _, err := s.GetReplica().Select(&reactions,
+ `SELECT
+ *
+ FROM
+ Reactions
+ WHERE
+ PostId = :PostId
+ ORDER BY
+ CreateAt`, map[string]interface{}{"PostId": postId}); err != nil {
+ result.Err = model.NewLocAppError("SqlReactionStore.GetForPost", "store.sql_reaction.get_for_post.app_error", nil, "")
+ } else {
+ result.Data = reactions
+ }
+
+ return result
}
-func (s *SqlSupplier) ReactionDelete(ctx context.Context, reaction *model.Reaction, hints ...LayeredStoreHint) LayeredStoreSupplierResult {
- panic("not implemented")
+func (s *SqlSupplier) ReactionDeleteAllWithEmojiName(ctx context.Context, emojiName string, hints ...LayeredStoreHint) *LayeredStoreSupplierResult {
+ result := NewSupplierResult()
+
+ var reactions []*model.Reaction
+
+ if _, err := s.GetReplica().Select(&reactions,
+ `SELECT
+ *
+ FROM
+ Reactions
+ WHERE
+ EmojiName = :EmojiName`, map[string]interface{}{"EmojiName": emojiName}); err != nil {
+ result.Err = model.NewLocAppError("SqlReactionStore.DeleteAllWithEmojiName",
+ "store.sql_reaction.delete_all_with_emoji_name.get_reactions.app_error", nil,
+ "emoji_name="+emojiName+", error="+err.Error())
+ return result
+ }
+
+ if _, err := s.GetMaster().Exec(
+ `DELETE FROM
+ Reactions
+ WHERE
+ EmojiName = :EmojiName`, map[string]interface{}{"EmojiName": emojiName}); err != nil {
+ result.Err = model.NewLocAppError("SqlReactionStore.DeleteAllWithEmojiName",
+ "store.sql_reaction.delete_all_with_emoji_name.delete_reactions.app_error", nil,
+ "emoji_name="+emojiName+", error="+err.Error())
+ return result
+ }
+
+ for _, reaction := range reactions {
+ if _, err := s.GetMaster().Exec(UPDATE_POST_HAS_REACTIONS_QUERY,
+ map[string]interface{}{"PostId": reaction.PostId, "UpdateAt": model.GetMillis()}); err != nil {
+ l4g.Warn(utils.T("store.sql_reaction.delete_all_with_emoji_name.update_post.warn"), reaction.PostId, err.Error())
+ }
+ }
+
+ return result
}
-func (s *SqlSupplier) ReactionGetForPost(ctx context.Context, postId string, hints ...LayeredStoreHint) LayeredStoreSupplierResult {
- panic("not implemented")
+func saveReactionAndUpdatePost(transaction *gorp.Transaction, reaction *model.Reaction) error {
+ if err := transaction.Insert(reaction); err != nil {
+ return err
+ }
+
+ return updatePostForReactions(transaction, reaction.PostId)
}
-func (s *SqlSupplier) ReactionDeleteAllWithEmojiName(ctx context.Context, emojiName string, hints ...LayeredStoreHint) LayeredStoreSupplierResult {
- panic("not implemented")
+func deleteReactionAndUpdatePost(transaction *gorp.Transaction, reaction *model.Reaction) error {
+ if _, err := transaction.Exec(
+ `DELETE FROM
+ Reactions
+ WHERE
+ PostId = :PostId AND
+ UserId = :UserId AND
+ EmojiName = :EmojiName`,
+ map[string]interface{}{"PostId": reaction.PostId, "UserId": reaction.UserId, "EmojiName": reaction.EmojiName}); err != nil {
+ return err
+ }
+
+ return updatePostForReactions(transaction, reaction.PostId)
+}
+
+const (
+ // Set HasReactions = true if and only if the post has reactions, update UpdateAt only if HasReactions changes
+ UPDATE_POST_HAS_REACTIONS_QUERY = `UPDATE
+ Posts
+ SET
+ UpdateAt = (CASE
+ WHEN HasReactions != (SELECT count(0) > 0 FROM Reactions WHERE PostId = :PostId) THEN :UpdateAt
+ ELSE UpdateAt
+ END),
+ HasReactions = (SELECT count(0) > 0 FROM Reactions WHERE PostId = :PostId)
+ WHERE
+ Id = :PostId`
+)
+
+func updatePostForReactions(transaction *gorp.Transaction, postId string) error {
+ _, err := transaction.Exec(UPDATE_POST_HAS_REACTIONS_QUERY, map[string]interface{}{"PostId": postId, "UpdateAt": model.GetMillis()})
+
+ return err
}
diff --git a/store/store.go b/store/store.go
index ab3d97d9b..0fa2a96b3 100644
--- a/store/store.go
+++ b/store/store.go
@@ -379,8 +379,6 @@ type FileInfoStore interface {
type ReactionStore interface {
Save(reaction *model.Reaction) StoreChannel
Delete(reaction *model.Reaction) StoreChannel
- InvalidateCacheForPost(postId string)
- InvalidateCache()
GetForPost(postId string, allowFromCache bool) StoreChannel
DeleteAllWithEmojiName(emojiName string) StoreChannel
}
diff --git a/utils/lru.go b/utils/lru.go
index 41ba72d71..576331563 100644
--- a/utils/lru.go
+++ b/utils/lru.go
@@ -14,13 +14,28 @@ import (
"time"
)
+// Caching Interface
+type ObjectCache interface {
+ AddWithExpiresInSecs(key, value interface{}, expireAtSecs int64) bool
+ AddWithDefaultExpires(key, value interface{}) bool
+ Purge()
+ Get(key interface{}) (value interface{}, ok bool)
+ Remove(key interface{})
+ Len() int
+ Name() string
+ GetInvalidateClusterEvent() string
+}
+
// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
- size int
- evictList *list.List
- items map[interface{}]*list.Element
- lock sync.RWMutex
- onEvicted func(key interface{}, value interface{})
+ size int
+ evictList *list.List
+ items map[interface{}]*list.Element
+ lock sync.RWMutex
+ onEvicted func(key interface{}, value interface{})
+ name string
+ defaultExpiry int64
+ invalidateClusterEvent string
}
// entry is used to hold a value in the evictList
@@ -49,6 +64,14 @@ func NewLruWithEvict(size int, onEvicted func(key interface{}, value interface{}
return c, nil
}
+func NewLruWithParams(size int, name string, defaultExpiry int64, invalidateClusterEvent string) *Cache {
+ lru := NewLru(size)
+ lru.name = name
+ lru.defaultExpiry = defaultExpiry
+ lru.invalidateClusterEvent = invalidateClusterEvent
+ return lru
+}
+
// Purge is used to completely clear the cache
func (c *Cache) Purge() {
c.lock.Lock()
@@ -68,6 +91,10 @@ func (c *Cache) Add(key, value interface{}) bool {
return c.AddWithExpiresInSecs(key, value, 0)
}
+func (c *Cache) AddWithDefaultExpires(key, value interface{}) bool {
+ return c.AddWithExpiresInSecs(key, value, c.defaultExpiry)
+}
+
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) AddWithExpiresInSecs(key, value interface{}, expireAtSecs int64) bool {
c.lock.Lock()
@@ -159,6 +186,14 @@ func (c *Cache) Len() int {
return c.evictList.Len()
}
+func (c *Cache) Name() string {
+ return c.name
+}
+
+func (c *Cache) GetInvalidateClusterEvent() string {
+ return c.invalidateClusterEvent
+}
+
// removeOldest removes the oldest item from the cache.
func (c *Cache) removeOldest() {
ent := c.evictList.Back()
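
A hypothetical usage sketch of the new LRU constructor and default-expiry helper added above, reusing the size, name, expiry, and cluster event that store/local_cache_supplier.go passes for the reaction cache; it assumes the platform packages at this commit are importable.

    package main

    import (
        "fmt"

        "github.com/mattermost/platform/model"
        "github.com/mattermost/platform/utils"
    )

    func main() {
        // Same parameters as the reaction cache in NewLocalCacheSupplier.
        cache := utils.NewLruWithParams(20000, "Reaction", 1800,
            model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS)

        // AddWithDefaultExpires applies the 1800-second default expiry.
        cache.AddWithDefaultExpires("postid1", []string{"smile"})

        if v, ok := cache.Get("postid1"); ok {
            fmt.Println(cache.Name(), v) // Reaction [smile]
        }
    }
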
diff --git a/vendor/github.com/go-redis/redis/.gitignore b/vendor/github.com/go-redis/redis/.gitignore
new file mode 100644
index 000000000..ebfe903bc
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/.gitignore
@@ -0,0 +1,2 @@
+*.rdb
+testdata/*/
diff --git a/vendor/github.com/go-redis/redis/.travis.yml b/vendor/github.com/go-redis/redis/.travis.yml
new file mode 100644
index 000000000..f8e0d652e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/.travis.yml
@@ -0,0 +1,20 @@
+sudo: false
+language: go
+
+services:
+ - redis-server
+
+go:
+ - 1.4
+ - 1.7
+ - 1.8
+ - tip
+
+matrix:
+ allow_failures:
+ - go: 1.4
+ - go: tip
+
+install:
+ - go get github.com/onsi/ginkgo
+ - go get github.com/onsi/gomega
diff --git a/vendor/github.com/go-redis/redis/LICENSE b/vendor/github.com/go-redis/redis/LICENSE
new file mode 100644
index 000000000..298bed9be
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-redis/redis/Makefile b/vendor/github.com/go-redis/redis/Makefile
new file mode 100644
index 000000000..50fdc55a1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/Makefile
@@ -0,0 +1,19 @@
+all: testdeps
+ go test ./...
+ go test ./... -short -race
+ go vet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+ mkdir -p $@
+ wget -qO- https://github.com/antirez/redis/archive/unstable.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ sed -i 's/libjemalloc.a/libjemalloc.a -lrt/g' $</src/Makefile
+ cd $< && make all
diff --git a/vendor/github.com/go-redis/redis/README.md b/vendor/github.com/go-redis/redis/README.md
new file mode 100644
index 000000000..f3c61795e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/README.md
@@ -0,0 +1,141 @@
+# Redis client for Golang
+
+[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
+[![GoDoc](https://godoc.org/github.com/go-redis/redis?status.svg)](https://godoc.org/github.com/go-redis/redis)
+
+Supports:
+
+- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC.
+- [Pub/Sub](https://godoc.org/github.com/go-redis/redis#PubSub).
+- [Transactions](https://godoc.org/github.com/go-redis/redis#Multi).
+- [Pipeline](https://godoc.org/github.com/go-redis/redis#example-Client-Pipeline) and [TxPipeline](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
+- [Scripting](https://godoc.org/github.com/go-redis/redis#Script).
+- [Timeouts](https://godoc.org/github.com/go-redis/redis#Options).
+- [Redis Sentinel](https://godoc.org/github.com/go-redis/redis#NewFailoverClient).
+- [Redis Cluster](https://godoc.org/github.com/go-redis/redis#NewClusterClient).
+- [Ring](https://godoc.org/github.com/go-redis/redis#NewRing).
+- [Instrumentation](https://godoc.org/github.com/go-redis/redis#ex-package--Instrumentation).
+- [Cache friendly](https://github.com/go-redis/cache).
+- [Rate limiting](https://github.com/go-redis/rate).
+- [Distributed Locks](https://github.com/bsm/redis-lock).
+
+API docs: https://godoc.org/github.com/go-redis/redis.
+Examples: https://godoc.org/github.com/go-redis/redis#pkg-examples.
+
+## Installation
+
+Install:
+
+```shell
+go get -u github.com/go-redis/redis
+```
+
+Import:
+
+```go
+import "github.com/go-redis/redis"
+```
+
+## Quickstart
+
+```go
+func ExampleNewClient() {
+ client := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ pong, err := client.Ping().Result()
+ fmt.Println(pong, err)
+ // Output: PONG <nil>
+}
+
+func ExampleClient() {
+ err := client.Set("key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := client.Get("key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := client.Get("key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exists")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exists
+}
+```
+
+## Howto
+
+Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package.
+
+## Look and feel
+
+Some corner cases:
+
+ SET key value EX 10 NX
+ set, err := client.SetNX("key", "value", 10*time.Second).Result()
+
+ SORT list LIMIT 0 2 ASC
+ vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+ ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+ vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+ }).Result()
+
+ ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+ vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
+
+ EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+ vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, []string{"hello"}).Result()
+
+## Benchmark
+
+go-redis vs redigo:
+
+```
+BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op
+BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op
+BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op
+BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op
+BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op
+BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op
+BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op
+BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op
+BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op
+BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op
+BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op
+BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op
+BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op
+BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op
+BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op
+BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op
+```
+
+Redis Cluster:
+
+```
+BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op
+BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op
+```
+
+## See also
+
+- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
+- [Golang msgpack](https://github.com/vmihailenco/msgpack)
+- [Golang message task queue](https://github.com/go-msgqueue/msgqueue)
diff --git a/vendor/github.com/go-redis/redis/bench_test.go b/vendor/github.com/go-redis/redis/bench_test.go
new file mode 100644
index 000000000..f6b75c72a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/bench_test.go
@@ -0,0 +1,216 @@
+package redis_test
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis"
+)
+
+func benchmarkRedisClient(poolSize int) *redis.Client {
+ client := redis.NewClient(&redis.Options{
+ Addr: ":6379",
+ DialTimeout: time.Second,
+ ReadTimeout: time.Second,
+ WriteTimeout: time.Second,
+ PoolSize: poolSize,
+ })
+ if err := client.FlushDB().Err(); err != nil {
+ panic(err)
+ }
+ return client
+}
+
+func BenchmarkRedisPing(b *testing.B) {
+ client := benchmarkRedisClient(10)
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Ping().Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkRedisSetString(b *testing.B) {
+ client := benchmarkRedisClient(10)
+ defer client.Close()
+
+ value := string(bytes.Repeat([]byte{'1'}, 10000))
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Set("key", value, 0).Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkRedisGetNil(b *testing.B) {
+ client := benchmarkRedisClient(10)
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Get("key").Err(); err != redis.Nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func benchmarkSetRedis(b *testing.B, poolSize, payloadSize int) {
+ client := benchmarkRedisClient(poolSize)
+ defer client.Close()
+
+ value := string(bytes.Repeat([]byte{'1'}, payloadSize))
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Set("key", value, 0).Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkSetRedis10Conns64Bytes(b *testing.B) {
+ benchmarkSetRedis(b, 10, 64)
+}
+
+func BenchmarkSetRedis100Conns64Bytes(b *testing.B) {
+ benchmarkSetRedis(b, 100, 64)
+}
+
+func BenchmarkSetRedis10Conns1KB(b *testing.B) {
+ benchmarkSetRedis(b, 10, 1024)
+}
+
+func BenchmarkSetRedis100Conns1KB(b *testing.B) {
+ benchmarkSetRedis(b, 100, 1024)
+}
+
+func BenchmarkSetRedis10Conns10KB(b *testing.B) {
+ benchmarkSetRedis(b, 10, 10*1024)
+}
+
+func BenchmarkSetRedis100Conns10KB(b *testing.B) {
+ benchmarkSetRedis(b, 100, 10*1024)
+}
+
+func BenchmarkSetRedis10Conns1MB(b *testing.B) {
+ benchmarkSetRedis(b, 10, 1024*1024)
+}
+
+func BenchmarkSetRedis100Conns1MB(b *testing.B) {
+ benchmarkSetRedis(b, 100, 1024*1024)
+}
+
+func BenchmarkRedisSetGetBytes(b *testing.B) {
+ client := benchmarkRedisClient(10)
+ defer client.Close()
+
+ value := bytes.Repeat([]byte{'1'}, 10000)
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Set("key", value, 0).Err(); err != nil {
+ b.Fatal(err)
+ }
+
+ got, err := client.Get("key").Bytes()
+ if err != nil {
+ b.Fatal(err)
+ }
+ if !bytes.Equal(got, value) {
+ b.Fatalf("got != value")
+ }
+ }
+ })
+}
+
+func BenchmarkRedisMGet(b *testing.B) {
+ client := benchmarkRedisClient(10)
+ defer client.Close()
+
+ if err := client.MSet("key1", "hello1", "key2", "hello2").Err(); err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.MGet("key1", "key2").Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkSetExpire(b *testing.B) {
+ client := benchmarkRedisClient(10)
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Set("key", "hello", 0).Err(); err != nil {
+ b.Fatal(err)
+ }
+ if err := client.Expire("key", time.Second).Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkPipeline(b *testing.B) {
+ client := benchmarkRedisClient(10)
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Set("key", "hello", 0)
+ pipe.Expire("key", time.Second)
+ return nil
+ })
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkZAdd(b *testing.B) {
+ client := benchmarkRedisClient(10)
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.ZAdd("key", redis.Z{float64(1), "hello"}).Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
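
Note: the benchmarks in this file call a benchmarkRedisClient helper that is defined earlier in bench_test.go and is not part of this hunk. A minimal sketch of what such a helper might look like, assuming a local test server on :6379 (the address and the FlushDB call are illustrative assumptions, not taken from this diff):

package redis_test

import "github.com/go-redis/redis"

// benchmarkRedisClient builds a client with the requested pool size
// against an assumed local test instance and starts from an empty database.
func benchmarkRedisClient(poolSize int) *redis.Client {
	client := redis.NewClient(&redis.Options{
		Addr:     ":6379", // assumed local test instance
		PoolSize: poolSize,
	})
	if err := client.FlushDB().Err(); err != nil {
		panic(err)
	}
	return client
}
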
diff --git a/vendor/github.com/go-redis/redis/cluster.go b/vendor/github.com/go-redis/redis/cluster.go
new file mode 100644
index 000000000..f758b01b9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/cluster.go
@@ -0,0 +1,981 @@
+package redis
+
+import (
+ "fmt"
+ "math/rand"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+ "github.com/go-redis/redis/internal/hashtag"
+ "github.com/go-redis/redis/internal/pool"
+ "github.com/go-redis/redis/internal/proto"
+)
+
+var errClusterNoNodes = internal.RedisError("redis: cluster has no nodes")
+var errNilClusterState = internal.RedisError("redis: cannot load cluster slots")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+ // A seed list of host:port addresses of cluster nodes.
+ Addrs []string
+
+ // The maximum number of retries before giving up. Commands are retried
+ // on network errors and MOVED/ASK redirects.
+ // Default is 16.
+ MaxRedirects int
+
+ // Enables read queries for a connection to a Redis Cluster slave node.
+ ReadOnly bool
+
+ // Enables routing read-only queries to the closest master or slave node.
+ RouteByLatency bool
+
+ // The following options are copied from the Options struct.
+
+ OnConnect func(*Conn) error
+
+ MaxRetries int
+ Password string
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ // PoolSize applies per cluster node and not for the whole cluster.
+ PoolSize int
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+func (opt *ClusterOptions) init() {
+ if opt.MaxRedirects == -1 {
+ opt.MaxRedirects = 0
+ } else if opt.MaxRedirects == 0 {
+ opt.MaxRedirects = 16
+ }
+
+ if opt.RouteByLatency {
+ opt.ReadOnly = true
+ }
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+ const disableIdleCheck = -1
+
+ return &Options{
+ OnConnect: opt.OnConnect,
+
+ MaxRetries: opt.MaxRetries,
+ Password: opt.Password,
+ ReadOnly: opt.ReadOnly,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+
+ IdleCheckFrequency: disableIdleCheck,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+ Client *Client
+ Latency time.Duration
+ loading time.Time
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+ opt := clOpt.clientOptions()
+ opt.Addr = addr
+ node := clusterNode{
+ Client: NewClient(opt),
+ }
+
+ if clOpt.RouteByLatency {
+ node.updateLatency()
+ }
+
+ return &node
+}
+
+func (n *clusterNode) updateLatency() {
+ const probes = 10
+ for i := 0; i < probes; i++ {
+ start := time.Now()
+ n.Client.Ping()
+ n.Latency += time.Since(start)
+ }
+ n.Latency = n.Latency / probes
+}
+
+func (n *clusterNode) Loading() bool {
+ return !n.loading.IsZero() && time.Since(n.loading) < time.Minute
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+ opt *ClusterOptions
+
+ mu sync.RWMutex
+ addrs []string
+ nodes map[string]*clusterNode
+ closed bool
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+ return &clusterNodes{
+ opt: opt,
+ nodes: make(map[string]*clusterNode),
+ }
+}
+
+func (c *clusterNodes) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, node := range c.nodes {
+ if err := node.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ c.addrs = nil
+ c.nodes = nil
+
+ return firstErr
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ nodes := make([]*clusterNode, 0, len(c.nodes))
+ for _, node := range c.nodes {
+ nodes = append(nodes, node)
+ }
+ return nodes, nil
+}
+
+func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+ var node *clusterNode
+ var ok bool
+
+ c.mu.RLock()
+ if !c.closed {
+ node, ok = c.nodes[addr]
+ }
+ c.mu.RUnlock()
+ if ok {
+ return node, nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ node, ok = c.nodes[addr]
+ if ok {
+ return node, nil
+ }
+
+ c.addrs = append(c.addrs, addr)
+ node = newClusterNode(c.opt, addr)
+ c.nodes[addr] = node
+ return node, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+ c.mu.RLock()
+ closed := c.closed
+ addrs := c.addrs
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+
+ var nodeErr error
+ for i := 0; i <= c.opt.MaxRedirects; i++ {
+ n := rand.Intn(len(addrs))
+ node, err := c.Get(addrs[n])
+ if err != nil {
+ return nil, err
+ }
+
+ nodeErr = node.Client.ClusterInfo().Err()
+ if nodeErr == nil {
+ return node, nil
+ }
+ }
+ return nil, nodeErr
+}
+
+//------------------------------------------------------------------------------
+
+type clusterState struct {
+ nodes *clusterNodes
+ slots [][]*clusterNode
+}
+
+func newClusterState(nodes *clusterNodes, slots []ClusterSlot, origin string) (*clusterState, error) {
+ c := clusterState{
+ nodes: nodes,
+ slots: make([][]*clusterNode, hashtag.SlotNumber),
+ }
+
+ isLoopbackOrigin := isLoopbackAddr(origin)
+ for _, slot := range slots {
+ var nodes []*clusterNode
+ for _, slotNode := range slot.Nodes {
+ addr := slotNode.Addr
+ if !isLoopbackOrigin && isLoopbackAddr(addr) {
+ addr = origin
+ }
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return nil, err
+ }
+ nodes = append(nodes, node)
+ }
+
+ for i := slot.Start; i <= slot.End; i++ {
+ c.slots[i] = nodes
+ }
+ }
+
+ return &c, nil
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ switch len(nodes) {
+ case 0:
+ return c.nodes.Random()
+ case 1:
+ return nodes[0], nil
+ case 2:
+ if slave := nodes[1]; !slave.Loading() {
+ return slave, nil
+ }
+ return nodes[0], nil
+ default:
+ var slave *clusterNode
+ for i := 0; i < 10; i++ {
+ n := rand.Intn(len(nodes)-1) + 1
+ slave = nodes[n]
+ if !slave.Loading() {
+ break
+ }
+ }
+ return slave, nil
+ }
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+ const threshold = time.Millisecond
+
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+
+ var node *clusterNode
+ for _, n := range nodes {
+ if n.Loading() {
+ continue
+ }
+ if node == nil || node.Latency-n.Latency > threshold {
+ node = n
+ }
+ }
+ return node, nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+ if slot < len(c.slots) {
+ return c.slots[slot]
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+ cmdable
+
+ opt *ClusterOptions
+ nodes *clusterNodes
+ _state atomic.Value
+
+ cmdsInfoOnce internal.Once
+ cmdsInfo map[string]*CommandInfo
+
+ // Reports whether slot reloading is in progress.
+ reloading uint32
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+ opt.init()
+
+ c := &ClusterClient{
+ opt: opt,
+ nodes: newClusterNodes(opt),
+ }
+ c.setProcessor(c.Process)
+
+ // Add initial nodes.
+ for _, addr := range opt.Addrs {
+ _, _ = c.nodes.Get(addr)
+ }
+
+ // Preload cluster slots.
+ for i := 0; i < 10; i++ {
+ state, err := c.reloadSlots()
+ if err == nil {
+ c._state.Store(state)
+ break
+ }
+ }
+
+ if opt.IdleCheckFrequency > 0 {
+ go c.reaper(opt.IdleCheckFrequency)
+ }
+
+ return c
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+ return c.opt
+}
+
+func (c *ClusterClient) state() *clusterState {
+ v := c._state.Load()
+ if v != nil {
+ return v.(*clusterState)
+ }
+ c.lazyReloadSlots()
+ return nil
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+ err := c.cmdsInfoOnce.Do(func() error {
+ node, err := c.nodes.Random()
+ if err != nil {
+ return err
+ }
+
+ cmdsInfo, err := node.Client.Command().Result()
+ if err != nil {
+ return err
+ }
+
+ c.cmdsInfo = cmdsInfo
+ return nil
+ })
+ if err != nil {
+ return nil
+ }
+ return c.cmdsInfo[name]
+}
+
+func (c *ClusterClient) cmdSlotAndNode(state *clusterState, cmd Cmder) (int, *clusterNode, error) {
+ if state == nil {
+ node, err := c.nodes.Random()
+ return 0, node, err
+ }
+
+ cmdInfo := c.cmdInfo(cmd.Name())
+ firstKey := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+ slot := hashtag.Slot(firstKey)
+
+ if cmdInfo != nil && cmdInfo.ReadOnly && c.opt.ReadOnly {
+ if c.opt.RouteByLatency {
+ node, err := state.slotClosestNode(slot)
+ return slot, node, err
+ }
+
+ node, err := state.slotSlaveNode(slot)
+ return slot, node, err
+ }
+
+ node, err := state.slotMasterNode(slot)
+ return slot, node, err
+}
+
+func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
+ state := c.state()
+
+ var node *clusterNode
+ var err error
+ if state != nil && len(keys) > 0 {
+ node, err = state.slotMasterNode(hashtag.Slot(keys[0]))
+ } else {
+ node, err = c.nodes.Random()
+ }
+ if err != nil {
+ return err
+ }
+ return node.Client.Watch(fn, keys...)
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+ return c.nodes.Close()
+}
+
+func (c *ClusterClient) Process(cmd Cmder) error {
+ slot, node, err := c.cmdSlotAndNode(c.state(), cmd)
+ if err != nil {
+ cmd.setErr(err)
+ return err
+ }
+
+ var ask bool
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if ask {
+ pipe := node.Client.Pipeline()
+ pipe.Process(NewCmd("ASKING"))
+ pipe.Process(cmd)
+ _, err = pipe.Exec()
+ pipe.Close()
+ ask = false
+ } else {
+ err = node.Client.Process(cmd)
+ }
+
+ // If there is no (real) error, we are done.
+ if err == nil {
+ return nil
+ }
+
+ // If the slave is loading, read from the master.
+ if c.opt.ReadOnly && internal.IsLoadingError(err) {
+ node.loading = time.Now()
+ continue
+ }
+
+ // On network errors try random node.
+ if internal.IsRetryableError(err) {
+ node, err = c.nodes.Random()
+ if err != nil {
+ cmd.setErr(err)
+ return err
+ }
+ continue
+ }
+
+ var moved bool
+ var addr string
+ moved, ask, addr = internal.IsMovedError(err)
+ if moved || ask {
+ state := c.state()
+ if state != nil && slot >= 0 {
+ master, _ := state.slotMasterNode(slot)
+ if moved && (master == nil || master.Client.getAddr() != addr) {
+ c.lazyReloadSlots()
+ }
+ }
+
+ node, err = c.nodes.Get(addr)
+ if err != nil {
+ cmd.setErr(err)
+ return err
+ }
+
+ continue
+ }
+
+ break
+ }
+
+ return cmd.Err()
+}
+
+// ForEachNode concurrently calls fn on every known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
+ nodes, err := c.nodes.All()
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, node := range nodes {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(node)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachMaster concurrently calls fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
+ state := c.state()
+ if state == nil {
+ return errNilClusterState
+ }
+
+ var wg sync.WaitGroup
+ visited := make(map[*clusterNode]struct{})
+ errCh := make(chan error, 1)
+ for _, nodes := range state.slots {
+ if len(nodes) == 0 {
+ continue
+ }
+
+ master := nodes[0]
+ if _, ok := visited[master]; ok {
+ continue
+ }
+ visited[master] = struct{}{}
+
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(master)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+ var acc PoolStats
+
+ nodes, err := c.nodes.All()
+ if err != nil {
+ return &acc
+ }
+
+ for _, node := range nodes {
+ s := node.Client.connPool.Stats()
+ acc.Requests += s.Requests
+ acc.Hits += s.Hits
+ acc.Timeouts += s.Timeouts
+ acc.TotalConns += s.TotalConns
+ acc.FreeConns += s.FreeConns
+ }
+ return &acc
+}
+
+func (c *ClusterClient) lazyReloadSlots() {
+ if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+ return
+ }
+
+ go func() {
+ for i := 0; i < 1000; i++ {
+ state, err := c.reloadSlots()
+ if err == pool.ErrClosed {
+ break
+ }
+ if err == nil {
+ c._state.Store(state)
+ break
+ }
+ time.Sleep(time.Millisecond)
+ }
+
+ time.Sleep(3 * time.Second)
+ atomic.StoreUint32(&c.reloading, 0)
+ }()
+}
+
+func (c *ClusterClient) reloadSlots() (*clusterState, error) {
+ node, err := c.nodes.Random()
+ if err != nil {
+ return nil, err
+ }
+
+ slots, err := node.Client.ClusterSlots().Result()
+ if err != nil {
+ return nil, err
+ }
+
+ return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+ ticker := time.NewTicker(idleCheckFrequency)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ nodes, err := c.nodes.All()
+ if err != nil {
+ break
+ }
+
+ var n int
+ for _, node := range nodes {
+ nn, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+ if err != nil {
+ internal.Logf("ReapStaleConns failed: %s", err)
+ } else {
+ n += nn
+ }
+ }
+
+ s := c.PoolStats()
+ internal.Logf(
+ "reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
+ n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
+ )
+ }
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.pipelineExec,
+ }
+ pipe.setProcessor(pipe.Process)
+ return &pipe
+}
+
+func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().pipelined(fn)
+}
+
+func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
+ cmdsMap, err := c.mapCmdsByNode(cmds)
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i <= c.opt.MaxRedirects; i++ {
+ failedCmds := make(map[*clusterNode][]Cmder)
+
+ for node, cmds := range cmdsMap {
+ cn, _, err := node.Client.conn()
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ err = c.pipelineProcessCmds(cn, cmds, failedCmds)
+ node.Client.putConn(cn, err)
+ }
+
+ if len(failedCmds) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+
+ var firstErr error
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ firstErr = err
+ break
+ }
+ }
+ return firstErr
+}
+
+func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, error) {
+ state := c.state()
+ cmdsMap := make(map[*clusterNode][]Cmder)
+ for _, cmd := range cmds {
+ _, node, err := c.cmdSlotAndNode(state, cmd)
+ if err != nil {
+ return nil, err
+ }
+ cmdsMap[node] = append(cmdsMap[node], cmd)
+ }
+ return cmdsMap, nil
+}
+
+func (c *ClusterClient) pipelineProcessCmds(
+ cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+) error {
+ cn.SetWriteTimeout(c.opt.WriteTimeout)
+ if err := writeCmd(cn, cmds...); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ // Set read timeout for all commands.
+ cn.SetReadTimeout(c.opt.ReadTimeout)
+
+ return c.pipelineReadCmds(cn, cmds, failedCmds)
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+ cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+) error {
+ var firstErr error
+ for _, cmd := range cmds {
+ err := cmd.readReply(cn)
+ if err == nil {
+ continue
+ }
+
+ if firstErr == nil {
+ firstErr = err
+ }
+
+ err = c.checkMovedErr(cmd, failedCmds)
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ return firstErr
+}
+
+func (c *ClusterClient) checkMovedErr(cmd Cmder, failedCmds map[*clusterNode][]Cmder) error {
+ moved, ask, addr := internal.IsMovedError(cmd.Err())
+ if moved {
+ c.lazyReloadSlots()
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+
+ failedCmds[node] = append(failedCmds[node], cmd)
+ }
+ if ask {
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+
+ failedCmds[node] = append(failedCmds[node], NewCmd("ASKING"), cmd)
+ }
+ return nil
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.txPipelineExec,
+ }
+ pipe.setProcessor(pipe.Process)
+ return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().pipelined(fn)
+}
+
+func (c *ClusterClient) txPipelineExec(cmds []Cmder) error {
+ cmdsMap, err := c.mapCmdsBySlot(cmds)
+ if err != nil {
+ return err
+ }
+
+ state := c.state()
+ if state == nil {
+ return errNilClusterState
+ }
+
+ for slot, cmds := range cmdsMap {
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+ for i := 0; i <= c.opt.MaxRedirects; i++ {
+ failedCmds := make(map[*clusterNode][]Cmder)
+
+ for node, cmds := range cmdsMap {
+ cn, _, err := node.Client.conn()
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds)
+ node.Client.putConn(cn, err)
+ }
+
+ if len(failedCmds) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+ }
+
+ var firstErr error
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ firstErr = err
+ break
+ }
+ }
+ return firstErr
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) (map[int][]Cmder, error) {
+ state := c.state()
+ cmdsMap := make(map[int][]Cmder)
+ for _, cmd := range cmds {
+ slot, _, err := c.cmdSlotAndNode(state, cmd)
+ if err != nil {
+ return nil, err
+ }
+ cmdsMap[slot] = append(cmdsMap[slot], cmd)
+ }
+ return cmdsMap, nil
+}
+
+func (c *ClusterClient) txPipelineProcessCmds(
+ node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+) error {
+ cn.SetWriteTimeout(c.opt.WriteTimeout)
+ if err := txPipelineWriteMulti(cn, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ failedCmds[node] = cmds
+ return err
+ }
+
+ // Set read timeout for all commands.
+ cn.SetReadTimeout(c.opt.ReadTimeout)
+
+ if err := c.txPipelineReadQueued(cn, cmds, failedCmds); err != nil {
+ return err
+ }
+
+ _, err := pipelineReadCmds(cn, cmds)
+ return err
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+ cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
+) error {
+ var firstErr error
+
+ // Parse queued replies.
+ var statusCmd StatusCmd
+ if err := statusCmd.readReply(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(cn)
+ if err == nil {
+ continue
+ }
+
+ cmd.setErr(err)
+ if firstErr == nil {
+ firstErr = err
+ }
+
+ err = c.checkMovedErr(cmd, failedCmds)
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := cn.Rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ err := fmt.Errorf("redis: expected '*', but got line %q", line)
+ return err
+ }
+
+ return firstErr
+}
+
+func isLoopbackAddr(addr string) bool {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return false
+ }
+
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return false
+ }
+
+ return ip.IsLoopback()
+}
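
For orientation, a hedged usage sketch of the ClusterClient defined above; the addresses are placeholders and the snippet is illustrative only, not part of the vendored file:

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Addrs is only a seed list; the client discovers the rest of the
	// cluster via CLUSTER SLOTS and follows MOVED/ASK redirects itself.
	client := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:    []string{"127.0.0.1:7000", "127.0.0.1:7001"}, // placeholder addresses
		ReadOnly: true, // permit read-only commands on slave nodes
	})
	defer client.Close()

	if err := client.Ping().Err(); err != nil {
		fmt.Println("ping failed:", err)
		return
	}
	if err := client.Set("key", "value", 0).Err(); err != nil {
		fmt.Println("set failed:", err)
	}
}
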
diff --git a/vendor/github.com/go-redis/redis/cluster_test.go b/vendor/github.com/go-redis/redis/cluster_test.go
new file mode 100644
index 000000000..3a69255a4
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/cluster_test.go
@@ -0,0 +1,740 @@
+package redis_test
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis"
+ "github.com/go-redis/redis/internal/hashtag"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+type clusterScenario struct {
+ ports []string
+ nodeIds []string
+ processes map[string]*redisProcess
+ clients map[string]*redis.Client
+}
+
+func (s *clusterScenario) masters() []*redis.Client {
+ result := make([]*redis.Client, 3)
+ for pos, port := range s.ports[:3] {
+ result[pos] = s.clients[port]
+ }
+ return result
+}
+
+func (s *clusterScenario) slaves() []*redis.Client {
+ result := make([]*redis.Client, 3)
+ for pos, port := range s.ports[3:] {
+ result[pos] = s.clients[port]
+ }
+ return result
+}
+
+func (s *clusterScenario) addrs() []string {
+ addrs := make([]string, len(s.ports))
+ for i, port := range s.ports {
+ addrs[i] = net.JoinHostPort("127.0.0.1", port)
+ }
+ return addrs
+}
+
+func (s *clusterScenario) clusterClient(opt *redis.ClusterOptions) *redis.ClusterClient {
+ opt.Addrs = s.addrs()
+ return redis.NewClusterClient(opt)
+}
+
+func startCluster(scenario *clusterScenario) error {
+ // Start processes and collect node ids
+ for pos, port := range scenario.ports {
+ process, err := startRedis(port, "--cluster-enabled", "yes")
+ if err != nil {
+ return err
+ }
+
+ client := redis.NewClient(&redis.Options{
+ Addr: ":" + port,
+ })
+
+ info, err := client.ClusterNodes().Result()
+ if err != nil {
+ return err
+ }
+
+ scenario.processes[port] = process
+ scenario.clients[port] = client
+ scenario.nodeIds[pos] = info[:40]
+ }
+
+ // Meet cluster nodes
+ for _, client := range scenario.clients {
+ err := client.ClusterMeet("127.0.0.1", scenario.ports[0]).Err()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Bootstrap masters
+ slots := []int{0, 5000, 10000, 16384}
+ for pos, master := range scenario.masters() {
+ err := master.ClusterAddSlotsRange(slots[pos], slots[pos+1]-1).Err()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Bootstrap slaves
+ for idx, slave := range scenario.slaves() {
+ masterId := scenario.nodeIds[idx]
+
+ // Wait until master is available
+ err := eventually(func() error {
+ s := slave.ClusterNodes().Val()
+ wanted := masterId
+ if !strings.Contains(s, wanted) {
+ return fmt.Errorf("%q does not contain %q", s, wanted)
+ }
+ return nil
+ }, 10*time.Second)
+ if err != nil {
+ return err
+ }
+
+ err = slave.ClusterReplicate(masterId).Err()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Wait until all nodes have consistent info
+ for _, client := range scenario.clients {
+ err := eventually(func() error {
+ res, err := client.ClusterSlots().Result()
+ if err != nil {
+ return err
+ }
+ wanted := []redis.ClusterSlot{
+ {0, 4999, []redis.ClusterNode{{"", "127.0.0.1:8220"}, {"", "127.0.0.1:8223"}}},
+ {5000, 9999, []redis.ClusterNode{{"", "127.0.0.1:8221"}, {"", "127.0.0.1:8224"}}},
+ {10000, 16383, []redis.ClusterNode{{"", "127.0.0.1:8222"}, {"", "127.0.0.1:8225"}}},
+ }
+ return assertSlotsEqual(res, wanted)
+ }, 30*time.Second)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func assertSlotsEqual(slots, wanted []redis.ClusterSlot) error {
+outer_loop:
+ for _, s2 := range wanted {
+ for _, s1 := range slots {
+ if slotEqual(s1, s2) {
+ continue outer_loop
+ }
+ }
+ return fmt.Errorf("%v not found in %v", s2, slots)
+ }
+ return nil
+}
+
+func slotEqual(s1, s2 redis.ClusterSlot) bool {
+ if s1.Start != s2.Start {
+ return false
+ }
+ if s1.End != s2.End {
+ return false
+ }
+ if len(s1.Nodes) != len(s2.Nodes) {
+ return false
+ }
+ for i, n1 := range s1.Nodes {
+ if n1.Addr != s2.Nodes[i].Addr {
+ return false
+ }
+ }
+ return true
+}
+
+func stopCluster(scenario *clusterScenario) error {
+ for _, client := range scenario.clients {
+ if err := client.Close(); err != nil {
+ return err
+ }
+ }
+ for _, process := range scenario.processes {
+ if err := process.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+var _ = Describe("ClusterClient", func() {
+ var opt *redis.ClusterOptions
+ var client *redis.ClusterClient
+
+ assertClusterClient := func() {
+ It("should CLUSTER SLOTS", func() {
+ res, err := client.ClusterSlots().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(3))
+
+ wanted := []redis.ClusterSlot{
+ {0, 4999, []redis.ClusterNode{{"", "127.0.0.1:8220"}, {"", "127.0.0.1:8223"}}},
+ {5000, 9999, []redis.ClusterNode{{"", "127.0.0.1:8221"}, {"", "127.0.0.1:8224"}}},
+ {10000, 16383, []redis.ClusterNode{{"", "127.0.0.1:8222"}, {"", "127.0.0.1:8225"}}},
+ }
+ Expect(assertSlotsEqual(res, wanted)).NotTo(HaveOccurred())
+ })
+
+ It("should CLUSTER NODES", func() {
+ res, err := client.ClusterNodes().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(res)).To(BeNumerically(">", 400))
+ })
+
+ It("should CLUSTER INFO", func() {
+ res, err := client.ClusterInfo().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(ContainSubstring("cluster_known_nodes:6"))
+ })
+
+ It("should CLUSTER KEYSLOT", func() {
+ hashSlot, err := client.ClusterKeySlot("somekey").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(hashSlot).To(Equal(int64(hashtag.Slot("somekey"))))
+ })
+
+ It("should CLUSTER COUNT-FAILURE-REPORTS", func() {
+ n, err := client.ClusterCountFailureReports(cluster.nodeIds[0]).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+ })
+
+ It("should CLUSTER COUNTKEYSINSLOT", func() {
+ n, err := client.ClusterCountKeysInSlot(10).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+ })
+
+ It("should CLUSTER SAVECONFIG", func() {
+ res, err := client.ClusterSaveConfig().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(Equal("OK"))
+ })
+
+ It("should CLUSTER SLAVES", func() {
+ nodesList, err := client.ClusterSlaves(cluster.nodeIds[0]).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(nodesList).Should(ContainElement(ContainSubstring("slave")))
+ Expect(nodesList).Should(HaveLen(1))
+ })
+
+ It("should GET/SET/DEL", func() {
+ val, err := client.Get("A").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(Equal(""))
+
+ val, err = client.Set("A", "VALUE", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("OK"))
+
+ val, err = client.Get("A").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("VALUE"))
+
+ cnt, err := client.Del("A").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cnt).To(Equal(int64(1)))
+ })
+
+ It("returns pool stats", func() {
+ Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{}))
+ })
+
+ It("removes idle connections", func() {
+ stats := client.PoolStats()
+ Expect(stats.TotalConns).NotTo(BeZero())
+ Expect(stats.FreeConns).NotTo(BeZero())
+
+ time.Sleep(2 * time.Second)
+
+ stats = client.PoolStats()
+ Expect(stats.TotalConns).To(BeZero())
+ Expect(stats.FreeConns).To(BeZero())
+ })
+
+ It("follows redirects", func() {
+ Expect(client.Set("A", "VALUE", 0).Err()).NotTo(HaveOccurred())
+
+ slot := hashtag.Slot("A")
+ Expect(client.SwapSlotNodes(slot)).To(Equal([]string{"127.0.0.1:8224", "127.0.0.1:8221"}))
+
+ val, err := client.Get("A").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("VALUE"))
+ })
+
+ It("returns an error when there are no attempts left", func() {
+ opt := redisClusterOptions()
+ opt.MaxRedirects = -1
+ client := cluster.clusterClient(opt)
+
+ slot := hashtag.Slot("A")
+ Expect(client.SwapSlotNodes(slot)).To(Equal([]string{"127.0.0.1:8224", "127.0.0.1:8221"}))
+
+ err := client.Get("A").Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("MOVED"))
+
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("distributes keys", func() {
+ for i := 0; i < 100; i++ {
+ err := client.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ wanted := []string{"keys=31", "keys=29", "keys=40"}
+ for i, master := range cluster.masters() {
+ Expect(master.Info().Val()).To(ContainSubstring(wanted[i]))
+ }
+ })
+
+ It("distributes keys when using EVAL", func() {
+ script := redis.NewScript(`
+ local r = redis.call('SET', KEYS[1], ARGV[1])
+ return r
+ `)
+
+ var key string
+ for i := 0; i < 100; i++ {
+ key = fmt.Sprintf("key%d", i)
+ err := script.Run(client, []string{key}, "value").Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ wanted := []string{"keys=31", "keys=29", "keys=40"}
+ for i, master := range cluster.masters() {
+ Expect(master.Info().Val()).To(ContainSubstring(wanted[i]))
+ }
+ })
+
+ It("supports Watch", func() {
+ var incr func(string) error
+
+ // Transactionally increments key using GET and SET commands.
+ incr = func(key string) error {
+ err := client.Watch(func(tx *redis.Tx) error {
+ n, err := tx.Get(key).Int64()
+ if err != nil && err != redis.Nil {
+ return err
+ }
+
+ _, err = tx.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Set(key, strconv.FormatInt(n+1, 10), 0)
+ return nil
+ })
+ return err
+ }, key)
+ if err == redis.TxFailedErr {
+ return incr(key)
+ }
+ return err
+ }
+
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ err := incr("key")
+ Expect(err).NotTo(HaveOccurred())
+ }()
+ }
+ wg.Wait()
+
+ n, err := client.Get("key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(100)))
+ })
+
+ Describe("pipelining", func() {
+ var pipe *redis.Pipeline
+
+ assertPipeline := func() {
+ keys := []string{"A", "B", "C", "D", "E", "F", "G"}
+
+ It("follows redirects", func() {
+ for _, key := range keys {
+ slot := hashtag.Slot(key)
+ client.SwapSlotNodes(slot)
+ }
+
+ for i, key := range keys {
+ pipe.Set(key, key+"_value", 0)
+ pipe.Expire(key, time.Duration(i+1)*time.Hour)
+ }
+ cmds, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(14))
+
+ if opt.RouteByLatency {
+ return
+ }
+
+ for _, key := range keys {
+ slot := hashtag.Slot(key)
+ client.SwapSlotNodes(slot)
+ }
+
+ for _, key := range keys {
+ pipe.Get(key)
+ pipe.TTL(key)
+ }
+ cmds, err = pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(14))
+
+ for i, key := range keys {
+ get := cmds[i*2].(*redis.StringCmd)
+ Expect(get.Val()).To(Equal(key + "_value"))
+
+ ttl := cmds[(i*2)+1].(*redis.DurationCmd)
+ Expect(ttl.Val()).To(BeNumerically("~", time.Duration(i+1)*time.Hour, time.Second))
+ }
+ })
+
+ It("works with missing keys", func() {
+ pipe.Set("A", "A_value", 0)
+ pipe.Set("C", "C_value", 0)
+ _, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+
+ a := pipe.Get("A")
+ b := pipe.Get("B")
+ c := pipe.Get("C")
+ cmds, err := pipe.Exec()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(cmds).To(HaveLen(3))
+
+ Expect(a.Err()).NotTo(HaveOccurred())
+ Expect(a.Val()).To(Equal("A_value"))
+
+ Expect(b.Err()).To(Equal(redis.Nil))
+ Expect(b.Val()).To(Equal(""))
+
+ Expect(c.Err()).NotTo(HaveOccurred())
+ Expect(c.Val()).To(Equal("C_value"))
+ })
+ }
+
+ Describe("Pipeline", func() {
+ BeforeEach(func() {
+ pipe = client.Pipeline().(*redis.Pipeline)
+ })
+
+ AfterEach(func() {
+ Expect(pipe.Close()).NotTo(HaveOccurred())
+ })
+
+ assertPipeline()
+ })
+
+ Describe("TxPipeline", func() {
+ BeforeEach(func() {
+ pipe = client.TxPipeline().(*redis.Pipeline)
+ })
+
+ AfterEach(func() {
+ Expect(pipe.Close()).NotTo(HaveOccurred())
+ })
+
+ assertPipeline()
+ })
+ })
+
+ It("calls fn for every master node", func() {
+ for i := 0; i < 10; i++ {
+ Expect(client.Set(strconv.Itoa(i), "", 0).Err()).NotTo(HaveOccurred())
+ }
+
+ err := client.ForEachMaster(func(master *redis.Client) error {
+ return master.FlushDB().Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ for _, client := range cluster.masters() {
+ keys, err := client.Keys("*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).To(HaveLen(0))
+ }
+ })
+ }
+
+ Describe("default ClusterClient", func() {
+ BeforeEach(func() {
+ opt = redisClusterOptions()
+ client = cluster.clusterClient(opt)
+
+ _ = client.ForEachMaster(func(master *redis.Client) error {
+ return master.FlushDB().Err()
+ })
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ assertClusterClient()
+ })
+
+ Describe("ClusterClient with RouteByLatency", func() {
+ BeforeEach(func() {
+ opt = redisClusterOptions()
+ opt.RouteByLatency = true
+ client = cluster.clusterClient(opt)
+
+ _ = client.ForEachMaster(func(master *redis.Client) error {
+ return master.FlushDB().Err()
+ })
+ })
+
+ AfterEach(func() {
+ client.FlushDB()
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ assertClusterClient()
+ })
+})
+
+var _ = Describe("ClusterClient without nodes", func() {
+ var client *redis.ClusterClient
+
+ BeforeEach(func() {
+ client = redis.NewClusterClient(&redis.ClusterOptions{})
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("returns an error", func() {
+ err := client.Ping().Err()
+ Expect(err).To(MatchError("redis: cluster has no nodes"))
+ })
+
+ It("pipeline returns an error", func() {
+ _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Ping()
+ return nil
+ })
+ Expect(err).To(MatchError("redis: cluster has no nodes"))
+ })
+})
+
+var _ = Describe("ClusterClient without valid nodes", func() {
+ var client *redis.ClusterClient
+
+ BeforeEach(func() {
+ client = redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: []string{redisAddr},
+ })
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("returns an error", func() {
+ err := client.Ping().Err()
+ Expect(err).To(MatchError("ERR This instance has cluster support disabled"))
+ })
+
+ It("pipeline returns an error", func() {
+ _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Ping()
+ return nil
+ })
+ Expect(err).To(MatchError("ERR This instance has cluster support disabled"))
+ })
+})
+
+var _ = Describe("ClusterClient timeout", func() {
+ var client *redis.ClusterClient
+
+ AfterEach(func() {
+ _ = client.Close()
+ })
+
+ testTimeout := func() {
+ It("Ping timeouts", func() {
+ err := client.Ping().Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Pipeline timeouts", func() {
+ _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Ping()
+ return nil
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Tx timeouts", func() {
+ err := client.Watch(func(tx *redis.Tx) error {
+ return tx.Ping().Err()
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Tx Pipeline timeouts", func() {
+ err := client.Watch(func(tx *redis.Tx) error {
+ _, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Ping()
+ return nil
+ })
+ return err
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+ }
+
+ Context("read timeout", func() {
+ BeforeEach(func() {
+ opt := redisClusterOptions()
+ opt.ReadTimeout = time.Nanosecond
+ opt.WriteTimeout = -1
+ client = cluster.clusterClient(opt)
+ })
+
+ testTimeout()
+ })
+
+ Context("write timeout", func() {
+ BeforeEach(func() {
+ opt := redisClusterOptions()
+ opt.ReadTimeout = -1
+ opt.WriteTimeout = time.Nanosecond
+ client = cluster.clusterClient(opt)
+ })
+
+ testTimeout()
+ })
+
+ Context("network timeout", func() {
+ const pause = time.Second
+
+ BeforeEach(func() {
+ opt := redisClusterOptions()
+ opt.ReadTimeout = 100 * time.Millisecond
+ opt.WriteTimeout = 100 * time.Millisecond
+ opt.MaxRedirects = 1
+ client = cluster.clusterClient(opt)
+
+ err := client.ForEachNode(func(client *redis.Client) error {
+ return client.ClientPause(pause).Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Eventually(func() error {
+ return client.ForEachNode(func(client *redis.Client) error {
+ return client.Ping().Err()
+ })
+ }, 2*pause).ShouldNot(HaveOccurred())
+ })
+
+ testTimeout()
+ })
+})
+
+//------------------------------------------------------------------------------
+
+func BenchmarkRedisClusterPing(b *testing.B) {
+ if testing.Short() {
+ b.Skip("skipping in short mode")
+ }
+
+ cluster := &clusterScenario{
+ ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"},
+ nodeIds: make([]string, 6),
+ processes: make(map[string]*redisProcess, 6),
+ clients: make(map[string]*redis.Client, 6),
+ }
+
+ if err := startCluster(cluster); err != nil {
+ b.Fatal(err)
+ }
+ defer stopCluster(cluster)
+
+ client := cluster.clusterClient(redisClusterOptions())
+ defer client.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Ping().Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkRedisClusterSetString(b *testing.B) {
+ if testing.Short() {
+ b.Skip("skipping in short mode")
+ }
+
+ cluster := &clusterScenario{
+ ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"},
+ nodeIds: make([]string, 6),
+ processes: make(map[string]*redisProcess, 6),
+ clients: make(map[string]*redis.Client, 6),
+ }
+
+ if err := startCluster(cluster); err != nil {
+ b.Fatal(err)
+ }
+ defer stopCluster(cluster)
+
+ client := cluster.clusterClient(redisClusterOptions())
+ defer client.Close()
+
+ value := string(bytes.Repeat([]byte{'1'}, 10000))
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if err := client.Set("key", value, 0).Err(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
diff --git a/vendor/github.com/go-redis/redis/command.go b/vendor/github.com/go-redis/redis/command.go
new file mode 100644
index 000000000..361661adf
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/command.go
@@ -0,0 +1,946 @@
+package redis
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+ "github.com/go-redis/redis/internal/pool"
+ "github.com/go-redis/redis/internal/proto"
+)
+
+var (
+ _ Cmder = (*Cmd)(nil)
+ _ Cmder = (*SliceCmd)(nil)
+ _ Cmder = (*StatusCmd)(nil)
+ _ Cmder = (*IntCmd)(nil)
+ _ Cmder = (*DurationCmd)(nil)
+ _ Cmder = (*BoolCmd)(nil)
+ _ Cmder = (*StringCmd)(nil)
+ _ Cmder = (*FloatCmd)(nil)
+ _ Cmder = (*StringSliceCmd)(nil)
+ _ Cmder = (*BoolSliceCmd)(nil)
+ _ Cmder = (*StringStringMapCmd)(nil)
+ _ Cmder = (*StringIntMapCmd)(nil)
+ _ Cmder = (*ZSliceCmd)(nil)
+ _ Cmder = (*ScanCmd)(nil)
+ _ Cmder = (*ClusterSlotsCmd)(nil)
+)
+
+type Cmder interface {
+ args() []interface{}
+ arg(int) string
+ Name() string
+
+ readReply(*pool.Conn) error
+ setErr(error)
+
+ readTimeout() *time.Duration
+
+ Err() error
+ fmt.Stringer
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ cmd.setErr(e)
+ }
+}
+
+func writeCmd(cn *pool.Conn, cmds ...Cmder) error {
+ cn.Wb.Reset()
+ for _, cmd := range cmds {
+ if err := cn.Wb.Append(cmd.args()); err != nil {
+ return err
+ }
+ }
+
+ _, err := cn.Write(cn.Wb.Bytes())
+ return err
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ var ss []string
+ for _, arg := range cmd.args() {
+ ss = append(ss, fmt.Sprint(arg))
+ }
+ s := strings.Join(ss, " ")
+ if err := cmd.Err(); err != nil {
+ return s + ": " + err.Error()
+ }
+ if val != nil {
+ switch vv := val.(type) {
+ case []byte:
+ return s + ": " + string(vv)
+ default:
+ return s + ": " + fmt.Sprint(val)
+ }
+ }
+ return s
+}
+
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+ switch cmd.Name() {
+ case "eval", "evalsha":
+ if cmd.arg(2) != "0" {
+ return 3
+ } else {
+ return -1
+ }
+ case "publish":
+ return 1
+ }
+ if info == nil {
+ internal.Logf("info for cmd=%s not found", cmd.Name())
+ return -1
+ }
+ return int(info.FirstKeyPos)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ _args []interface{}
+ err error
+
+ _readTimeout *time.Duration
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) args() []interface{} {
+ return cmd._args
+}
+
+func (cmd *baseCmd) arg(pos int) string {
+ if pos < 0 || pos >= len(cmd._args) {
+ return ""
+ }
+ s, _ := cmd._args[pos].(string)
+ return s
+}
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd._args) > 0 {
+ // Cmd name must be lower cased.
+ s := internal.ToLower(cmd.arg(0))
+ cmd._args[0] = s
+ return s
+ }
+ return ""
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+func (cmd *baseCmd) setErr(e error) {
+ cmd.err = e
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) readReply(cn *pool.Conn) error {
+ cmd.val, cmd.err = cn.Rd.ReadReply(sliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ if b, ok := cmd.val.([]byte); ok {
+ // Bytes must be copied, because underlying memory is reused.
+ cmd.val = string(b)
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+func NewSliceCmd(args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(sliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]interface{})
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+func NewStatusCmd(args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(cn *pool.Conn) error {
+ cmd.val, cmd.err = cn.Rd.ReadStringReply()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+func NewIntCmd(args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(cn *pool.Conn) error {
+ cmd.val, cmd.err = cn.Rd.ReadIntReply()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{_args: args},
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(cn *pool.Conn) error {
+ var n int64
+ n, cmd.err = cn.Rd.ReadIntReply()
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = time.Duration(n) * cmd.precision
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+func NewTimeCmd(args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(timeParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(time.Time)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+func NewBoolCmd(args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+var ok = []byte("OK")
+
+func (cmd *BoolCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadReply(nil)
+ // `SET key value NX` returns nil when key already exists. But
+ // `SETNX key value` returns bool (0/1). So convert nil to bool.
+ // TODO: is this okay?
+ if cmd.err == Nil {
+ cmd.val = false
+ cmd.err = nil
+ return nil
+ }
+ if cmd.err != nil {
+ return cmd.err
+ }
+ switch v := v.(type) {
+ case int64:
+ cmd.val = v == 1
+ return nil
+ case []byte:
+ cmd.val = bytes.Equal(v, ok)
+ return nil
+ default:
+ cmd.err = fmt.Errorf("got %T, wanted int64 or string", v)
+ return cmd.err
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val []byte
+}
+
+func NewStringCmd(args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringCmd) Val() string {
+ return internal.BytesToString(cmd.val)
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan(cmd.val, val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(cn *pool.Conn) error {
+ cmd.val, cmd.err = cn.Rd.ReadBytesReply()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+func NewFloatCmd(args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(cn *pool.Conn) error {
+ cmd.val, cmd.err = cn.Rd.ReadFloatReply()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+func NewStringSliceCmd(args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(stringSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(boolSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]bool)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd {
+ return &StringStringMapCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStringMapCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(stringStringMapParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(map[string]string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd {
+ return &StringIntMapCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(stringIntMapParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(map[string]int64)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+func NewZSliceCmd(args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(zSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]Z)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process func(cmd Cmder) error
+}
+
+func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{_args: args},
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(cn *pool.Conn) error {
+ cmd.page, cmd.cursor, cmd.err = cn.Rd.ReadScanReply()
+ return cmd.err
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ Id string
+ Addr string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(clusterSlotsParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]ClusterSlot)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add a geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query a geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+}
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "WITHCOORD")
+ }
+ if q.WithDist {
+ args = append(args, "WITHDIST")
+ }
+ if q.WithGeoHash {
+ args = append(args, "WITHHASH")
+ }
+ if q.Count > 0 {
+ args = append(args, "COUNT", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{_args: args},
+ q: q,
+ }
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.locations = v.([]GeoLocation)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ positions []*GeoPos
+}
+
+func NewGeoPosCmd(args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.positions
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.positions)
+}
+
+func (cmd *GeoPosCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(geoPosSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.positions = v.([]*GeoPos)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(commandInfoSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(map[string]*CommandInfo)
+ return nil
+}
diff --git a/vendor/github.com/go-redis/redis/command_test.go b/vendor/github.com/go-redis/redis/command_test.go
new file mode 100644
index 000000000..e42375eda
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/command_test.go
@@ -0,0 +1,60 @@
+package redis_test
+
+import (
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Cmd", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("implements Stringer", func() {
+ set := client.Set("foo", "bar", 0)
+ Expect(set.String()).To(Equal("set foo bar: OK"))
+
+ get := client.Get("foo")
+ Expect(get.String()).To(Equal("get foo: bar"))
+ })
+
+ It("has val/err", func() {
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ get := client.Get("key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+ })
+
+ It("has helpers", func() {
+ set := client.Set("key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+
+ n, err := client.Get("key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(10)))
+
+ un, err := client.Get("key").Uint64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(un).To(Equal(uint64(10)))
+
+ f, err := client.Get("key").Float64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(f).To(Equal(float64(10)))
+ })
+
+})
diff --git a/vendor/github.com/go-redis/redis/commands.go b/vendor/github.com/go-redis/redis/commands.go
new file mode 100644
index 000000000..4ea78777c
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/commands.go
@@ -0,0 +1,2109 @@
+package redis
+
+import (
+ "io"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+)
+
+func readTimeout(timeout time.Duration) time.Duration {
+ if timeout == 0 {
+ return 0
+ }
+ return timeout + time.Second
+}
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logf(
+ "specified duration is %s, but minimal supported value is %s",
+ dur, time.Millisecond,
+ )
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logf(
+ "specified duration is %s, but minimal supported value is %s",
+ dur, time.Second,
+ )
+ }
+ return int64(dur / time.Second)
+}
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(fn func(Pipeliner) error) ([]Cmder, error)
+
+ ClientGetName() *StringCmd
+ Echo(message interface{}) *StringCmd
+ Ping() *StatusCmd
+ Quit() *StatusCmd
+ Del(keys ...string) *IntCmd
+ Unlink(keys ...string) *IntCmd
+ Dump(key string) *StringCmd
+ Exists(keys ...string) *IntCmd
+ Expire(key string, expiration time.Duration) *BoolCmd
+ ExpireAt(key string, tm time.Time) *BoolCmd
+ Keys(pattern string) *StringSliceCmd
+ Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd
+ Move(key string, db int64) *BoolCmd
+ ObjectRefCount(key string) *IntCmd
+ ObjectEncoding(key string) *StringCmd
+ ObjectIdleTime(key string) *DurationCmd
+ Persist(key string) *BoolCmd
+ PExpire(key string, expiration time.Duration) *BoolCmd
+ PExpireAt(key string, tm time.Time) *BoolCmd
+ PTTL(key string) *DurationCmd
+ RandomKey() *StringCmd
+ Rename(key, newkey string) *StatusCmd
+ RenameNX(key, newkey string) *BoolCmd
+ Restore(key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd
+ Sort(key string, sort Sort) *StringSliceCmd
+ SortInterfaces(key string, sort Sort) *SliceCmd
+ TTL(key string) *DurationCmd
+ Type(key string) *StatusCmd
+ Scan(cursor uint64, match string, count int64) *ScanCmd
+ SScan(key string, cursor uint64, match string, count int64) *ScanCmd
+ HScan(key string, cursor uint64, match string, count int64) *ScanCmd
+ ZScan(key string, cursor uint64, match string, count int64) *ScanCmd
+ Append(key, value string) *IntCmd
+ BitCount(key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(destKey string, keys ...string) *IntCmd
+ BitOpOr(destKey string, keys ...string) *IntCmd
+ BitOpXor(destKey string, keys ...string) *IntCmd
+ BitOpNot(destKey string, key string) *IntCmd
+ BitPos(key string, bit int64, pos ...int64) *IntCmd
+ Decr(key string) *IntCmd
+ DecrBy(key string, decrement int64) *IntCmd
+ Get(key string) *StringCmd
+ GetBit(key string, offset int64) *IntCmd
+ GetRange(key string, start, end int64) *StringCmd
+ GetSet(key string, value interface{}) *StringCmd
+ Incr(key string) *IntCmd
+ IncrBy(key string, value int64) *IntCmd
+ IncrByFloat(key string, value float64) *FloatCmd
+ MGet(keys ...string) *SliceCmd
+ MSet(pairs ...interface{}) *StatusCmd
+ MSetNX(pairs ...interface{}) *BoolCmd
+ Set(key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetBit(key string, offset int64, value int) *IntCmd
+ SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(key string, offset int64, value string) *IntCmd
+ StrLen(key string) *IntCmd
+ HDel(key string, fields ...string) *IntCmd
+ HExists(key, field string) *BoolCmd
+ HGet(key, field string) *StringCmd
+ HGetAll(key string) *StringStringMapCmd
+ HIncrBy(key, field string, incr int64) *IntCmd
+ HIncrByFloat(key, field string, incr float64) *FloatCmd
+ HKeys(key string) *StringSliceCmd
+ HLen(key string) *IntCmd
+ HMGet(key string, fields ...string) *SliceCmd
+ HMSet(key string, fields map[string]interface{}) *StatusCmd
+ HSet(key, field string, value interface{}) *BoolCmd
+ HSetNX(key, field string, value interface{}) *BoolCmd
+ HVals(key string) *StringSliceCmd
+ BLPop(timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPop(timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(source, destination string, timeout time.Duration) *StringCmd
+ LIndex(key string, index int64) *StringCmd
+ LInsert(key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(key string, pivot, value interface{}) *IntCmd
+ LLen(key string) *IntCmd
+ LPop(key string) *StringCmd
+ LPush(key string, values ...interface{}) *IntCmd
+ LPushX(key string, value interface{}) *IntCmd
+ LRange(key string, start, stop int64) *StringSliceCmd
+ LRem(key string, count int64, value interface{}) *IntCmd
+ LSet(key string, index int64, value interface{}) *StatusCmd
+ LTrim(key string, start, stop int64) *StatusCmd
+ RPop(key string) *StringCmd
+ RPopLPush(source, destination string) *StringCmd
+ RPush(key string, values ...interface{}) *IntCmd
+ RPushX(key string, value interface{}) *IntCmd
+ SAdd(key string, members ...interface{}) *IntCmd
+ SCard(key string) *IntCmd
+ SDiff(keys ...string) *StringSliceCmd
+ SDiffStore(destination string, keys ...string) *IntCmd
+ SInter(keys ...string) *StringSliceCmd
+ SInterStore(destination string, keys ...string) *IntCmd
+ SIsMember(key string, member interface{}) *BoolCmd
+ SMembers(key string) *StringSliceCmd
+ SMove(source, destination string, member interface{}) *BoolCmd
+ SPop(key string) *StringCmd
+ SPopN(key string, count int64) *StringSliceCmd
+ SRandMember(key string) *StringCmd
+ SRandMemberN(key string, count int64) *StringSliceCmd
+ SRem(key string, members ...interface{}) *IntCmd
+ SUnion(keys ...string) *StringSliceCmd
+ SUnionStore(destination string, keys ...string) *IntCmd
+ ZAdd(key string, members ...Z) *IntCmd
+ ZAddNX(key string, members ...Z) *IntCmd
+ ZAddXX(key string, members ...Z) *IntCmd
+ ZAddCh(key string, members ...Z) *IntCmd
+ ZAddNXCh(key string, members ...Z) *IntCmd
+ ZAddXXCh(key string, members ...Z) *IntCmd
+ ZIncr(key string, member Z) *FloatCmd
+ ZIncrNX(key string, member Z) *FloatCmd
+ ZIncrXX(key string, member Z) *FloatCmd
+ ZCard(key string) *IntCmd
+ ZCount(key, min, max string) *IntCmd
+ ZIncrBy(key string, increment float64, member string) *FloatCmd
+ ZInterStore(destination string, store ZStore, keys ...string) *IntCmd
+ ZRange(key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
+ ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
+ ZRank(key, member string) *IntCmd
+ ZRem(key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(key, min, max string) *IntCmd
+ ZRemRangeByLex(key, min, max string) *IntCmd
+ ZRevRange(key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
+ ZRevRank(key, member string) *IntCmd
+ ZScore(key, member string) *FloatCmd
+ ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd
+ PFAdd(key string, els ...interface{}) *IntCmd
+ PFCount(keys ...string) *IntCmd
+ PFMerge(dest string, keys ...string) *StatusCmd
+ BgRewriteAOF() *StatusCmd
+ BgSave() *StatusCmd
+ ClientKill(ipPort string) *StatusCmd
+ ClientList() *StringCmd
+ ClientPause(dur time.Duration) *BoolCmd
+ ConfigGet(parameter string) *SliceCmd
+ ConfigResetStat() *StatusCmd
+ ConfigSet(parameter, value string) *StatusCmd
+ DbSize() *IntCmd
+ FlushAll() *StatusCmd
+ FlushAllAsync() *StatusCmd
+ FlushDB() *StatusCmd
+ FlushDBAsync() *StatusCmd
+ Info(section ...string) *StringCmd
+ LastSave() *IntCmd
+ Save() *StatusCmd
+ Shutdown() *StatusCmd
+ ShutdownSave() *StatusCmd
+ ShutdownNoSave() *StatusCmd
+ SlaveOf(host, port string) *StatusCmd
+ Time() *TimeCmd
+ Eval(script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(scripts ...string) *BoolSliceCmd
+ ScriptFlush() *StatusCmd
+ ScriptKill() *StatusCmd
+ ScriptLoad(script string) *StringCmd
+ DebugObject(key string) *StringCmd
+ PubSubChannels(pattern string) *StringSliceCmd
+ PubSubNumSub(channels ...string) *StringIntMapCmd
+ PubSubNumPat() *IntCmd
+ ClusterSlots() *ClusterSlotsCmd
+ ClusterNodes() *StringCmd
+ ClusterMeet(host, port string) *StatusCmd
+ ClusterForget(nodeID string) *StatusCmd
+ ClusterReplicate(nodeID string) *StatusCmd
+ ClusterResetSoft() *StatusCmd
+ ClusterResetHard() *StatusCmd
+ ClusterInfo() *StringCmd
+ ClusterKeySlot(key string) *IntCmd
+ ClusterCountFailureReports(nodeID string) *IntCmd
+ ClusterCountKeysInSlot(slot int) *IntCmd
+ ClusterDelSlots(slots ...int) *StatusCmd
+ ClusterDelSlotsRange(min, max int) *StatusCmd
+ ClusterSaveConfig() *StatusCmd
+ ClusterSlaves(nodeID string) *StringSliceCmd
+ ClusterFailover() *StatusCmd
+ ClusterAddSlots(slots ...int) *StatusCmd
+ ClusterAddSlotsRange(min, max int) *StatusCmd
+ GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(key string, members ...string) *GeoPosCmd
+ GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoDist(key string, member1, member2, unit string) *FloatCmd
+ GeoHash(key string, members ...string) *StringSliceCmd
+ Command() *CommandsInfoCmd
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(password string) *StatusCmd
+ Select(index int) *StatusCmd
+ ClientSetName(name string) *BoolCmd
+ ReadOnly() *StatusCmd
+ ReadWrite() *StatusCmd
+}
+
+var _ Cmdable = (*Client)(nil)
+var _ Cmdable = (*Tx)(nil)
+var _ Cmdable = (*Ring)(nil)
+var _ Cmdable = (*ClusterClient)(nil)
+
+type cmdable struct {
+ process func(cmd Cmder) error
+}
+
+func (c *cmdable) setProcessor(fn func(Cmder) error) {
+ c.process = fn
+}
+
+type statefulCmdable struct {
+ cmdable
+ process func(cmd Cmder) error
+}
+
+func (c *statefulCmdable) setProcessor(fn func(Cmder) error) {
+ c.process = fn
+ c.cmdable.setProcessor(fn)
+}
+
+//------------------------------------------------------------------------------
+
+func (c *statefulCmdable) Auth(password string) *StatusCmd {
+ cmd := NewStatusCmd("auth", password)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Echo(message interface{}) *StringCmd {
+ cmd := NewStringCmd("echo", message)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Ping() *StatusCmd {
+ cmd := NewStatusCmd("ping")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Quit() *StatusCmd {
+ panic("not implemented")
+}
+
+func (c *statefulCmdable) Select(index int) *StatusCmd {
+ cmd := NewStatusCmd("select", index)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Del(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Unlink(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Dump(key string) *StringCmd {
+ cmd := NewStringCmd("dump", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Exists(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("expire", key, formatSec(expiration))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd("expireat", key, tm.Unix())
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Keys(pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("keys", pattern)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(timeout),
+ )
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Move(key string, db int64) *BoolCmd {
+ cmd := NewBoolCmd("move", key, db)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ObjectRefCount(key string) *IntCmd {
+ cmd := NewIntCmd("object", "refcount", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ObjectEncoding(key string) *StringCmd {
+ cmd := NewStringCmd("object", "encoding", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ObjectIdleTime(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Second, "object", "idletime", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Persist(key string) *BoolCmd {
+ cmd := NewBoolCmd("persist", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("pexpire", key, formatMs(expiration))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PTTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Millisecond, "pttl", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RandomKey() *StringCmd {
+ cmd := NewStringCmd("randomkey")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Rename(key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd("rename", key, newkey)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RenameNX(key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd("renamenx", key, newkey)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "restore",
+ key,
+ formatMs(ttl),
+ value,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "restore",
+ key,
+ formatMs(ttl),
+ value,
+ "replace",
+ )
+ c.process(cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count float64
+ Get []string
+ Order string
+ IsAlpha bool
+ Store string
+}
+
+func (sort *Sort) args(key string) []interface{} {
+ args := []interface{}{"sort", key}
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.IsAlpha {
+ args = append(args, "alpha")
+ }
+ if sort.Store != "" {
+ args = append(args, "store", sort.Store)
+ }
+ return args
+}
+
+func (c *cmdable) Sort(key string, sort Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(sort.args(key)...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SortInterfaces(key string, sort Sort) *SliceCmd {
+ cmd := NewSliceCmd(sort.args(key)...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) TTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Second, "ttl", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Type(key string) *StatusCmd {
+ cmd := NewStatusCmd("type", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c.process, args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c.process, args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c.process, args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c.process, args...)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Append(key, value string) *IntCmd {
+ cmd := NewIntCmd("append", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+}
+
+func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ )
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("and", destKey, keys...)
+}
+
+func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("or", destKey, keys...)
+}
+
+func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("xor", destKey, keys...)
+}
+
+func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd {
+ return c.bitOp("not", destKey, key)
+}
+
+func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Decr(key string) *IntCmd {
+ cmd := NewIntCmd("decr", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd("decrby", key, decrement)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Get(key string) *StringCmd {
+ cmd := NewStringCmd("get", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GetBit(key string, offset int64) *IntCmd {
+ cmd := NewIntCmd("getbit", key, offset)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GetRange(key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd("getrange", key, start, end)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GetSet(key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd("getset", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Incr(key string) *IntCmd {
+ cmd := NewIntCmd("incr", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) IncrBy(key string, value int64) *IntCmd {
+ cmd := NewIntCmd("incrby", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd("incrbyfloat", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) MGet(keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) MSet(pairs ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1+len(pairs))
+ args[0] = "mset"
+ for i, pair := range pairs {
+ args[1+i] = pair
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) MSetNX(pairs ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1+len(pairs))
+ args[0] = "msetnx"
+ for i, pair := range pairs {
+ args[1+i] = pair
+ }
+ cmd := NewBoolCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration]` command.
+//
+// Use expiration for `SETEX`-like behavior.
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 4)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(expiration))
+ } else {
+ args = append(args, "ex", formatSec(expiration))
+ }
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
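
The doc comment above describes how the expiration argument is serialized: zero means no TTL, sub-second or non-integer durations go out as PX milliseconds, and everything else as EX seconds. A short sketch of the resulting call pattern, assuming a local server; the address and key name are illustrative assumptions:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address
	defer client.Close()

	// 10 * time.Second is a whole number of seconds, so this is sent as SET greeting hello EX 10.
	if err := client.Set("greeting", "hello", 10*time.Second).Err(); err != nil {
		log.Fatal(err)
	}

	val, err := client.Get("greeting").Result()
	if err == redis.Nil {
		fmt.Println("key missing or expired")
	} else if err != nil {
		log.Fatal(err)
	} else {
		fmt.Println("greeting =", val)
	}
}
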
+
+func (c *cmdable) SetBit(key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ if expiration == 0 {
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd("setnx", key, value)
+ } else {
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx")
+ } else {
+ cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx")
+ }
+ }
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ if expiration == 0 {
+ cmd = NewBoolCmd("set", key, value, "xx")
+ } else {
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx")
+ } else {
+ cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx")
+ }
+ }
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SetRange(key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd("setrange", key, offset, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) StrLen(key string) *IntCmd {
+ cmd := NewIntCmd("strlen", key)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) HDel(key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HExists(key, field string) *BoolCmd {
+ cmd := NewBoolCmd("hexists", key, field)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HGet(key, field string) *StringCmd {
+ cmd := NewStringCmd("hget", key, field)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HGetAll(key string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd("hgetall", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HIncrBy(key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd("hincrby", key, field, incr)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd("hincrbyfloat", key, field, incr)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HKeys(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("hkeys", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HLen(key string) *IntCmd {
+ cmd := NewIntCmd("hlen", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HMGet(key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HMSet(key string, fields map[string]interface{}) *StatusCmd {
+ args := make([]interface{}, 2+len(fields)*2)
+ args[0] = "hmset"
+ args[1] = key
+ i := 2
+ for k, v := range fields {
+ args[i] = k
+ args[i+1] = v
+ i += 2
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HSet(key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd("hset", key, field, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HSetNX(key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd("hsetnx", key, field, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HVals(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("hvals", key)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(timeout)
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(keys)+1] = formatSec(timeout)
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(timeout),
+ )
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LIndex(key string, index int64) *StringCmd {
+ cmd := NewStringCmd("lindex", key, index)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd("linsert", key, op, pivot, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd("linsert", key, "before", pivot, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd("linsert", key, "after", pivot, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LLen(key string) *IntCmd {
+ cmd := NewIntCmd("llen", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LPop(key string) *StringCmd {
+ cmd := NewStringCmd("lpop", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LPush(key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ for i, value := range values {
+ args[2+i] = value
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LPushX(key string, value interface{}) *IntCmd {
+ cmd := NewIntCmd("lpushx", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LRange(key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LRem(key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd("lrem", key, count, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LSet(key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd("lset", key, index, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LTrim(key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RPop(key string) *StringCmd {
+ cmd := NewStringCmd("rpop", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RPopLPush(source, destination string) *StringCmd {
+ cmd := NewStringCmd("rpoplpush", source, destination)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RPush(key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ for i, value := range values {
+ args[2+i] = value
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RPushX(key string, value interface{}) *IntCmd {
+ cmd := NewIntCmd("rpushx", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) SAdd(key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SCard(key string) *IntCmd {
+ cmd := NewIntCmd("scard", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SDiff(keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SDiffStore(destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SInter(keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SInterStore(destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd("sismember", key, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SMembers(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("smembers", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd("smove", source, destination, member)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SPOP key` command.
+func (c *cmdable) SPop(key string) *StringCmd {
+ cmd := NewStringCmd("spop", key)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c *cmdable) SPopN(key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd("spop", key, count)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c *cmdable) SRandMember(key string) *StringCmd {
+ cmd := NewStringCmd("srandmember", key)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c *cmdable) SRandMemberN(key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd("srandmember", key, count)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SRem(key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SUnion(keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewIntCmd(a...)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c *cmdable) ZAdd(key string, members ...Z) *IntCmd {
+ const n = 2
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1] = "zadd", key
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c *cmdable) ZAddNX(key string, members ...Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "nx"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c *cmdable) ZAddXX(key string, members ...Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "xx"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c *cmdable) ZAddCh(key string, members ...Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "ch"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c *cmdable) ZAddNXCh(key string, members ...Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c *cmdable) ZAddXXCh(key string, members ...Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+ return c.zAdd(a, n, members...)
+}
+
+func (c *cmdable) zIncr(a []interface{}, n int, members ...Z) *FloatCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewFloatCmd(a...)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c *cmdable) ZIncr(key string, member Z) *FloatCmd {
+ const n = 3
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2] = "zadd", key, "incr"
+ return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c *cmdable) ZIncrNX(key string, member Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+ return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c *cmdable) ZIncrXX(key string, member Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+ return c.zIncr(a, n, member)
+}
+
+func (c *cmdable) ZCard(key string) *IntCmd {
+ cmd := NewIntCmd("zcard", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZCount(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zcount", key, min, max)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd("zincrby", key, increment, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zinterstore"
+ args[1] = destination
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
+ args := []interface{}{
+ "zrange",
+ key,
+ start,
+ stop,
+ }
+ if withScores {
+ args = append(args, "withscores")
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRange(key string, start, stop int64) *StringSliceCmd {
+ return c.zRange(key, start, stop, false)
+}
+
+func (c *cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd("zrange", key, start, stop, "withscores")
+ c.process(cmd)
+ return cmd
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c *cmdable) zRangeBy(zcmd, key string, opt ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy("zrangebyscore", key, opt, false)
+}
+
+func (c *cmdable) ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy("zrangebylex", key, opt, false)
+}
+
+func (c *cmdable) ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("zrank", key, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRem(key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRemRangeByScore(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zremrangebyscore", key, min, max)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRemRangeByLex(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zremrangebylex", key, min, max)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd("zrevrange", key, start, stop)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) zRevRangeBy(zcmd, key string, opt ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy("zrevrangebyscore", key, opt)
+}
+
+func (c *cmdable) ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy("zrevrangebylex", key, opt)
+}
+
+func (c *cmdable) ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRevRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("zrevrank", key, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZScore(key, member string) *FloatCmd {
+ cmd := NewFloatCmd("zscore", key, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zunionstore"
+ args[1] = dest
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
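
ZAdd and its variants take score/member pairs as redis.Z values, while the range queries accept a ZRangeBy whose Min and Max are strings, so exclusive bounds and infinities can be expressed directly. A brief sketch combining the two, with an assumed local server and illustrative key and members:

package main

import (
	"fmt"
	"log"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address
	defer client.Close()

	// Scores and members travel as redis.Z pairs.
	err := client.ZAdd("leaderboard",
		redis.Z{Score: 10, Member: "alice"},
		redis.Z{Score: 25, Member: "bob"},
	).Err()
	if err != nil {
		log.Fatal(err)
	}

	// Min/Max are strings, so open intervals ("(10") and infinities ("+inf") work.
	entries, err := client.ZRangeByScoreWithScores("leaderboard", redis.ZRangeBy{
		Min: "(10", Max: "+inf",
	}).Result()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range entries {
		fmt.Printf("%v: %.0f\n", z.Member, z.Score)
	}
}
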
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) PFAdd(key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ for i, el := range els {
+ args[2+i] = el
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PFCount(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PFMerge(dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) BgRewriteAOF() *StatusCmd {
+ cmd := NewStatusCmd("bgrewriteaof")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) BgSave() *StatusCmd {
+ cmd := NewStatusCmd("bgsave")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClientKill(ipPort string) *StatusCmd {
+ cmd := NewStatusCmd("client", "kill", ipPort)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClientList() *StringCmd {
+ cmd := NewStringCmd("client", "list")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClientPause(dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("client", "pause", formatMs(dur))
+ c.process(cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c *statefulCmdable) ClientSetName(name string) *BoolCmd {
+ cmd := NewBoolCmd("client", "setname", name)
+ c.process(cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c *cmdable) ClientGetName() *StringCmd {
+ cmd := NewStringCmd("client", "getname")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ConfigGet(parameter string) *SliceCmd {
+ cmd := NewSliceCmd("config", "get", parameter)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ConfigResetStat() *StatusCmd {
+ cmd := NewStatusCmd("config", "resetstat")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ConfigSet(parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd("config", "set", parameter, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) DbSize() *IntCmd {
+ cmd := NewIntCmd("dbsize")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) FlushAll() *StatusCmd {
+ cmd := NewStatusCmd("flushall")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) FlushAllAsync() *StatusCmd {
+ cmd := NewStatusCmd("flushall", "async")
+ c.process(cmd)
+ return cmd
+}
+
+// Deprecated: Use FlushDB instead.
+func (c *cmdable) FlushDb() *StatusCmd {
+ cmd := NewStatusCmd("flushdb")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) FlushDB() *StatusCmd {
+ cmd := NewStatusCmd("flushdb")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) FlushDBAsync() *StatusCmd {
+ cmd := NewStatusCmd("flushdb", "async")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Info(section ...string) *StringCmd {
+ args := []interface{}{"info"}
+ if len(section) > 0 {
+ args = append(args, section[0])
+ }
+ cmd := NewStringCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LastSave() *IntCmd {
+ cmd := NewIntCmd("lastsave")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Save() *StatusCmd {
+ cmd := NewStatusCmd("save")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) shutdown(modifier string) *StatusCmd {
+ var args []interface{}
+ if modifier == "" {
+ args = []interface{}{"shutdown"}
+ } else {
+ args = []interface{}{"shutdown", modifier}
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = internal.RedisError(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c *cmdable) Shutdown() *StatusCmd {
+ return c.shutdown("")
+}
+
+func (c *cmdable) ShutdownSave() *StatusCmd {
+ return c.shutdown("save")
+}
+
+func (c *cmdable) ShutdownNoSave() *StatusCmd {
+ return c.shutdown("nosave")
+}
+
+func (c *cmdable) SlaveOf(host, port string) *StatusCmd {
+ cmd := NewStatusCmd("slaveof", host, port)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SlowLog() {
+ panic("not implemented")
+}
+
+func (c *cmdable) Sync() {
+ panic("not implemented")
+}
+
+func (c *cmdable) Time() *TimeCmd {
+ cmd := NewTimeCmd("time")
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Eval(script string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys)+len(args))
+ cmdArgs[0] = "eval"
+ cmdArgs[1] = script
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ pos := 3 + len(keys)
+ for i, arg := range args {
+ cmdArgs[pos+i] = arg
+ }
+ cmd := NewCmd(cmdArgs...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys)+len(args))
+ cmdArgs[0] = "evalsha"
+ cmdArgs[1] = sha1
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ pos := 3 + len(keys)
+ for i, arg := range args {
+ cmdArgs[pos+i] = arg
+ }
+ cmd := NewCmd(cmdArgs...)
+ c.process(cmd)
+ return cmd
+}
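
Eval and EvalSha keep KEYS separate from ARGV and insert the key count for you, mirroring the wire form of the EVAL command. A small sketch, assuming a local server; the key name and script are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address
	defer client.Close()

	// The []string becomes KEYS[1..n]; trailing arguments become ARGV[1..m].
	script := "return redis.call('incrby', KEYS[1], ARGV[1])"
	n, err := client.Eval(script, []string{"counter"}, 5).Result()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("counter is now", n)
}
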
+
+func (c *cmdable) ScriptExists(scripts ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(scripts))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, script := range scripts {
+ args[2+i] = script
+ }
+ cmd := NewBoolSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ScriptFlush() *StatusCmd {
+ cmd := NewStatusCmd("script", "flush")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ScriptKill() *StatusCmd {
+ cmd := NewStatusCmd("script", "kill")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ScriptLoad(script string) *StringCmd {
+ cmd := NewStringCmd("script", "load", script)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) DebugObject(key string) *StringCmd {
+ cmd := NewStringCmd("debug", "object", key)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c *cmdable) Publish(channel, message string) *IntCmd {
+ cmd := NewIntCmd("PUBLISH", channel, message)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PubSubChannels(pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PubSubNumSub(channels ...string) *StringIntMapCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewStringIntMapCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PubSubNumPat() *IntCmd {
+ cmd := NewIntCmd("pubsub", "numpat")
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) ClusterSlots() *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd("cluster", "slots")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterNodes() *StringCmd {
+ cmd := NewStringCmd("cluster", "nodes")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterMeet(host, port string) *StatusCmd {
+ cmd := NewStatusCmd("cluster", "meet", host, port)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterForget(nodeID string) *StatusCmd {
+ cmd := NewStatusCmd("cluster", "forget", nodeID)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterReplicate(nodeID string) *StatusCmd {
+ cmd := NewStatusCmd("cluster", "replicate", nodeID)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterResetSoft() *StatusCmd {
+ cmd := NewStatusCmd("cluster", "reset", "soft")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterResetHard() *StatusCmd {
+ cmd := NewStatusCmd("cluster", "reset", "hard")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterInfo() *StringCmd {
+ cmd := NewStringCmd("cluster", "info")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterKeySlot(key string) *IntCmd {
+ cmd := NewIntCmd("cluster", "keyslot", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterCountFailureReports(nodeID string) *IntCmd {
+ cmd := NewIntCmd("cluster", "count-failure-reports", nodeID)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterCountKeysInSlot(slot int) *IntCmd {
+ cmd := NewIntCmd("cluster", "countkeysinslot", slot)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterDelSlots(slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterDelSlotsRange(min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(slots...)
+}
+
+func (c *cmdable) ClusterSaveConfig() *StatusCmd {
+ cmd := NewStatusCmd("cluster", "saveconfig")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterSlaves(nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("cluster", "slaves", nodeID)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *statefulCmdable) ReadOnly() *StatusCmd {
+ cmd := NewStatusCmd("readonly")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *statefulCmdable) ReadWrite() *StatusCmd {
+ cmd := NewStatusCmd("readwrite")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterFailover() *StatusCmd {
+ cmd := NewStatusCmd("cluster", "failover")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterAddSlots(slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClusterAddSlotsRange(min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(slots...)
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd {
+ args := make([]interface{}, 2+3*len(geoLocation))
+ args[0] = "geoadd"
+ args[1] = key
+ for i, eachLoc := range geoLocation {
+ args[2+3*i] = eachLoc.Longitude
+ args[2+3*i+1] = eachLoc.Latitude
+ args[2+3*i+2] = eachLoc.Name
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(query, "georadius", key, longitude, latitude)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(query, "georadiusbymember", key, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GeoDist(key string, member1, member2, unit string) *FloatCmd {
+ if unit == "" {
+ unit = "km"
+ }
+ cmd := NewFloatCmd("geodist", key, member1, member2, unit)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GeoHash(key string, members ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geohash"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GeoPos(key string, members ...string) *GeoPosCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geopos"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewGeoPosCmd(args...)
+ c.process(cmd)
+ return cmd
+}
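
GeoAdd expects longitude/latitude/name triples via *GeoLocation, and GeoRadius assembles its optional flags from the GeoRadiusQuery defined in command.go (an empty Unit falls back to km). A short sketch, assuming a local server; the coordinates and names are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address
	defer client.Close()

	err := client.GeoAdd("cities",
		&redis.GeoLocation{Name: "Berlin", Longitude: 13.4050, Latitude: 52.5200},
		&redis.GeoLocation{Name: "Hamburg", Longitude: 9.9937, Latitude: 53.5511},
	).Err()
	if err != nil {
		log.Fatal(err)
	}

	// WithDist asks for distances in the reply; Unit is left empty, so km is used.
	locs, err := client.GeoRadius("cities", 13.0, 52.5, &redis.GeoRadiusQuery{
		Radius:   300,
		WithDist: true,
	}).Result()
	if err != nil {
		log.Fatal(err)
	}
	for _, loc := range locs {
		fmt.Printf("%s is %.1f km away\n", loc.Name, loc.Dist)
	}
}
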
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Command() *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd("command")
+ c.process(cmd)
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/commands_test.go b/vendor/github.com/go-redis/redis/commands_test.go
new file mode 100644
index 000000000..e8cdb205e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/commands_test.go
@@ -0,0 +1,2938 @@
+package redis_test
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis"
+)
+
+var _ = Describe("Commands", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ Describe("server", func() {
+
+ It("should Auth", func() {
+ _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Auth("password")
+ return nil
+ })
+ Expect(err).To(MatchError("ERR Client sent AUTH, but no password is set"))
+ })
+
+ It("should Echo", func() {
+ pipe := client.Pipeline()
+ echo := pipe.Echo("hello")
+ _, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(echo.Err()).NotTo(HaveOccurred())
+ Expect(echo.Val()).To(Equal("hello"))
+ })
+
+ It("should Ping", func() {
+ ping := client.Ping()
+ Expect(ping.Err()).NotTo(HaveOccurred())
+ Expect(ping.Val()).To(Equal("PONG"))
+ })
+
+ It("should Wait", func() {
+ // assume testing on single redis instance
+ start := time.Now()
+ wait := client.Wait(1, time.Second)
+ Expect(wait.Err()).NotTo(HaveOccurred())
+ Expect(wait.Val()).To(Equal(int64(0)))
+ Expect(time.Now()).To(BeTemporally("~", start.Add(time.Second), 800*time.Millisecond))
+ })
+
+ It("should Select", func() {
+ pipe := client.Pipeline()
+ sel := pipe.Select(1)
+ _, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(sel.Err()).NotTo(HaveOccurred())
+ Expect(sel.Val()).To(Equal("OK"))
+ })
+
+ It("should BgRewriteAOF", func() {
+ Skip("flaky test")
+
+ val, err := client.BgRewriteAOF().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(ContainSubstring("Background append only file rewriting"))
+ })
+
+ It("should BgSave", func() {
+ Skip("flaky test")
+
+ // workaround for "ERR Can't BGSAVE while AOF log rewriting is in progress"
+ Eventually(func() string {
+ return client.BgSave().Val()
+ }, "30s").Should(Equal("Background saving started"))
+ })
+
+ It("should ClientKill", func() {
+ r := client.ClientKill("1.1.1.1:1111")
+ Expect(r.Err()).To(MatchError("ERR No such client"))
+ Expect(r.Val()).To(Equal(""))
+ })
+
+ It("should ClientPause", func() {
+ err := client.ClientPause(time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ start := time.Now()
+ err = client.Ping().Err()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(time.Now()).To(BeTemporally("~", start.Add(time.Second), 800*time.Millisecond))
+ })
+
+ It("should ClientSetName and ClientGetName", func() {
+ pipe := client.Pipeline()
+ set := pipe.ClientSetName("theclientname")
+ get := pipe.ClientGetName()
+ _, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(BeTrue())
+
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("theclientname"))
+ })
+
+ It("should ConfigGet", func() {
+ val, err := client.ConfigGet("*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).NotTo(BeEmpty())
+ })
+
+ It("should ConfigResetStat", func() {
+ r := client.ConfigResetStat()
+ Expect(r.Err()).NotTo(HaveOccurred())
+ Expect(r.Val()).To(Equal("OK"))
+ })
+
+ It("should ConfigSet", func() {
+ configGet := client.ConfigGet("maxmemory")
+ Expect(configGet.Err()).NotTo(HaveOccurred())
+ Expect(configGet.Val()).To(HaveLen(2))
+ Expect(configGet.Val()[0]).To(Equal("maxmemory"))
+
+ configSet := client.ConfigSet("maxmemory", configGet.Val()[1].(string))
+ Expect(configSet.Err()).NotTo(HaveOccurred())
+ Expect(configSet.Val()).To(Equal("OK"))
+ })
+
+ It("should DbSize", func() {
+ dbSize := client.DbSize()
+ Expect(dbSize.Err()).NotTo(HaveOccurred())
+ Expect(dbSize.Val()).To(Equal(int64(0)))
+ })
+
+ It("should Info", func() {
+ info := client.Info()
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).NotTo(Equal(""))
+ })
+
+ It("should Info cpu", func() {
+ info := client.Info("cpu")
+ Expect(info.Err()).NotTo(HaveOccurred())
+ Expect(info.Val()).NotTo(Equal(""))
+ Expect(info.Val()).To(ContainSubstring(`used_cpu_sys`))
+ })
+
+ It("should LastSave", func() {
+ lastSave := client.LastSave()
+ Expect(lastSave.Err()).NotTo(HaveOccurred())
+ Expect(lastSave.Val()).NotTo(Equal(0))
+ })
+
+ It("should Save", func() {
+ // workaround for "ERR Background save already in progress"
+ Eventually(func() string {
+ return client.Save().Val()
+ }, "10s").Should(Equal("OK"))
+ })
+
+ It("should SlaveOf", func() {
+ slaveOf := client.SlaveOf("localhost", "8888")
+ Expect(slaveOf.Err()).NotTo(HaveOccurred())
+ Expect(slaveOf.Val()).To(Equal("OK"))
+
+ slaveOf = client.SlaveOf("NO", "ONE")
+ Expect(slaveOf.Err()).NotTo(HaveOccurred())
+ Expect(slaveOf.Val()).To(Equal("OK"))
+ })
+
+ It("should Time", func() {
+ tm, err := client.Time().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(tm).To(BeTemporally("~", time.Now(), 3*time.Second))
+ })
+
+ })
+
+ Describe("debugging", func() {
+
+ It("should DebugObject", func() {
+ debug := client.DebugObject("foo")
+ Expect(debug.Err()).To(HaveOccurred())
+ Expect(debug.Err().Error()).To(Equal("ERR no such key"))
+
+ client.Set("foo", "bar", 0)
+ debug = client.DebugObject("foo")
+ Expect(debug.Err()).NotTo(HaveOccurred())
+ Expect(debug.Val()).To(ContainSubstring(`serializedlength:4`))
+ })
+
+ })
+
+ Describe("keys", func() {
+
+ It("should Del", func() {
+ err := client.Set("key1", "Hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.Set("key2", "World", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.Del("key1", "key2", "key3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+ })
+
+ It("should Unlink", func() {
+ err := client.Set("key1", "Hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.Set("key2", "World", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.Unlink("key1", "key2", "key3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+ })
+
+ It("should Dump", func() {
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ dump := client.Dump("key")
+ Expect(dump.Err()).NotTo(HaveOccurred())
+ Expect(dump.Val()).NotTo(BeEmpty())
+ })
+
+ It("should Exists", func() {
+ set := client.Set("key1", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ n, err := client.Exists("key1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ n, err = client.Exists("key2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+
+ n, err = client.Exists("key1", "key2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ n, err = client.Exists("key1", "key1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+ })
+
+ It("should Expire", func() {
+ set := client.Set("key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expire := client.Expire("key", 10*time.Second)
+ Expect(expire.Err()).NotTo(HaveOccurred())
+ Expect(expire.Val()).To(Equal(true))
+
+ ttl := client.TTL("key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(10 * time.Second))
+
+ set = client.Set("key", "Hello World", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ ttl = client.TTL("key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val() < 0).To(Equal(true))
+ })
+
+ It("should ExpireAt", func() {
+ set := client.Set("key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ n, err := client.Exists("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ expireAt := client.ExpireAt("key", time.Now().Add(-time.Hour))
+ Expect(expireAt.Err()).NotTo(HaveOccurred())
+ Expect(expireAt.Val()).To(Equal(true))
+
+ n, err = client.Exists("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+ })
+
+ It("should Keys", func() {
+ mset := client.MSet("one", "1", "two", "2", "three", "3", "four", "4")
+ Expect(mset.Err()).NotTo(HaveOccurred())
+ Expect(mset.Val()).To(Equal("OK"))
+
+ keys := client.Keys("*o*")
+ Expect(keys.Err()).NotTo(HaveOccurred())
+ Expect(keys.Val()).To(ConsistOf([]string{"four", "one", "two"}))
+
+ keys = client.Keys("t??")
+ Expect(keys.Err()).NotTo(HaveOccurred())
+ Expect(keys.Val()).To(Equal([]string{"two"}))
+
+ keys = client.Keys("*")
+ Expect(keys.Err()).NotTo(HaveOccurred())
+ Expect(keys.Val()).To(ConsistOf([]string{"four", "one", "three", "two"}))
+ })
+
+ It("should Migrate", func() {
+ migrate := client.Migrate("localhost", redisSecondaryPort, "key", 0, 0)
+ Expect(migrate.Err()).NotTo(HaveOccurred())
+ Expect(migrate.Val()).To(Equal("NOKEY"))
+
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ migrate = client.Migrate("localhost", redisSecondaryPort, "key", 0, 0)
+ Expect(migrate.Err()).To(MatchError("IOERR error or timeout writing to target instance"))
+ Expect(migrate.Val()).To(Equal(""))
+ })
+
+ It("should Move", func() {
+ move := client.Move("key", 2)
+ Expect(move.Err()).NotTo(HaveOccurred())
+ Expect(move.Val()).To(Equal(false))
+
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ move = client.Move("key", 2)
+ Expect(move.Err()).NotTo(HaveOccurred())
+ Expect(move.Val()).To(Equal(true))
+
+ get := client.Get("key")
+ Expect(get.Err()).To(Equal(redis.Nil))
+ Expect(get.Val()).To(Equal(""))
+
+ pipe := client.Pipeline()
+ pipe.Select(2)
+ get = pipe.Get("key")
+ pipe.FlushDB()
+
+ _, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should Object", func() {
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ refCount := client.ObjectRefCount("key")
+ Expect(refCount.Err()).NotTo(HaveOccurred())
+ Expect(refCount.Val()).To(Equal(int64(1)))
+
+ err := client.ObjectEncoding("key").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ idleTime := client.ObjectIdleTime("key")
+ Expect(idleTime.Err()).NotTo(HaveOccurred())
+ Expect(idleTime.Val()).To(Equal(time.Duration(0)))
+ })
+
+ It("should Persist", func() {
+ set := client.Set("key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expire := client.Expire("key", 10*time.Second)
+ Expect(expire.Err()).NotTo(HaveOccurred())
+ Expect(expire.Val()).To(Equal(true))
+
+ ttl := client.TTL("key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(10 * time.Second))
+
+ persist := client.Persist("key")
+ Expect(persist.Err()).NotTo(HaveOccurred())
+ Expect(persist.Val()).To(Equal(true))
+
+ ttl = client.TTL("key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val() < 0).To(Equal(true))
+ })
+
+ It("should PExpire", func() {
+ set := client.Set("key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expiration := 900 * time.Millisecond
+ pexpire := client.PExpire("key", expiration)
+ Expect(pexpire.Err()).NotTo(HaveOccurred())
+ Expect(pexpire.Val()).To(Equal(true))
+
+ ttl := client.TTL("key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(time.Second))
+
+ pttl := client.PTTL("key")
+ Expect(pttl.Err()).NotTo(HaveOccurred())
+ Expect(pttl.Val()).To(BeNumerically("~", expiration, 10*time.Millisecond))
+ })
+
+ It("should PExpireAt", func() {
+ set := client.Set("key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expiration := 900 * time.Millisecond
+ pexpireat := client.PExpireAt("key", time.Now().Add(expiration))
+ Expect(pexpireat.Err()).NotTo(HaveOccurred())
+ Expect(pexpireat.Val()).To(Equal(true))
+
+ ttl := client.TTL("key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(time.Second))
+
+ pttl := client.PTTL("key")
+ Expect(pttl.Err()).NotTo(HaveOccurred())
+ Expect(pttl.Val()).To(BeNumerically("~", expiration, 10*time.Millisecond))
+ })
+
+ It("should PTTL", func() {
+ set := client.Set("key", "Hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expiration := time.Second
+ expire := client.Expire("key", expiration)
+ Expect(expire.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ pttl := client.PTTL("key")
+ Expect(pttl.Err()).NotTo(HaveOccurred())
+ Expect(pttl.Val()).To(BeNumerically("~", expiration, 10*time.Millisecond))
+ })
+
+ It("should RandomKey", func() {
+ randomKey := client.RandomKey()
+ Expect(randomKey.Err()).To(Equal(redis.Nil))
+ Expect(randomKey.Val()).To(Equal(""))
+
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ randomKey = client.RandomKey()
+ Expect(randomKey.Err()).NotTo(HaveOccurred())
+ Expect(randomKey.Val()).To(Equal("key"))
+ })
+
+ It("should Rename", func() {
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ status := client.Rename("key", "key1")
+ Expect(status.Err()).NotTo(HaveOccurred())
+ Expect(status.Val()).To(Equal("OK"))
+
+ get := client.Get("key1")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should RenameNX", func() {
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ renameNX := client.RenameNX("key", "key1")
+ Expect(renameNX.Err()).NotTo(HaveOccurred())
+ Expect(renameNX.Val()).To(Equal(true))
+
+ get := client.Get("key1")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should Restore", func() {
+ err := client.Set("key", "hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ dump := client.Dump("key")
+ Expect(dump.Err()).NotTo(HaveOccurred())
+
+ err = client.Del("key").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ restore, err := client.Restore("key", 0, dump.Val()).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(restore).To(Equal("OK"))
+
+ type_, err := client.Type("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(type_).To(Equal("string"))
+
+ val, err := client.Get("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+ })
+
+ It("should RestoreReplace", func() {
+ err := client.Set("key", "hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ dump := client.Dump("key")
+ Expect(dump.Err()).NotTo(HaveOccurred())
+
+ restore, err := client.RestoreReplace("key", 0, dump.Val()).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(restore).To(Equal("OK"))
+
+ type_, err := client.Type("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(type_).To(Equal("string"))
+
+ val, err := client.Get("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+ })
+
+ It("should Sort", func() {
+ size, err := client.LPush("list", "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(1)))
+
+ size, err = client.LPush("list", "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(2)))
+
+ size, err = client.LPush("list", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(3)))
+
+ els, err := client.Sort("list", redis.Sort{
+ Offset: 0,
+ Count: 2,
+ Order: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(els).To(Equal([]string{"1", "2"}))
+ })
+
+ It("should Sort and Get", func() {
+ size, err := client.LPush("list", "1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(1)))
+
+ size, err = client.LPush("list", "3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(2)))
+
+ size, err = client.LPush("list", "2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(size).To(Equal(int64(3)))
+
+ err = client.Set("object_2", "value2", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ {
+ els, err := client.Sort("list", redis.Sort{
+ Get: []string{"object_*"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(els).To(Equal([]string{"", "value2", ""}))
+ }
+
+ {
+ els, err := client.SortInterfaces("list", redis.Sort{
+ Get: []string{"object_*"},
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(els).To(Equal([]interface{}{nil, "value2", nil}))
+ }
+ })
+
+ It("should TTL", func() {
+ ttl := client.TTL("key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val() < 0).To(Equal(true))
+
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ expire := client.Expire("key", 60*time.Second)
+ Expect(expire.Err()).NotTo(HaveOccurred())
+ Expect(expire.Val()).To(Equal(true))
+
+ ttl = client.TTL("key")
+ Expect(ttl.Err()).NotTo(HaveOccurred())
+ Expect(ttl.Val()).To(Equal(60 * time.Second))
+ })
+
+ It("should Type", func() {
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ type_ := client.Type("key")
+ Expect(type_.Err()).NotTo(HaveOccurred())
+ Expect(type_.Val()).To(Equal("string"))
+ })
+
+ })
+
+ Describe("scanning", func() {
+
+ It("should Scan", func() {
+ for i := 0; i < 1000; i++ {
+ set := client.Set(fmt.Sprintf("key%d", i), "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ }
+
+ keys, cursor, err := client.Scan(0, "", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).NotTo(BeEmpty())
+ Expect(cursor).NotTo(BeZero())
+ })
+
+ It("should SScan", func() {
+ for i := 0; i < 1000; i++ {
+ sadd := client.SAdd("myset", fmt.Sprintf("member%d", i))
+ Expect(sadd.Err()).NotTo(HaveOccurred())
+ }
+
+ keys, cursor, err := client.SScan("myset", 0, "", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).NotTo(BeEmpty())
+ Expect(cursor).NotTo(BeZero())
+ })
+
+ It("should HScan", func() {
+ for i := 0; i < 1000; i++ {
+ sadd := client.HSet("myhash", fmt.Sprintf("key%d", i), "hello")
+ Expect(sadd.Err()).NotTo(HaveOccurred())
+ }
+
+ keys, cursor, err := client.HScan("myhash", 0, "", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).NotTo(BeEmpty())
+ Expect(cursor).NotTo(BeZero())
+ })
+
+ It("should ZScan", func() {
+ for i := 0; i < 1000; i++ {
+ sadd := client.ZAdd("myset", redis.Z{float64(i), fmt.Sprintf("member%d", i)})
+ Expect(sadd.Err()).NotTo(HaveOccurred())
+ }
+
+ keys, cursor, err := client.ZScan("myset", 0, "", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(keys).NotTo(BeEmpty())
+ Expect(cursor).NotTo(BeZero())
+ })
+
+ })
+
+ Describe("strings", func() {
+
+ It("should Append", func() {
+ n, err := client.Exists("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(0)))
+
+ append := client.Append("key", "Hello")
+ Expect(append.Err()).NotTo(HaveOccurred())
+ Expect(append.Val()).To(Equal(int64(5)))
+
+ append = client.Append("key", " World")
+ Expect(append.Err()).NotTo(HaveOccurred())
+ Expect(append.Val()).To(Equal(int64(11)))
+
+ get := client.Get("key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("Hello World"))
+ })
+
+ It("should BitCount", func() {
+ set := client.Set("key", "foobar", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitCount := client.BitCount("key", nil)
+ Expect(bitCount.Err()).NotTo(HaveOccurred())
+ Expect(bitCount.Val()).To(Equal(int64(26)))
+
+ bitCount = client.BitCount("key", &redis.BitCount{0, 0})
+ Expect(bitCount.Err()).NotTo(HaveOccurred())
+ Expect(bitCount.Val()).To(Equal(int64(4)))
+
+ bitCount = client.BitCount("key", &redis.BitCount{1, 1})
+ Expect(bitCount.Err()).NotTo(HaveOccurred())
+ Expect(bitCount.Val()).To(Equal(int64(6)))
+ })
+
+ It("should BitOpAnd", func() {
+ set := client.Set("key1", "1", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ set = client.Set("key2", "0", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitOpAnd := client.BitOpAnd("dest", "key1", "key2")
+ Expect(bitOpAnd.Err()).NotTo(HaveOccurred())
+ Expect(bitOpAnd.Val()).To(Equal(int64(1)))
+
+ get := client.Get("dest")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("0"))
+ })
+
+ It("should BitOpOr", func() {
+ set := client.Set("key1", "1", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ set = client.Set("key2", "0", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitOpOr := client.BitOpOr("dest", "key1", "key2")
+ Expect(bitOpOr.Err()).NotTo(HaveOccurred())
+ Expect(bitOpOr.Val()).To(Equal(int64(1)))
+
+ get := client.Get("dest")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("1"))
+ })
+
+ It("should BitOpXor", func() {
+ set := client.Set("key1", "\xff", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ set = client.Set("key2", "\x0f", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitOpXor := client.BitOpXor("dest", "key1", "key2")
+ Expect(bitOpXor.Err()).NotTo(HaveOccurred())
+ Expect(bitOpXor.Val()).To(Equal(int64(1)))
+
+ get := client.Get("dest")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("\xf0"))
+ })
+
+ It("should BitOpNot", func() {
+ set := client.Set("key1", "\x00", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ bitOpNot := client.BitOpNot("dest", "key1")
+ Expect(bitOpNot.Err()).NotTo(HaveOccurred())
+ Expect(bitOpNot.Val()).To(Equal(int64(1)))
+
+ get := client.Get("dest")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("\xff"))
+ })
+
+ It("should BitPos", func() {
+ err := client.Set("mykey", "\xff\xf0\x00", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ pos, err := client.BitPos("mykey", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(12)))
+
+ pos, err = client.BitPos("mykey", 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(0)))
+
+ pos, err = client.BitPos("mykey", 0, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(16)))
+
+ pos, err = client.BitPos("mykey", 1, 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+
+ pos, err = client.BitPos("mykey", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(16)))
+
+ pos, err = client.BitPos("mykey", 1, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+
+ pos, err = client.BitPos("mykey", 0, 2, 1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+
+ pos, err = client.BitPos("mykey", 0, 0, -3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+
+ pos, err = client.BitPos("mykey", 0, 0, 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(Equal(int64(-1)))
+ })
+
+ It("should Decr", func() {
+ set := client.Set("key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ decr := client.Decr("key")
+ Expect(decr.Err()).NotTo(HaveOccurred())
+ Expect(decr.Val()).To(Equal(int64(9)))
+
+ set = client.Set("key", "234293482390480948029348230948", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ decr = client.Decr("key")
+ Expect(decr.Err()).To(MatchError("ERR value is not an integer or out of range"))
+ Expect(decr.Val()).To(Equal(int64(0)))
+ })
+
+ It("should DecrBy", func() {
+ set := client.Set("key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ decrBy := client.DecrBy("key", 5)
+ Expect(decrBy.Err()).NotTo(HaveOccurred())
+ Expect(decrBy.Val()).To(Equal(int64(5)))
+ })
+
+ It("should Get", func() {
+ get := client.Get("_")
+ Expect(get.Err()).To(Equal(redis.Nil))
+ Expect(get.Val()).To(Equal(""))
+
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ get = client.Get("key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should GetBit", func() {
+ setBit := client.SetBit("key", 7, 1)
+ Expect(setBit.Err()).NotTo(HaveOccurred())
+ Expect(setBit.Val()).To(Equal(int64(0)))
+
+ getBit := client.GetBit("key", 0)
+ Expect(getBit.Err()).NotTo(HaveOccurred())
+ Expect(getBit.Val()).To(Equal(int64(0)))
+
+ getBit = client.GetBit("key", 7)
+ Expect(getBit.Err()).NotTo(HaveOccurred())
+ Expect(getBit.Val()).To(Equal(int64(1)))
+
+ getBit = client.GetBit("key", 100)
+ Expect(getBit.Err()).NotTo(HaveOccurred())
+ Expect(getBit.Val()).To(Equal(int64(0)))
+ })
+
+ It("should GetRange", func() {
+ set := client.Set("key", "This is a string", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ getRange := client.GetRange("key", 0, 3)
+ Expect(getRange.Err()).NotTo(HaveOccurred())
+ Expect(getRange.Val()).To(Equal("This"))
+
+ getRange = client.GetRange("key", -3, -1)
+ Expect(getRange.Err()).NotTo(HaveOccurred())
+ Expect(getRange.Val()).To(Equal("ing"))
+
+ getRange = client.GetRange("key", 0, -1)
+ Expect(getRange.Err()).NotTo(HaveOccurred())
+ Expect(getRange.Val()).To(Equal("This is a string"))
+
+ getRange = client.GetRange("key", 10, 100)
+ Expect(getRange.Err()).NotTo(HaveOccurred())
+ Expect(getRange.Val()).To(Equal("string"))
+ })
+
+ It("should GetSet", func() {
+ incr := client.Incr("key")
+ Expect(incr.Err()).NotTo(HaveOccurred())
+ Expect(incr.Val()).To(Equal(int64(1)))
+
+ getSet := client.GetSet("key", "0")
+ Expect(getSet.Err()).NotTo(HaveOccurred())
+ Expect(getSet.Val()).To(Equal("1"))
+
+ get := client.Get("key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("0"))
+ })
+
+ It("should Incr", func() {
+ set := client.Set("key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ incr := client.Incr("key")
+ Expect(incr.Err()).NotTo(HaveOccurred())
+ Expect(incr.Val()).To(Equal(int64(11)))
+
+ get := client.Get("key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("11"))
+ })
+
+ It("should IncrBy", func() {
+ set := client.Set("key", "10", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ incrBy := client.IncrBy("key", 5)
+ Expect(incrBy.Err()).NotTo(HaveOccurred())
+ Expect(incrBy.Val()).To(Equal(int64(15)))
+ })
+
+ It("should IncrByFloat", func() {
+ set := client.Set("key", "10.50", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ incrByFloat := client.IncrByFloat("key", 0.1)
+ Expect(incrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(incrByFloat.Val()).To(Equal(10.6))
+
+ set = client.Set("key", "5.0e3", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ incrByFloat = client.IncrByFloat("key", 2.0e2)
+ Expect(incrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(incrByFloat.Val()).To(Equal(float64(5200)))
+ })
+
+ It("should IncrByFloatOverflow", func() {
+ incrByFloat := client.IncrByFloat("key", 996945661)
+ Expect(incrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(incrByFloat.Val()).To(Equal(float64(996945661)))
+ })
+
+ It("should MSetMGet", func() {
+ mSet := client.MSet("key1", "hello1", "key2", "hello2")
+ Expect(mSet.Err()).NotTo(HaveOccurred())
+ Expect(mSet.Val()).To(Equal("OK"))
+
+ mGet := client.MGet("key1", "key2", "_")
+ Expect(mGet.Err()).NotTo(HaveOccurred())
+ Expect(mGet.Val()).To(Equal([]interface{}{"hello1", "hello2", nil}))
+ })
+
+ It("should MSetNX", func() {
+ mSetNX := client.MSetNX("key1", "hello1", "key2", "hello2")
+ Expect(mSetNX.Err()).NotTo(HaveOccurred())
+ Expect(mSetNX.Val()).To(Equal(true))
+
+ mSetNX = client.MSetNX("key2", "hello1", "key3", "hello2")
+ Expect(mSetNX.Err()).NotTo(HaveOccurred())
+ Expect(mSetNX.Val()).To(Equal(false))
+ })
+
+ It("should Set with expiration", func() {
+ err := client.Set("key", "hello", 100*time.Millisecond).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ val, err := client.Get("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+
+ Eventually(func() error {
+ return client.Get("foo").Err()
+ }, "1s", "100ms").Should(Equal(redis.Nil))
+ })
+
+ It("should SetGet", func() {
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ get := client.Get("key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should SetNX", func() {
+ setNX := client.SetNX("key", "hello", 0)
+ Expect(setNX.Err()).NotTo(HaveOccurred())
+ Expect(setNX.Val()).To(Equal(true))
+
+ setNX = client.SetNX("key", "hello2", 0)
+ Expect(setNX.Err()).NotTo(HaveOccurred())
+ Expect(setNX.Val()).To(Equal(false))
+
+ get := client.Get("key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello"))
+ })
+
+ It("should SetNX with expiration", func() {
+ isSet, err := client.SetNX("key", "hello", time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(true))
+
+ isSet, err = client.SetNX("key", "hello2", time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(false))
+
+ val, err := client.Get("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello"))
+ })
+
+ It("should SetXX", func() {
+ isSet, err := client.SetXX("key", "hello2", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(false))
+
+ err = client.Set("key", "hello", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ isSet, err = client.SetXX("key", "hello2", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(true))
+
+ val, err := client.Get("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello2"))
+ })
+
+ It("should SetXX with expiration", func() {
+ isSet, err := client.SetXX("key", "hello2", time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(false))
+
+ err = client.Set("key", "hello", time.Second).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ isSet, err = client.SetXX("key", "hello2", time.Second).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(isSet).To(Equal(true))
+
+ val, err := client.Get("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("hello2"))
+ })
+
+ It("should SetRange", func() {
+ set := client.Set("key", "Hello World", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ range_ := client.SetRange("key", 6, "Redis")
+ Expect(range_.Err()).NotTo(HaveOccurred())
+ Expect(range_.Val()).To(Equal(int64(11)))
+
+ get := client.Get("key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("Hello Redis"))
+ })
+
+ It("should StrLen", func() {
+ set := client.Set("key", "hello", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ strLen := client.StrLen("key")
+ Expect(strLen.Err()).NotTo(HaveOccurred())
+ Expect(strLen.Val()).To(Equal(int64(5)))
+
+ strLen = client.StrLen("_")
+ Expect(strLen.Err()).NotTo(HaveOccurred())
+ Expect(strLen.Val()).To(Equal(int64(0)))
+ })
+
+ })
+
+ Describe("hashes", func() {
+
+ It("should HDel", func() {
+ hSet := client.HSet("hash", "key", "hello")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hDel := client.HDel("hash", "key")
+ Expect(hDel.Err()).NotTo(HaveOccurred())
+ Expect(hDel.Val()).To(Equal(int64(1)))
+
+ hDel = client.HDel("hash", "key")
+ Expect(hDel.Err()).NotTo(HaveOccurred())
+ Expect(hDel.Val()).To(Equal(int64(0)))
+ })
+
+ It("should HExists", func() {
+ hSet := client.HSet("hash", "key", "hello")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hExists := client.HExists("hash", "key")
+ Expect(hExists.Err()).NotTo(HaveOccurred())
+ Expect(hExists.Val()).To(Equal(true))
+
+ hExists = client.HExists("hash", "key1")
+ Expect(hExists.Err()).NotTo(HaveOccurred())
+ Expect(hExists.Val()).To(Equal(false))
+ })
+
+ It("should HGet", func() {
+ hSet := client.HSet("hash", "key", "hello")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hGet := client.HGet("hash", "key")
+ Expect(hGet.Err()).NotTo(HaveOccurred())
+ Expect(hGet.Val()).To(Equal("hello"))
+
+ hGet = client.HGet("hash", "key1")
+ Expect(hGet.Err()).To(Equal(redis.Nil))
+ Expect(hGet.Val()).To(Equal(""))
+ })
+
+ It("should HGetAll", func() {
+ err := client.HSet("hash", "key1", "hello1").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.HSet("hash", "key2", "hello2").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ m, err := client.HGetAll("hash").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(m).To(Equal(map[string]string{"key1": "hello1", "key2": "hello2"}))
+ })
+
+ It("should HIncrBy", func() {
+ hSet := client.HSet("hash", "key", "5")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hIncrBy := client.HIncrBy("hash", "key", 1)
+ Expect(hIncrBy.Err()).NotTo(HaveOccurred())
+ Expect(hIncrBy.Val()).To(Equal(int64(6)))
+
+ hIncrBy = client.HIncrBy("hash", "key", -1)
+ Expect(hIncrBy.Err()).NotTo(HaveOccurred())
+ Expect(hIncrBy.Val()).To(Equal(int64(5)))
+
+ hIncrBy = client.HIncrBy("hash", "key", -10)
+ Expect(hIncrBy.Err()).NotTo(HaveOccurred())
+ Expect(hIncrBy.Val()).To(Equal(int64(-5)))
+ })
+
+ It("should HIncrByFloat", func() {
+ hSet := client.HSet("hash", "field", "10.50")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(true))
+
+ hIncrByFloat := client.HIncrByFloat("hash", "field", 0.1)
+ Expect(hIncrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(hIncrByFloat.Val()).To(Equal(10.6))
+
+ hSet = client.HSet("hash", "field", "5.0e3")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(false))
+
+ hIncrByFloat = client.HIncrByFloat("hash", "field", 2.0e2)
+ Expect(hIncrByFloat.Err()).NotTo(HaveOccurred())
+ Expect(hIncrByFloat.Val()).To(Equal(float64(5200)))
+ })
+
+ It("should HKeys", func() {
+ hkeys := client.HKeys("hash")
+ Expect(hkeys.Err()).NotTo(HaveOccurred())
+ Expect(hkeys.Val()).To(Equal([]string{}))
+
+ hset := client.HSet("hash", "key1", "hello1")
+ Expect(hset.Err()).NotTo(HaveOccurred())
+ hset = client.HSet("hash", "key2", "hello2")
+ Expect(hset.Err()).NotTo(HaveOccurred())
+
+ hkeys = client.HKeys("hash")
+ Expect(hkeys.Err()).NotTo(HaveOccurred())
+ Expect(hkeys.Val()).To(Equal([]string{"key1", "key2"}))
+ })
+
+ It("should HLen", func() {
+ hSet := client.HSet("hash", "key1", "hello1")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ hSet = client.HSet("hash", "key2", "hello2")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+
+ hLen := client.HLen("hash")
+ Expect(hLen.Err()).NotTo(HaveOccurred())
+ Expect(hLen.Val()).To(Equal(int64(2)))
+ })
+
+ It("should HMGet", func() {
+ err := client.HSet("hash", "key1", "hello1").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.HSet("hash", "key2", "hello2").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ vals, err := client.HMGet("hash", "key1", "key2", "_").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]interface{}{"hello1", "hello2", nil}))
+ })
+
+ It("should HMSet", func() {
+ ok, err := client.HMSet("hash", map[string]interface{}{
+ "key1": "hello1",
+ "key2": "hello2",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ok).To(Equal("OK"))
+
+ v, err := client.HGet("hash", "key1").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("hello1"))
+
+ v, err = client.HGet("hash", "key2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("hello2"))
+ })
+
+ It("should HSet", func() {
+ hSet := client.HSet("hash", "key", "hello")
+ Expect(hSet.Err()).NotTo(HaveOccurred())
+ Expect(hSet.Val()).To(Equal(true))
+
+ hGet := client.HGet("hash", "key")
+ Expect(hGet.Err()).NotTo(HaveOccurred())
+ Expect(hGet.Val()).To(Equal("hello"))
+ })
+
+ It("should HSetNX", func() {
+ hSetNX := client.HSetNX("hash", "key", "hello")
+ Expect(hSetNX.Err()).NotTo(HaveOccurred())
+ Expect(hSetNX.Val()).To(Equal(true))
+
+ hSetNX = client.HSetNX("hash", "key", "hello")
+ Expect(hSetNX.Err()).NotTo(HaveOccurred())
+ Expect(hSetNX.Val()).To(Equal(false))
+
+ hGet := client.HGet("hash", "key")
+ Expect(hGet.Err()).NotTo(HaveOccurred())
+ Expect(hGet.Val()).To(Equal("hello"))
+ })
+
+ It("should HVals", func() {
+ err := client.HSet("hash", "key1", "hello1").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.HSet("hash", "key2", "hello2").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.HVals("hash").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal([]string{"hello1", "hello2"}))
+
+ var slice []string
+ err = client.HVals("hash").ScanSlice(&slice)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(slice).To(Equal([]string{"hello1", "hello2"}))
+ })
+
+ })
+
+ Describe("hyperloglog", func() {
+ It("should PFMerge", func() {
+ pfAdd := client.PFAdd("hll1", "1", "2", "3", "4", "5")
+ Expect(pfAdd.Err()).NotTo(HaveOccurred())
+
+ pfCount := client.PFCount("hll1")
+ Expect(pfCount.Err()).NotTo(HaveOccurred())
+ Expect(pfCount.Val()).To(Equal(int64(5)))
+
+ pfAdd = client.PFAdd("hll2", "a", "b", "c", "d", "e")
+ Expect(pfAdd.Err()).NotTo(HaveOccurred())
+
+ pfMerge := client.PFMerge("hllMerged", "hll1", "hll2")
+ Expect(pfMerge.Err()).NotTo(HaveOccurred())
+
+ pfCount = client.PFCount("hllMerged")
+ Expect(pfCount.Err()).NotTo(HaveOccurred())
+ Expect(pfCount.Val()).To(Equal(int64(10)))
+
+ pfCount = client.PFCount("hll1", "hll2")
+ Expect(pfCount.Err()).NotTo(HaveOccurred())
+ Expect(pfCount.Val()).To(Equal(int64(10)))
+ })
+ })
+
+ Describe("lists", func() {
+
+ It("should BLPop", func() {
+ rPush := client.RPush("list1", "a", "b", "c")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ bLPop := client.BLPop(0, "list1", "list2")
+ Expect(bLPop.Err()).NotTo(HaveOccurred())
+ Expect(bLPop.Val()).To(Equal([]string{"list1", "a"}))
+ })
+
+ It("should BLPopBlocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ bLPop := client.BLPop(0, "list")
+ Expect(bLPop.Err()).NotTo(HaveOccurred())
+ Expect(bLPop.Val()).To(Equal([]string{"list", "a"}))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BLPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ rPush := client.RPush("list", "a")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BLPop is still blocked")
+ }
+ })
+
+ It("should BLPop timeout", func() {
+ val, err := client.BLPop(time.Second, "list1").Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(val).To(BeNil())
+
+ Expect(client.Ping().Err()).NotTo(HaveOccurred())
+
+ stats := client.PoolStats()
+ Expect(stats.Requests).To(Equal(uint32(3)))
+ Expect(stats.Hits).To(Equal(uint32(1)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
+ It("should BRPop", func() {
+ rPush := client.RPush("list1", "a", "b", "c")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ bRPop := client.BRPop(0, "list1", "list2")
+ Expect(bRPop.Err()).NotTo(HaveOccurred())
+ Expect(bRPop.Val()).To(Equal([]string{"list1", "c"}))
+ })
+
+ It("should BRPop blocks", func() {
+ started := make(chan bool)
+ done := make(chan bool)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ brpop := client.BRPop(0, "list")
+ Expect(brpop.Err()).NotTo(HaveOccurred())
+ Expect(brpop.Val()).To(Equal([]string{"list", "a"}))
+ done <- true
+ }()
+ <-started
+
+ select {
+ case <-done:
+ Fail("BRPop is not blocked")
+ case <-time.After(time.Second):
+ // ok
+ }
+
+ rPush := client.RPush("list", "a")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("BRPop is still blocked")
+ // ok
+ }
+ })
+
+ It("should BRPopLPush", func() {
+ _, err := client.BRPopLPush("list1", "list2", time.Second).Result()
+ Expect(err).To(Equal(redis.Nil))
+
+ err = client.RPush("list1", "a", "b", "c").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.BRPopLPush("list1", "list2", 0).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("c"))
+ })
+
+ It("should LIndex", func() {
+ lPush := client.LPush("list", "World")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+ lPush = client.LPush("list", "Hello")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+
+ lIndex := client.LIndex("list", 0)
+ Expect(lIndex.Err()).NotTo(HaveOccurred())
+ Expect(lIndex.Val()).To(Equal("Hello"))
+
+ lIndex = client.LIndex("list", -1)
+ Expect(lIndex.Err()).NotTo(HaveOccurred())
+ Expect(lIndex.Val()).To(Equal("World"))
+
+ lIndex = client.LIndex("list", 3)
+ Expect(lIndex.Err()).To(Equal(redis.Nil))
+ Expect(lIndex.Val()).To(Equal(""))
+ })
+
+ It("should LInsert", func() {
+ rPush := client.RPush("list", "Hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "World")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lInsert := client.LInsert("list", "BEFORE", "World", "There")
+ Expect(lInsert.Err()).NotTo(HaveOccurred())
+ Expect(lInsert.Val()).To(Equal(int64(3)))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "There", "World"}))
+ })
+
+ It("should LLen", func() {
+ lPush := client.LPush("list", "World")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+ lPush = client.LPush("list", "Hello")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+
+ lLen := client.LLen("list")
+ Expect(lLen.Err()).NotTo(HaveOccurred())
+ Expect(lLen.Val()).To(Equal(int64(2)))
+ })
+
+ It("should LPop", func() {
+ rPush := client.RPush("list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lPop := client.LPop("list")
+ Expect(lPop.Err()).NotTo(HaveOccurred())
+ Expect(lPop.Val()).To(Equal("one"))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"two", "three"}))
+ })
+
+ It("should LPush", func() {
+ lPush := client.LPush("list", "World")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+ lPush = client.LPush("list", "Hello")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+ })
+
+ It("should LPushX", func() {
+ lPush := client.LPush("list", "World")
+ Expect(lPush.Err()).NotTo(HaveOccurred())
+
+ lPushX := client.LPushX("list", "Hello")
+ Expect(lPushX.Err()).NotTo(HaveOccurred())
+ Expect(lPushX.Val()).To(Equal(int64(2)))
+
+ lPushX = client.LPushX("list2", "Hello")
+ Expect(lPushX.Err()).NotTo(HaveOccurred())
+ Expect(lPushX.Val()).To(Equal(int64(0)))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+
+ lRange = client.LRange("list2", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{}))
+ })
+
+ It("should LRange", func() {
+ rPush := client.RPush("list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lRange := client.LRange("list", 0, 0)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one"}))
+
+ lRange = client.LRange("list", -3, 2)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ lRange = client.LRange("list", -100, 100)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ lRange = client.LRange("list", 5, 10)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{}))
+ })
+
+ It("should LRem", func() {
+ rPush := client.RPush("list", "hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "key")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lRem := client.LRem("list", -2, "hello")
+ Expect(lRem.Err()).NotTo(HaveOccurred())
+ Expect(lRem.Val()).To(Equal(int64(2)))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"hello", "key"}))
+ })
+
+ It("should LSet", func() {
+ rPush := client.RPush("list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lSet := client.LSet("list", 0, "four")
+ Expect(lSet.Err()).NotTo(HaveOccurred())
+ Expect(lSet.Val()).To(Equal("OK"))
+
+ lSet = client.LSet("list", -2, "five")
+ Expect(lSet.Err()).NotTo(HaveOccurred())
+ Expect(lSet.Val()).To(Equal("OK"))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"four", "five", "three"}))
+ })
+
+ It("should LTrim", func() {
+ rPush := client.RPush("list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ lTrim := client.LTrim("list", 1, -1)
+ Expect(lTrim.Err()).NotTo(HaveOccurred())
+ Expect(lTrim.Val()).To(Equal("OK"))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"two", "three"}))
+ })
+
+ It("should RPop", func() {
+ rPush := client.RPush("list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ rPop := client.RPop("list")
+ Expect(rPop.Err()).NotTo(HaveOccurred())
+ Expect(rPop.Val()).To(Equal("three"))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two"}))
+ })
+
+ It("should RPopLPush", func() {
+ rPush := client.RPush("list", "one")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "two")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ rPush = client.RPush("list", "three")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+
+ rPopLPush := client.RPopLPush("list", "list2")
+ Expect(rPopLPush.Err()).NotTo(HaveOccurred())
+ Expect(rPopLPush.Val()).To(Equal("three"))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"one", "two"}))
+
+ lRange = client.LRange("list2", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"three"}))
+ })
+
+ It("should RPush", func() {
+ rPush := client.RPush("list", "Hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(1)))
+
+ rPush = client.RPush("list", "World")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(2)))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+ })
+
+ It("should RPushX", func() {
+ rPush := client.RPush("list", "Hello")
+ Expect(rPush.Err()).NotTo(HaveOccurred())
+ Expect(rPush.Val()).To(Equal(int64(1)))
+
+ rPushX := client.RPushX("list", "World")
+ Expect(rPushX.Err()).NotTo(HaveOccurred())
+ Expect(rPushX.Val()).To(Equal(int64(2)))
+
+ rPushX = client.RPushX("list2", "World")
+ Expect(rPushX.Err()).NotTo(HaveOccurred())
+ Expect(rPushX.Val()).To(Equal(int64(0)))
+
+ lRange := client.LRange("list", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{"Hello", "World"}))
+
+ lRange = client.LRange("list2", 0, -1)
+ Expect(lRange.Err()).NotTo(HaveOccurred())
+ Expect(lRange.Val()).To(Equal([]string{}))
+ })
+
+ })
+
+ Describe("sets", func() {
+
+ It("should SAdd", func() {
+ sAdd := client.SAdd("set", "Hello")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(1)))
+
+ sAdd = client.SAdd("set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(1)))
+
+ sAdd = client.SAdd("set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(0)))
+
+ sMembers := client.SMembers("set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"}))
+ })
+
+ It("should SCard", func() {
+ sAdd := client.SAdd("set", "Hello")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(1)))
+
+ sAdd = client.SAdd("set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ Expect(sAdd.Val()).To(Equal(int64(1)))
+
+ sCard := client.SCard("set")
+ Expect(sCard.Err()).NotTo(HaveOccurred())
+ Expect(sCard.Val()).To(Equal(int64(2)))
+ })
+
+ It("should SDiff", func() {
+ sAdd := client.SAdd("set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd("set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sDiff := client.SDiff("set1", "set2")
+ Expect(sDiff.Err()).NotTo(HaveOccurred())
+ Expect(sDiff.Val()).To(ConsistOf([]string{"a", "b"}))
+ })
+
+ It("should SDiffStore", func() {
+ sAdd := client.SAdd("set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd("set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sDiffStore := client.SDiffStore("set", "set1", "set2")
+ Expect(sDiffStore.Err()).NotTo(HaveOccurred())
+ Expect(sDiffStore.Val()).To(Equal(int64(2)))
+
+ sMembers := client.SMembers("set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"a", "b"}))
+ })
+
+ It("should SInter", func() {
+ sAdd := client.SAdd("set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd("set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sInter := client.SInter("set1", "set2")
+ Expect(sInter.Err()).NotTo(HaveOccurred())
+ Expect(sInter.Val()).To(Equal([]string{"c"}))
+ })
+
+ It("should SInterStore", func() {
+ sAdd := client.SAdd("set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd("set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sInterStore := client.SInterStore("set", "set1", "set2")
+ Expect(sInterStore.Err()).NotTo(HaveOccurred())
+ Expect(sInterStore.Val()).To(Equal(int64(1)))
+
+ sMembers := client.SMembers("set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(Equal([]string{"c"}))
+ })
+
+ It("should IsMember", func() {
+ sAdd := client.SAdd("set", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sIsMember := client.SIsMember("set", "one")
+ Expect(sIsMember.Err()).NotTo(HaveOccurred())
+ Expect(sIsMember.Val()).To(Equal(true))
+
+ sIsMember = client.SIsMember("set", "two")
+ Expect(sIsMember.Err()).NotTo(HaveOccurred())
+ Expect(sIsMember.Val()).To(Equal(false))
+ })
+
+ It("should SMembers", func() {
+ sAdd := client.SAdd("set", "Hello")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sMembers := client.SMembers("set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"}))
+ })
+
+ It("should SMove", func() {
+ sAdd := client.SAdd("set1", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "two")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd("set2", "three")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sMove := client.SMove("set1", "set2", "two")
+ Expect(sMove.Err()).NotTo(HaveOccurred())
+ Expect(sMove.Val()).To(Equal(true))
+
+ sMembers := client.SMembers("set1")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(Equal([]string{"one"}))
+
+ sMembers = client.SMembers("set2")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"}))
+ })
+
+ It("should SPop", func() {
+ sAdd := client.SAdd("set", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set", "two")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set", "three")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sPop := client.SPop("set")
+ Expect(sPop.Err()).NotTo(HaveOccurred())
+ Expect(sPop.Val()).NotTo(Equal(""))
+
+ sMembers := client.SMembers("set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(HaveLen(2))
+
+ })
+
+ It("should SPopN", func() {
+ sAdd := client.SAdd("set", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set", "two")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set", "three")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set", "four")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sPopN := client.SPopN("set", 1)
+ Expect(sPopN.Err()).NotTo(HaveOccurred())
+ Expect(sPopN.Val()).NotTo(Equal([]string{""}))
+
+ sMembers := client.SMembers("set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(HaveLen(3))
+
+ sPopN = client.SPopN("set", 4)
+ Expect(sPopN.Err()).NotTo(HaveOccurred())
+ Expect(sPopN.Val()).To(HaveLen(3))
+
+ sMembers = client.SMembers("set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(HaveLen(0))
+ })
+
+ It("should SRandMember and SRandMemberN", func() {
+ err := client.SAdd("set", "one").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.SAdd("set", "two").Err()
+ Expect(err).NotTo(HaveOccurred())
+ err = client.SAdd("set", "three").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ members, err := client.SMembers("set").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(HaveLen(3))
+
+ member, err := client.SRandMember("set").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(member).NotTo(Equal(""))
+
+ members, err = client.SRandMemberN("set", 2).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(members).To(HaveLen(2))
+ })
+
+ It("should SRem", func() {
+ sAdd := client.SAdd("set", "one")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set", "two")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set", "three")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sRem := client.SRem("set", "one")
+ Expect(sRem.Err()).NotTo(HaveOccurred())
+ Expect(sRem.Val()).To(Equal(int64(1)))
+
+ sRem = client.SRem("set", "four")
+ Expect(sRem.Err()).NotTo(HaveOccurred())
+ Expect(sRem.Val()).To(Equal(int64(0)))
+
+ sMembers := client.SMembers("set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(ConsistOf([]string{"three", "two"}))
+ })
+
+ It("should SUnion", func() {
+ sAdd := client.SAdd("set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd("set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sUnion := client.SUnion("set1", "set2")
+ Expect(sUnion.Err()).NotTo(HaveOccurred())
+ Expect(sUnion.Val()).To(HaveLen(5))
+ })
+
+ It("should SUnionStore", func() {
+ sAdd := client.SAdd("set1", "a")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "b")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set1", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sAdd = client.SAdd("set2", "c")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "d")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set2", "e")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sUnionStore := client.SUnionStore("set", "set1", "set2")
+ Expect(sUnionStore.Err()).NotTo(HaveOccurred())
+ Expect(sUnionStore.Val()).To(Equal(int64(5)))
+
+ sMembers := client.SMembers("set")
+ Expect(sMembers.Err()).NotTo(HaveOccurred())
+ Expect(sMembers.Val()).To(HaveLen(5))
+ })
+
+ })
+
+ Describe("sorted sets", func() {
+
+ It("should ZAdd", func() {
+ added, err := client.ZAdd("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd("zset", redis.Z{1, "uno"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd("zset", redis.Z{2, "two"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd("zset", redis.Z{3, "two"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{1, "one"}, {1, "uno"}, {3, "two"}}))
+ })
+
+ It("should ZAdd bytes", func() {
+ added, err := client.ZAdd("zset", redis.Z{1, []byte("one")}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd("zset", redis.Z{1, []byte("uno")}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd("zset", redis.Z{2, []byte("two")}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAdd("zset", redis.Z{3, []byte("two")}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ val, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{1, "one"}, {1, "uno"}, {3, "two"}}))
+ })
+
+ It("should ZAddNX", func() {
+ added, err := client.ZAddNX("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{1, "one"}}))
+
+ added, err = client.ZAddNX("zset", redis.Z{2, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{1, "one"}}))
+ })
+
+ It("should ZAddXX", func() {
+ added, err := client.ZAddXX("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(BeEmpty())
+
+ added, err = client.ZAdd("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ added, err = client.ZAddXX("zset", redis.Z{2, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{2, "one"}}))
+ })
+
+ It("should ZAddCh", func() {
+ changed, err := client.ZAddCh("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(1)))
+
+ changed, err = client.ZAddCh("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(0)))
+ })
+
+ It("should ZAddNXCh", func() {
+ changed, err := client.ZAddNXCh("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(1)))
+
+ vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{1, "one"}}))
+
+ changed, err = client.ZAddNXCh("zset", redis.Z{2, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(0)))
+
+ vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{1, "one"}}))
+ })
+
+ It("should ZAddXXCh", func() {
+ changed, err := client.ZAddXXCh("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(0)))
+
+ vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(BeEmpty())
+
+ added, err := client.ZAdd("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ changed, err = client.ZAddXXCh("zset", redis.Z{2, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(changed).To(Equal(int64(1)))
+
+ vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{2, "one"}}))
+ })
+
+ It("should ZIncr", func() {
+ score, err := client.ZIncr("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(score).To(Equal(float64(1)))
+
+ vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{1, "one"}}))
+
+ score, err = client.ZIncr("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(score).To(Equal(float64(2)))
+
+ vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{2, "one"}}))
+ })
+
+ It("should ZIncrNX", func() {
+ score, err := client.ZIncrNX("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(score).To(Equal(float64(1)))
+
+ vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{1, "one"}}))
+
+ score, err = client.ZIncrNX("zset", redis.Z{1, "one"}).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(score).To(Equal(float64(0)))
+
+ vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{1, "one"}}))
+ })
+
+ It("should ZIncrXX", func() {
+ score, err := client.ZIncrXX("zset", redis.Z{1, "one"}).Result()
+ Expect(err).To(Equal(redis.Nil))
+ Expect(score).To(Equal(float64(0)))
+
+ vals, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(BeEmpty())
+
+ added, err := client.ZAdd("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(added).To(Equal(int64(1)))
+
+ score, err = client.ZIncrXX("zset", redis.Z{1, "one"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(score).To(Equal(float64(2)))
+
+ vals, err = client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{2, "one"}}))
+ })
+
+ It("should ZCard", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zCard := client.ZCard("zset")
+ Expect(zCard.Err()).NotTo(HaveOccurred())
+ Expect(zCard.Val()).To(Equal(int64(2)))
+ })
+
+ It("should ZCount", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zCount := client.ZCount("zset", "-inf", "+inf")
+ Expect(zCount.Err()).NotTo(HaveOccurred())
+ Expect(zCount.Val()).To(Equal(int64(3)))
+
+ zCount = client.ZCount("zset", "(1", "3")
+ Expect(zCount.Err()).NotTo(HaveOccurred())
+ Expect(zCount.Val()).To(Equal(int64(2)))
+ })
+
+ It("should ZIncrBy", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zIncrBy := client.ZIncrBy("zset", 2, "one")
+ Expect(zIncrBy.Err()).NotTo(HaveOccurred())
+ Expect(zIncrBy.Val()).To(Equal(float64(3)))
+
+ val, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{2, "two"}, {3, "one"}}))
+ })
+
+ It("should ZInterStore", func() {
+ zAdd := client.ZAdd("zset1", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset1", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zAdd = client.ZAdd("zset2", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset2", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset3", redis.Z{3, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zInterStore := client.ZInterStore(
+ "out", redis.ZStore{Weights: []float64{2, 3}}, "zset1", "zset2")
+ Expect(zInterStore.Err()).NotTo(HaveOccurred())
+ Expect(zInterStore.Val()).To(Equal(int64(2)))
+
+ val, err := client.ZRangeWithScores("out", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{5, "one"}, {10, "two"}}))
+ })
+
+ It("should ZRange", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zRange := client.ZRange("zset", 0, -1)
+ Expect(zRange.Err()).NotTo(HaveOccurred())
+ Expect(zRange.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ zRange = client.ZRange("zset", 2, 3)
+ Expect(zRange.Err()).NotTo(HaveOccurred())
+ Expect(zRange.Val()).To(Equal([]string{"three"}))
+
+ zRange = client.ZRange("zset", -2, -1)
+ Expect(zRange.Err()).NotTo(HaveOccurred())
+ Expect(zRange.Val()).To(Equal([]string{"two", "three"}))
+ })
+
+ It("should ZRangeWithScores", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ val, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{1, "one"}, {2, "two"}, {3, "three"}}))
+
+ val, err = client.ZRangeWithScores("zset", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{3, "three"}}))
+
+ val, err = client.ZRangeWithScores("zset", -2, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{2, "two"}, {3, "three"}}))
+ })
+
+ It("should ZRangeByScore", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zRangeByScore := client.ZRangeByScore("zset", redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ })
+ Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByScore.Val()).To(Equal([]string{"one", "two", "three"}))
+
+ zRangeByScore = client.ZRangeByScore("zset", redis.ZRangeBy{
+ Min: "1",
+ Max: "2",
+ })
+ Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByScore.Val()).To(Equal([]string{"one", "two"}))
+
+ zRangeByScore = client.ZRangeByScore("zset", redis.ZRangeBy{
+ Min: "(1",
+ Max: "2",
+ })
+ Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByScore.Val()).To(Equal([]string{"two"}))
+
+ zRangeByScore = client.ZRangeByScore("zset", redis.ZRangeBy{
+ Min: "(1",
+ Max: "(2",
+ })
+ Expect(zRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByScore.Val()).To(Equal([]string{}))
+ })
+
+ It("should ZRangeByLex", func() {
+ zAdd := client.ZAdd("zset", redis.Z{0, "a"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{0, "b"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{0, "c"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zRangeByLex := client.ZRangeByLex("zset", redis.ZRangeBy{
+ Min: "-",
+ Max: "+",
+ })
+ Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByLex.Val()).To(Equal([]string{"a", "b", "c"}))
+
+ zRangeByLex = client.ZRangeByLex("zset", redis.ZRangeBy{
+ Min: "[a",
+ Max: "[b",
+ })
+ Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByLex.Val()).To(Equal([]string{"a", "b"}))
+
+ zRangeByLex = client.ZRangeByLex("zset", redis.ZRangeBy{
+ Min: "(a",
+ Max: "[b",
+ })
+ Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByLex.Val()).To(Equal([]string{"b"}))
+
+ zRangeByLex = client.ZRangeByLex("zset", redis.ZRangeBy{
+ Min: "(a",
+ Max: "(b",
+ })
+ Expect(zRangeByLex.Err()).NotTo(HaveOccurred())
+ Expect(zRangeByLex.Val()).To(Equal([]string{}))
+ })
+
+ It("should ZRangeByScoreWithScoresMap", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ val, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{1, "one"}, {2, "two"}, {3, "three"}}))
+
+ val, err = client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
+ Min: "1",
+ Max: "2",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{1, "one"}, {2, "two"}}))
+
+ val, err = client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
+ Min: "(1",
+ Max: "2",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{2, "two"}}))
+
+ val, err = client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
+ Min: "(1",
+ Max: "(2",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{}))
+ })
+
+ It("should ZRank", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zRank := client.ZRank("zset", "three")
+ Expect(zRank.Err()).NotTo(HaveOccurred())
+ Expect(zRank.Val()).To(Equal(int64(2)))
+
+ zRank = client.ZRank("zset", "four")
+ Expect(zRank.Err()).To(Equal(redis.Nil))
+ Expect(zRank.Val()).To(Equal(int64(0)))
+ })
+
+ It("should ZRem", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zRem := client.ZRem("zset", "two")
+ Expect(zRem.Err()).NotTo(HaveOccurred())
+ Expect(zRem.Val()).To(Equal(int64(1)))
+
+ val, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{1, "one"}, {3, "three"}}))
+ })
+
+ It("should ZRemRangeByRank", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zRemRangeByRank := client.ZRemRangeByRank("zset", 0, 1)
+ Expect(zRemRangeByRank.Err()).NotTo(HaveOccurred())
+ Expect(zRemRangeByRank.Val()).To(Equal(int64(2)))
+
+ val, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{3, "three"}}))
+ })
+
+ It("should ZRemRangeByScore", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zRemRangeByScore := client.ZRemRangeByScore("zset", "-inf", "(2")
+ Expect(zRemRangeByScore.Err()).NotTo(HaveOccurred())
+ Expect(zRemRangeByScore.Val()).To(Equal(int64(1)))
+
+ val, err := client.ZRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{2, "two"}, {3, "three"}}))
+ })
+
+ It("should ZRemRangeByLex", func() {
+ zz := []redis.Z{
+ {0, "aaaa"},
+ {0, "b"},
+ {0, "c"},
+ {0, "d"},
+ {0, "e"},
+ {0, "foo"},
+ {0, "zap"},
+ {0, "zip"},
+ {0, "ALPHA"},
+ {0, "alpha"},
+ }
+ for _, z := range zz {
+ err := client.ZAdd("zset", z).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ n, err := client.ZRemRangeByLex("zset", "[alpha", "[omega").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(6)))
+
+ vals, err := client.ZRange("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"ALPHA", "aaaa", "zap", "zip"}))
+ })
+
+ It("should ZRevRange", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zRevRange := client.ZRevRange("zset", 0, -1)
+ Expect(zRevRange.Err()).NotTo(HaveOccurred())
+ Expect(zRevRange.Val()).To(Equal([]string{"three", "two", "one"}))
+
+ zRevRange = client.ZRevRange("zset", 2, 3)
+ Expect(zRevRange.Err()).NotTo(HaveOccurred())
+ Expect(zRevRange.Val()).To(Equal([]string{"one"}))
+
+ zRevRange = client.ZRevRange("zset", -2, -1)
+ Expect(zRevRange.Err()).NotTo(HaveOccurred())
+ Expect(zRevRange.Val()).To(Equal([]string{"two", "one"}))
+ })
+
+ It("should ZRevRangeWithScoresMap", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ val, err := client.ZRevRangeWithScores("zset", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{3, "three"}, {2, "two"}, {1, "one"}}))
+
+ val, err = client.ZRevRangeWithScores("zset", 2, 3).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{1, "one"}}))
+
+ val, err = client.ZRevRangeWithScores("zset", -2, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{2, "two"}, {1, "one"}}))
+ })
+
+ It("should ZRevRangeByScore", func() {
+ zadd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zadd.Err()).NotTo(HaveOccurred())
+ zadd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zadd.Err()).NotTo(HaveOccurred())
+ zadd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zadd.Err()).NotTo(HaveOccurred())
+
+ vals, err := client.ZRevRangeByScore(
+ "zset", redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"three", "two", "one"}))
+
+ vals, err = client.ZRevRangeByScore(
+ "zset", redis.ZRangeBy{Max: "2", Min: "(1"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"two"}))
+
+ vals, err = client.ZRevRangeByScore(
+ "zset", redis.ZRangeBy{Max: "(2", Min: "(1"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{}))
+ })
+
+ It("should ZRevRangeByLex", func() {
+ zadd := client.ZAdd("zset", redis.Z{0, "a"})
+ Expect(zadd.Err()).NotTo(HaveOccurred())
+ zadd = client.ZAdd("zset", redis.Z{0, "b"})
+ Expect(zadd.Err()).NotTo(HaveOccurred())
+ zadd = client.ZAdd("zset", redis.Z{0, "c"})
+ Expect(zadd.Err()).NotTo(HaveOccurred())
+
+ vals, err := client.ZRevRangeByLex(
+ "zset", redis.ZRangeBy{Max: "+", Min: "-"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"c", "b", "a"}))
+
+ vals, err = client.ZRevRangeByLex(
+ "zset", redis.ZRangeBy{Max: "[b", Min: "(a"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{"b"}))
+
+ vals, err = client.ZRevRangeByLex(
+ "zset", redis.ZRangeBy{Max: "(b", Min: "(a"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]string{}))
+ })
+
+ It("should ZRevRangeByScoreWithScores", func() {
+ zadd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zadd.Err()).NotTo(HaveOccurred())
+ zadd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zadd.Err()).NotTo(HaveOccurred())
+ zadd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zadd.Err()).NotTo(HaveOccurred())
+
+ vals, err := client.ZRevRangeByScoreWithScores(
+ "zset", redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]redis.Z{{3, "three"}, {2, "two"}, {1, "one"}}))
+ })
+
+ It("should ZRevRangeByScoreWithScoresMap", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ val, err := client.ZRevRangeByScoreWithScores(
+ "zset", redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{3, "three"}, {2, "two"}, {1, "one"}}))
+
+ val, err = client.ZRevRangeByScoreWithScores(
+ "zset", redis.ZRangeBy{Max: "2", Min: "(1"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{2, "two"}}))
+
+ val, err = client.ZRevRangeByScoreWithScores(
+ "zset", redis.ZRangeBy{Max: "(2", Min: "(1"}).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{}))
+ })
+
+ It("should ZRevRank", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zRevRank := client.ZRevRank("zset", "one")
+ Expect(zRevRank.Err()).NotTo(HaveOccurred())
+ Expect(zRevRank.Val()).To(Equal(int64(2)))
+
+ zRevRank = client.ZRevRank("zset", "four")
+ Expect(zRevRank.Err()).To(Equal(redis.Nil))
+ Expect(zRevRank.Val()).To(Equal(int64(0)))
+ })
+
+ It("should ZScore", func() {
+ zAdd := client.ZAdd("zset", redis.Z{1.001, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zScore := client.ZScore("zset", "one")
+ Expect(zScore.Err()).NotTo(HaveOccurred())
+ Expect(zScore.Val()).To(Equal(float64(1.001)))
+ })
+
+ It("should ZUnionStore", func() {
+ zAdd := client.ZAdd("zset1", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset1", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zAdd = client.ZAdd("zset2", redis.Z{1, "one"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset2", redis.Z{2, "two"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+ zAdd = client.ZAdd("zset2", redis.Z{3, "three"})
+ Expect(zAdd.Err()).NotTo(HaveOccurred())
+
+ zUnionStore := client.ZUnionStore(
+ "out", redis.ZStore{Weights: []float64{2, 3}}, "zset1", "zset2")
+ Expect(zUnionStore.Err()).NotTo(HaveOccurred())
+ Expect(zUnionStore.Val()).To(Equal(int64(3)))
+
+ val, err := client.ZRangeWithScores("out", 0, -1).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal([]redis.Z{{5, "one"}, {9, "three"}, {10, "two"}}))
+ })
+
+ })
+
+ Describe("Geo add and radius search", func() {
+ BeforeEach(func() {
+ geoAdd := client.GeoAdd(
+ "Sicily",
+ &redis.GeoLocation{Longitude: 13.361389, Latitude: 38.115556, Name: "Palermo"},
+ &redis.GeoLocation{Longitude: 15.087269, Latitude: 37.502669, Name: "Catania"},
+ )
+ Expect(geoAdd.Err()).NotTo(HaveOccurred())
+ Expect(geoAdd.Val()).To(Equal(int64(2)))
+ })
+
+ It("should not add same geo location", func() {
+ geoAdd := client.GeoAdd(
+ "Sicily",
+ &redis.GeoLocation{Longitude: 13.361389, Latitude: 38.115556, Name: "Palermo"},
+ )
+ Expect(geoAdd.Err()).NotTo(HaveOccurred())
+ Expect(geoAdd.Val()).To(Equal(int64(0)))
+ })
+
+ It("should search geo radius", func() {
+ res, err := client.GeoRadius("Sicily", 15, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(2))
+ Expect(res[0].Name).To(Equal("Palermo"))
+ Expect(res[1].Name).To(Equal("Catania"))
+ })
+
+ It("should search geo radius with options", func() {
+ res, err := client.GeoRadius("Sicily", 15, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ Unit: "km",
+ WithGeoHash: true,
+ WithCoord: true,
+ WithDist: true,
+ Count: 2,
+ Sort: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(2))
+ Expect(res[1].Name).To(Equal("Palermo"))
+ Expect(res[1].Dist).To(Equal(190.4424))
+ Expect(res[1].GeoHash).To(Equal(int64(3479099956230698)))
+ Expect(res[1].Longitude).To(Equal(13.361389338970184))
+ Expect(res[1].Latitude).To(Equal(38.115556395496299))
+ Expect(res[0].Name).To(Equal("Catania"))
+ Expect(res[0].Dist).To(Equal(56.4413))
+ Expect(res[0].GeoHash).To(Equal(int64(3479447370796909)))
+ Expect(res[0].Longitude).To(Equal(15.087267458438873))
+ Expect(res[0].Latitude).To(Equal(37.50266842333162))
+ })
+
+ It("should search geo radius with WithDist=false", func() {
+ res, err := client.GeoRadius("Sicily", 15, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ Unit: "km",
+ WithGeoHash: true,
+ WithCoord: true,
+ Count: 2,
+ Sort: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(2))
+ Expect(res[1].Name).To(Equal("Palermo"))
+ Expect(res[1].Dist).To(Equal(float64(0)))
+ Expect(res[1].GeoHash).To(Equal(int64(3479099956230698)))
+ Expect(res[1].Longitude).To(Equal(13.361389338970184))
+ Expect(res[1].Latitude).To(Equal(38.115556395496299))
+ Expect(res[0].Name).To(Equal("Catania"))
+ Expect(res[0].Dist).To(Equal(float64(0)))
+ Expect(res[0].GeoHash).To(Equal(int64(3479447370796909)))
+ Expect(res[0].Longitude).To(Equal(15.087267458438873))
+ Expect(res[0].Latitude).To(Equal(37.50266842333162))
+ })
+
+ It("should search geo radius by member with options", func() {
+ res, err := client.GeoRadiusByMember("Sicily", "Catania", &redis.GeoRadiusQuery{
+ Radius: 200,
+ Unit: "km",
+ WithGeoHash: true,
+ WithCoord: true,
+ WithDist: true,
+ Count: 2,
+ Sort: "ASC",
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(2))
+ Expect(res[0].Name).To(Equal("Catania"))
+ Expect(res[0].Dist).To(Equal(0.0))
+ Expect(res[0].GeoHash).To(Equal(int64(3479447370796909)))
+ Expect(res[0].Longitude).To(Equal(15.087267458438873))
+ Expect(res[0].Latitude).To(Equal(37.50266842333162))
+ Expect(res[1].Name).To(Equal("Palermo"))
+ Expect(res[1].Dist).To(Equal(166.2742))
+ Expect(res[1].GeoHash).To(Equal(int64(3479099956230698)))
+ Expect(res[1].Longitude).To(Equal(13.361389338970184))
+ Expect(res[1].Latitude).To(Equal(38.115556395496299))
+ })
+
+ It("should search geo radius with no results", func() {
+ res, err := client.GeoRadius("Sicily", 99, 37, &redis.GeoRadiusQuery{
+ Radius: 200,
+ Unit: "km",
+ WithGeoHash: true,
+ WithCoord: true,
+ WithDist: true,
+ }).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res).To(HaveLen(0))
+ })
+
+ It("should get geo distance with unit options", func() {
+ // From Redis CLI, note the difference in rounding in m vs
+ // km on Redis itself.
+ //
+ // GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
+ // GEODIST Sicily Palermo Catania m
+ // "166274.15156960033"
+ // GEODIST Sicily Palermo Catania km
+ // "166.27415156960032"
+ dist, err := client.GeoDist("Sicily", "Palermo", "Catania", "km").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dist).To(BeNumerically("~", 166.27, 0.01))
+
+ dist, err = client.GeoDist("Sicily", "Palermo", "Catania", "m").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dist).To(BeNumerically("~", 166274.15, 0.01))
+ })
+
+ It("should get geo hash in string representation", func() {
+ hashes, err := client.GeoHash("Sicily", "Palermo", "Catania").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(hashes).To(ConsistOf([]string{"sqc8b49rny0", "sqdtr74hyu0"}))
+ })
+
+ It("should return geo position", func() {
+ pos, err := client.GeoPos("Sicily", "Palermo", "Catania", "NonExisting").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(pos).To(ConsistOf([]*redis.GeoPos{
+ {
+ Longitude: 13.361389338970184,
+ Latitude: 38.1155563954963,
+ },
+ {
+ Longitude: 15.087267458438873,
+ Latitude: 37.50266842333162,
+ },
+ nil,
+ }))
+ })
+ })
+
+ Describe("marshaling/unmarshaling", func() {
+
+ type convTest struct {
+ value interface{}
+ wanted string
+ dest interface{}
+ }
+
+ convTests := []convTest{
+ {nil, "", nil},
+ {"hello", "hello", new(string)},
+ {[]byte("hello"), "hello", new([]byte)},
+ {int(1), "1", new(int)},
+ {int8(1), "1", new(int8)},
+ {int16(1), "1", new(int16)},
+ {int32(1), "1", new(int32)},
+ {int64(1), "1", new(int64)},
+ {uint(1), "1", new(uint)},
+ {uint8(1), "1", new(uint8)},
+ {uint16(1), "1", new(uint16)},
+ {uint32(1), "1", new(uint32)},
+ {uint64(1), "1", new(uint64)},
+ {float32(1.0), "1", new(float32)},
+ {float64(1.0), "1", new(float64)},
+ {true, "1", new(bool)},
+ {false, "0", new(bool)},
+ }
+
+ It("should convert to string", func() {
+ for _, test := range convTests {
+ err := client.Set("key", test.value, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ s, err := client.Get("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(s).To(Equal(test.wanted))
+
+ if test.dest == nil {
+ continue
+ }
+
+ err = client.Get("key").Scan(test.dest)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(deref(test.dest)).To(Equal(test.value))
+ }
+ })
+
+ })
+
+ Describe("json marshaling/unmarshaling", func() {
+
+ BeforeEach(func() {
+ value := &numberStruct{Number: 42}
+ err := client.Set("key", value, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should marshal custom values using json", func() {
+ s, err := client.Get("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(s).To(Equal(`{"Number":42}`))
+ })
+
+ It("should scan custom values using json", func() {
+ value := &numberStruct{}
+ err := client.Get("key").Scan(value)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(value.Number).To(Equal(42))
+ })
+
+ })
+
+ Describe("Command", func() {
+
+ It("returns map of commands", func() {
+ cmds, err := client.Command().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cmds)).To(BeNumerically("~", 180, 10))
+
+ cmd := cmds["mget"]
+ Expect(cmd.Name).To(Equal("mget"))
+ Expect(cmd.Arity).To(Equal(int8(-2)))
+ Expect(cmd.Flags).To(ContainElement("readonly"))
+ Expect(cmd.FirstKeyPos).To(Equal(int8(1)))
+ Expect(cmd.LastKeyPos).To(Equal(int8(-1)))
+ Expect(cmd.StepCount).To(Equal(int8(1)))
+ })
+
+ })
+
+ Describe("Eval", func() {
+
+ It("returns keys and values", func() {
+ vals, err := client.Eval(
+ "return {KEYS[1],ARGV[1]}",
+ []string{"key"},
+ "hello",
+ ).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(vals).To(Equal([]interface{}{"key", "hello"}))
+ })
+
+ })
+
+})
+
+type numberStruct struct {
+ Number int
+}
+
+func (s *numberStruct) MarshalBinary() ([]byte, error) {
+ return json.Marshal(s)
+}
+
+func (s *numberStruct) UnmarshalBinary(b []byte) error {
+ return json.Unmarshal(b, s)
+}
+
+func deref(viface interface{}) interface{} {
+ v := reflect.ValueOf(viface)
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
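The numberStruct helper above backs the "json marshaling/unmarshaling" specs: any value handed to Set that implements encoding.BinaryMarshaler is serialized transparently, and Get(...).Scan restores it through encoding.BinaryUnmarshaler. A minimal stand-alone sketch of the same pattern, assuming a reachable server on localhost:6379 (the event type and key name are illustrative, not part of this change):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-redis/redis"
)

// event uses the same BinaryMarshaler/BinaryUnmarshaler pattern as
// numberStruct in the test file above; the type is purely illustrative.
type event struct {
	Name string
}

func (e *event) MarshalBinary() ([]byte, error) { return json.Marshal(e) }
func (e *event) UnmarshalBinary(b []byte) error { return json.Unmarshal(b, e) }

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Set serializes the value through MarshalBinary (JSON here).
	if err := client.Set("event", &event{Name: "signup"}, 0).Err(); err != nil {
		panic(err)
	}

	// Scan restores it through UnmarshalBinary.
	var got event
	if err := client.Get("event").Scan(&got); err != nil {
		panic(err)
	}
	fmt.Println(got.Name) // signup
}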
diff --git a/vendor/github.com/go-redis/redis/doc.go b/vendor/github.com/go-redis/redis/doc.go
new file mode 100644
index 000000000..55262533a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/doc.go
@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis
diff --git a/vendor/github.com/go-redis/redis/example_instrumentation_test.go b/vendor/github.com/go-redis/redis/example_instrumentation_test.go
new file mode 100644
index 000000000..02051f9c9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/example_instrumentation_test.go
@@ -0,0 +1,59 @@
+package redis_test
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis"
+)
+
+func Example_instrumentation() {
+ ring := redis.NewRing(&redis.RingOptions{
+ Addrs: map[string]string{
+ "shard1": ":6379",
+ },
+ })
+ ring.ForEachShard(func(client *redis.Client) error {
+ wrapRedisProcess(client)
+ return nil
+ })
+
+ for {
+ ring.Ping()
+ }
+}
+
+func wrapRedisProcess(client *redis.Client) {
+ const precision = time.Microsecond
+ var count, avgDur uint32
+
+ go func() {
+ for range time.Tick(3 * time.Second) {
+ n := atomic.LoadUint32(&count)
+ dur := time.Duration(atomic.LoadUint32(&avgDur)) * precision
+ fmt.Printf("%s: processed=%d avg_dur=%s\n", client, n, dur)
+ }
+ }()
+
+ client.WrapProcess(func(oldProcess func(redis.Cmder) error) func(redis.Cmder) error {
+ return func(cmd redis.Cmder) error {
+ start := time.Now()
+ err := oldProcess(cmd)
+ dur := time.Since(start)
+
+ const decay = float64(1) / 100
+ ms := float64(dur / precision)
+ for {
+ avg := atomic.LoadUint32(&avgDur)
+ newAvg := uint32((1-decay)*float64(avg) + decay*ms)
+ if atomic.CompareAndSwapUint32(&avgDur, avg, newAvg) {
+ break
+ }
+ }
+ atomic.AddUint32(&count, 1)
+
+ return err
+ }
+ })
+}
diff --git a/vendor/github.com/go-redis/redis/example_test.go b/vendor/github.com/go-redis/redis/example_test.go
new file mode 100644
index 000000000..319ea0ca2
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/example_test.go
@@ -0,0 +1,414 @@
+package redis_test
+
+import (
+ "fmt"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis"
+)
+
+var client *redis.Client
+
+func init() {
+ client = redis.NewClient(&redis.Options{
+ Addr: ":6379",
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ })
+ client.FlushDB()
+}
+
+func ExampleNewClient() {
+ client := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ pong, err := client.Ping().Result()
+ fmt.Println(pong, err)
+ // Output: PONG <nil>
+}
+
+func ExampleParseURL() {
+ opt, err := redis.ParseURL("redis://:qwerty@localhost:6379/1")
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("addr is", opt.Addr)
+ fmt.Println("db is", opt.DB)
+ fmt.Println("password is", opt.Password)
+
+	// Create a client as usual.
+ _ = redis.NewClient(opt)
+
+ // Output: addr is localhost:6379
+ // db is 1
+ // password is qwerty
+}
+
+func ExampleNewFailoverClient() {
+	// See http://redis.io/topics/sentinel for instructions on how to
+	// set up Redis Sentinel.
+ client := redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: "master",
+ SentinelAddrs: []string{":26379"},
+ })
+ client.Ping()
+}
+
+func ExampleNewClusterClient() {
+	// See http://redis.io/topics/cluster-tutorial for instructions
+	// on how to set up Redis Cluster.
+ client := redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: []string{":7000", ":7001", ":7002", ":7003", ":7004", ":7005"},
+ })
+ client.Ping()
+}
+
+func ExampleNewRing() {
+ client := redis.NewRing(&redis.RingOptions{
+ Addrs: map[string]string{
+ "shard1": ":7000",
+ "shard2": ":7001",
+ "shard3": ":7002",
+ },
+ })
+ client.Ping()
+}
+
+func ExampleClient() {
+ err := client.Set("key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := client.Get("key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := client.Get("key2").Result()
+ if err == redis.Nil {
+		fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+	// key2 does not exist
+}
+
+func ExampleClient_Set() {
+ // Last argument is expiration. Zero means the key has no
+ // expiration time.
+ err := client.Set("key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ // key2 will expire in an hour.
+ err = client.Set("key2", "value", time.Hour).Err()
+ if err != nil {
+ panic(err)
+ }
+}
+
+func ExampleClient_Incr() {
+ if err := client.Incr("counter").Err(); err != nil {
+ panic(err)
+ }
+
+ n, err := client.Get("counter").Int64()
+ fmt.Println(n, err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_BLPop() {
+ if err := client.RPush("queue", "message").Err(); err != nil {
+ panic(err)
+ }
+
+ // use `client.BLPop(0, "queue")` for infinite waiting time
+ result, err := client.BLPop(1*time.Second, "queue").Result()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(result[0], result[1])
+ // Output: queue message
+}
+
+func ExampleClient_Scan() {
+ client.FlushDB()
+ for i := 0; i < 33; i++ {
+ err := client.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ var cursor uint64
+ var n int
+ for {
+ var keys []string
+ var err error
+ keys, cursor, err = client.Scan(cursor, "", 10).Result()
+ if err != nil {
+ panic(err)
+ }
+ n += len(keys)
+ if cursor == 0 {
+ break
+ }
+ }
+
+ fmt.Printf("found %d keys\n", n)
+ // Output: found 33 keys
+}
+
+func ExampleClient_Pipelined() {
+ var incr *redis.IntCmd
+ _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+ incr = pipe.Incr("pipelined_counter")
+ pipe.Expire("pipelined_counter", time.Hour)
+ return nil
+ })
+ fmt.Println(incr.Val(), err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_Pipeline() {
+ pipe := client.Pipeline()
+
+ incr := pipe.Incr("pipeline_counter")
+ pipe.Expire("pipeline_counter", time.Hour)
+
+ // Execute
+ //
+ // INCR pipeline_counter
+	// EXPIRE pipeline_counter 3600
+ //
+ // using one client-server roundtrip.
+ _, err := pipe.Exec()
+ fmt.Println(incr.Val(), err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_TxPipelined() {
+ var incr *redis.IntCmd
+ _, err := client.TxPipelined(func(pipe redis.Pipeliner) error {
+ incr = pipe.Incr("tx_pipelined_counter")
+ pipe.Expire("tx_pipelined_counter", time.Hour)
+ return nil
+ })
+ fmt.Println(incr.Val(), err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_TxPipeline() {
+ pipe := client.TxPipeline()
+
+ incr := pipe.Incr("tx_pipeline_counter")
+ pipe.Expire("tx_pipeline_counter", time.Hour)
+
+ // Execute
+ //
+ // MULTI
+	// INCR tx_pipeline_counter
+	// EXPIRE tx_pipeline_counter 3600
+ // EXEC
+ //
+ // using one client-server roundtrip.
+ _, err := pipe.Exec()
+ fmt.Println(incr.Val(), err)
+ // Output: 1 <nil>
+}
+
+func ExampleClient_Watch() {
+ var incr func(string) error
+
+ // Transactionally increments key using GET and SET commands.
+ incr = func(key string) error {
+ err := client.Watch(func(tx *redis.Tx) error {
+ n, err := tx.Get(key).Int64()
+ if err != nil && err != redis.Nil {
+ return err
+ }
+
+ _, err = tx.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Set(key, strconv.FormatInt(n+1, 10), 0)
+ return nil
+ })
+ return err
+ }, key)
+ if err == redis.TxFailedErr {
+ return incr(key)
+ }
+ return err
+ }
+
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ err := incr("counter3")
+ if err != nil {
+ panic(err)
+ }
+ }()
+ }
+ wg.Wait()
+
+ n, err := client.Get("counter3").Int64()
+ fmt.Println(n, err)
+ // Output: 100 <nil>
+}
+
+func ExamplePubSub() {
+ pubsub := client.Subscribe("mychannel1")
+ defer pubsub.Close()
+
+ // Wait for subscription to be created before publishing message.
+ subscr, err := pubsub.ReceiveTimeout(time.Second)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(subscr)
+
+ err = client.Publish("mychannel1", "hello").Err()
+ if err != nil {
+ panic(err)
+ }
+
+ msg, err := pubsub.ReceiveMessage()
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(msg.Channel, msg.Payload)
+ // Output: subscribe: mychannel1
+ // mychannel1 hello
+}
+
+func ExamplePubSub_Receive() {
+ pubsub := client.Subscribe("mychannel2")
+ defer pubsub.Close()
+
+ for i := 0; i < 2; i++ {
+ // ReceiveTimeout is a low level API. Use ReceiveMessage instead.
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ if err != nil {
+ break
+ }
+
+ switch msg := msgi.(type) {
+ case *redis.Subscription:
+ fmt.Println("subscribed to", msg.Channel)
+
+ _, err := client.Publish("mychannel2", "hello").Result()
+ if err != nil {
+ panic(err)
+ }
+ case *redis.Message:
+ fmt.Println("received", msg.Payload, "from", msg.Channel)
+ default:
+ panic("unreached")
+ }
+ }
+
+ // sent message to 1 client
+ // received hello from mychannel2
+}
+
+func ExampleScript() {
+ IncrByXX := redis.NewScript(`
+ if redis.call("GET", KEYS[1]) ~= false then
+ return redis.call("INCRBY", KEYS[1], ARGV[1])
+ end
+ return false
+ `)
+
+ n, err := IncrByXX.Run(client, []string{"xx_counter"}, 2).Result()
+ fmt.Println(n, err)
+
+ err = client.Set("xx_counter", "40", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ n, err = IncrByXX.Run(client, []string{"xx_counter"}, 2).Result()
+ fmt.Println(n, err)
+
+ // Output: <nil> redis: nil
+ // 42 <nil>
+}
+
+func Example_customCommand() {
+ Get := func(client *redis.Client, key string) *redis.StringCmd {
+ cmd := redis.NewStringCmd("get", key)
+ client.Process(cmd)
+ return cmd
+ }
+
+ v, err := Get(client, "key_does_not_exist").Result()
+ fmt.Printf("%q %s", v, err)
+ // Output: "" redis: nil
+}
+
+func ExampleScanIterator() {
+ iter := client.Scan(0, "", 0).Iterator()
+ for iter.Next() {
+ fmt.Println(iter.Val())
+ }
+ if err := iter.Err(); err != nil {
+ panic(err)
+ }
+}
+
+func ExampleScanCmd_Iterator() {
+ iter := client.Scan(0, "", 0).Iterator()
+ for iter.Next() {
+ fmt.Println(iter.Val())
+ }
+ if err := iter.Err(); err != nil {
+ panic(err)
+ }
+}
+
+func ExampleNewUniversalClient_simple() {
+ client := redis.NewUniversalClient(&redis.UniversalOptions{
+ Addrs: []string{":6379"},
+ })
+ defer client.Close()
+
+ client.Ping()
+}
+
+func ExampleNewUniversalClient_failover() {
+ client := redis.NewUniversalClient(&redis.UniversalOptions{
+ MasterName: "master",
+ Addrs: []string{":26379"},
+ })
+ defer client.Close()
+
+ client.Ping()
+}
+
+func ExampleNewUniversalClient_cluster() {
+ client := redis.NewUniversalClient(&redis.UniversalOptions{
+ Addrs: []string{":7000", ":7001", ":7002", ":7003", ":7004", ":7005"},
+ })
+ defer client.Close()
+
+ client.Ping()
+}
diff --git a/vendor/github.com/go-redis/redis/export_test.go b/vendor/github.com/go-redis/redis/export_test.go
new file mode 100644
index 000000000..b88e41be9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/export_test.go
@@ -0,0 +1,35 @@
+package redis
+
+import (
+ "net"
+ "time"
+
+ "github.com/go-redis/redis/internal/pool"
+)
+
+func (c *baseClient) Pool() pool.Pooler {
+ return c.connPool
+}
+
+func (c *PubSub) SetNetConn(netConn net.Conn) {
+ c.cn = pool.NewConn(netConn)
+}
+
+func (c *PubSub) ReceiveMessageTimeout(timeout time.Duration) (*Message, error) {
+ return c.receiveMessage(timeout)
+}
+
+func (c *ClusterClient) SlotAddrs(slot int) []string {
+ var addrs []string
+ for _, n := range c.state().slotNodes(slot) {
+ addrs = append(addrs, n.Client.getAddr())
+ }
+ return addrs
+}
+
+// SwapSlotNodes swaps a slot's master/slave addresses for testing MOVED redirects.
+func (c *ClusterClient) SwapSlotNodes(slot int) []string {
+ nodes := c.state().slots[slot]
+ nodes[0], nodes[1] = nodes[1], nodes[0]
+ return c.SlotAddrs(slot)
+}
diff --git a/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go b/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go
new file mode 100644
index 000000000..a9c56f076
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package consistenthash provides an implementation of a ring hash.
+package consistenthash
+
+import (
+ "hash/crc32"
+ "sort"
+ "strconv"
+)
+
+type Hash func(data []byte) uint32
+
+type Map struct {
+ hash Hash
+ replicas int
+ keys []int // Sorted
+ hashMap map[int]string
+}
+
+func New(replicas int, fn Hash) *Map {
+ m := &Map{
+ replicas: replicas,
+ hash: fn,
+ hashMap: make(map[int]string),
+ }
+ if m.hash == nil {
+ m.hash = crc32.ChecksumIEEE
+ }
+ return m
+}
+
+// IsEmpty returns true if there are no items available.
+func (m *Map) IsEmpty() bool {
+ return len(m.keys) == 0
+}
+
+// Add adds some keys to the hash.
+func (m *Map) Add(keys ...string) {
+ for _, key := range keys {
+ for i := 0; i < m.replicas; i++ {
+ hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
+ m.keys = append(m.keys, hash)
+ m.hashMap[hash] = key
+ }
+ }
+ sort.Ints(m.keys)
+}
+
+// Get returns the closest item in the hash to the provided key.
+func (m *Map) Get(key string) string {
+ if m.IsEmpty() {
+ return ""
+ }
+
+ hash := int(m.hash([]byte(key)))
+
+ // Binary search for appropriate replica.
+ idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })
+
+ // Means we have cycled back to the first replica.
+ if idx == len(m.keys) {
+ idx = 0
+ }
+
+ return m.hashMap[m.keys[idx]]
+}
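A brief usage sketch of the Map above, to make the ring behaviour concrete. The shard names are made up, and because the package sits under internal/ it is only importable from inside the go-redis module; the Ring client is what drives it in practice:

package main

import (
	"fmt"

	"github.com/go-redis/redis/internal/consistenthash"
)

func main() {
	// 100 virtual replicas per shard; a nil hash falls back to crc32.ChecksumIEEE.
	m := consistenthash.New(100, nil)
	m.Add("shard1", "shard2", "shard3")

	// The same key maps to the same shard for as long as membership is unchanged.
	fmt.Println(m.Get("user:42"))
	fmt.Println(m.Get("user:42"))
}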
diff --git a/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash_test.go b/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash_test.go
new file mode 100644
index 000000000..1a37fd7ff
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash_test.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package consistenthash
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func TestHashing(t *testing.T) {
+
+ // Override the hash function to return easier to reason about values. Assumes
+ // the keys can be converted to an integer.
+ hash := New(3, func(key []byte) uint32 {
+ i, err := strconv.Atoi(string(key))
+ if err != nil {
+ panic(err)
+ }
+ return uint32(i)
+ })
+
+ // Given the above hash function, this will give replicas with "hashes":
+ // 2, 4, 6, 12, 14, 16, 22, 24, 26
+ hash.Add("6", "4", "2")
+
+ testCases := map[string]string{
+ "2": "2",
+ "11": "2",
+ "23": "4",
+ "27": "2",
+ }
+
+ for k, v := range testCases {
+ if hash.Get(k) != v {
+ t.Errorf("Asking for %s, should have yielded %s", k, v)
+ }
+ }
+
+ // Adds 8, 18, 28
+ hash.Add("8")
+
+ // 27 should now map to 8.
+ testCases["27"] = "8"
+
+ for k, v := range testCases {
+ if hash.Get(k) != v {
+ t.Errorf("Asking for %s, should have yielded %s", k, v)
+ }
+ }
+
+}
+
+func TestConsistency(t *testing.T) {
+ hash1 := New(1, nil)
+ hash2 := New(1, nil)
+
+ hash1.Add("Bill", "Bob", "Bonny")
+ hash2.Add("Bob", "Bonny", "Bill")
+
+ if hash1.Get("Ben") != hash2.Get("Ben") {
+ t.Errorf("Fetching 'Ben' from both hashes should be the same")
+ }
+
+ hash2.Add("Becky", "Ben", "Bobby")
+
+ if hash1.Get("Ben") != hash2.Get("Ben") ||
+ hash1.Get("Bob") != hash2.Get("Bob") ||
+ hash1.Get("Bonny") != hash2.Get("Bonny") {
+ t.Errorf("Direct matches should always return the same entry")
+ }
+
+}
+
+func BenchmarkGet8(b *testing.B) { benchmarkGet(b, 8) }
+func BenchmarkGet32(b *testing.B) { benchmarkGet(b, 32) }
+func BenchmarkGet128(b *testing.B) { benchmarkGet(b, 128) }
+func BenchmarkGet512(b *testing.B) { benchmarkGet(b, 512) }
+
+func benchmarkGet(b *testing.B, shards int) {
+
+ hash := New(50, nil)
+
+ var buckets []string
+ for i := 0; i < shards; i++ {
+ buckets = append(buckets, fmt.Sprintf("shard-%d", i))
+ }
+
+ hash.Add(buckets...)
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ hash.Get(buckets[i&(shards-1)])
+ }
+}
diff --git a/vendor/github.com/go-redis/redis/internal/errors.go b/vendor/github.com/go-redis/redis/internal/errors.go
new file mode 100644
index 000000000..c93e00818
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/errors.go
@@ -0,0 +1,75 @@
+package internal
+
+import (
+ "io"
+ "net"
+ "strings"
+)
+
+const Nil = RedisError("redis: nil")
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func IsRetryableError(err error) bool {
+ return IsNetworkError(err) || err.Error() == "ERR max number of clients reached"
+}
+
+func IsInternalError(err error) bool {
+ _, ok := err.(RedisError)
+ return ok
+}
+
+func IsNetworkError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ _, ok := err.(net.Error)
+ return ok
+}
+
+func IsBadConn(err error, allowTimeout bool) bool {
+ if err == nil {
+ return false
+ }
+ if IsInternalError(err) {
+ return false
+ }
+ if allowTimeout {
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ return false
+ }
+ }
+ return true
+}
+
+func IsMovedError(err error) (moved bool, ask bool, addr string) {
+ if !IsInternalError(err) {
+ return
+ }
+
+ s := err.Error()
+ if strings.HasPrefix(s, "MOVED ") {
+ moved = true
+ } else if strings.HasPrefix(s, "ASK ") {
+ ask = true
+ } else {
+ return
+ }
+
+ ind := strings.LastIndex(s, " ")
+ if ind == -1 {
+ return false, false, ""
+ }
+ addr = s[ind+1:]
+ return
+}
+
+func IsLoadingError(err error) bool {
+ return strings.HasPrefix(err.Error(), "LOADING")
+}
+
+func IsExecAbortError(err error) bool {
+ return strings.HasPrefix(err.Error(), "EXECABORT")
+}
diff --git a/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
new file mode 100644
index 000000000..2866488e5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
@@ -0,0 +1,73 @@
+package hashtag
+
+import (
+ "math/rand"
+ "strings"
+)
+
+const SlotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+ 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+ 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+ 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+ 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+ 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+ 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+ 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+ 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+ 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+ 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+ 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+ 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+ 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+ 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+ 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+ 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+ 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+ 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+ 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+ 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+ 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+ 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func Key(key string) string {
+ if s := strings.IndexByte(key, '{'); s > -1 {
+ if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+ return key[s+1 : s+e+1]
+ }
+ }
+ return key
+}
+
+// Slot returns a consistent slot number between 0 and 16383
+// for any given string key.
+func Slot(key string) int {
+ key = Key(key)
+ if key == "" {
+ return rand.Intn(SlotNumber)
+ }
+ return int(crc16sum(key)) % SlotNumber
+}
+
+func crc16sum(key string) (crc uint16) {
+ for i := 0; i < len(key); i++ {
+ crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+ }
+ return
+}
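Slot applies Key first, so keys sharing a {hash tag} always land in the same cluster slot, which is what makes multi-key commands across them possible. A small sketch, again only importable from inside the module because the package is internal (key names are illustrative):

package main

import (
	"fmt"

	"github.com/go-redis/redis/internal/hashtag"
)

func main() {
	// Both keys hash only their {user1000} tag, so they share a slot.
	fmt.Println(hashtag.Slot("{user1000}.following"))
	fmt.Println(hashtag.Slot("{user1000}.followers"))

	// A key without a tag is hashed as a whole.
	fmt.Println(hashtag.Slot("plainkey"))
}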
diff --git a/vendor/github.com/go-redis/redis/internal/hashtag/hashtag_test.go b/vendor/github.com/go-redis/redis/internal/hashtag/hashtag_test.go
new file mode 100644
index 000000000..7f0fedf31
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/hashtag/hashtag_test.go
@@ -0,0 +1,74 @@
+package hashtag
+
+import (
+ "math/rand"
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "hashtag")
+}
+
+var _ = Describe("CRC16", func() {
+
+ // http://redis.io/topics/cluster-spec#keys-distribution-model
+ It("should calculate CRC16", func() {
+ tests := []struct {
+ s string
+ n uint16
+ }{
+ {"123456789", 0x31C3},
+ {string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 21847},
+ }
+
+ for _, test := range tests {
+ Expect(crc16sum(test.s)).To(Equal(test.n), "for %s", test.s)
+ }
+ })
+
+})
+
+var _ = Describe("HashSlot", func() {
+
+ It("should calculate hash slots", func() {
+ tests := []struct {
+ key string
+ slot int
+ }{
+ {"123456789", 12739},
+ {"{}foo", 9500},
+ {"foo{}", 5542},
+ {"foo{}{bar}", 8363},
+ {"", 10503},
+ {"", 5176},
+ {string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 5463},
+ }
+		// Empty keys receive a random slot.
+ rand.Seed(100)
+
+ for _, test := range tests {
+ Expect(Slot(test.key)).To(Equal(test.slot), "for %s", test.key)
+ }
+ })
+
+ It("should extract keys from tags", func() {
+ tests := []struct {
+ one, two string
+ }{
+ {"foo{bar}", "bar"},
+ {"{foo}bar", "foo"},
+ {"{user1000}.following", "{user1000}.followers"},
+ {"foo{{bar}}zap", "{bar"},
+ {"foo{bar}{zap}", "bar"},
+ }
+
+ for _, test := range tests {
+ Expect(Slot(test.one)).To(Equal(Slot(test.two)), "for %s <-> %s", test.one, test.two)
+ }
+ })
+
+})
diff --git a/vendor/github.com/go-redis/redis/internal/internal.go b/vendor/github.com/go-redis/redis/internal/internal.go
new file mode 100644
index 000000000..fb4efa5f0
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/internal.go
@@ -0,0 +1,23 @@
+package internal
+
+import (
+ "math/rand"
+ "time"
+)
+
+const retryBackoff = 8 * time.Millisecond
+
+// RetryBackoff returns an exponential backoff duration with jitter, used to avoid overloading the server during retry intervals.
+// https://www.awsarchitectureblog.com/2015/03/backoff.html
+func RetryBackoff(retry int, maxRetryBackoff time.Duration) time.Duration {
+ if retry < 0 {
+ retry = 0
+ }
+
+ backoff := retryBackoff << uint(retry)
+ if backoff > maxRetryBackoff {
+ backoff = maxRetryBackoff
+ }
+
+ return time.Duration(rand.Int63n(int64(backoff)))
+}
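
Not part of the vendored file — a hedged sketch of how a caller might use RetryBackoff between attempts (do and maxRetries are hypothetical; the 512ms cap mirrors the test below):

    for attempt := 0; attempt < maxRetries; attempt++ {
        if err := do(); err == nil {
            break
        }
        // Sleep for a random duration in [0, min(8ms << attempt, 512ms)).
        time.Sleep(RetryBackoff(attempt, 512*time.Millisecond))
    }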
diff --git a/vendor/github.com/go-redis/redis/internal/internal_test.go b/vendor/github.com/go-redis/redis/internal/internal_test.go
new file mode 100644
index 000000000..5c7000e1e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/internal_test.go
@@ -0,0 +1,17 @@
+package internal
+
+import (
+ "testing"
+ "time"
+
+ . "github.com/onsi/gomega"
+)
+
+func TestRetryBackoff(t *testing.T) {
+ RegisterTestingT(t)
+
+ for i := -1; i <= 8; i++ {
+ backoff := RetryBackoff(i, 512*time.Millisecond)
+ Expect(backoff >= 0).To(BeTrue())
+ Expect(backoff <= 512*time.Millisecond).To(BeTrue())
+ }
+}
diff --git a/vendor/github.com/go-redis/redis/internal/log.go b/vendor/github.com/go-redis/redis/internal/log.go
new file mode 100644
index 000000000..fd14222ee
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/log.go
@@ -0,0 +1,15 @@
+package internal
+
+import (
+ "fmt"
+ "log"
+)
+
+var Logger *log.Logger
+
+func Logf(s string, args ...interface{}) {
+ if Logger == nil {
+ return
+ }
+ Logger.Output(2, fmt.Sprintf(s, args...))
+}
diff --git a/vendor/github.com/go-redis/redis/internal/once.go b/vendor/github.com/go-redis/redis/internal/once.go
new file mode 100644
index 000000000..64f46272a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/once.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// A Once will perform a successful action exactly once.
+//
+// Unlike a sync.Once, this Once's func returns an error
+// and is re-armed on failure.
+type Once struct {
+ m sync.Mutex
+ done uint32
+}
+
+// Do calls the function f if and only if Do has not been invoked
+// without error for this instance of Once. In other words, given
+// var once Once
+// if once.Do(f) is called multiple times, only the first call will
+// invoke f, even if f has a different value in each invocation unless
+// f returns an error. A new instance of Once is required for each
+// function to execute.
+//
+// Do is intended for initialization that must be run exactly once. Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+// err := config.once.Do(func() error { return config.init(filename) })
+func (o *Once) Do(f func() error) error {
+ if atomic.LoadUint32(&o.done) == 1 {
+ return nil
+ }
+ // Slow-path.
+ o.m.Lock()
+ defer o.m.Unlock()
+ var err error
+ if o.done == 0 {
+ err = f()
+ if err == nil {
+ atomic.StoreUint32(&o.done, 1)
+ }
+ }
+ return err
+}
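
Not part of the vendored file — a small sketch of the re-arming behavior described above (connect is a hypothetical func() error): a failing Do leaves the Once unset, so a later Do retries; only a nil error latches it.

    var once internal.Once
    if err := once.Do(connect); err != nil {
        // connect failed; the next once.Do(connect) will run it again
    }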
diff --git a/vendor/github.com/go-redis/redis/internal/pool/bench_test.go b/vendor/github.com/go-redis/redis/internal/pool/bench_test.go
new file mode 100644
index 000000000..e0bb52446
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/pool/bench_test.go
@@ -0,0 +1,80 @@
+package pool_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis/internal/pool"
+)
+
+func benchmarkPoolGetPut(b *testing.B, poolSize int) {
+ connPool := pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: poolSize,
+ PoolTimeout: time.Second,
+ IdleTimeout: time.Hour,
+ IdleCheckFrequency: time.Hour,
+ })
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ cn, _, err := connPool.Get()
+ if err != nil {
+ b.Fatal(err)
+ }
+ if err = connPool.Put(cn); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkPoolGetPut10Conns(b *testing.B) {
+ benchmarkPoolGetPut(b, 10)
+}
+
+func BenchmarkPoolGetPut100Conns(b *testing.B) {
+ benchmarkPoolGetPut(b, 100)
+}
+
+func BenchmarkPoolGetPut1000Conns(b *testing.B) {
+ benchmarkPoolGetPut(b, 1000)
+}
+
+func benchmarkPoolGetRemove(b *testing.B, poolSize int) {
+ connPool := pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: poolSize,
+ PoolTimeout: time.Second,
+ IdleTimeout: time.Hour,
+ IdleCheckFrequency: time.Hour,
+ })
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ cn, _, err := connPool.Get()
+ if err != nil {
+ b.Fatal(err)
+ }
+ if err := connPool.Remove(cn); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkPoolGetRemove10Conns(b *testing.B) {
+ benchmarkPoolGetRemove(b, 10)
+}
+
+func BenchmarkPoolGetRemove100Conns(b *testing.B) {
+ benchmarkPoolGetRemove(b, 100)
+}
+
+func BenchmarkPoolGetRemove1000Conns(b *testing.B) {
+ benchmarkPoolGetRemove(b, 1000)
+}
diff --git a/vendor/github.com/go-redis/redis/internal/pool/conn.go b/vendor/github.com/go-redis/redis/internal/pool/conn.go
new file mode 100644
index 000000000..8af51d9de
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/pool/conn.go
@@ -0,0 +1,78 @@
+package pool
+
+import (
+ "net"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/internal/proto"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+ netConn net.Conn
+
+ Rd *proto.Reader
+ Wb *proto.WriteBuffer
+
+ Inited bool
+ usedAt atomic.Value
+}
+
+func NewConn(netConn net.Conn) *Conn {
+ cn := &Conn{
+ netConn: netConn,
+ Wb: proto.NewWriteBuffer(),
+ }
+ cn.Rd = proto.NewReader(cn.netConn)
+ cn.SetUsedAt(time.Now())
+ return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+ return cn.usedAt.Load().(time.Time)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+ cn.usedAt.Store(tm)
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+ cn.netConn = netConn
+ cn.Rd.Reset(netConn)
+}
+
+func (cn *Conn) IsStale(timeout time.Duration) bool {
+ return timeout > 0 && time.Since(cn.UsedAt()) > timeout
+}
+
+func (cn *Conn) SetReadTimeout(timeout time.Duration) error {
+ now := time.Now()
+ cn.SetUsedAt(now)
+ if timeout > 0 {
+ return cn.netConn.SetReadDeadline(now.Add(timeout))
+ }
+ return cn.netConn.SetReadDeadline(noDeadline)
+}
+
+func (cn *Conn) SetWriteTimeout(timeout time.Duration) error {
+ now := time.Now()
+ cn.SetUsedAt(now)
+ if timeout > 0 {
+ return cn.netConn.SetWriteDeadline(now.Add(timeout))
+ }
+ return cn.netConn.SetWriteDeadline(noDeadline)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+ return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+ return cn.netConn.RemoteAddr()
+}
+
+func (cn *Conn) Close() error {
+ return cn.netConn.Close()
+}
diff --git a/vendor/github.com/go-redis/redis/internal/pool/main_test.go b/vendor/github.com/go-redis/redis/internal/pool/main_test.go
new file mode 100644
index 000000000..43afe3fa9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/pool/main_test.go
@@ -0,0 +1,35 @@
+package pool_test
+
+import (
+ "net"
+ "sync"
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "pool")
+}
+
+func perform(n int, cbs ...func(int)) {
+ var wg sync.WaitGroup
+ for _, cb := range cbs {
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ go func(cb func(int), i int) {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ cb(i)
+ }(cb, i)
+ }
+ }
+ wg.Wait()
+}
+
+func dummyDialer() (net.Conn, error) {
+ return &net.TCPConn{}, nil
+}
diff --git a/vendor/github.com/go-redis/redis/internal/pool/pool.go b/vendor/github.com/go-redis/redis/internal/pool/pool.go
new file mode 100644
index 000000000..a4e650847
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/pool/pool.go
@@ -0,0 +1,367 @@
+package pool
+
+import (
+ "errors"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+)
+
+var ErrClosed = errors.New("redis: client is closed")
+var ErrPoolTimeout = errors.New("redis: connection pool timeout")
+
+var timers = sync.Pool{
+ New: func() interface{} {
+ t := time.NewTimer(time.Hour)
+ t.Stop()
+ return t
+ },
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+ Requests uint32 // number of times a connection was requested from the pool
+ Hits uint32 // number of times a free connection was found in the pool
+ Timeouts uint32 // number of times a wait timeout occurred
+
+ TotalConns uint32 // the number of total connections in the pool
+ FreeConns uint32 // the number of free connections in the pool
+}
+
+type Pooler interface {
+ NewConn() (*Conn, error)
+ CloseConn(*Conn) error
+
+ Get() (*Conn, bool, error)
+ Put(*Conn) error
+ Remove(*Conn) error
+
+ Len() int
+ FreeLen() int
+ Stats() *Stats
+
+ Close() error
+}
+
+type Options struct {
+ Dialer func() (net.Conn, error)
+ OnClose func(*Conn) error
+
+ PoolSize int
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+type ConnPool struct {
+ opt *Options
+
+ dialErrorsNum uint32 // atomic
+ _lastDialError atomic.Value
+
+ queue chan struct{}
+
+ connsMu sync.Mutex
+ conns []*Conn
+
+ freeConnsMu sync.Mutex
+ freeConns []*Conn
+
+ stats Stats
+
+ _closed uint32 // atomic
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+ p := &ConnPool{
+ opt: opt,
+
+ queue: make(chan struct{}, opt.PoolSize),
+ conns: make([]*Conn, 0, opt.PoolSize),
+ freeConns: make([]*Conn, 0, opt.PoolSize),
+ }
+ if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
+ go p.reaper(opt.IdleCheckFrequency)
+ }
+ return p
+}
+
+func (p *ConnPool) NewConn() (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+ return nil, p.lastDialError()
+ }
+
+ netConn, err := p.opt.Dialer()
+ if err != nil {
+ p.setLastDialError(err)
+ if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+ go p.tryDial()
+ }
+ return nil, err
+ }
+
+ cn := NewConn(netConn)
+ p.connsMu.Lock()
+ p.conns = append(p.conns, cn)
+ p.connsMu.Unlock()
+
+ return cn, nil
+}
+
+func (p *ConnPool) tryDial() {
+ for {
+ conn, err := p.opt.Dialer()
+ if err != nil {
+ p.setLastDialError(err)
+ time.Sleep(time.Second)
+ continue
+ }
+
+ atomic.StoreUint32(&p.dialErrorsNum, 0)
+ _ = conn.Close()
+ return
+ }
+}
+
+func (p *ConnPool) setLastDialError(err error) {
+ p._lastDialError.Store(err)
+}
+
+func (p *ConnPool) lastDialError() error {
+ return p._lastDialError.Load().(error)
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get() (*Conn, bool, error) {
+ if p.closed() {
+ return nil, false, ErrClosed
+ }
+
+ atomic.AddUint32(&p.stats.Requests, 1)
+
+ select {
+ case p.queue <- struct{}{}:
+ default:
+ timer := timers.Get().(*time.Timer)
+ timer.Reset(p.opt.PoolTimeout)
+
+ select {
+ case p.queue <- struct{}{}:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ case <-timer.C:
+ timers.Put(timer)
+ atomic.AddUint32(&p.stats.Timeouts, 1)
+ return nil, false, ErrPoolTimeout
+ }
+ }
+
+ for {
+ p.freeConnsMu.Lock()
+ cn := p.popFree()
+ p.freeConnsMu.Unlock()
+
+ if cn == nil {
+ break
+ }
+
+ if cn.IsStale(p.opt.IdleTimeout) {
+ p.CloseConn(cn)
+ continue
+ }
+
+ atomic.AddUint32(&p.stats.Hits, 1)
+ return cn, false, nil
+ }
+
+ newcn, err := p.NewConn()
+ if err != nil {
+ <-p.queue
+ return nil, false, err
+ }
+
+ return newcn, true, nil
+}
+
+func (p *ConnPool) popFree() *Conn {
+ if len(p.freeConns) == 0 {
+ return nil
+ }
+
+ idx := len(p.freeConns) - 1
+ cn := p.freeConns[idx]
+ p.freeConns = p.freeConns[:idx]
+ return cn
+}
+
+func (p *ConnPool) Put(cn *Conn) error {
+ if data := cn.Rd.PeekBuffered(); data != nil {
+ internal.Logf("connection has unread data: %q", data)
+ return p.Remove(cn)
+ }
+ p.freeConnsMu.Lock()
+ p.freeConns = append(p.freeConns, cn)
+ p.freeConnsMu.Unlock()
+ <-p.queue
+ return nil
+}
+
+func (p *ConnPool) Remove(cn *Conn) error {
+ _ = p.CloseConn(cn)
+ <-p.queue
+ return nil
+}
+
+func (p *ConnPool) CloseConn(cn *Conn) error {
+ p.connsMu.Lock()
+ for i, c := range p.conns {
+ if c == cn {
+ p.conns = append(p.conns[:i], p.conns[i+1:]...)
+ break
+ }
+ }
+ p.connsMu.Unlock()
+
+ return p.closeConn(cn)
+}
+
+func (p *ConnPool) closeConn(cn *Conn) error {
+ if p.opt.OnClose != nil {
+ _ = p.opt.OnClose(cn)
+ }
+ return cn.Close()
+}
+
+// Len returns total number of connections.
+func (p *ConnPool) Len() int {
+ p.connsMu.Lock()
+ l := len(p.conns)
+ p.connsMu.Unlock()
+ return l
+}
+
+// FreeLen returns number of free connections.
+func (p *ConnPool) FreeLen() int {
+ p.freeConnsMu.Lock()
+ l := len(p.freeConns)
+ p.freeConnsMu.Unlock()
+ return l
+}
+
+func (p *ConnPool) Stats() *Stats {
+ return &Stats{
+ Requests: atomic.LoadUint32(&p.stats.Requests),
+ Hits: atomic.LoadUint32(&p.stats.Hits),
+ Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+ TotalConns: uint32(p.Len()),
+ FreeConns: uint32(p.FreeLen()),
+ }
+}
+
+func (p *ConnPool) closed() bool {
+ return atomic.LoadUint32(&p._closed) == 1
+}
+
+func (p *ConnPool) Filter(fn func(*Conn) bool) error {
+ var firstErr error
+ p.connsMu.Lock()
+ for _, cn := range p.conns {
+ if fn(cn) {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ }
+ p.connsMu.Unlock()
+ return firstErr
+}
+
+func (p *ConnPool) Close() error {
+ if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
+ return ErrClosed
+ }
+
+ var firstErr error
+ p.connsMu.Lock()
+ for _, cn := range p.conns {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ p.conns = nil
+ p.connsMu.Unlock()
+
+ p.freeConnsMu.Lock()
+ p.freeConns = nil
+ p.freeConnsMu.Unlock()
+
+ return firstErr
+}
+
+func (p *ConnPool) reapStaleConn() bool {
+ if len(p.freeConns) == 0 {
+ return false
+ }
+
+ cn := p.freeConns[0]
+ if !cn.IsStale(p.opt.IdleTimeout) {
+ return false
+ }
+
+ p.CloseConn(cn)
+ p.freeConns = append(p.freeConns[:0], p.freeConns[1:]...)
+
+ return true
+}
+
+func (p *ConnPool) ReapStaleConns() (int, error) {
+ var n int
+ for {
+ p.queue <- struct{}{}
+ p.freeConnsMu.Lock()
+
+ reaped := p.reapStaleConn()
+
+ p.freeConnsMu.Unlock()
+ <-p.queue
+
+ if reaped {
+ n++
+ } else {
+ break
+ }
+ }
+ return n, nil
+}
+
+func (p *ConnPool) reaper(frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ if p.closed() {
+ break
+ }
+ n, err := p.ReapStaleConns()
+ if err != nil {
+ internal.Logf("ReapStaleConns failed: %s", err)
+ continue
+ }
+ s := p.Stats()
+ internal.Logf(
+ "reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
+ n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
+ )
+ }
+}
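
Not part of the vendored file — a usage sketch assembled from the Options and Pooler definitions above (the dial address is illustrative). Get blocks on the internal queue once PoolSize connections are checked out, and the bool result reports whether the connection was newly dialed:

    connPool := pool.NewConnPool(&pool.Options{
        Dialer:             func() (net.Conn, error) { return net.Dial("tcp", "localhost:6379") },
        PoolSize:           10,
        PoolTimeout:        time.Second,
        IdleTimeout:        5 * time.Minute,
        IdleCheckFrequency: time.Minute,
    })
    cn, isNew, err := connPool.Get()
    if err != nil {
        // ErrClosed, ErrPoolTimeout, or a dial error
        return err
    }
    _ = isNew
    defer connPool.Put(cn) // or connPool.Remove(cn) if the connection went bad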
diff --git a/vendor/github.com/go-redis/redis/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/internal/pool/pool_single.go
new file mode 100644
index 000000000..ff91279b3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/pool/pool_single.go
@@ -0,0 +1,55 @@
+package pool
+
+type SingleConnPool struct {
+ cn *Conn
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(cn *Conn) *SingleConnPool {
+ return &SingleConnPool{
+ cn: cn,
+ }
+}
+
+func (p *SingleConnPool) NewConn() (*Conn, error) {
+ panic("not implemented")
+}
+
+func (p *SingleConnPool) CloseConn(*Conn) error {
+ panic("not implemented")
+}
+
+func (p *SingleConnPool) Get() (*Conn, bool, error) {
+ return p.cn, false, nil
+}
+
+func (p *SingleConnPool) Put(cn *Conn) error {
+ if p.cn != cn {
+ panic("p.cn != cn")
+ }
+ return nil
+}
+
+func (p *SingleConnPool) Remove(cn *Conn) error {
+ if p.cn != cn {
+ panic("p.cn != cn")
+ }
+ return nil
+}
+
+func (p *SingleConnPool) Len() int {
+ return 1
+}
+
+func (p *SingleConnPool) FreeLen() int {
+ return 0
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+ return nil
+}
+
+func (p *SingleConnPool) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go
new file mode 100644
index 000000000..17f163858
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go
@@ -0,0 +1,123 @@
+package pool
+
+import "sync"
+
+type StickyConnPool struct {
+ pool *ConnPool
+ reusable bool
+
+ cn *Conn
+ closed bool
+ mu sync.Mutex
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool {
+ return &StickyConnPool{
+ pool: pool,
+ reusable: reusable,
+ }
+}
+
+func (p *StickyConnPool) NewConn() (*Conn, error) {
+ panic("not implemented")
+}
+
+func (p *StickyConnPool) CloseConn(*Conn) error {
+ panic("not implemented")
+}
+
+func (p *StickyConnPool) Get() (*Conn, bool, error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.closed {
+ return nil, false, ErrClosed
+ }
+ if p.cn != nil {
+ return p.cn, false, nil
+ }
+
+ cn, _, err := p.pool.Get()
+ if err != nil {
+ return nil, false, err
+ }
+ p.cn = cn
+ return cn, true, nil
+}
+
+func (p *StickyConnPool) putUpstream() (err error) {
+ err = p.pool.Put(p.cn)
+ p.cn = nil
+ return err
+}
+
+func (p *StickyConnPool) Put(cn *Conn) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.closed {
+ return ErrClosed
+ }
+ return nil
+}
+
+func (p *StickyConnPool) removeUpstream() error {
+ err := p.pool.Remove(p.cn)
+ p.cn = nil
+ return err
+}
+
+func (p *StickyConnPool) Remove(cn *Conn) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.closed {
+ return nil
+ }
+ return p.removeUpstream()
+}
+
+func (p *StickyConnPool) Len() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.cn == nil {
+ return 0
+ }
+ return 1
+}
+
+func (p *StickyConnPool) FreeLen() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.cn == nil {
+ return 1
+ }
+ return 0
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+ return nil
+}
+
+func (p *StickyConnPool) Close() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.closed {
+ return ErrClosed
+ }
+ p.closed = true
+ var err error
+ if p.cn != nil {
+ if p.reusable {
+ err = p.putUpstream()
+ } else {
+ err = p.removeUpstream()
+ }
+ }
+ return err
+}
diff --git a/vendor/github.com/go-redis/redis/internal/pool/pool_test.go b/vendor/github.com/go-redis/redis/internal/pool/pool_test.go
new file mode 100644
index 000000000..68c9a1bef
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/pool/pool_test.go
@@ -0,0 +1,241 @@
+package pool_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis/internal/pool"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("ConnPool", func() {
+ var connPool *pool.ConnPool
+
+ BeforeEach(func() {
+ connPool = pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ PoolTimeout: time.Hour,
+ IdleTimeout: time.Millisecond,
+ IdleCheckFrequency: time.Millisecond,
+ })
+ })
+
+ AfterEach(func() {
+ connPool.Close()
+ })
+
+ It("should unblock client when conn is removed", func() {
+ // Reserve one connection.
+ cn, _, err := connPool.Get()
+ Expect(err).NotTo(HaveOccurred())
+
+ // Reserve all other connections.
+ var cns []*pool.Conn
+ for i := 0; i < 9; i++ {
+ cn, _, err := connPool.Get()
+ Expect(err).NotTo(HaveOccurred())
+ cns = append(cns, cn)
+ }
+
+ started := make(chan bool, 1)
+ done := make(chan bool, 1)
+ go func() {
+ defer GinkgoRecover()
+
+ started <- true
+ _, _, err := connPool.Get()
+ Expect(err).NotTo(HaveOccurred())
+ done <- true
+
+ err = connPool.Put(cn)
+ Expect(err).NotTo(HaveOccurred())
+ }()
+ <-started
+
+ // Check that Get is blocked.
+ select {
+ case <-done:
+ Fail("Get is not blocked")
+ default:
+ // ok
+ }
+
+ err = connPool.Remove(cn)
+ Expect(err).NotTo(HaveOccurred())
+
+ // Check that Get is unblocked.
+ select {
+ case <-done:
+ // ok
+ case <-time.After(time.Second):
+ Fail("Get is not unblocked")
+ }
+
+ for _, cn := range cns {
+ err = connPool.Put(cn)
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+})
+
+var _ = Describe("conns reaper", func() {
+ const idleTimeout = time.Minute
+
+ var connPool *pool.ConnPool
+ var conns, idleConns, closedConns []*pool.Conn
+
+ BeforeEach(func() {
+ conns = nil
+ closedConns = nil
+
+ connPool = pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ PoolTimeout: time.Second,
+ IdleTimeout: idleTimeout,
+ IdleCheckFrequency: time.Hour,
+
+ OnClose: func(cn *pool.Conn) error {
+ closedConns = append(closedConns, cn)
+ return nil
+ },
+ })
+
+ // add stale connections
+ idleConns = nil
+ for i := 0; i < 3; i++ {
+ cn, _, err := connPool.Get()
+ Expect(err).NotTo(HaveOccurred())
+ cn.SetUsedAt(time.Now().Add(-2 * idleTimeout))
+ conns = append(conns, cn)
+ idleConns = append(idleConns, cn)
+ }
+
+ // add fresh connections
+ for i := 0; i < 3; i++ {
+ cn, _, err := connPool.Get()
+ Expect(err).NotTo(HaveOccurred())
+ conns = append(conns, cn)
+ }
+
+ for _, cn := range conns {
+ Expect(connPool.Put(cn)).NotTo(HaveOccurred())
+ }
+
+ Expect(connPool.Len()).To(Equal(6))
+ Expect(connPool.FreeLen()).To(Equal(6))
+
+ n, err := connPool.ReapStaleConns()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(3))
+ })
+
+ AfterEach(func() {
+ _ = connPool.Close()
+ Expect(connPool.Len()).To(Equal(0))
+ Expect(connPool.FreeLen()).To(Equal(0))
+ Expect(len(closedConns)).To(Equal(len(conns)))
+ Expect(closedConns).To(ConsistOf(conns))
+ })
+
+ It("reaps stale connections", func() {
+ Expect(connPool.Len()).To(Equal(3))
+ Expect(connPool.FreeLen()).To(Equal(3))
+ })
+
+ It("does not reap fresh connections", func() {
+ n, err := connPool.ReapStaleConns()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(0))
+ })
+
+ It("stale connections are closed", func() {
+ Expect(len(closedConns)).To(Equal(len(idleConns)))
+ Expect(closedConns).To(ConsistOf(idleConns))
+ })
+
+ It("pool is functional", func() {
+ for j := 0; j < 3; j++ {
+ var freeCns []*pool.Conn
+ for i := 0; i < 3; i++ {
+ cn, _, err := connPool.Get()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cn).NotTo(BeNil())
+ freeCns = append(freeCns, cn)
+ }
+
+ Expect(connPool.Len()).To(Equal(3))
+ Expect(connPool.FreeLen()).To(Equal(0))
+
+ cn, _, err := connPool.Get()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cn).NotTo(BeNil())
+ conns = append(conns, cn)
+
+ Expect(connPool.Len()).To(Equal(4))
+ Expect(connPool.FreeLen()).To(Equal(0))
+
+ err = connPool.Remove(cn)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(connPool.Len()).To(Equal(3))
+ Expect(connPool.FreeLen()).To(Equal(0))
+
+ for _, cn := range freeCns {
+ err := connPool.Put(cn)
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ Expect(connPool.Len()).To(Equal(3))
+ Expect(connPool.FreeLen()).To(Equal(3))
+ }
+ })
+})
+
+var _ = Describe("race", func() {
+ var connPool *pool.ConnPool
+ var C, N int
+
+ BeforeEach(func() {
+ C, N = 10, 1000
+ if testing.Short() {
+ C = 4
+ N = 100
+ }
+ })
+
+ AfterEach(func() {
+ connPool.Close()
+ })
+
+ It("does not happen on Get, Put, and Remove", func() {
+ connPool = pool.NewConnPool(&pool.Options{
+ Dialer: dummyDialer,
+ PoolSize: 10,
+ PoolTimeout: time.Minute,
+ IdleTimeout: time.Millisecond,
+ IdleCheckFrequency: time.Millisecond,
+ })
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ cn, _, err := connPool.Get()
+ Expect(err).NotTo(HaveOccurred())
+ if err == nil {
+ Expect(connPool.Put(cn)).NotTo(HaveOccurred())
+ }
+ }
+ }, func(id int) {
+ for i := 0; i < N; i++ {
+ cn, _, err := connPool.Get()
+ Expect(err).NotTo(HaveOccurred())
+ if err == nil {
+ Expect(connPool.Remove(cn)).NotTo(HaveOccurred())
+ }
+ }
+ })
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/internal/proto/proto_test.go b/vendor/github.com/go-redis/redis/internal/proto/proto_test.go
new file mode 100644
index 000000000..c9a820eb1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/proto/proto_test.go
@@ -0,0 +1,13 @@
+package proto_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "proto")
+}
diff --git a/vendor/github.com/go-redis/redis/internal/proto/reader.go b/vendor/github.com/go-redis/redis/internal/proto/reader.go
new file mode 100644
index 000000000..2159cf639
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/proto/reader.go
@@ -0,0 +1,334 @@
+package proto
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+
+ "github.com/go-redis/redis/internal"
+)
+
+const bytesAllocLimit = 1024 * 1024 // 1mb
+
+const (
+ ErrorReply = '-'
+ StatusReply = '+'
+ IntReply = ':'
+ StringReply = '$'
+ ArrayReply = '*'
+)
+
+type MultiBulkParse func(*Reader, int64) (interface{}, error)
+
+type Reader struct {
+ src *bufio.Reader
+ buf []byte
+}
+
+func NewReader(rd io.Reader) *Reader {
+ return &Reader{
+ src: bufio.NewReader(rd),
+ buf: make([]byte, 4096),
+ }
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+ r.src.Reset(rd)
+}
+
+func (p *Reader) PeekBuffered() []byte {
+ if n := p.src.Buffered(); n != 0 {
+ b, _ := p.src.Peek(n)
+ return b
+ }
+ return nil
+}
+
+func (p *Reader) ReadN(n int) ([]byte, error) {
+ b, err := readN(p.src, p.buf, n)
+ if err != nil {
+ return nil, err
+ }
+ p.buf = b
+ return b, nil
+}
+
+func (p *Reader) ReadLine() ([]byte, error) {
+ line, isPrefix, err := p.src.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ if isPrefix {
+ return nil, bufio.ErrBufferFull
+ }
+ if len(line) == 0 {
+ return nil, internal.RedisError("redis: reply is empty")
+ }
+ if isNilReply(line) {
+ return nil, internal.Nil
+ }
+ return line, nil
+}
+
+func (p *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
+ line, err := p.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StatusReply:
+ return parseStatusValue(line), nil
+ case IntReply:
+ return parseInt(line[1:], 10, 64)
+ case StringReply:
+ return p.readTmpBytesValue(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ return m(p, n)
+ }
+ return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (p *Reader) ReadIntReply() (int64, error) {
+ line, err := p.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case IntReply:
+ return parseInt(line[1:], 10, 64)
+ default:
+ return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+ }
+}
+
+func (p *Reader) ReadTmpBytesReply() ([]byte, error) {
+ line, err := p.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StringReply:
+ return p.readTmpBytesValue(line)
+ case StatusReply:
+ return parseStatusValue(line), nil
+ default:
+ return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadBytesReply() ([]byte, error) {
+ b, err := r.ReadTmpBytesReply()
+ if err != nil {
+ return nil, err
+ }
+ cp := make([]byte, len(b))
+ copy(cp, b)
+ return cp, nil
+}
+
+func (p *Reader) ReadStringReply() (string, error) {
+ b, err := p.ReadTmpBytesReply()
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+func (p *Reader) ReadFloatReply() (float64, error) {
+ b, err := p.ReadTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return parseFloat(b, 64)
+}
+
+func (p *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
+ line, err := p.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ return m(p, n)
+ default:
+ return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (p *Reader) ReadArrayLen() (int64, error) {
+ line, err := p.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case ArrayReply:
+ return parseArrayLen(line)
+ default:
+ return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (p *Reader) ReadScanReply() ([]string, uint64, error) {
+ n, err := p.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != 2 {
+ return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
+ }
+
+ cursor, err := p.ReadUint()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ n, err = p.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ keys := make([]string, n)
+ for i := int64(0); i < n; i++ {
+ key, err := p.ReadStringReply()
+ if err != nil {
+ return nil, 0, err
+ }
+ keys[i] = key
+ }
+
+ return keys, cursor, err
+}
+
+func (p *Reader) readTmpBytesValue(line []byte) ([]byte, error) {
+ if isNilReply(line) {
+ return nil, internal.Nil
+ }
+
+ replyLen, err := strconv.Atoi(string(line[1:]))
+ if err != nil {
+ return nil, err
+ }
+
+ b, err := p.ReadN(replyLen + 2)
+ if err != nil {
+ return nil, err
+ }
+ return b[:replyLen], nil
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+ b, err := r.ReadTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return parseInt(b, 10, 64)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+ b, err := r.ReadTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return parseUint(b, 10, 64)
+}
+
+// --------------------------------------------------------------------
+
+func readN(r io.Reader, b []byte, n int) ([]byte, error) {
+ if n == 0 && b == nil {
+ return make([]byte, 0), nil
+ }
+
+ if cap(b) >= n {
+ b = b[:n]
+ _, err := io.ReadFull(r, b)
+ return b, err
+ }
+ b = b[:cap(b)]
+
+ pos := 0
+ for pos < n {
+ diff := n - len(b)
+ if diff > bytesAllocLimit {
+ diff = bytesAllocLimit
+ }
+ b = append(b, make([]byte, diff)...)
+
+ nn, err := io.ReadFull(r, b[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += nn
+ }
+
+ return b, nil
+}
+
+func formatInt(n int64) string {
+ return strconv.FormatInt(n, 10)
+}
+
+func formatUint(u uint64) string {
+ return strconv.FormatUint(u, 10)
+}
+
+func formatFloat(f float64) string {
+ return strconv.FormatFloat(f, 'f', -1, 64)
+}
+
+func isNilReply(b []byte) bool {
+ return len(b) == 3 &&
+ (b[0] == StringReply || b[0] == ArrayReply) &&
+ b[1] == '-' && b[2] == '1'
+}
+
+func ParseErrorReply(line []byte) error {
+ return internal.RedisError(string(line[1:]))
+}
+
+func parseStatusValue(line []byte) []byte {
+ return line[1:]
+}
+
+func parseArrayLen(line []byte) (int64, error) {
+ if isNilReply(line) {
+ return 0, internal.Nil
+ }
+ return parseInt(line[1:], 10, 64)
+}
+
+func atoi(b []byte) (int, error) {
+ return strconv.Atoi(internal.BytesToString(b))
+}
+
+func parseInt(b []byte, base int, bitSize int) (int64, error) {
+ return strconv.ParseInt(internal.BytesToString(b), base, bitSize)
+}
+
+func parseUint(b []byte, base int, bitSize int) (uint64, error) {
+ return strconv.ParseUint(internal.BytesToString(b), base, bitSize)
+}
+
+func parseFloat(b []byte, bitSize int) (float64, error) {
+ return strconv.ParseFloat(internal.BytesToString(b), bitSize)
+}
diff --git a/vendor/github.com/go-redis/redis/internal/proto/reader_test.go b/vendor/github.com/go-redis/redis/internal/proto/reader_test.go
new file mode 100644
index 000000000..8d2d71be9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/proto/reader_test.go
@@ -0,0 +1,87 @@
+package proto_test
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+
+ "github.com/go-redis/redis/internal/proto"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Reader", func() {
+
+ It("should read n bytes", func() {
+ data, err := proto.NewReader(strings.NewReader("ABCDEFGHIJKLMNO")).ReadN(10)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(data)).To(Equal(10))
+ Expect(string(data)).To(Equal("ABCDEFGHIJ"))
+
+ data, err = proto.NewReader(strings.NewReader(strings.Repeat("x", 8192))).ReadN(6000)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(data)).To(Equal(6000))
+ })
+
+ It("should read lines", func() {
+ p := proto.NewReader(strings.NewReader("$5\r\nhello\r\n"))
+
+ data, err := p.ReadLine()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(string(data)).To(Equal("$5"))
+
+ data, err = p.ReadLine()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(string(data)).To(Equal("hello"))
+ })
+
+})
+
+func BenchmarkReader_ParseReply_Status(b *testing.B) {
+ benchmarkParseReply(b, "+OK\r\n", nil, false)
+}
+
+func BenchmarkReader_ParseReply_Int(b *testing.B) {
+ benchmarkParseReply(b, ":1\r\n", nil, false)
+}
+
+func BenchmarkReader_ParseReply_Error(b *testing.B) {
+ benchmarkParseReply(b, "-Error message\r\n", nil, true)
+}
+
+func BenchmarkReader_ParseReply_String(b *testing.B) {
+ benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false)
+}
+
+func BenchmarkReader_ParseReply_Slice(b *testing.B) {
+ benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", multiBulkParse, false)
+}
+
+func benchmarkParseReply(b *testing.B, reply string, m proto.MultiBulkParse, wanterr bool) {
+ buf := new(bytes.Buffer)
+ for i := 0; i < b.N; i++ {
+ buf.WriteString(reply)
+ }
+ p := proto.NewReader(buf)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ _, err := p.ReadReply(m)
+ if !wanterr && err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func multiBulkParse(p *proto.Reader, n int64) (interface{}, error) {
+ vv := make([]interface{}, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := p.ReadReply(multiBulkParse)
+ if err != nil {
+ return nil, err
+ }
+ vv = append(vv, v)
+ }
+ return vv, nil
+}
diff --git a/vendor/github.com/go-redis/redis/internal/proto/scan.go b/vendor/github.com/go-redis/redis/internal/proto/scan.go
new file mode 100644
index 000000000..3ab40b94f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/proto/scan.go
@@ -0,0 +1,131 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+
+ "github.com/go-redis/redis/internal"
+)
+
+func Scan(b []byte, v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return internal.RedisError("redis: Scan(nil)")
+ case *string:
+ *v = internal.BytesToString(b)
+ return nil
+ case *[]byte:
+ *v = b
+ return nil
+ case *int:
+ var err error
+ *v, err = atoi(b)
+ return err
+ case *int8:
+ n, err := parseInt(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = int8(n)
+ return nil
+ case *int16:
+ n, err := parseInt(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = int16(n)
+ return nil
+ case *int32:
+ n, err := parseInt(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = int32(n)
+ return nil
+ case *int64:
+ n, err := parseInt(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *uint:
+ n, err := parseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = uint(n)
+ return nil
+ case *uint8:
+ n, err := parseUint(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = uint8(n)
+ return nil
+ case *uint16:
+ n, err := parseUint(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = uint16(n)
+ return nil
+ case *uint32:
+ n, err := parseUint(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = uint32(n)
+ return nil
+ case *uint64:
+ n, err := parseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *float32:
+ n, err := parseFloat(b, 32)
+ if err != nil {
+ return err
+ }
+ *v = float32(n)
+ return err
+ case *float64:
+ var err error
+ *v, err = parseFloat(b, 64)
+ return err
+ case *bool:
+ *v = len(b) == 1 && b[0] == '1'
+ return nil
+ case encoding.BinaryUnmarshaler:
+ return v.UnmarshalBinary(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+ }
+}
+
+func ScanSlice(data []string, slice interface{}) error {
+ v := reflect.ValueOf(slice)
+ if !v.IsValid() {
+ return fmt.Errorf("redis: ScanSlice(nil)")
+ }
+ if v.Kind() != reflect.Ptr {
+ return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Slice {
+ return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+ }
+
+ for i, s := range data {
+ elem := internal.SliceNextElem(v)
+ if err := Scan(internal.StringToBytes(s), elem.Addr().Interface()); err != nil {
+ return fmt.Errorf("redis: ScanSlice(index=%d value=%q) failed: %s", i, s, err)
+ }
+ }
+
+ return nil
+}
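
Not part of the vendored file — a short sketch of Scan and ScanSlice from a caller's perspective (Item is hypothetical and implements encoding.BinaryUnmarshaler, which the fallback case above requires):

    var n int64
    _ = proto.Scan([]byte("42"), &n) // n == 42

    var items []Item
    _ = proto.ScanSlice([]string{`{"ID":1}`, `{"ID":2}`}, &items)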
diff --git a/vendor/github.com/go-redis/redis/internal/proto/scan_test.go b/vendor/github.com/go-redis/redis/internal/proto/scan_test.go
new file mode 100644
index 000000000..fadcd0561
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/proto/scan_test.go
@@ -0,0 +1,48 @@
+package proto
+
+import (
+ "encoding/json"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+type testScanSliceStruct struct {
+ ID int
+ Name string
+}
+
+func (s *testScanSliceStruct) MarshalBinary() ([]byte, error) {
+ return json.Marshal(s)
+}
+
+func (s *testScanSliceStruct) UnmarshalBinary(b []byte) error {
+ return json.Unmarshal(b, s)
+}
+
+var _ = Describe("ScanSlice", func() {
+ data := []string{
+ `{"ID":-1,"Name":"Back Yu"}`,
+ `{"ID":1,"Name":"szyhf"}`,
+ }
+
+ It("[]testScanSliceStruct", func() {
+ var slice []testScanSliceStruct
+ err := ScanSlice(data, &slice)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(slice).To(Equal([]testScanSliceStruct{
+ {-1, "Back Yu"},
+ {1, "szyhf"},
+ }))
+ })
+
+ It("var testContainer []*testScanSliceStruct", func() {
+ var slice []*testScanSliceStruct
+ err := ScanSlice(data, &slice)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(slice).To(Equal([]*testScanSliceStruct{
+ {-1, "Back Yu"},
+ {1, "szyhf"},
+ }))
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go b/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go
new file mode 100644
index 000000000..096b6d76a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go
@@ -0,0 +1,103 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "strconv"
+)
+
+type WriteBuffer struct {
+ b []byte
+}
+
+func NewWriteBuffer() *WriteBuffer {
+ return &WriteBuffer{
+ b: make([]byte, 0, 4096),
+ }
+}
+
+func (w *WriteBuffer) Len() int { return len(w.b) }
+func (w *WriteBuffer) Bytes() []byte { return w.b }
+func (w *WriteBuffer) Reset() { w.b = w.b[:0] }
+
+func (w *WriteBuffer) Append(args []interface{}) error {
+ w.b = append(w.b, ArrayReply)
+ w.b = strconv.AppendUint(w.b, uint64(len(args)), 10)
+ w.b = append(w.b, '\r', '\n')
+
+ for _, arg := range args {
+ if err := w.append(arg); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (w *WriteBuffer) append(val interface{}) error {
+ switch v := val.(type) {
+ case nil:
+ w.AppendString("")
+ case string:
+ w.AppendString(v)
+ case []byte:
+ w.AppendBytes(v)
+ case int:
+ w.AppendString(formatInt(int64(v)))
+ case int8:
+ w.AppendString(formatInt(int64(v)))
+ case int16:
+ w.AppendString(formatInt(int64(v)))
+ case int32:
+ w.AppendString(formatInt(int64(v)))
+ case int64:
+ w.AppendString(formatInt(v))
+ case uint:
+ w.AppendString(formatUint(uint64(v)))
+ case uint8:
+ w.AppendString(formatUint(uint64(v)))
+ case uint16:
+ w.AppendString(formatUint(uint64(v)))
+ case uint32:
+ w.AppendString(formatUint(uint64(v)))
+ case uint64:
+ w.AppendString(formatUint(v))
+ case float32:
+ w.AppendString(formatFloat(float64(v)))
+ case float64:
+ w.AppendString(formatFloat(v))
+ case bool:
+ if v {
+ w.AppendString("1")
+ } else {
+ w.AppendString("0")
+ }
+ default:
+ if bm, ok := val.(encoding.BinaryMarshaler); ok {
+ bb, err := bm.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ w.AppendBytes(bb)
+ } else {
+ return fmt.Errorf(
+ "redis: can't marshal %T (consider implementing encoding.BinaryMarshaler)", val)
+ }
+ }
+ return nil
+}
+
+func (w *WriteBuffer) AppendString(s string) {
+ w.b = append(w.b, StringReply)
+ w.b = strconv.AppendUint(w.b, uint64(len(s)), 10)
+ w.b = append(w.b, '\r', '\n')
+ w.b = append(w.b, s...)
+ w.b = append(w.b, '\r', '\n')
+}
+
+func (w *WriteBuffer) AppendBytes(p []byte) {
+ w.b = append(w.b, StringReply)
+ w.b = strconv.AppendUint(w.b, uint64(len(p)), 10)
+ w.b = append(w.b, '\r', '\n')
+ w.b = append(w.b, p...)
+ w.b = append(w.b, '\r', '\n')
+}
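
Not part of the vendored file — a sketch of the RESP bytes Append produces, derived from the code above: an array header followed by one bulk string per argument.

    buf := proto.NewWriteBuffer()
    _ = buf.Append([]interface{}{"SET", "key", "value"})
    // buf.Bytes() == []byte("*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n")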
diff --git a/vendor/github.com/go-redis/redis/internal/proto/write_buffer_test.go b/vendor/github.com/go-redis/redis/internal/proto/write_buffer_test.go
new file mode 100644
index 000000000..84799ff3b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/proto/write_buffer_test.go
@@ -0,0 +1,63 @@
+package proto_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis/internal/proto"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("WriteBuffer", func() {
+ var buf *proto.WriteBuffer
+
+ BeforeEach(func() {
+ buf = proto.NewWriteBuffer()
+ })
+
+ It("should reset", func() {
+ buf.AppendString("string")
+ Expect(buf.Len()).To(Equal(12))
+ buf.Reset()
+ Expect(buf.Len()).To(Equal(0))
+ })
+
+ It("should append args", func() {
+ err := buf.Append([]interface{}{
+ "string",
+ 12,
+ 34.56,
+ []byte{'b', 'y', 't', 'e', 's'},
+ true,
+ nil,
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(buf.Bytes()).To(Equal([]byte("*6\r\n" +
+ "$6\r\nstring\r\n" +
+ "$2\r\n12\r\n" +
+ "$5\r\n34.56\r\n" +
+ "$5\r\nbytes\r\n" +
+ "$1\r\n1\r\n" +
+ "$0\r\n" +
+ "\r\n")))
+ })
+
+ It("should append marshalable args", func() {
+ err := buf.Append([]interface{}{time.Unix(1414141414, 0)})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(buf.Len()).To(Equal(26))
+ })
+
+})
+
+func BenchmarkWriteBuffer_Append(b *testing.B) {
+ buf := proto.NewWriteBuffer()
+ args := []interface{}{"hello", "world", "foo", "bar"}
+
+ for i := 0; i < b.N; i++ {
+ buf.Append(args)
+ buf.Reset()
+ }
+}
diff --git a/vendor/github.com/go-redis/redis/internal/safe.go b/vendor/github.com/go-redis/redis/internal/safe.go
new file mode 100644
index 000000000..870fe541f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/safe.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package internal
+
+func BytesToString(b []byte) string {
+ return string(b)
+}
+
+func StringToBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/internal/unsafe.go b/vendor/github.com/go-redis/redis/internal/unsafe.go
new file mode 100644
index 000000000..c18b25c17
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/unsafe.go
@@ -0,0 +1,27 @@
+// +build !appengine
+
+package internal
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func BytesToString(b []byte) string {
+ bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ strHeader := reflect.StringHeader{
+ Data: bytesHeader.Data,
+ Len: bytesHeader.Len,
+ }
+ return *(*string)(unsafe.Pointer(&strHeader))
+}
+
+func StringToBytes(s string) []byte {
+ sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ bh := reflect.SliceHeader{
+ Data: sh.Data,
+ Len: sh.Len,
+ Cap: sh.Len,
+ }
+ return *(*[]byte)(unsafe.Pointer(&bh))
+}
diff --git a/vendor/github.com/go-redis/redis/internal/util.go b/vendor/github.com/go-redis/redis/internal/util.go
new file mode 100644
index 000000000..520596fd9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/internal/util.go
@@ -0,0 +1,47 @@
+package internal
+
+import "reflect"
+
+func ToLower(s string) string {
+ if isLower(s) {
+ return s
+ }
+
+ b := make([]byte, len(s))
+ for i := range b {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ b[i] = c
+ }
+ return BytesToString(b)
+}
+
+func isLower(s string) bool {
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
+func SliceNextElem(v reflect.Value) reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ return v.Index(v.Len() - 1)
+ }
+
+ elemType := v.Type().Elem()
+
+ if elemType.Kind() == reflect.Ptr {
+ elem := reflect.New(elemType.Elem())
+ v.Set(reflect.Append(v, elem))
+ return elem.Elem()
+ }
+
+ v.Set(reflect.Append(v, reflect.Zero(elemType)))
+ return v.Index(v.Len() - 1)
+}
diff --git a/vendor/github.com/go-redis/redis/iterator.go b/vendor/github.com/go-redis/redis/iterator.go
new file mode 100644
index 000000000..5d4bedfe5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/iterator.go
@@ -0,0 +1,73 @@
+package redis
+
+import "sync"
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+// It's safe for concurrent use by multiple goroutines.
+type ScanIterator struct {
+ mu sync.Mutex // protects cmd and pos
+ cmd *ScanCmd
+ pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+ it.mu.Lock()
+ err := it.cmd.Err()
+ it.mu.Unlock()
+ return err
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next() bool {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ // Return immediately on errors.
+ if it.cmd.Err() != nil {
+ return false
+ }
+
+ // Advance cursor, check if we are still within range.
+ if it.pos < len(it.cmd.page) {
+ it.pos++
+ return true
+ }
+
+ for {
+ // Return if there is no more data to fetch.
+ if it.cmd.cursor == 0 {
+ return false
+ }
+
+ // Fetch next page.
+ if it.cmd._args[0] == "scan" {
+ it.cmd._args[1] = it.cmd.cursor
+ } else {
+ it.cmd._args[2] = it.cmd.cursor
+ }
+
+ err := it.cmd.process(it.cmd)
+ if err != nil {
+ return false
+ }
+
+ it.pos = 1
+
+ // Redis can occasionally return an empty page.
+ if len(it.cmd.page) > 0 {
+ return true
+ }
+ }
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+ var v string
+ it.mu.Lock()
+ if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+ v = it.cmd.page[it.pos-1]
+ }
+ it.mu.Unlock()
+ return v
+}
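
Not part of the vendored file — a usage sketch matching the tests that follow (process is a placeholder): Next fetches further SCAN pages lazily, and Err should be checked once iteration stops.

    iter := client.Scan(0, "prefix:*", 100).Iterator()
    for iter.Next() {
        process(iter.Val())
    }
    if err := iter.Err(); err != nil {
        // handle the scan error
    }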
diff --git a/vendor/github.com/go-redis/redis/iterator_test.go b/vendor/github.com/go-redis/redis/iterator_test.go
new file mode 100644
index 000000000..a2e623813
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/iterator_test.go
@@ -0,0 +1,136 @@
+package redis_test
+
+import (
+ "fmt"
+
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("ScanIterator", func() {
+ var client *redis.Client
+
+ var seed = func(n int) error {
+ pipe := client.Pipeline()
+ for i := 1; i <= n; i++ {
+ pipe.Set(fmt.Sprintf("K%02d", i), "x", 0).Err()
+ }
+ _, err := pipe.Exec()
+ return err
+ }
+
+ var extraSeed = func(n int, m int) error {
+ pipe := client.Pipeline()
+ for i := 1; i <= m; i++ {
+ pipe.Set(fmt.Sprintf("A%02d", i), "x", 0).Err()
+ }
+ for i := 1; i <= n; i++ {
+ pipe.Set(fmt.Sprintf("K%02d", i), "x", 0).Err()
+ }
+ _, err := pipe.Exec()
+ return err
+ }
+
+ var hashKey = "K_HASHTEST"
+ var hashSeed = func(n int) error {
+ pipe := client.Pipeline()
+ for i := 1; i <= n; i++ {
+ pipe.HSet(hashKey, fmt.Sprintf("K%02d", i), "x").Err()
+ }
+ _, err := pipe.Exec()
+ return err
+ }
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should scan across empty DBs", func() {
+ iter := client.Scan(0, "", 10).Iterator()
+ Expect(iter.Next()).To(BeFalse())
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ })
+
+ It("should scan across one page", func() {
+ Expect(seed(7)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(0, "", 0).Iterator()
+ for iter.Next() {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(ConsistOf([]string{"K01", "K02", "K03", "K04", "K05", "K06", "K07"}))
+ })
+
+ It("should scan across multiple pages", func() {
+ Expect(seed(71)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(0, "", 10).Iterator()
+ for iter.Next() {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(71))
+ Expect(vals).To(ContainElement("K01"))
+ Expect(vals).To(ContainElement("K71"))
+ })
+
+ It("should hscan across multiple pages", func() {
+ Expect(hashSeed(71)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.HScan(hashKey, 0, "", 10).Iterator()
+ for iter.Next() {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(71 * 2))
+ Expect(vals).To(ContainElement("K01"))
+ Expect(vals).To(ContainElement("K71"))
+ })
+
+ It("should scan to page borders", func() {
+ Expect(seed(20)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(0, "", 10).Iterator()
+ for iter.Next() {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(20))
+ })
+
+ It("should scan with match", func() {
+ Expect(seed(33)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(0, "K*2*", 10).Iterator()
+ for iter.Next() {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(13))
+ })
+
+ It("should scan with match across empty pages", func() {
+ Expect(extraSeed(2, 10)).NotTo(HaveOccurred())
+
+ var vals []string
+ iter := client.Scan(0, "K*", 1).Iterator()
+ for iter.Next() {
+ vals = append(vals, iter.Val())
+ }
+ Expect(iter.Err()).NotTo(HaveOccurred())
+ Expect(vals).To(HaveLen(2))
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/main_test.go b/vendor/github.com/go-redis/redis/main_test.go
new file mode 100644
index 000000000..30f09c618
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/main_test.go
@@ -0,0 +1,355 @@
+package redis_test
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+const (
+ redisPort = "6380"
+ redisAddr = ":" + redisPort
+ redisSecondaryPort = "6381"
+)
+
+const (
+ ringShard1Port = "6390"
+ ringShard2Port = "6391"
+)
+
+const (
+ sentinelName = "mymaster"
+ sentinelMasterPort = "8123"
+ sentinelSlave1Port = "8124"
+ sentinelSlave2Port = "8125"
+ sentinelPort = "8126"
+)
+
+var (
+ redisMain *redisProcess
+ ringShard1, ringShard2 *redisProcess
+ sentinelMaster, sentinelSlave1, sentinelSlave2, sentinel *redisProcess
+)
+
+var cluster = &clusterScenario{
+ ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"},
+ nodeIds: make([]string, 6),
+ processes: make(map[string]*redisProcess, 6),
+ clients: make(map[string]*redis.Client, 6),
+}
+
+func init() {
+ //redis.SetLogger(log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile))
+}
+
+var _ = BeforeSuite(func() {
+ var err error
+
+ redisMain, err = startRedis(redisPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ ringShard1, err = startRedis(ringShard1Port)
+ Expect(err).NotTo(HaveOccurred())
+
+ ringShard2, err = startRedis(ringShard2Port)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinelMaster, err = startRedis(sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinel, err = startSentinel(sentinelPort, sentinelName, sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinelSlave1, err = startRedis(
+ sentinelSlave1Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ sentinelSlave2, err = startRedis(
+ sentinelSlave2Port, "--slaveof", "127.0.0.1", sentinelMasterPort)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(startCluster(cluster)).NotTo(HaveOccurred())
+})
+
+var _ = AfterSuite(func() {
+ Expect(redisMain.Close()).NotTo(HaveOccurred())
+
+ Expect(ringShard1.Close()).NotTo(HaveOccurred())
+ Expect(ringShard2.Close()).NotTo(HaveOccurred())
+
+ Expect(sentinel.Close()).NotTo(HaveOccurred())
+ Expect(sentinelSlave1.Close()).NotTo(HaveOccurred())
+ Expect(sentinelSlave2.Close()).NotTo(HaveOccurred())
+ Expect(sentinelMaster.Close()).NotTo(HaveOccurred())
+
+ Expect(stopCluster(cluster)).NotTo(HaveOccurred())
+})
+
+func TestGinkgoSuite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "go-redis")
+}
+
+//------------------------------------------------------------------------------
+
+func redisOptions() *redis.Options {
+ return &redis.Options{
+ Addr: redisAddr,
+ DB: 15,
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ IdleTimeout: 500 * time.Millisecond,
+ IdleCheckFrequency: 500 * time.Millisecond,
+ }
+}
+
+func redisClusterOptions() *redis.ClusterOptions {
+ return &redis.ClusterOptions{
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ IdleTimeout: 500 * time.Millisecond,
+ IdleCheckFrequency: 500 * time.Millisecond,
+ }
+}
+
+func redisRingOptions() *redis.RingOptions {
+ return &redis.RingOptions{
+ Addrs: map[string]string{
+ "ringShardOne": ":" + ringShard1Port,
+ "ringShardTwo": ":" + ringShard2Port,
+ },
+ DialTimeout: 10 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 30 * time.Second,
+ PoolSize: 10,
+ PoolTimeout: 30 * time.Second,
+ IdleTimeout: 500 * time.Millisecond,
+ IdleCheckFrequency: 500 * time.Millisecond,
+ }
+}
+
+func perform(n int, cbs ...func(int)) {
+ var wg sync.WaitGroup
+ for _, cb := range cbs {
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ go func(cb func(int), i int) {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ cb(i)
+ }(cb, i)
+ }
+ }
+ wg.Wait()
+}
+
+func eventually(fn func() error, timeout time.Duration) error {
+ var exit int32
+ errCh := make(chan error)
+ done := make(chan struct{})
+
+ go func() {
+ defer GinkgoRecover()
+
+ for atomic.LoadInt32(&exit) == 0 {
+ err := fn()
+ if err == nil {
+ close(done)
+ return
+ }
+ select {
+ case errCh <- err:
+ default:
+ }
+ time.Sleep(timeout / 100)
+ }
+ }()
+
+ select {
+ case <-done:
+ return nil
+ case <-time.After(timeout):
+ atomic.StoreInt32(&exit, 1)
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return fmt.Errorf("timeout after %s", timeout)
+ }
+ }
+}
+
+func execCmd(name string, args ...string) (*os.Process, error) {
+ cmd := exec.Command(name, args...)
+ if testing.Verbose() {
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ }
+ return cmd.Process, cmd.Start()
+}
+
+func connectTo(port string) (*redis.Client, error) {
+ client := redis.NewClient(&redis.Options{
+ Addr: ":" + port,
+ })
+
+ err := eventually(func() error {
+ return client.Ping().Err()
+ }, 30*time.Second)
+ if err != nil {
+ return nil, err
+ }
+
+ return client, nil
+}
+
+type redisProcess struct {
+ *os.Process
+ *redis.Client
+}
+
+func (p *redisProcess) Close() error {
+ if err := p.Kill(); err != nil {
+ return err
+ }
+
+ err := eventually(func() error {
+ if err := p.Client.Ping().Err(); err != nil {
+ return nil
+ }
+ return errors.New("client is not shutdown")
+ }, 10*time.Second)
+ if err != nil {
+ return err
+ }
+
+ p.Client.Close()
+ return nil
+}
+
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis.conf"))
+)
+
+func redisDir(port string) (string, error) {
+ dir, err := filepath.Abs(filepath.Join("testdata", "instances", port))
+ if err != nil {
+ return "", err
+ }
+ if err := os.RemoveAll(dir); err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(dir, 0775); err != nil {
+ return "", err
+ }
+ return dir, nil
+}
+
+func startRedis(port string, args ...string) (*redisProcess, error) {
+ dir, err := redisDir(port)
+ if err != nil {
+ return nil, err
+ }
+ if err = exec.Command("cp", "-f", redisServerConf, dir).Run(); err != nil {
+ return nil, err
+ }
+
+ baseArgs := []string{filepath.Join(dir, "redis.conf"), "--port", port, "--dir", dir}
+ process, err := execCmd(redisServerBin, append(baseArgs, args...)...)
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := connectTo(port)
+ if err != nil {
+ process.Kill()
+ return nil, err
+ }
+ return &redisProcess{process, client}, err
+}
+
+func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
+ dir, err := redisDir(port)
+ if err != nil {
+ return nil, err
+ }
+ process, err := execCmd(redisServerBin, os.DevNull, "--sentinel", "--port", port, "--dir", dir)
+ if err != nil {
+ return nil, err
+ }
+ client, err := connectTo(port)
+ if err != nil {
+ process.Kill()
+ return nil, err
+ }
+ for _, cmd := range []*redis.StatusCmd{
+ redis.NewStatusCmd("SENTINEL", "MONITOR", masterName, "127.0.0.1", masterPort, "1"),
+ redis.NewStatusCmd("SENTINEL", "SET", masterName, "down-after-milliseconds", "500"),
+ redis.NewStatusCmd("SENTINEL", "SET", masterName, "failover-timeout", "1000"),
+ redis.NewStatusCmd("SENTINEL", "SET", masterName, "parallel-syncs", "1"),
+ } {
+ client.Process(cmd)
+ if err := cmd.Err(); err != nil {
+ process.Kill()
+ return nil, err
+ }
+ }
+ return &redisProcess{process, client}, nil
+}
+
+//------------------------------------------------------------------------------
+
+type badConnError string
+
+func (e badConnError) Error() string { return string(e) }
+func (e badConnError) Timeout() bool { return false }
+func (e badConnError) Temporary() bool { return false }
+
+type badConn struct {
+ net.TCPConn
+
+ readDelay, writeDelay time.Duration
+ readErr, writeErr error
+}
+
+var _ net.Conn = &badConn{}
+
+func (cn *badConn) Read([]byte) (int, error) {
+ if cn.readDelay != 0 {
+ time.Sleep(cn.readDelay)
+ }
+ if cn.readErr != nil {
+ return 0, cn.readErr
+ }
+ return 0, badConnError("bad connection")
+}
+
+func (cn *badConn) Write([]byte) (int, error) {
+ if cn.writeDelay != 0 {
+ time.Sleep(cn.writeDelay)
+ }
+ if cn.writeErr != nil {
+ return 0, cn.writeErr
+ }
+ return 0, badConnError("bad connection")
+}
diff --git a/vendor/github.com/go-redis/redis/options.go b/vendor/github.com/go-redis/redis/options.go
new file mode 100644
index 000000000..cd6fa981a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/options.go
@@ -0,0 +1,201 @@
+package redis
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-redis/redis/internal/pool"
+)
+
+type Options struct {
+ // The network type, either tcp or unix.
+ // Default is tcp.
+ Network string
+ // host:port address.
+ Addr string
+
+	// Dialer creates a new network connection and takes priority over
+	// the Network and Addr options.
+ Dialer func() (net.Conn, error)
+
+	// Hook that is called when a new connection is established.
+ OnConnect func(*Conn) error
+
+ // Optional password. Must match the password specified in the
+ // requirepass server configuration option.
+ Password string
+ // Database to be selected after connecting to the server.
+ DB int
+
+ // Maximum number of retries before giving up.
+ // Default is to not retry failed commands.
+ MaxRetries int
+
+ // Maximum backoff between each retry.
+	// Default is 512 milliseconds; -1 disables backoff.
+ MaxRetryBackoff time.Duration
+
+ // Dial timeout for establishing new connections.
+ // Default is 5 seconds.
+ DialTimeout time.Duration
+ // Timeout for socket reads. If reached, commands will fail
+ // with a timeout instead of blocking.
+ // Default is 3 seconds.
+ ReadTimeout time.Duration
+ // Timeout for socket writes. If reached, commands will fail
+ // with a timeout instead of blocking.
+ // Default is ReadTimeout.
+ WriteTimeout time.Duration
+
+ // Maximum number of socket connections.
+	// Default is 10 connections per CPU as reported by runtime.NumCPU.
+ PoolSize int
+ // Amount of time client waits for connection if all connections
+ // are busy before returning an error.
+ // Default is ReadTimeout + 1 second.
+ PoolTimeout time.Duration
+ // Amount of time after which client closes idle connections.
+ // Should be less than server's timeout.
+ // Default is 5 minutes.
+ IdleTimeout time.Duration
+ // Frequency of idle checks.
+ // Default is 1 minute.
+	// When a negative value is set, the idle check is disabled.
+ IdleCheckFrequency time.Duration
+
+ // Enables read only queries on slave nodes.
+ ReadOnly bool
+
+	// TLS Config to use. When set, TLS will be negotiated.
+ TLSConfig *tls.Config
+}
+
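+// exampleNewClient is an illustrative sketch and not part of the upstream
+// go-redis code: it shows a client being constructed from Options, with
+// init (below) filling in the remaining defaults. The address
+// "localhost:6379" is a hypothetical value.
+func exampleNewClient() *Client {
+	return NewClient(&Options{
+		Addr:     "localhost:6379",
+		Password: "", // no password set
+		DB:       0,  // use the default DB
+	})
+}
+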
+func (opt *Options) init() {
+ if opt.Network == "" {
+ opt.Network = "tcp"
+ }
+ if opt.Dialer == nil {
+ opt.Dialer = func() (net.Conn, error) {
+ conn, err := net.DialTimeout(opt.Network, opt.Addr, opt.DialTimeout)
+ if opt.TLSConfig == nil || err != nil {
+ return conn, err
+ }
+ t := tls.Client(conn, opt.TLSConfig)
+ return t, t.Handshake()
+ }
+ }
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 10 * runtime.NumCPU()
+ }
+ if opt.DialTimeout == 0 {
+ opt.DialTimeout = 5 * time.Second
+ }
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+ if opt.PoolTimeout == 0 {
+ opt.PoolTimeout = opt.ReadTimeout + time.Second
+ }
+ if opt.IdleTimeout == 0 {
+ opt.IdleTimeout = 5 * time.Minute
+ }
+ if opt.IdleCheckFrequency == 0 {
+ opt.IdleCheckFrequency = time.Minute
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+func ParseURL(redisURL string) (*Options, error) {
+ o := &Options{Network: "tcp"}
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Scheme != "redis" && u.Scheme != "rediss" {
+ return nil, errors.New("invalid redis URL scheme: " + u.Scheme)
+ }
+
+ if u.User != nil {
+ if p, ok := u.User.Password(); ok {
+ o.Password = p
+ }
+ }
+
+ if len(u.Query()) > 0 {
+ return nil, errors.New("no options supported")
+ }
+
+ h, p, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ h = u.Host
+ }
+ if h == "" {
+ h = "localhost"
+ }
+ if p == "" {
+ p = "6379"
+ }
+ o.Addr = net.JoinHostPort(h, p)
+
+ f := strings.FieldsFunc(u.Path, func(r rune) bool {
+ return r == '/'
+ })
+ switch len(f) {
+ case 0:
+ o.DB = 0
+ case 1:
+ if o.DB, err = strconv.Atoi(f[0]); err != nil {
+ return nil, fmt.Errorf("invalid redis database number: %q", f[0])
+ }
+ default:
+ return nil, errors.New("invalid redis URL path: " + u.Path)
+ }
+
+ if u.Scheme == "rediss" {
+ o.TLSConfig = &tls.Config{ServerName: h}
+ }
+ return o, nil
+}
+
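+// exampleParseURL is an illustrative sketch and not part of the upstream
+// go-redis code: it shows ParseURL output being handed to NewClient. The
+// URL, its password and database number are hypothetical values.
+func exampleParseURL() (*Client, error) {
+	opt, err := ParseURL("redis://:secret@localhost:6379/1")
+	if err != nil {
+		return nil, err
+	}
+	return NewClient(opt), nil
+}
+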
+func newConnPool(opt *Options) *pool.ConnPool {
+ return pool.NewConnPool(&pool.Options{
+ Dialer: opt.Dialer,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ })
+}
+
+// PoolStats contains pool state information and accumulated stats.
+type PoolStats struct {
+ Requests uint32 // number of times a connection was requested by the pool
+ Hits uint32 // number of times free connection was found in the pool
+ Timeouts uint32 // number of times a wait timeout occurred
+
+ TotalConns uint32 // the number of total connections in the pool
+ FreeConns uint32 // the number of free connections in the pool
+}
diff --git a/vendor/github.com/go-redis/redis/options_test.go b/vendor/github.com/go-redis/redis/options_test.go
new file mode 100644
index 000000000..6a4af7169
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/options_test.go
@@ -0,0 +1,94 @@
+// +build go1.7
+
+package redis
+
+import (
+ "errors"
+ "testing"
+)
+
+func TestParseURL(t *testing.T) {
+ cases := []struct {
+ u string
+ addr string
+ db int
+ tls bool
+ err error
+ }{
+ {
+ "redis://localhost:123/1",
+ "localhost:123",
+ 1, false, nil,
+ },
+ {
+ "redis://localhost:123",
+ "localhost:123",
+ 0, false, nil,
+ },
+ {
+ "redis://localhost/1",
+ "localhost:6379",
+ 1, false, nil,
+ },
+ {
+ "redis://12345",
+ "12345:6379",
+ 0, false, nil,
+ },
+ {
+ "rediss://localhost:123",
+ "localhost:123",
+ 0, true, nil,
+ },
+ {
+ "redis://localhost/?abc=123",
+ "",
+ 0, false, errors.New("no options supported"),
+ },
+ {
+ "http://google.com",
+ "",
+ 0, false, errors.New("invalid redis URL scheme: http"),
+ },
+ {
+ "redis://localhost/1/2/3/4",
+ "",
+ 0, false, errors.New("invalid redis URL path: /1/2/3/4"),
+ },
+ {
+ "12345",
+ "",
+ 0, false, errors.New("invalid redis URL scheme: "),
+ },
+ {
+ "redis://localhost/iamadatabase",
+ "",
+ 0, false, errors.New(`invalid redis database number: "iamadatabase"`),
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.u, func(t *testing.T) {
+ o, err := ParseURL(c.u)
+ if c.err == nil && err != nil {
+			t.Fatalf("unexpected error: %q", err)
+ return
+ }
+ if c.err != nil && err != nil {
+ if c.err.Error() != err.Error() {
+ t.Fatalf("got %q, expected %q", err, c.err)
+ }
+ return
+ }
+ if o.Addr != c.addr {
+ t.Errorf("got %q, want %q", o.Addr, c.addr)
+ }
+ if o.DB != c.db {
+			t.Errorf("got %d, expected %d", o.DB, c.db)
+ }
+ if c.tls && o.TLSConfig == nil {
+ t.Errorf("got nil TLSConfig, expected a TLSConfig")
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/go-redis/redis/parser.go b/vendor/github.com/go-redis/redis/parser.go
new file mode 100644
index 000000000..1d7ec630e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/parser.go
@@ -0,0 +1,374 @@
+package redis
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/go-redis/redis/internal/proto"
+)
+
+// Implements proto.MultiBulkParse
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ vals := make([]interface{}, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(sliceParser)
+ if err == Nil {
+ vals = append(vals, nil)
+ } else if err != nil {
+ return nil, err
+ } else {
+ switch vv := v.(type) {
+ case []byte:
+ vals = append(vals, string(vv))
+ default:
+ vals = append(vals, v)
+ }
+ }
+ }
+ return vals, nil
+}
+
+// Implements proto.MultiBulkParse
+func boolSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ bools := make([]bool, 0, n)
+ for i := int64(0); i < n; i++ {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ bools = append(bools, n == 1)
+ }
+ return bools, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ ss := make([]string, 0, n)
+ for i := int64(0); i < n; i++ {
+ s, err := rd.ReadStringReply()
+ if err == Nil {
+ ss = append(ss, "")
+ } else if err != nil {
+ return nil, err
+ } else {
+ ss = append(ss, s)
+ }
+ }
+ return ss, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringStringMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]string, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadStringReply()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadStringReply()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]int64, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadStringReply()
+ if err != nil {
+ return nil, err
+ }
+
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = n
+ }
+ return m, nil
+}
+
+// Implements proto.MultiBulkParse
+func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ zz := make([]Z, n/2)
+ for i := int64(0); i < n; i += 2 {
+ var err error
+
+ z := &zz[i/2]
+
+ z.Member, err = rd.ReadStringReply()
+ if err != nil {
+ return nil, err
+ }
+
+ z.Score, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return zz, nil
+}
+
+// Implements proto.MultiBulkParse
+func clusterSlotsParser(rd *proto.Reader, n int64) (interface{}, error) {
+ slots := make([]ClusterSlot, n)
+ for i := 0; i < len(slots); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 2 {
+ err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ return nil, err
+ }
+
+ start, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ end, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := make([]ClusterNode, n-2)
+ for j := 0; j < len(nodes); j++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 && n != 3 {
+ err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+ return nil, err
+ }
+
+ ip, err := rd.ReadStringReply()
+ if err != nil {
+ return nil, err
+ }
+
+ port, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ nodes[j].Addr = net.JoinHostPort(ip, strconv.FormatInt(port, 10))
+
+ if n == 3 {
+ id, err := rd.ReadStringReply()
+ if err != nil {
+ return nil, err
+ }
+ nodes[j].Id = id
+ }
+ }
+
+ slots[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+ return slots, nil
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ var loc GeoLocation
+ var err error
+
+ loc.Name, err = rd.ReadStringReply()
+ if err != nil {
+ return nil, err
+ }
+ if q.WithDist {
+ loc.Dist, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithGeoHash {
+ loc.GeoHash, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithCoord {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 {
+ return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+ }
+
+ loc.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ loc.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &loc, nil
+ }
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ locs := make([]GeoLocation, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(newGeoLocationParser(q))
+ if err != nil {
+ return nil, err
+ }
+ switch vv := v.(type) {
+ case []byte:
+ locs = append(locs, GeoLocation{
+ Name: string(vv),
+ })
+ case *GeoLocation:
+ locs = append(locs, *vv)
+ default:
+ return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+ }
+ }
+ return locs, nil
+ }
+}
+
+func geoPosParser(rd *proto.Reader, n int64) (interface{}, error) {
+ var pos GeoPos
+ var err error
+
+ pos.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ pos.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ return &pos, nil
+}
+
+func geoPosSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ positions := make([]*GeoPos, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(geoPosParser)
+ if err != nil {
+ if err == Nil {
+ positions = append(positions, nil)
+ continue
+ }
+ return nil, err
+ }
+ switch v := v.(type) {
+ case *GeoPos:
+ positions = append(positions, v)
+ default:
+ return nil, fmt.Errorf("got %T, expected *GeoPos", v)
+ }
+ }
+ return positions, nil
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ var cmd CommandInfo
+ var err error
+
+ if n != 6 {
+ return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n)
+ }
+
+ cmd.Name, err = rd.ReadStringReply()
+ if err != nil {
+ return nil, err
+ }
+
+ arity, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.Arity = int8(arity)
+
+ flags, err := rd.ReadReply(stringSliceParser)
+ if err != nil {
+ return nil, err
+ }
+ cmd.Flags = flags.([]string)
+
+ firstKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.StepCount = int8(stepCount)
+
+ for _, flag := range cmd.Flags {
+ if flag == "readonly" {
+ cmd.ReadOnly = true
+ break
+ }
+ }
+
+ return &cmd, nil
+}
+
+// Implements proto.MultiBulkParse
+func commandInfoSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]*CommandInfo, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(commandInfoParser)
+ if err != nil {
+ return nil, err
+ }
+ vv := v.(*CommandInfo)
+ m[vv.Name] = vv
+
+ }
+ return m, nil
+}
+
+// Implements proto.MultiBulkParse
+func timeParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d elements, expected 2", n)
+ }
+
+ sec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ microsec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ return time.Unix(sec, microsec*1000), nil
+}
diff --git a/vendor/github.com/go-redis/redis/pipeline.go b/vendor/github.com/go-redis/redis/pipeline.go
new file mode 100644
index 000000000..b66c0597f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/pipeline.go
@@ -0,0 +1,106 @@
+package redis
+
+import (
+ "sync"
+
+ "github.com/go-redis/redis/internal/pool"
+)
+
+type pipelineExecer func([]Cmder) error
+
+type Pipeliner interface {
+ StatefulCmdable
+ Process(cmd Cmder) error
+ Close() error
+ Discard() error
+ discard() error
+ Exec() ([]Cmder, error)
+ pipelined(fn func(Pipeliner) error) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+ statefulCmdable
+
+ exec pipelineExecer
+
+ mu sync.Mutex
+ cmds []Cmder
+ closed bool
+}
+
+func (c *Pipeline) Process(cmd Cmder) error {
+ c.mu.Lock()
+ c.cmds = append(c.cmds, cmd)
+ c.mu.Unlock()
+ return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+ c.mu.Lock()
+ c.discard()
+ c.closed = true
+ c.mu.Unlock()
+ return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+ c.mu.Lock()
+ err := c.discard()
+ c.mu.Unlock()
+ return err
+}
+
+func (c *Pipeline) discard() error {
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.cmds = c.cmds[:0]
+ return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec() ([]Cmder, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ if len(c.cmds) == 0 {
+ return nil, nil
+ }
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ return cmds, c.exec(cmds)
+}
+
+func (c *Pipeline) pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ if err := fn(c); err != nil {
+ return nil, err
+ }
+ cmds, err := c.Exec()
+ _ = c.Close()
+ return cmds, err
+}
+
+func (c *Pipeline) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.pipelined(fn)
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+ return c
+}
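+
+// examplePipelined is an illustrative sketch and not part of the upstream
+// go-redis code: it uses the block-style Pipelined API to queue commands
+// and send them in a single round trip via Exec. The key "pipeline_key"
+// is a hypothetical value.
+func examplePipelined(client *Client) ([]Cmder, error) {
+	return client.Pipelined(func(pipe Pipeliner) error {
+		pipe.Set("pipeline_key", "value", 0)
+		pipe.Get("pipeline_key")
+		return nil
+	})
+}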
diff --git a/vendor/github.com/go-redis/redis/pipeline_test.go b/vendor/github.com/go-redis/redis/pipeline_test.go
new file mode 100644
index 000000000..11896c6bb
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/pipeline_test.go
@@ -0,0 +1,80 @@
+package redis_test
+
+import (
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("pipelining", func() {
+ var client *redis.Client
+ var pipe *redis.Pipeline
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("supports block style", func() {
+ var get *redis.StringCmd
+ cmds, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+ get = pipe.Get("foo")
+ return nil
+ })
+ Expect(err).To(Equal(redis.Nil))
+ Expect(cmds).To(HaveLen(1))
+ Expect(cmds[0]).To(Equal(get))
+ Expect(get.Err()).To(Equal(redis.Nil))
+ Expect(get.Val()).To(Equal(""))
+ })
+
+ assertPipeline := func() {
+ It("returns no errors when there are no commands", func() {
+ _, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("discards queued commands", func() {
+ pipe.Get("key")
+ pipe.Discard()
+ cmds, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(BeNil())
+ })
+
+ It("handles val/err", func() {
+ err := client.Set("key", "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ get := pipe.Get("key")
+ cmds, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+
+ val, err := get.Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("value"))
+ })
+ }
+
+ Describe("Pipeline", func() {
+ BeforeEach(func() {
+ pipe = client.Pipeline().(*redis.Pipeline)
+ })
+
+ assertPipeline()
+ })
+
+ Describe("TxPipeline", func() {
+ BeforeEach(func() {
+ pipe = client.TxPipeline().(*redis.Pipeline)
+ })
+
+ assertPipeline()
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/pool_test.go b/vendor/github.com/go-redis/redis/pool_test.go
new file mode 100644
index 000000000..34a548a63
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/pool_test.go
@@ -0,0 +1,141 @@
+package redis_test
+
+import (
+ "time"
+
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("pool", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("respects max size", func() {
+ perform(1000, func(id int) {
+ val, err := client.Ping().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+ })
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(BeNumerically("<=", 10))
+ Expect(pool.FreeLen()).To(BeNumerically("<=", 10))
+ Expect(pool.Len()).To(Equal(pool.FreeLen()))
+ })
+
+ It("respects max size on multi", func() {
+ perform(1000, func(id int) {
+ var ping *redis.StatusCmd
+
+ err := client.Watch(func(tx *redis.Tx) error {
+ cmds, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
+ ping = pipe.Ping()
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+ return err
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(ping.Err()).NotTo(HaveOccurred())
+ Expect(ping.Val()).To(Equal("PONG"))
+ })
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(BeNumerically("<=", 10))
+ Expect(pool.FreeLen()).To(BeNumerically("<=", 10))
+ Expect(pool.Len()).To(Equal(pool.FreeLen()))
+ })
+
+ It("respects max size on pipelines", func() {
+ perform(1000, func(id int) {
+ pipe := client.Pipeline()
+ ping := pipe.Ping()
+ cmds, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+ Expect(ping.Err()).NotTo(HaveOccurred())
+ Expect(ping.Val()).To(Equal("PONG"))
+ Expect(pipe.Close()).NotTo(HaveOccurred())
+ })
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(BeNumerically("<=", 10))
+ Expect(pool.FreeLen()).To(BeNumerically("<=", 10))
+ Expect(pool.Len()).To(Equal(pool.FreeLen()))
+ })
+
+ It("removes broken connections", func() {
+ cn, _, err := client.Pool().Get()
+ Expect(err).NotTo(HaveOccurred())
+ cn.SetNetConn(&badConn{})
+ Expect(client.Pool().Put(cn)).NotTo(HaveOccurred())
+
+ err = client.Ping().Err()
+ Expect(err).To(MatchError("bad connection"))
+
+ val, err := client.Ping().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(Equal(1))
+ Expect(pool.FreeLen()).To(Equal(1))
+
+ stats := pool.Stats()
+ Expect(stats.Requests).To(Equal(uint32(4)))
+ Expect(stats.Hits).To(Equal(uint32(2)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
+ It("reuses connections", func() {
+ for i := 0; i < 100; i++ {
+ val, err := client.Ping().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+ }
+
+ pool := client.Pool()
+ Expect(pool.Len()).To(Equal(1))
+ Expect(pool.FreeLen()).To(Equal(1))
+
+ stats := pool.Stats()
+ Expect(stats.Requests).To(Equal(uint32(101)))
+ Expect(stats.Hits).To(Equal(uint32(100)))
+ Expect(stats.Timeouts).To(Equal(uint32(0)))
+ })
+
+ It("removes idle connections", func() {
+ stats := client.PoolStats()
+ Expect(stats).To(Equal(&redis.PoolStats{
+ Requests: 1,
+ Hits: 0,
+ Timeouts: 0,
+ TotalConns: 1,
+ FreeConns: 1,
+ }))
+
+ time.Sleep(2 * time.Second)
+
+ stats = client.PoolStats()
+ Expect(stats).To(Equal(&redis.PoolStats{
+ Requests: 1,
+ Hits: 0,
+ Timeouts: 0,
+ TotalConns: 0,
+ FreeConns: 0,
+ }))
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/pubsub.go b/vendor/github.com/go-redis/redis/pubsub.go
new file mode 100644
index 000000000..4872b4e88
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/pubsub.go
@@ -0,0 +1,396 @@
+package redis
+
+import (
+ "fmt"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+ "github.com/go-redis/redis/internal/pool"
+)
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. It's NOT safe for concurrent use by
+// multiple goroutines.
+//
+// PubSub automatically resubscribes to the channels and patterns
+// when it reconnects after Redis becomes unavailable.
+type PubSub struct {
+ base baseClient
+
+ mu sync.Mutex
+ cn *pool.Conn
+ channels []string
+ patterns []string
+ closed bool
+
+ cmd *Cmd
+}
+
+func (c *PubSub) conn() (*pool.Conn, error) {
+ c.mu.Lock()
+ cn, err := c._conn()
+ c.mu.Unlock()
+ return cn, err
+}
+
+func (c *PubSub) _conn() (*pool.Conn, error) {
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ if c.cn != nil {
+ return c.cn, nil
+ }
+
+ cn, err := c.base.connPool.NewConn()
+ if err != nil {
+ return nil, err
+ }
+
+ if !cn.Inited {
+ if err := c.base.initConn(cn); err != nil {
+ _ = c.base.connPool.CloseConn(cn)
+ return nil, err
+ }
+ }
+
+ if err := c.resubscribe(cn); err != nil {
+ _ = c.base.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ c.cn = cn
+ return cn, nil
+}
+
+func (c *PubSub) resubscribe(cn *pool.Conn) error {
+ var firstErr error
+ if len(c.channels) > 0 {
+ if err := c._subscribe(cn, "subscribe", c.channels...); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ if len(c.patterns) > 0 {
+ if err := c._subscribe(cn, "psubscribe", c.patterns...); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ return firstErr
+}
+
+func (c *PubSub) _subscribe(cn *pool.Conn, redisCmd string, channels ...string) error {
+ args := make([]interface{}, 1+len(channels))
+ args[0] = redisCmd
+ for i, channel := range channels {
+ args[1+i] = channel
+ }
+ cmd := NewSliceCmd(args...)
+
+ cn.SetWriteTimeout(c.base.opt.WriteTimeout)
+ return writeCmd(cn, cmd)
+}
+
+func (c *PubSub) putConn(cn *pool.Conn, err error) {
+ if !internal.IsBadConn(err, true) {
+ return
+ }
+
+ c.mu.Lock()
+ if c.cn == cn {
+ _ = c.closeConn()
+ }
+ c.mu.Unlock()
+}
+
+func (c *PubSub) closeConn() error {
+ err := c.base.connPool.CloseConn(c.cn)
+ c.cn = nil
+ return err
+}
+
+func (c *PubSub) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.closed = true
+
+ if c.cn != nil {
+ return c.closeConn()
+ }
+ return nil
+}
+
+// Subscribe subscribes the client to the specified channels. It returns
+// an empty subscription if there are no channels.
+func (c *PubSub) Subscribe(channels ...string) error {
+ c.mu.Lock()
+ err := c.subscribe("subscribe", channels...)
+ c.channels = appendIfNotExists(c.channels, channels...)
+ c.mu.Unlock()
+ return err
+}
+
+// PSubscribe subscribes the client to the given patterns. It returns
+// an empty subscription if there are no patterns.
+func (c *PubSub) PSubscribe(patterns ...string) error {
+ c.mu.Lock()
+ err := c.subscribe("psubscribe", patterns...)
+ c.patterns = appendIfNotExists(c.patterns, patterns...)
+ c.mu.Unlock()
+ return err
+}
+
+// Unsubscribes the client from the given channels, or from all of
+// them if none is given.
+func (c *PubSub) Unsubscribe(channels ...string) error {
+ c.mu.Lock()
+ err := c.subscribe("unsubscribe", channels...)
+ c.channels = remove(c.channels, channels...)
+ c.mu.Unlock()
+ return err
+}
+
+// Unsubscribes the client from the given patterns, or from all of
+// them if none is given.
+func (c *PubSub) PUnsubscribe(patterns ...string) error {
+ c.mu.Lock()
+ err := c.subscribe("punsubscribe", patterns...)
+ c.patterns = remove(c.patterns, patterns...)
+ c.mu.Unlock()
+ return err
+}
+
+func (c *PubSub) subscribe(redisCmd string, channels ...string) error {
+ cn, err := c._conn()
+ if err != nil {
+ return err
+ }
+
+ err = c._subscribe(cn, redisCmd, channels...)
+ c.putConn(cn, err)
+ return err
+}
+
+func (c *PubSub) Ping(payload ...string) error {
+ args := []interface{}{"ping"}
+ if len(payload) == 1 {
+ args = append(args, payload[0])
+ }
+ cmd := NewCmd(args...)
+
+ cn, err := c.conn()
+ if err != nil {
+ return err
+ }
+
+ cn.SetWriteTimeout(c.base.opt.WriteTimeout)
+ err = writeCmd(cn, cmd)
+ c.putConn(cn, err)
+ return err
+}
+
+// Message received after a successful subscription to a channel.
+type Subscription struct {
+ // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+ Kind string
+ // Channel name we have subscribed to.
+ Channel string
+ // Number of channels we are currently subscribed to.
+ Count int
+}
+
+func (m *Subscription) String() string {
+ return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message received as a result of a PUBLISH command issued by another client.
+type Message struct {
+ Channel string
+ Pattern string
+ Payload string
+}
+
+func (m *Message) String() string {
+ return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong received as a result of a PING command issued by another client.
+type Pong struct {
+ Payload string
+}
+
+func (p *Pong) String() string {
+ if p.Payload != "" {
+ return fmt.Sprintf("Pong<%s>", p.Payload)
+ }
+ return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+ switch reply := reply.(type) {
+ case string:
+ return &Pong{
+ Payload: reply,
+ }, nil
+ case []interface{}:
+ switch kind := reply[0].(string); kind {
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ return &Subscription{
+ Kind: kind,
+ Channel: reply[1].(string),
+ Count: int(reply[2].(int64)),
+ }, nil
+ case "message":
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: reply[2].(string),
+ }, nil
+ case "pmessage":
+ return &Message{
+ Pattern: reply[1].(string),
+ Channel: reply[2].(string),
+ Payload: reply[3].(string),
+ }, nil
+ case "pong":
+ return &Pong{
+ Payload: reply[1].(string),
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+ }
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API and most clients
+// should use ReceiveMessage.
+func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
+ if c.cmd == nil {
+ c.cmd = NewCmd()
+ }
+
+ cn, err := c.conn()
+ if err != nil {
+ return nil, err
+ }
+
+ cn.SetReadTimeout(timeout)
+ err = c.cmd.readReply(cn)
+ c.putConn(cn, err)
+ if err != nil {
+ return nil, err
+ }
+
+ return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See PubSub example for details. This is a low-level API and most clients
+// should use ReceiveMessage.
+func (c *PubSub) Receive() (interface{}, error) {
+ return c.ReceiveTimeout(0)
+}
+
+// ReceiveMessage returns a Message or an error, ignoring Subscription and
+// Pong messages. It automatically reconnects to the Redis server and
+// resubscribes to channels in case of network errors.
+func (c *PubSub) ReceiveMessage() (*Message, error) {
+ return c.receiveMessage(5 * time.Second)
+}
+
+func (c *PubSub) receiveMessage(timeout time.Duration) (*Message, error) {
+ var errNum uint
+ for {
+ msgi, err := c.ReceiveTimeout(timeout)
+ if err != nil {
+ if !internal.IsNetworkError(err) {
+ return nil, err
+ }
+
+ errNum++
+ if errNum < 3 {
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ err := c.Ping()
+ if err != nil {
+ internal.Logf("PubSub.Ping failed: %s", err)
+ }
+ }
+ } else {
+				// Three consecutive errors - the connection is broken or
+				// the Redis server is down.
+				// Sleep so we do not exceed the max number of open connections.
+ time.Sleep(time.Second)
+ }
+ continue
+ }
+
+ // Reset error number, because we received a message.
+ errNum = 0
+
+ switch msg := msgi.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ return msg, nil
+ default:
+ return nil, fmt.Errorf("redis: unknown message: %T", msgi)
+ }
+ }
+}
+
+// Channel returns a channel for concurrently receiving messages.
+// The channel is closed when the PubSub is closed.
+func (c *PubSub) Channel() <-chan *Message {
+ ch := make(chan *Message, 100)
+ go func() {
+ for {
+ msg, err := c.ReceiveMessage()
+ if err != nil {
+ if err == pool.ErrClosed {
+ break
+ }
+ continue
+ }
+ ch <- msg
+ }
+ close(ch)
+ }()
+ return ch
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+ for _, e := range es {
+ for _, s := range ss {
+ if s == e {
+ continue loop
+ }
+ }
+ ss = append(ss, e)
+ }
+ return ss
+}
+
+func remove(ss []string, es ...string) []string {
+ if len(es) == 0 {
+ return ss[:0]
+ }
+ for _, e := range es {
+ for i, s := range ss {
+ if s == e {
+ ss = append(ss[:i], ss[i+1:]...)
+ break
+ }
+ }
+ }
+ return ss
+}
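+
+// examplePubSub is an illustrative sketch and not part of the upstream
+// go-redis code: it subscribes to a channel, blocks on the channel returned
+// by Channel until a message is published, then closes the subscription.
+// "example_channel" is a hypothetical channel name.
+func examplePubSub(client *Client) {
+	pubsub := client.Subscribe("example_channel")
+	msg := <-pubsub.Channel()
+	fmt.Println(msg.Channel, msg.Payload)
+	_ = pubsub.Close()
+}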
diff --git a/vendor/github.com/go-redis/redis/pubsub_test.go b/vendor/github.com/go-redis/redis/pubsub_test.go
new file mode 100644
index 000000000..e8589f461
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/pubsub_test.go
@@ -0,0 +1,406 @@
+package redis_test
+
+import (
+ "io"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("PubSub", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should support pattern matching", func() {
+ pubsub := client.PSubscribe("mychannel*")
+ defer pubsub.Close()
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("psubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel*"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err.(net.Error).Timeout()).To(Equal(true))
+ Expect(msgi).To(BeNil())
+ }
+
+ n, err := client.Publish("mychannel1", "hello").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ Expect(pubsub.PUnsubscribe("mychannel*")).NotTo(HaveOccurred())
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Message)
+ Expect(subscr.Channel).To(Equal("mychannel1"))
+ Expect(subscr.Pattern).To(Equal("mychannel*"))
+ Expect(subscr.Payload).To(Equal("hello"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("punsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel*"))
+ Expect(subscr.Count).To(Equal(0))
+ }
+
+ stats := client.PoolStats()
+ Expect(stats.Requests - stats.Hits).To(Equal(uint32(2)))
+ })
+
+ It("should pub/sub channels", func() {
+ channels, err := client.PubSubChannels("mychannel*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(BeEmpty())
+
+ pubsub := client.Subscribe("mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ channels, err = client.PubSubChannels("mychannel*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(ConsistOf([]string{"mychannel", "mychannel2"}))
+
+ channels, err = client.PubSubChannels("").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(BeEmpty())
+
+ channels, err = client.PubSubChannels("*").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(channels)).To(BeNumerically(">=", 2))
+ })
+
+ It("should return the numbers of subscribers", func() {
+ pubsub := client.Subscribe("mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ channels, err := client.PubSubNumSub("mychannel", "mychannel2", "mychannel3").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(channels).To(Equal(map[string]int64{
+ "mychannel": 1,
+ "mychannel2": 1,
+ "mychannel3": 0,
+ }))
+ })
+
+ It("should return the numbers of subscribers by pattern", func() {
+ num, err := client.PubSubNumPat().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(num).To(Equal(int64(0)))
+
+ pubsub := client.PSubscribe("*")
+ defer pubsub.Close()
+
+ num, err = client.PubSubNumPat().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(num).To(Equal(int64(1)))
+ })
+
+ It("should pub/sub", func() {
+ pubsub := client.Subscribe("mychannel", "mychannel2")
+ defer pubsub.Close()
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("subscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("subscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel2"))
+ Expect(subscr.Count).To(Equal(2))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err.(net.Error).Timeout()).To(Equal(true))
+			Expect(msgi).To(BeNil())
+ }
+
+ n, err := client.Publish("mychannel", "hello").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ n, err = client.Publish("mychannel2", "hello2").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+
+ Expect(pubsub.Unsubscribe("mychannel", "mychannel2")).NotTo(HaveOccurred())
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Message)
+ Expect(subscr.Channel).To(Equal("mychannel"))
+ Expect(subscr.Payload).To(Equal("hello"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ msg := msgi.(*redis.Message)
+ Expect(msg.Channel).To(Equal("mychannel2"))
+ Expect(msg.Payload).To(Equal("hello2"))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("unsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel"))
+ Expect(subscr.Count).To(Equal(1))
+ }
+
+ {
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ subscr := msgi.(*redis.Subscription)
+ Expect(subscr.Kind).To(Equal("unsubscribe"))
+ Expect(subscr.Channel).To(Equal("mychannel2"))
+ Expect(subscr.Count).To(Equal(0))
+ }
+
+ stats := client.PoolStats()
+ Expect(stats.Requests - stats.Hits).To(Equal(uint32(2)))
+ })
+
+ It("should ping/pong", func() {
+ pubsub := client.Subscribe("mychannel")
+ defer pubsub.Close()
+
+ _, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = pubsub.Ping("")
+ Expect(err).NotTo(HaveOccurred())
+
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ pong := msgi.(*redis.Pong)
+ Expect(pong.Payload).To(Equal(""))
+ })
+
+ It("should ping/pong with payload", func() {
+ pubsub := client.Subscribe("mychannel")
+ defer pubsub.Close()
+
+ _, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = pubsub.Ping("hello")
+ Expect(err).NotTo(HaveOccurred())
+
+ msgi, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ pong := msgi.(*redis.Pong)
+ Expect(pong.Payload).To(Equal("hello"))
+ })
+
+ It("should multi-ReceiveMessage", func() {
+ pubsub := client.Subscribe("mychannel")
+ defer pubsub.Close()
+
+ subscr, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(subscr).To(Equal(&redis.Subscription{
+ Kind: "subscribe",
+ Channel: "mychannel",
+ Count: 1,
+ }))
+
+ err = client.Publish("mychannel", "hello").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.Publish("mychannel", "world").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ msg, err := pubsub.ReceiveMessage()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+
+ msg, err = pubsub.ReceiveMessage()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("world"))
+ })
+
+ It("should ReceiveMessage after timeout", func() {
+ timeout := 100 * time.Millisecond
+
+ pubsub := client.Subscribe("mychannel")
+ defer pubsub.Close()
+
+ subscr, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(subscr).To(Equal(&redis.Subscription{
+ Kind: "subscribe",
+ Channel: "mychannel",
+ Count: 1,
+ }))
+
+ done := make(chan bool, 1)
+ go func() {
+ defer GinkgoRecover()
+ defer func() {
+ done <- true
+ }()
+
+ time.Sleep(timeout + 100*time.Millisecond)
+ n, err := client.Publish("mychannel", "hello").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+ }()
+
+ msg, err := pubsub.ReceiveMessageTimeout(timeout)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+
+ Eventually(done).Should(Receive())
+
+ stats := client.PoolStats()
+ Expect(stats.Requests).To(Equal(uint32(2)))
+ Expect(stats.Hits).To(Equal(uint32(1)))
+ })
+
+ expectReceiveMessageOnError := func(pubsub *redis.PubSub) {
+ pubsub.SetNetConn(&badConn{
+ readErr: io.EOF,
+ writeErr: io.EOF,
+ })
+
+ done := make(chan bool, 1)
+ go func() {
+ defer GinkgoRecover()
+ defer func() {
+ done <- true
+ }()
+
+ time.Sleep(100 * time.Millisecond)
+ err := client.Publish("mychannel", "hello").Err()
+ Expect(err).NotTo(HaveOccurred())
+ }()
+
+ msg, err := pubsub.ReceiveMessage()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+
+ Eventually(done).Should(Receive())
+ }
+
+ It("Subscribe should reconnect on ReceiveMessage error", func() {
+ pubsub := client.Subscribe("mychannel")
+ defer pubsub.Close()
+
+ subscr, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(subscr).To(Equal(&redis.Subscription{
+ Kind: "subscribe",
+ Channel: "mychannel",
+ Count: 1,
+ }))
+
+ expectReceiveMessageOnError(pubsub)
+ })
+
+ It("PSubscribe should reconnect on ReceiveMessage error", func() {
+ pubsub := client.PSubscribe("mychannel")
+ defer pubsub.Close()
+
+ subscr, err := pubsub.ReceiveTimeout(time.Second)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(subscr).To(Equal(&redis.Subscription{
+ Kind: "psubscribe",
+ Channel: "mychannel",
+ Count: 1,
+ }))
+
+ expectReceiveMessageOnError(pubsub)
+ })
+
+ It("should return on Close", func() {
+ pubsub := client.Subscribe("mychannel")
+ defer pubsub.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer GinkgoRecover()
+
+ wg.Done()
+ defer wg.Done()
+
+ _, err := pubsub.ReceiveMessage()
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(SatisfyAny(
+ MatchError("redis: client is closed"),
+ MatchError("use of closed network connection"), // Go 1.4
+ ))
+ }()
+
+ wg.Wait()
+ wg.Add(1)
+
+ Expect(pubsub.Close()).NotTo(HaveOccurred())
+
+ wg.Wait()
+ })
+
+ It("should ReceiveMessage without a subscription", func() {
+ timeout := 100 * time.Millisecond
+
+ pubsub := client.Subscribe()
+ defer pubsub.Close()
+
+ go func() {
+ defer GinkgoRecover()
+
+ time.Sleep(2 * timeout)
+
+ err := pubsub.Subscribe("mychannel")
+ Expect(err).NotTo(HaveOccurred())
+
+ time.Sleep(timeout)
+
+ err = client.Publish("mychannel", "hello").Err()
+ Expect(err).NotTo(HaveOccurred())
+ }()
+
+ msg, err := pubsub.ReceiveMessageTimeout(timeout)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(msg.Channel).To(Equal("mychannel"))
+ Expect(msg.Payload).To(Equal("hello"))
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/race_test.go b/vendor/github.com/go-redis/redis/race_test.go
new file mode 100644
index 000000000..5bcb0768e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/race_test.go
@@ -0,0 +1,247 @@
+package redis_test
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("races", func() {
+ var client *redis.Client
+ var C, N int
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB().Err()).To(BeNil())
+
+ C, N = 10, 1000
+ if testing.Short() {
+ C = 4
+ N = 100
+ }
+ })
+
+ AfterEach(func() {
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should echo", func() {
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ msg := fmt.Sprintf("echo %d %d", id, i)
+ echo, err := client.Echo(msg).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(echo).To(Equal(msg))
+ }
+ })
+ })
+
+ It("should incr", func() {
+ key := "TestIncrFromGoroutines"
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.Incr(key).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+
+ val, err := client.Get(key).Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(int64(C * N)))
+ })
+
+ It("should handle many keys", func() {
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.Set(
+ fmt.Sprintf("keys.key-%d-%d", id, i),
+ fmt.Sprintf("hello-%d-%d", id, i),
+ 0,
+ ).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+
+ keys := client.Keys("keys.*")
+ Expect(keys.Err()).NotTo(HaveOccurred())
+ Expect(len(keys.Val())).To(Equal(C * N))
+ })
+
+ It("should handle many keys 2", func() {
+ perform(C, func(id int) {
+ keys := []string{"non-existent-key"}
+ for i := 0; i < N; i++ {
+ key := fmt.Sprintf("keys.key-%d", i)
+ keys = append(keys, key)
+
+ err := client.Set(key, fmt.Sprintf("hello-%d", i), 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ keys = append(keys, "non-existent-key")
+
+ vals, err := client.MGet(keys...).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(vals)).To(Equal(N + 2))
+
+ for i := 0; i < N; i++ {
+ Expect(vals[i+1]).To(Equal(fmt.Sprintf("hello-%d", i)))
+ }
+
+ Expect(vals[0]).To(BeNil())
+ Expect(vals[N+1]).To(BeNil())
+ })
+ })
+
+ It("should handle big vals in Get", func() {
+ C, N = 4, 100
+
+ bigVal := bytes.Repeat([]byte{'*'}, 1<<17) // 128kb
+
+ err := client.Set("key", bigVal, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // Reconnect to get new connection.
+ Expect(client.Close()).To(BeNil())
+ client = redis.NewClient(redisOptions())
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ got, err := client.Get("key").Bytes()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(got).To(Equal(bigVal))
+ }
+ })
+ })
+
+ It("should handle big vals in Set", func() {
+ C, N = 4, 100
+
+ bigVal := bytes.Repeat([]byte{'*'}, 1<<17) // 128kb
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.Set("key", bigVal, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+ })
+
+ It("should select db", func() {
+ err := client.Set("db", 1, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ perform(C, func(id int) {
+ opt := redisOptions()
+ opt.DB = id
+ client := redis.NewClient(opt)
+ for i := 0; i < N; i++ {
+ err := client.Set("db", id, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ n, err := client.Get("db").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(id)))
+ }
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ n, err := client.Get("db").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(1)))
+ })
+
+ It("should select DB with read timeout", func() {
+ perform(C, func(id int) {
+ opt := redisOptions()
+ opt.DB = id
+ opt.ReadTimeout = time.Nanosecond
+ client := redis.NewClient(opt)
+
+ perform(C, func(id int) {
+ err := client.Ping().Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ err := client.Close()
+ Expect(err).NotTo(HaveOccurred())
+ })
+ })
+
+ It("should Watch/Unwatch", func() {
+ err := client.Set("key", "0", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ perform(C, func(id int) {
+ for i := 0; i < N; i++ {
+ err := client.Watch(func(tx *redis.Tx) error {
+ val, err := tx.Get("key").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).NotTo(Equal(redis.Nil))
+
+ num, err := strconv.ParseInt(val, 10, 64)
+ Expect(err).NotTo(HaveOccurred())
+
+ cmds, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Set("key", strconv.FormatInt(num+1, 10), 0)
+ return nil
+ })
+ Expect(cmds).To(HaveLen(1))
+ return err
+ }, "key")
+ if err == redis.TxFailedErr {
+ i--
+ continue
+ }
+ Expect(err).NotTo(HaveOccurred())
+ }
+ })
+
+ val, err := client.Get("key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal(int64(C * N)))
+ })
+
+ It("should Pipeline", func() {
+ perform(C, func(id int) {
+ pipe := client.Pipeline()
+ for i := 0; i < N; i++ {
+ pipe.Echo(fmt.Sprint(i))
+ }
+
+ cmds, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(N))
+
+ for i := 0; i < N; i++ {
+ Expect(cmds[i].(*redis.StringCmd).Val()).To(Equal(fmt.Sprint(i)))
+ }
+ })
+ })
+
+ It("should Pipeline", func() {
+ pipe := client.Pipeline()
+ perform(N, func(id int) {
+ pipe.Incr("key")
+ })
+
+ cmds, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(N))
+
+ n, err := client.Get("key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(N)))
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/redis.go b/vendor/github.com/go-redis/redis/redis.go
new file mode 100644
index 000000000..9812daf66
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/redis.go
@@ -0,0 +1,436 @@
+package redis
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+ "github.com/go-redis/redis/internal/pool"
+ "github.com/go-redis/redis/internal/proto"
+)
+
+// Redis nil reply, e.g. when a key does not exist.
+const Nil = internal.Nil
+
+func SetLogger(logger *log.Logger) {
+ internal.Logger = logger
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) conn() (*pool.Conn, bool, error) {
+ cn, isNew, err := c.connPool.Get()
+ if err != nil {
+ return nil, false, err
+ }
+
+ if !cn.Inited {
+ if err := c.initConn(cn); err != nil {
+ _ = c.connPool.Remove(cn)
+ return nil, false, err
+ }
+ }
+
+ return cn, isNew, nil
+}
+
+func (c *baseClient) putConn(cn *pool.Conn, err error) bool {
+ if internal.IsBadConn(err, false) {
+ _ = c.connPool.Remove(cn)
+ return false
+ }
+
+ _ = c.connPool.Put(cn)
+ return true
+}
+
+func (c *baseClient) initConn(cn *pool.Conn) error {
+ cn.Inited = true
+
+ if c.opt.Password == "" &&
+ c.opt.DB == 0 &&
+ !c.opt.ReadOnly &&
+ c.opt.OnConnect == nil {
+ return nil
+ }
+
+ // Temp client to initialize connection.
+ conn := &Conn{
+ baseClient: baseClient{
+ opt: c.opt,
+ connPool: pool.NewSingleConnPool(cn),
+ },
+ }
+ conn.setProcessor(conn.Process)
+
+ _, err := conn.Pipelined(func(pipe Pipeliner) error {
+ if c.opt.Password != "" {
+ pipe.Auth(c.opt.Password)
+ }
+
+ if c.opt.DB > 0 {
+ pipe.Select(c.opt.DB)
+ }
+
+ if c.opt.ReadOnly {
+ pipe.ReadOnly()
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(conn)
+ }
+ return nil
+}
+
+func (c *baseClient) Process(cmd Cmder) error {
+ if c.process != nil {
+ return c.process(cmd)
+ }
+ return c.defaultProcess(cmd)
+}
+
+// WrapProcess replaces the process func. It takes a function createWrapper
+// which is supplied by the user. createWrapper takes the old process func as
+// an input and returns the new wrapper process func. createWrapper should
+// call the old process func within the new process func.
+func (c *baseClient) WrapProcess(fn func(oldProcess func(cmd Cmder) error) func(cmd Cmder) error) {
+ c.process = fn(c.defaultProcess)
+}
+
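+// exampleWrapProcess is an illustrative sketch and not part of the upstream
+// go-redis code: it wraps the process func so every command is timed, and
+// calls the old process func inside the wrapper as required above.
+func exampleWrapProcess(client *Client) {
+	client.WrapProcess(func(old func(cmd Cmder) error) func(cmd Cmder) error {
+		return func(cmd Cmder) error {
+			start := time.Now()
+			err := old(cmd)
+			internal.Logf("command took %s", time.Since(start))
+			return err
+		}
+	})
+}
+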
+func (c *baseClient) defaultProcess(cmd Cmder) error {
+ for i := 0; i <= c.opt.MaxRetries; i++ {
+ if i > 0 {
+ time.Sleep(internal.RetryBackoff(i, c.opt.MaxRetryBackoff))
+ }
+
+ cn, _, err := c.conn()
+ if err != nil {
+ cmd.setErr(err)
+ if internal.IsRetryableError(err) {
+ continue
+ }
+ return err
+ }
+
+ cn.SetWriteTimeout(c.opt.WriteTimeout)
+ if err := writeCmd(cn, cmd); err != nil {
+ c.putConn(cn, err)
+ cmd.setErr(err)
+ if internal.IsRetryableError(err) {
+ continue
+ }
+ return err
+ }
+
+ cn.SetReadTimeout(c.cmdTimeout(cmd))
+ err = cmd.readReply(cn)
+ c.putConn(cn, err)
+ if err != nil && internal.IsRetryableError(err) {
+ continue
+ }
+
+ return err
+ }
+
+ return cmd.Err()
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ return *timeout
+ } else {
+ return c.opt.ReadTimeout
+ }
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+type pipelineProcessor func(*pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) pipelineExecer(p pipelineProcessor) pipelineExecer {
+ return func(cmds []Cmder) error {
+ var firstErr error
+ for i := 0; i <= c.opt.MaxRetries; i++ {
+ cn, _, err := c.conn()
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ canRetry, err := p(cn, cmds)
+ c.putConn(cn, err)
+ if err == nil {
+ return nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ if !canRetry || !internal.IsRetryableError(err) {
+ break
+ }
+ }
+ return firstErr
+ }
+}
+
+func (c *baseClient) pipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) {
+ cn.SetWriteTimeout(c.opt.WriteTimeout)
+ if err := writeCmd(cn, cmds...); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ // Set read timeout for all commands.
+ cn.SetReadTimeout(c.opt.ReadTimeout)
+ return pipelineReadCmds(cn, cmds)
+}
+
+func pipelineReadCmds(cn *pool.Conn, cmds []Cmder) (retry bool, firstErr error) {
+ for i, cmd := range cmds {
+ err := cmd.readReply(cn)
+ if err == nil {
+ continue
+ }
+ if i == 0 {
+ retry = true
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ return
+}
+
+func (c *baseClient) txPipelineProcessCmds(cn *pool.Conn, cmds []Cmder) (bool, error) {
+ cn.SetWriteTimeout(c.opt.WriteTimeout)
+ if err := txPipelineWriteMulti(cn, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ // Set read timeout for all commands.
+ cn.SetReadTimeout(c.opt.ReadTimeout)
+
+ if err := c.txPipelineReadQueued(cn, cmds); err != nil {
+ return false, err
+ }
+
+ _, err := pipelineReadCmds(cn, cmds)
+ return false, err
+}
+
+func txPipelineWriteMulti(cn *pool.Conn, cmds []Cmder) error {
+ multiExec := make([]Cmder, 0, len(cmds)+2)
+ multiExec = append(multiExec, NewStatusCmd("MULTI"))
+ multiExec = append(multiExec, cmds...)
+ multiExec = append(multiExec, NewSliceCmd("EXEC"))
+ return writeCmd(cn, multiExec...)
+}
+
+func (c *baseClient) txPipelineReadQueued(cn *pool.Conn, cmds []Cmder) error {
+ var firstErr error
+
+ // Parse queued replies.
+ var statusCmd StatusCmd
+ if err := statusCmd.readReply(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(cn)
+ if err != nil {
+ cmd.setErr(err)
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ }
+
+ // Parse number of replies.
+ line, err := cn.Rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ err := fmt.Errorf("redis: expected '*', but got line %q", line)
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more
+// underlying connections. It's safe for concurrent use by multiple
+// goroutines.
+type Client struct {
+ baseClient
+ cmdable
+}
+
+func newClient(opt *Options, pool pool.Pooler) *Client {
+ client := Client{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: pool,
+ },
+ }
+ client.setProcessor(client.Process)
+ return &client
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ opt.init()
+ return newClient(opt, newConnPool(opt))
+}
+
+func (c *Client) copy() *Client {
+ c2 := new(Client)
+ *c2 = *c
+ c2.setProcessor(c2.Process)
+ return c2
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ s := c.connPool.Stats()
+ return &PoolStats{
+ Requests: s.Requests,
+ Hits: s.Hits,
+ Timeouts: s.Timeouts,
+
+ TotalConns: s.TotalConns,
+ FreeConns: s.FreeConns,
+ }
+}
+
+func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().pipelined(fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.pipelineExecer(c.pipelineProcessCmds),
+ }
+ pipe.setProcessor(pipe.Process)
+ return &pipe
+}
+
+func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().pipelined(fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.pipelineExecer(c.txPipelineProcessCmds),
+ }
+ pipe.setProcessor(pipe.Process)
+ return &pipe
+}
+
+func (c *Client) pubSub() *PubSub {
+ return &PubSub{
+ base: baseClient{
+ opt: c.opt,
+ connPool: c.connPool,
+ },
+ }
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *Client) Subscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(channels...)
+ }
+ return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+// Conn is like Client, but its pool contains a single connection.
+type Conn struct {
+ baseClient
+ statefulCmdable
+}
+
+func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().pipelined(fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.pipelineExecer(c.pipelineProcessCmds),
+ }
+ pipe.setProcessor(pipe.Process)
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().pipelined(fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.pipelineExecer(c.txPipelineProcessCmds),
+ }
+ pipe.setProcessor(pipe.Process)
+ return &pipe
+}
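As a reading aid for the Client, Pipeline and TxPipeline entry points added in redis.go above, here is a minimal usage sketch; the address, DB and key values are illustrative and not part of the commit.

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Addr and DB are illustrative; any reachable Redis server works.
	client := redis.NewClient(&redis.Options{
		Addr: "localhost:6379",
		DB:   0,
	})
	defer client.Close()

	// Single command round trip.
	pong, err := client.Ping().Result()
	fmt.Println(pong, err)

	// Batch several commands into one round trip with Pipelined.
	cmds, err := client.Pipelined(func(pipe redis.Pipeliner) error {
		pipe.Set("key", "value", 0)
		pipe.Get("key")
		return nil
	})
	fmt.Println(len(cmds), err)
}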
diff --git a/vendor/github.com/go-redis/redis/redis_context.go b/vendor/github.com/go-redis/redis/redis_context.go
new file mode 100644
index 000000000..6ec811ca5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/redis_context.go
@@ -0,0 +1,35 @@
+// +build go1.7
+
+package redis
+
+import (
+ "context"
+
+ "github.com/go-redis/redis/internal/pool"
+)
+
+type baseClient struct {
+ connPool pool.Pooler
+ opt *Options
+
+ process func(Cmder) error
+ onClose func() error // hook called when client is closed
+
+ ctx context.Context
+}
+
+func (c *Client) Context() context.Context {
+ if c.ctx != nil {
+ return c.ctx
+ }
+ return context.Background()
+}
+
+func (c *Client) WithContext(ctx context.Context) *Client {
+ if ctx == nil {
+ panic("nil context")
+ }
+ c2 := c.copy()
+ c2.ctx = ctx
+ return c2
+}
diff --git a/vendor/github.com/go-redis/redis/redis_no_context.go b/vendor/github.com/go-redis/redis/redis_no_context.go
new file mode 100644
index 000000000..0752192f1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/redis_no_context.go
@@ -0,0 +1,15 @@
+// +build !go1.7
+
+package redis
+
+import (
+ "github.com/go-redis/redis/internal/pool"
+)
+
+type baseClient struct {
+ connPool pool.Pooler
+ opt *Options
+
+ process func(Cmder) error
+ onClose func() error // hook called when client is closed
+}
diff --git a/vendor/github.com/go-redis/redis/redis_test.go b/vendor/github.com/go-redis/redis/redis_test.go
new file mode 100644
index 000000000..49d3fb329
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/redis_test.go
@@ -0,0 +1,364 @@
+package redis_test
+
+import (
+ "bytes"
+ "net"
+ "time"
+
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Client", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ client.Close()
+ })
+
+ It("should Stringer", func() {
+ Expect(client.String()).To(Equal("Redis<:6380 db:15>"))
+ })
+
+ It("should ping", func() {
+ val, err := client.Ping().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+ })
+
+ It("should return pool stats", func() {
+ Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{}))
+ })
+
+ It("should support custom dialers", func() {
+ custom := redis.NewClient(&redis.Options{
+ Addr: ":1234",
+ Dialer: func() (net.Conn, error) {
+ return net.Dial("tcp", redisAddr)
+ },
+ })
+
+ val, err := custom.Ping().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("PONG"))
+ Expect(custom.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should close", func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ err := client.Ping().Err()
+ Expect(err).To(MatchError("redis: client is closed"))
+ })
+
+ It("should close pubsub without closing the client", func() {
+ pubsub := client.Subscribe()
+ Expect(pubsub.Close()).NotTo(HaveOccurred())
+
+ _, err := pubsub.Receive()
+ Expect(err).To(MatchError("redis: client is closed"))
+ Expect(client.Ping().Err()).NotTo(HaveOccurred())
+ })
+
+ It("should close Tx without closing the client", func() {
+ err := client.Watch(func(tx *redis.Tx) error {
+ _, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Ping()
+ return nil
+ })
+ return err
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(client.Ping().Err()).NotTo(HaveOccurred())
+ })
+
+ It("should close pipeline without closing the client", func() {
+ pipeline := client.Pipeline()
+ Expect(pipeline.Close()).NotTo(HaveOccurred())
+
+ pipeline.Ping()
+ _, err := pipeline.Exec()
+ Expect(err).To(MatchError("redis: client is closed"))
+
+ Expect(client.Ping().Err()).NotTo(HaveOccurred())
+ })
+
+ It("should close pubsub when client is closed", func() {
+ pubsub := client.Subscribe()
+ Expect(client.Close()).NotTo(HaveOccurred())
+
+ _, err := pubsub.Receive()
+ Expect(err).To(MatchError("redis: client is closed"))
+
+ Expect(pubsub.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should close pipeline when client is closed", func() {
+ pipeline := client.Pipeline()
+ Expect(client.Close()).NotTo(HaveOccurred())
+ Expect(pipeline.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should select DB", func() {
+ db2 := redis.NewClient(&redis.Options{
+ Addr: redisAddr,
+ DB: 2,
+ })
+ Expect(db2.FlushDB().Err()).NotTo(HaveOccurred())
+ Expect(db2.Get("db").Err()).To(Equal(redis.Nil))
+ Expect(db2.Set("db", 2, 0).Err()).NotTo(HaveOccurred())
+
+ n, err := db2.Get("db").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(2)))
+
+ Expect(client.Get("db").Err()).To(Equal(redis.Nil))
+
+ Expect(db2.FlushDB().Err()).NotTo(HaveOccurred())
+ Expect(db2.Close()).NotTo(HaveOccurred())
+ })
+
+ It("processes custom commands", func() {
+ cmd := redis.NewCmd("PING")
+ client.Process(cmd)
+
+ // Flush buffers.
+ Expect(client.Echo("hello").Err()).NotTo(HaveOccurred())
+
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.Val()).To(Equal("PONG"))
+ })
+
+ It("should retry command on network error", func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+
+ client = redis.NewClient(&redis.Options{
+ Addr: redisAddr,
+ MaxRetries: 1,
+ })
+
+ // Put bad connection in the pool.
+ cn, _, err := client.Pool().Get()
+ Expect(err).NotTo(HaveOccurred())
+
+ cn.SetNetConn(&badConn{})
+ err = client.Pool().Put(cn)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = client.Ping().Err()
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should retry with backoff", func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+
+ // use up all the available connections to force a fail
+ connectionHogClient := redis.NewClient(&redis.Options{
+ Addr: redisAddr,
+ MaxRetries: 1,
+ })
+ defer connectionHogClient.Close()
+
+ for i := 0; i <= 1002; i++ {
+ connectionHogClient.Pool().NewConn()
+ }
+
+ clientNoRetry := redis.NewClient(&redis.Options{
+ Addr: redisAddr,
+ PoolSize: 1,
+ MaxRetryBackoff: -1,
+ })
+ defer clientNoRetry.Close()
+
+ clientRetry := redis.NewClient(&redis.Options{
+ Addr: redisAddr,
+ MaxRetries: 5,
+ PoolSize: 1,
+ MaxRetryBackoff: 128 * time.Millisecond,
+ })
+ defer clientRetry.Close()
+
+ startNoRetry := time.Now()
+ err := clientNoRetry.Ping().Err()
+ Expect(err).To(HaveOccurred())
+ elapseNoRetry := time.Since(startNoRetry)
+
+ startRetry := time.Now()
+ err = clientRetry.Ping().Err()
+ Expect(err).To(HaveOccurred())
+ elapseRetry := time.Since(startRetry)
+
+ Expect(elapseRetry > elapseNoRetry).To(BeTrue())
+ })
+
+ It("should update conn.UsedAt on read/write", func() {
+ cn, _, err := client.Pool().Get()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cn.UsedAt).NotTo(BeZero())
+ createdAt := cn.UsedAt()
+
+ err = client.Pool().Put(cn)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cn.UsedAt().Equal(createdAt)).To(BeTrue())
+
+ err = client.Ping().Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ cn, _, err = client.Pool().Get()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cn).NotTo(BeNil())
+ Expect(cn.UsedAt().After(createdAt)).To(BeTrue())
+ })
+
+ It("should process command with special chars", func() {
+ set := client.Set("key", "hello1\r\nhello2\r\n", 0)
+ Expect(set.Err()).NotTo(HaveOccurred())
+ Expect(set.Val()).To(Equal("OK"))
+
+ get := client.Get("key")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello1\r\nhello2\r\n"))
+ })
+
+ It("should handle big vals", func() {
+ bigVal := bytes.Repeat([]byte{'*'}, 2e6)
+
+ err := client.Set("key", bigVal, 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // Reconnect to get new connection.
+ Expect(client.Close()).NotTo(HaveOccurred())
+ client = redis.NewClient(redisOptions())
+
+ got, err := client.Get("key").Bytes()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(got).To(Equal(bigVal))
+ })
+
+ It("should call WrapProcess", func() {
+ var wrapperFnCalled bool
+
+ client.WrapProcess(func(oldProcess func(redis.Cmder) error) func(redis.Cmder) error {
+ return func(cmd redis.Cmder) error {
+ wrapperFnCalled = true
+ return oldProcess(cmd)
+ }
+ })
+
+ Expect(client.Ping().Err()).NotTo(HaveOccurred())
+
+ Expect(wrapperFnCalled).To(BeTrue())
+ })
+})
+
+var _ = Describe("Client timeout", func() {
+ var opt *redis.Options
+ var client *redis.Client
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ testTimeout := func() {
+ It("Ping timeouts", func() {
+ err := client.Ping().Err()
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Pipeline timeouts", func() {
+ _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Ping()
+ return nil
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Subscribe timeouts", func() {
+ if opt.WriteTimeout == 0 {
+ return
+ }
+
+ pubsub := client.Subscribe()
+ defer pubsub.Close()
+
+ err := pubsub.Subscribe("_")
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Tx timeouts", func() {
+ err := client.Watch(func(tx *redis.Tx) error {
+ return tx.Ping().Err()
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+
+ It("Tx Pipeline timeouts", func() {
+ err := client.Watch(func(tx *redis.Tx) error {
+ _, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Ping()
+ return nil
+ })
+ return err
+ })
+ Expect(err).To(HaveOccurred())
+ Expect(err.(net.Error).Timeout()).To(BeTrue())
+ })
+ }
+
+ Context("read timeout", func() {
+ BeforeEach(func() {
+ opt = redisOptions()
+ opt.ReadTimeout = time.Nanosecond
+ opt.WriteTimeout = -1
+ client = redis.NewClient(opt)
+ })
+
+ testTimeout()
+ })
+
+ Context("write timeout", func() {
+ BeforeEach(func() {
+ opt = redisOptions()
+ opt.ReadTimeout = -1
+ opt.WriteTimeout = time.Nanosecond
+ client = redis.NewClient(opt)
+ })
+
+ testTimeout()
+ })
+})
+
+var _ = Describe("Client OnConnect", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ opt := redisOptions()
+ opt.DB = 0
+ opt.OnConnect = func(cn *redis.Conn) error {
+ return cn.ClientSetName("on_connect").Err()
+ }
+
+ client = redis.NewClient(opt)
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("calls OnConnect", func() {
+ name, err := client.ClientGetName().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(name).To(Equal("on_connect"))
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/result.go b/vendor/github.com/go-redis/redis/result.go
new file mode 100644
index 000000000..28cea5ca8
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/result.go
@@ -0,0 +1,140 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing
+func NewCmdResult(val interface{}, err error) *Cmd {
+ var cmd Cmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+ var cmd SliceCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing
+func NewStatusResult(val string, err error) *StatusCmd {
+ var cmd StatusCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing
+func NewIntResult(val int64, err error) *IntCmd {
+ var cmd IntCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+ var cmd DurationCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing
+func NewBoolResult(val bool, err error) *BoolCmd {
+ var cmd BoolCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing
+func NewStringResult(val string, err error) *StringCmd {
+ var cmd StringCmd
+ cmd.val = []byte(val)
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing
+func NewFloatResult(val float64, err error) *FloatCmd {
+ var cmd FloatCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+ var cmd StringSliceCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+ var cmd BoolSliceCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing
+func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
+ var cmd StringStringMapCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing
+func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
+ var cmd StringIntMapCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+ var cmd ZSliceCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+ var cmd ScanCmd
+ cmd.page = keys
+ cmd.cursor = cursor
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+ var cmd ClusterSlotsCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+ var cmd GeoLocationCmd
+ cmd.locations = val
+ cmd.setErr(err)
+ return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+ var cmd CommandsInfoCmd
+ cmd.val = val
+ cmd.setErr(err)
+ return &cmd
+}
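The New*Result constructors in result.go above exist so tests can hand code a canned command result instead of talking to a live server. A minimal hedged sketch of that idea follows; the fetch helper is hypothetical and only illustrates the pattern.

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

// fetch is a hypothetical function that normally receives the result of a
// real GET; in a test it can be handed a canned *StringCmd instead.
func fetch(cmd *redis.StringCmd) (string, error) {
	return cmd.Result()
}

func main() {
	// Stub a successful GET without a running Redis server.
	cmd := redis.NewStringResult("cached-value", nil)
	val, err := fetch(cmd)
	fmt.Println(val, err)
}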
diff --git a/vendor/github.com/go-redis/redis/ring.go b/vendor/github.com/go-redis/redis/ring.go
new file mode 100644
index 000000000..be9251096
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/ring.go
@@ -0,0 +1,458 @@
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "math/rand"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+ "github.com/go-redis/redis/internal/consistenthash"
+ "github.com/go-redis/redis/internal/hashtag"
+ "github.com/go-redis/redis/internal/pool"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+ // Map of name => host:port addresses of ring shards.
+ Addrs map[string]string
+
+ // Frequency of PING commands sent to check shards availability.
+ // Shard is considered down after 3 subsequent failed checks.
+ HeartbeatFrequency time.Duration
+
+ // Following options are copied from Options struct.
+
+ OnConnect func(*Conn) error
+
+ DB int
+ Password string
+
+ MaxRetries int
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+func (opt *RingOptions) init() {
+ if opt.HeartbeatFrequency == 0 {
+ opt.HeartbeatFrequency = 500 * time.Millisecond
+ }
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+ return &Options{
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Password: opt.Password,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ }
+}
+
+type ringShard struct {
+ Client *Client
+ down int32
+}
+
+func (shard *ringShard) String() string {
+ var state string
+ if shard.IsUp() {
+ state = "up"
+ } else {
+ state = "down"
+ }
+ return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+ const threshold = 3
+ return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+ return !shard.IsDown()
+}
+
+// Vote votes to set shard state and returns true if state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+ if up {
+ changed := shard.IsDown()
+ atomic.StoreInt32(&shard.down, 0)
+ return changed
+ }
+
+ if shard.IsDown() {
+ return false
+ }
+
+ atomic.AddInt32(&shard.down, 1)
+ return shard.IsDown()
+}
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+ cmdable
+
+ opt *RingOptions
+ nreplicas int
+
+ mu sync.RWMutex
+ hash *consistenthash.Map
+ shards map[string]*ringShard
+
+ cmdsInfoOnce internal.Once
+ cmdsInfo map[string]*CommandInfo
+
+ closed bool
+}
+
+func NewRing(opt *RingOptions) *Ring {
+ const nreplicas = 100
+ opt.init()
+ ring := &Ring{
+ opt: opt,
+ nreplicas: nreplicas,
+
+ hash: consistenthash.New(nreplicas, nil),
+ shards: make(map[string]*ringShard),
+ }
+ ring.setProcessor(ring.Process)
+ for name, addr := range opt.Addrs {
+ clopt := opt.clientOptions()
+ clopt.Addr = addr
+ ring.addClient(name, NewClient(clopt))
+ }
+ go ring.heartbeat()
+ return ring
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+ return c.opt
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+ var acc PoolStats
+ for _, shard := range c.shards {
+ s := shard.Client.connPool.Stats()
+ acc.Requests += s.Requests
+ acc.Hits += s.Hits
+ acc.Timeouts += s.Timeouts
+ acc.TotalConns += s.TotalConns
+ acc.FreeConns += s.FreeConns
+ }
+ return &acc
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shardByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.Subscribe(channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shardByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.PSubscribe(channels...)
+}
+
+// ForEachShard concurrently calls the fn on each live shard in the ring.
+// It returns the first error if any.
+func (c *Ring) ForEachShard(fn func(client *Client) error) error {
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, shard := range c.shards {
+ if shard.IsDown() {
+ continue
+ }
+
+ wg.Add(1)
+ go func(shard *ringShard) {
+ defer wg.Done()
+ err := fn(shard.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(shard)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+func (c *Ring) cmdInfo(name string) *CommandInfo {
+ err := c.cmdsInfoOnce.Do(func() error {
+ var firstErr error
+ for _, shard := range c.shards {
+ cmdsInfo, err := shard.Client.Command().Result()
+ if err == nil {
+ c.cmdsInfo = cmdsInfo
+ return nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ return firstErr
+ })
+ if err != nil {
+ return nil
+ }
+ return c.cmdsInfo[name]
+}
+
+func (c *Ring) addClient(name string, cl *Client) {
+ c.mu.Lock()
+ c.hash.Add(name)
+ c.shards[name] = &ringShard{Client: cl}
+ c.mu.Unlock()
+}
+
+func (c *Ring) shardByKey(key string) (*ringShard, error) {
+ key = hashtag.Key(key)
+
+ c.mu.RLock()
+
+ if c.closed {
+ c.mu.RUnlock()
+ return nil, pool.ErrClosed
+ }
+
+ name := c.hash.Get(key)
+ if name == "" {
+ c.mu.RUnlock()
+ return nil, errRingShardsDown
+ }
+
+ shard := c.shards[name]
+ c.mu.RUnlock()
+ return shard, nil
+}
+
+func (c *Ring) randomShard() (*ringShard, error) {
+ return c.shardByKey(strconv.Itoa(rand.Int()))
+}
+
+func (c *Ring) shardByName(name string) (*ringShard, error) {
+ if name == "" {
+ return c.randomShard()
+ }
+
+ c.mu.RLock()
+ shard := c.shards[name]
+ c.mu.RUnlock()
+ return shard, nil
+}
+
+func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ firstKey := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+ return c.shardByKey(firstKey)
+}
+
+func (c *Ring) Process(cmd Cmder) error {
+ shard, err := c.cmdShard(cmd)
+ if err != nil {
+ cmd.setErr(err)
+ return err
+ }
+ return shard.Client.Process(cmd)
+}
+
+// rebalance removes dead shards from the Ring.
+func (c *Ring) rebalance() {
+ hash := consistenthash.New(c.nreplicas, nil)
+ for name, shard := range c.shards {
+ if shard.IsUp() {
+ hash.Add(name)
+ }
+ }
+
+ c.mu.Lock()
+ c.hash = hash
+ c.mu.Unlock()
+}
+
+// heartbeat monitors state of each shard in the ring.
+func (c *Ring) heartbeat() {
+ ticker := time.NewTicker(c.opt.HeartbeatFrequency)
+ defer ticker.Stop()
+ for range ticker.C {
+ var rebalance bool
+
+ c.mu.RLock()
+
+ if c.closed {
+ c.mu.RUnlock()
+ break
+ }
+
+ for _, shard := range c.shards {
+ err := shard.Client.Ping().Err()
+ if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
+ internal.Logf("ring shard state changed: %s", shard)
+ rebalance = true
+ }
+ }
+
+ c.mu.RUnlock()
+
+ if rebalance {
+ c.rebalance()
+ }
+ }
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, shard := range c.shards {
+ if err := shard.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ c.hash = nil
+ c.shards = nil
+
+ return firstErr
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.pipelineExec,
+ }
+ pipe.setProcessor(pipe.Process)
+ return &pipe
+}
+
+func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().pipelined(fn)
+}
+
+func (c *Ring) pipelineExec(cmds []Cmder) (firstErr error) {
+ cmdsMap := make(map[string][]Cmder)
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ name := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+ if name != "" {
+ name = c.hash.Get(hashtag.Key(name))
+ }
+ cmdsMap[name] = append(cmdsMap[name], cmd)
+ }
+
+ for i := 0; i <= c.opt.MaxRetries; i++ {
+ var failedCmdsMap map[string][]Cmder
+
+ for name, cmds := range cmdsMap {
+ shard, err := c.shardByName(name)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ cn, _, err := shard.Client.conn()
+ if err != nil {
+ setCmdsErr(cmds, err)
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ canRetry, err := shard.Client.pipelineProcessCmds(cn, cmds)
+ shard.Client.putConn(cn, err)
+ if err == nil {
+ continue
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ if canRetry && internal.IsRetryableError(err) {
+ if failedCmdsMap == nil {
+ failedCmdsMap = make(map[string][]Cmder)
+ }
+ failedCmdsMap[name] = cmds
+ }
+ }
+
+ if len(failedCmdsMap) == 0 {
+ break
+ }
+ cmdsMap = failedCmdsMap
+ }
+
+ return firstErr
+}
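Wiring up the Ring added above takes only a map of shard names to addresses; a minimal sketch, with illustrative shard names, addresses and keys:

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Keys are spread across the named shards by consistent hashing.
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "localhost:6379",
			"shard2": "localhost:6380",
		},
	})
	defer ring.Close()

	if err := ring.Set("key", "value", 0).Err(); err != nil {
		fmt.Println("set failed:", err)
	}
	fmt.Println(ring.Get("key").Val())
}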
diff --git a/vendor/github.com/go-redis/redis/ring_test.go b/vendor/github.com/go-redis/redis/ring_test.go
new file mode 100644
index 000000000..0cad4298b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/ring_test.go
@@ -0,0 +1,193 @@
+package redis_test
+
+import (
+ "crypto/rand"
+ "fmt"
+ "time"
+
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Redis Ring", func() {
+ const heartbeat = 100 * time.Millisecond
+
+ var ring *redis.Ring
+
+ setRingKeys := func() {
+ for i := 0; i < 100; i++ {
+ err := ring.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ }
+
+ BeforeEach(func() {
+ opt := redisRingOptions()
+ opt.HeartbeatFrequency = heartbeat
+ ring = redis.NewRing(opt)
+
+ err := ring.ForEachShard(func(cl *redis.Client) error {
+ return cl.FlushDB().Err()
+ })
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(ring.Close()).NotTo(HaveOccurred())
+ })
+
+ It("distributes keys", func() {
+ setRingKeys()
+
+ // Both shards should have some keys now.
+ Expect(ringShard1.Info().Val()).To(ContainSubstring("keys=57"))
+ Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=43"))
+ })
+
+ It("distributes keys when using EVAL", func() {
+ script := redis.NewScript(`
+ local r = redis.call('SET', KEYS[1], ARGV[1])
+ return r
+ `)
+
+ var key string
+ for i := 0; i < 100; i++ {
+ key = fmt.Sprintf("key%d", i)
+ err := script.Run(ring, []string{key}, "value").Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ Expect(ringShard1.Info().Val()).To(ContainSubstring("keys=57"))
+ Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=43"))
+ })
+
+ It("uses single shard when one of the shards is down", func() {
+ // Stop ringShard2.
+ Expect(ringShard2.Close()).NotTo(HaveOccurred())
+
+ // Ring needs 3 * heartbeat time to detect that node is down.
+ // Give it more to be sure.
+ time.Sleep(2 * 3 * heartbeat)
+
+ setRingKeys()
+
+ // RingShard1 should have all keys.
+ Expect(ringShard1.Info().Val()).To(ContainSubstring("keys=100"))
+
+ // Start ringShard2.
+ var err error
+ ringShard2, err = startRedis(ringShard2Port)
+ Expect(err).NotTo(HaveOccurred())
+
+ // Wait for ringShard2 to come up.
+ Eventually(func() error {
+ return ringShard2.Ping().Err()
+ }, "1s").ShouldNot(HaveOccurred())
+
+ // Ring needs heartbeat time to detect that node is up.
+ // Give it more to be sure.
+ time.Sleep(heartbeat + heartbeat)
+
+ setRingKeys()
+
+ // RingShard2 should have its keys.
+ Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=43"))
+ })
+
+ It("supports hash tags", func() {
+ for i := 0; i < 100; i++ {
+ err := ring.Set(fmt.Sprintf("key%d{tag}", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ Expect(ringShard1.Info().Val()).ToNot(ContainSubstring("keys="))
+ Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=100"))
+ })
+
+ Describe("pipeline", func() {
+ It("distributes keys", func() {
+ pipe := ring.Pipeline()
+ for i := 0; i < 100; i++ {
+ err := pipe.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+ cmds, err := pipe.Exec()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(100))
+ Expect(pipe.Close()).NotTo(HaveOccurred())
+
+ for _, cmd := range cmds {
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ Expect(cmd.(*redis.StatusCmd).Val()).To(Equal("OK"))
+ }
+
+ // Both shards should have some keys now.
+ Expect(ringShard1.Info().Val()).To(ContainSubstring("keys=57"))
+ Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=43"))
+ })
+
+ It("is consistent with ring", func() {
+ var keys []string
+ for i := 0; i < 100; i++ {
+ key := make([]byte, 64)
+ _, err := rand.Read(key)
+ Expect(err).NotTo(HaveOccurred())
+ keys = append(keys, string(key))
+ }
+
+ _, err := ring.Pipelined(func(pipe redis.Pipeliner) error {
+ for _, key := range keys {
+ pipe.Set(key, "value", 0).Err()
+ }
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ for _, key := range keys {
+ val, err := ring.Get(key).Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("value"))
+ }
+ })
+
+ It("supports hash tags", func() {
+ _, err := ring.Pipelined(func(pipe redis.Pipeliner) error {
+ for i := 0; i < 100; i++ {
+ pipe.Set(fmt.Sprintf("key%d{tag}", i), "value", 0).Err()
+ }
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(ringShard1.Info().Val()).ToNot(ContainSubstring("keys="))
+ Expect(ringShard2.Info().Val()).To(ContainSubstring("keys=100"))
+ })
+ })
+})
+
+var _ = Describe("empty Redis Ring", func() {
+ var ring *redis.Ring
+
+ BeforeEach(func() {
+ ring = redis.NewRing(&redis.RingOptions{})
+ })
+
+ AfterEach(func() {
+ Expect(ring.Close()).NotTo(HaveOccurred())
+ })
+
+ It("returns an error", func() {
+ err := ring.Ping().Err()
+ Expect(err).To(MatchError("redis: all ring shards are down"))
+ })
+
+ It("pipeline returns an error", func() {
+ _, err := ring.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Ping()
+ return nil
+ })
+ Expect(err).To(MatchError("redis: all ring shards are down"))
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/script.go b/vendor/github.com/go-redis/redis/script.go
new file mode 100644
index 000000000..74135f5a5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/script.go
@@ -0,0 +1,62 @@
+package redis
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+type scripter interface {
+ Eval(script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(scripts ...string) *BoolSliceCmd
+ ScriptLoad(script string) *StringCmd
+}
+
+var _ scripter = (*Client)(nil)
+var _ scripter = (*Ring)(nil)
+var _ scripter = (*ClusterClient)(nil)
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+func (s *Script) Load(c scripter) *StringCmd {
+ return c.ScriptLoad(s.src)
+}
+
+func (s *Script) Exists(c scripter) *BoolSliceCmd {
+ return c.ScriptExists(s.src)
+}
+
+func (s *Script) Eval(c scripter, keys []string, args ...interface{}) *Cmd {
+ return c.Eval(s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(c scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalSha(s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not exist
+// it is retried using EVAL.
+func (s *Script) Run(c scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalSha(c, keys, args...)
+ if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ return s.Eval(c, keys, args...)
+ }
+ return r
+}
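Script.Run above tries EVALSHA first and falls back to EVAL when the server reports NOSCRIPT. A hedged usage sketch follows; the address, script body and key name are illustrative.

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // illustrative address
	defer client.Close()

	// NewScript computes the SHA1 once; Run reuses it via EVALSHA.
	incrBy := redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)
	n, err := incrBy.Run(client, []string{"counter"}, 2).Result()
	fmt.Println(n, err)
}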
diff --git a/vendor/github.com/go-redis/redis/sentinel.go b/vendor/github.com/go-redis/redis/sentinel.go
new file mode 100644
index 000000000..ed6e7ffb3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/sentinel.go
@@ -0,0 +1,333 @@
+package redis
+
+import (
+ "errors"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+ "github.com/go-redis/redis/internal/pool"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+ // The master name.
+ MasterName string
+ // A seed list of host:port addresses of sentinel nodes.
+ SentinelAddrs []string
+
+ // Following options are copied from Options struct.
+
+ OnConnect func(*Conn) error
+
+ Password string
+ DB int
+
+ MaxRetries int
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+func (opt *FailoverOptions) options() *Options {
+ return &Options{
+ Addr: "FailoverClient",
+
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ }
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ opt := failoverOpt.options()
+ opt.init()
+
+ failover := &sentinelFailover{
+ masterName: failoverOpt.MasterName,
+ sentinelAddrs: failoverOpt.SentinelAddrs,
+
+ opt: opt,
+ }
+
+ client := Client{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: failover.Pool(),
+
+ onClose: func() error {
+ return failover.Close()
+ },
+ },
+ }
+ client.setProcessor(client.Process)
+
+ return &client
+}
+
+//------------------------------------------------------------------------------
+
+type sentinelClient struct {
+ cmdable
+ baseClient
+}
+
+func newSentinel(opt *Options) *sentinelClient {
+ opt.init()
+ client := sentinelClient{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: newConnPool(opt),
+ },
+ }
+ client.cmdable = cmdable{client.Process}
+ return &client
+}
+
+func (c *sentinelClient) PubSub() *PubSub {
+ return &PubSub{
+ base: baseClient{
+ opt: c.opt,
+ connPool: c.connPool,
+ },
+ }
+}
+
+func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *sentinelClient) Sentinels(name string) *SliceCmd {
+ cmd := NewSliceCmd("SENTINEL", "sentinels", name)
+ c.Process(cmd)
+ return cmd
+}
+
+type sentinelFailover struct {
+ sentinelAddrs []string
+
+ opt *Options
+
+ pool *pool.ConnPool
+ poolOnce sync.Once
+
+ mu sync.RWMutex
+ masterName string
+ _masterAddr string
+ sentinel *sentinelClient
+}
+
+func (d *sentinelFailover) Close() error {
+ return d.resetSentinel()
+}
+
+func (d *sentinelFailover) dial() (net.Conn, error) {
+ addr, err := d.MasterAddr()
+ if err != nil {
+ return nil, err
+ }
+ return net.DialTimeout("tcp", addr, d.opt.DialTimeout)
+}
+
+func (d *sentinelFailover) Pool() *pool.ConnPool {
+ d.poolOnce.Do(func() {
+ d.opt.Dialer = d.dial
+ d.pool = newConnPool(d.opt)
+ })
+ return d.pool
+}
+
+func (d *sentinelFailover) MasterAddr() (string, error) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ addr, err := d.masterAddr()
+ if err != nil {
+ return "", err
+ }
+
+ if d._masterAddr != addr {
+ d.switchMaster(addr)
+ }
+
+ return addr, nil
+}
+
+func (d *sentinelFailover) masterAddr() (string, error) {
+ // Try last working sentinel.
+ if d.sentinel != nil {
+ addr, err := d.sentinel.GetMasterAddrByName(d.masterName).Result()
+ if err == nil {
+ addr := net.JoinHostPort(addr[0], addr[1])
+ internal.Logf("sentinel: master=%q addr=%q", d.masterName, addr)
+ return addr, nil
+ }
+
+ internal.Logf("sentinel: GetMasterAddrByName name=%q failed: %s", d.masterName, err)
+ d._resetSentinel()
+ }
+
+ for i, sentinelAddr := range d.sentinelAddrs {
+ sentinel := newSentinel(&Options{
+ Addr: sentinelAddr,
+
+ DialTimeout: d.opt.DialTimeout,
+ ReadTimeout: d.opt.ReadTimeout,
+ WriteTimeout: d.opt.WriteTimeout,
+
+ PoolSize: d.opt.PoolSize,
+ PoolTimeout: d.opt.PoolTimeout,
+ IdleTimeout: d.opt.IdleTimeout,
+ })
+
+ masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
+ if err != nil {
+ internal.Logf("sentinel: GetMasterAddrByName master=%q failed: %s", d.masterName, err)
+ sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]
+ d.setSentinel(sentinel)
+
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels are unreachable")
+}
+
+func (d *sentinelFailover) switchMaster(masterAddr string) {
+ internal.Logf(
+ "sentinel: new master=%q addr=%q",
+ d.masterName, masterAddr,
+ )
+ _ = d.Pool().Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != masterAddr
+ })
+ d._masterAddr = masterAddr
+}
+
+func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) {
+ d.discoverSentinels(sentinel)
+ d.sentinel = sentinel
+ go d.listen(sentinel)
+}
+
+func (d *sentinelFailover) resetSentinel() error {
+ var err error
+ d.mu.Lock()
+ if d.sentinel != nil {
+ err = d._resetSentinel()
+ }
+ d.mu.Unlock()
+ return err
+}
+
+func (d *sentinelFailover) _resetSentinel() error {
+ err := d.sentinel.Close()
+ d.sentinel = nil
+ return err
+}
+
+func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) {
+ sentinels, err := sentinel.Sentinels(d.masterName).Result()
+ if err != nil {
+ internal.Logf("sentinel: Sentinels master=%q failed: %s", d.masterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ vals := sentinel.([]interface{})
+ for i := 0; i < len(vals); i += 2 {
+ key := vals[i].(string)
+ if key == "name" {
+ sentinelAddr := vals[i+1].(string)
+ if !contains(d.sentinelAddrs, sentinelAddr) {
+ internal.Logf(
+ "sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, d.masterName,
+ )
+ d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+ }
+}
+
+func (d *sentinelFailover) listen(sentinel *sentinelClient) {
+ var pubsub *PubSub
+ for {
+ if pubsub == nil {
+ pubsub = sentinel.PubSub()
+
+ if err := pubsub.Subscribe("+switch-master"); err != nil {
+ internal.Logf("sentinel: Subscribe failed: %s", err)
+ pubsub.Close()
+ d.resetSentinel()
+ return
+ }
+ }
+
+ msg, err := pubsub.ReceiveMessage()
+ if err != nil {
+ internal.Logf("sentinel: ReceiveMessage failed: %s", err)
+ pubsub.Close()
+ d.resetSentinel()
+ return
+ }
+
+ switch msg.Channel {
+ case "+switch-master":
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != d.masterName {
+ internal.Logf("sentinel: ignore addr for master=%q", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+
+ d.mu.Lock()
+ if d._masterAddr != addr {
+ d.switchMaster(addr)
+ }
+ d.mu.Unlock()
+ }
+ }
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
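NewFailoverClient above resolves the current master through Sentinel and follows +switch-master announcements. A minimal sketch, assuming a master named "mymaster" and a local sentinel (both values illustrative):

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// MasterName and SentinelAddrs must match the target deployment's
	// sentinel configuration; these values are placeholders.
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"localhost:26379"},
	})
	defer client.Close()

	fmt.Println(client.Ping().Result())
}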
diff --git a/vendor/github.com/go-redis/redis/sentinel_test.go b/vendor/github.com/go-redis/redis/sentinel_test.go
new file mode 100644
index 000000000..c67713cd0
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/sentinel_test.go
@@ -0,0 +1,88 @@
+package redis_test
+
+import (
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Sentinel", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: []string{":" + sentinelPort},
+ })
+ Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should facilitate failover", func() {
+ // Set value on master.
+ err := client.Set("foo", "master", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ // Verify.
+ val, err := sentinelMaster.Get("foo").Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(val).To(Equal("master"))
+
+ // Create subscription.
+ ch := client.Subscribe("foo").Channel()
+
+ // Wait until replicated.
+ Eventually(func() string {
+ return sentinelSlave1.Get("foo").Val()
+ }, "1s", "100ms").Should(Equal("master"))
+ Eventually(func() string {
+ return sentinelSlave2.Get("foo").Val()
+ }, "1s", "100ms").Should(Equal("master"))
+
+ // Wait until slaves are picked up by sentinel.
+ Eventually(func() string {
+ return sentinel.Info().Val()
+ }, "10s", "100ms").Should(ContainSubstring("slaves=2"))
+
+ // Kill master.
+ sentinelMaster.Shutdown()
+ Eventually(func() error {
+ return sentinelMaster.Ping().Err()
+ }, "5s", "100ms").Should(HaveOccurred())
+
+ // Wait for Redis sentinel to elect new master.
+ Eventually(func() string {
+ return sentinelSlave1.Info().Val() + sentinelSlave2.Info().Val()
+ }, "30s", "1s").Should(ContainSubstring("role:master"))
+
+ // Check that client picked up new master.
+ Eventually(func() error {
+ return client.Get("foo").Err()
+ }, "5s", "100ms").ShouldNot(HaveOccurred())
+
+ // Publish message to check if subscription is renewed.
+ err = client.Publish("foo", "hello").Err()
+ Expect(err).NotTo(HaveOccurred())
+
+ var msg *redis.Message
+ Eventually(ch).Should(Receive(&msg))
+ Expect(msg.Channel).To(Equal("foo"))
+ Expect(msg.Payload).To(Equal("hello"))
+ })
+
+ It("supports DB selection", func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+
+ client = redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: sentinelName,
+ SentinelAddrs: []string{":" + sentinelPort},
+ DB: 1,
+ })
+ err := client.Ping().Err()
+ Expect(err).NotTo(HaveOccurred())
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/testdata/redis.conf b/vendor/github.com/go-redis/redis/testdata/redis.conf
new file mode 100644
index 000000000..235b2954a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/testdata/redis.conf
@@ -0,0 +1,10 @@
+# Minimal redis.conf
+
+port 6379
+daemonize no
+dir .
+save ""
+appendonly yes
+cluster-config-file nodes.conf
+cluster-node-timeout 30000
+maxclients 1001
\ No newline at end of file
diff --git a/vendor/github.com/go-redis/redis/tx.go b/vendor/github.com/go-redis/redis/tx.go
new file mode 100644
index 000000000..5ef89619b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/tx.go
@@ -0,0 +1,96 @@
+package redis
+
+import (
+ "github.com/go-redis/redis/internal"
+ "github.com/go-redis/redis/internal/pool"
+)
+
+// Redis transaction failed.
+const TxFailedErr = internal.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+// If you don't need WATCH it is better to use Pipeline.
+type Tx struct {
+ statefulCmdable
+ baseClient
+}
+
+func (c *Client) newTx() *Tx {
+ tx := Tx{
+ baseClient: baseClient{
+ opt: c.opt,
+ connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true),
+ },
+ }
+ tx.setProcessor(tx.Process)
+ return &tx
+}
+
+func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
+ tx := c.newTx()
+ if len(keys) > 0 {
+ if err := tx.Watch(keys...).Err(); err != nil {
+ _ = tx.Close()
+ return err
+ }
+ }
+ firstErr := fn(tx)
+ if err := tx.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close() error {
+ _ = c.Unwatch().Err()
+ return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "WATCH"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "UNWATCH"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Tx) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.pipelineExecer(c.txPipelineProcessCmds),
+ }
+ pipe.setProcessor(pipe.Process)
+ return &pipe
+}
+
+// Pipelined executes commands queued in the fn in a transaction
+// and restores the connection state to normal.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns list of commands. If transaction fails
+// TxFailedErr is returned. Otherwise Exec returns error of the first
+// failed command or nil.
+func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().pipelined(fn)
+}
diff --git a/vendor/github.com/go-redis/redis/tx_test.go b/vendor/github.com/go-redis/redis/tx_test.go
new file mode 100644
index 000000000..de597ff06
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/tx_test.go
@@ -0,0 +1,151 @@
+package redis_test
+
+import (
+ "strconv"
+ "sync"
+
+ "github.com/go-redis/redis"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Tx", func() {
+ var client *redis.Client
+
+ BeforeEach(func() {
+ client = redis.NewClient(redisOptions())
+ Expect(client.FlushDB().Err()).NotTo(HaveOccurred())
+ })
+
+ AfterEach(func() {
+ Expect(client.Close()).NotTo(HaveOccurred())
+ })
+
+ It("should Watch", func() {
+ var incr func(string) error
+
+ // Transactionally increments key using GET and SET commands.
+ incr = func(key string) error {
+ err := client.Watch(func(tx *redis.Tx) error {
+ n, err := tx.Get(key).Int64()
+ if err != nil && err != redis.Nil {
+ return err
+ }
+
+ _, err = tx.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Set(key, strconv.FormatInt(n+1, 10), 0)
+ return nil
+ })
+ return err
+ }, key)
+ if err == redis.TxFailedErr {
+ return incr(key)
+ }
+ return err
+ }
+
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ defer GinkgoRecover()
+ defer wg.Done()
+
+ err := incr("key")
+ Expect(err).NotTo(HaveOccurred())
+ }()
+ }
+ wg.Wait()
+
+ n, err := client.Get("key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(n).To(Equal(int64(100)))
+ })
+
+ It("should discard", func() {
+ err := client.Watch(func(tx *redis.Tx) error {
+ cmds, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Set("key1", "hello1", 0)
+ pipe.Discard()
+ pipe.Set("key2", "hello2", 0)
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cmds).To(HaveLen(1))
+ return err
+ }, "key1", "key2")
+ Expect(err).NotTo(HaveOccurred())
+
+ get := client.Get("key1")
+ Expect(get.Err()).To(Equal(redis.Nil))
+ Expect(get.Val()).To(Equal(""))
+
+ get = client.Get("key2")
+ Expect(get.Err()).NotTo(HaveOccurred())
+ Expect(get.Val()).To(Equal("hello2"))
+ })
+
+ It("returns no error when there are no commands", func() {
+ err := client.Watch(func(tx *redis.Tx) error {
+ _, err := tx.Pipelined(func(redis.Pipeliner) error { return nil })
+ return err
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ v, err := client.Ping().Result()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(v).To(Equal("PONG"))
+ })
+
+ It("should exec bulks", func() {
+ const N = 20000
+
+ err := client.Watch(func(tx *redis.Tx) error {
+ cmds, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
+ for i := 0; i < N; i++ {
+ pipe.Incr("key")
+ }
+ return nil
+ })
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(cmds)).To(Equal(N))
+ for _, cmd := range cmds {
+ Expect(cmd.Err()).NotTo(HaveOccurred())
+ }
+ return err
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ num, err := client.Get("key").Int64()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(num).To(Equal(int64(N)))
+ })
+
+ It("should recover from bad connection", func() {
+ // Put bad connection in the pool.
+ cn, _, err := client.Pool().Get()
+ Expect(err).NotTo(HaveOccurred())
+
+ cn.SetNetConn(&badConn{})
+ err = client.Pool().Put(cn)
+ Expect(err).NotTo(HaveOccurred())
+
+ do := func() error {
+ err := client.Watch(func(tx *redis.Tx) error {
+ _, err := tx.Pipelined(func(pipe redis.Pipeliner) error {
+ pipe.Ping()
+ return nil
+ })
+ return err
+ })
+ return err
+ }
+
+ err = do()
+ Expect(err).To(MatchError("bad connection"))
+
+ err = do()
+ Expect(err).NotTo(HaveOccurred())
+ })
+})
diff --git a/vendor/github.com/go-redis/redis/universal.go b/vendor/github.com/go-redis/redis/universal.go
new file mode 100644
index 000000000..02ed51abd
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/universal.go
@@ -0,0 +1,135 @@
+package redis
+
+import "time"
+
+// UniversalOptions information is required by UniversalClient to establish
+// connections.
+type UniversalOptions struct {
+ // Either a single address or a seed list of host:port addresses
+ // of cluster/sentinel nodes.
+ Addrs []string
+
+ // The sentinel master name.
+ // Only failover clients.
+ MasterName string
+
+ // Database to be selected after connecting to the server.
+ // Only single-node and failover clients.
+ DB int
+
+ // Enables read only queries on slave nodes.
+ // Only cluster and single-node clients.
+ ReadOnly bool
+
+ // Only cluster clients.
+
+ MaxRedirects int
+ RouteByLatency bool
+
+ // Common options
+
+ MaxRetries int
+ Password string
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ PoolSize int
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+func (o *UniversalOptions) cluster() *ClusterOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:6379"}
+ }
+
+ return &ClusterOptions{
+ Addrs: o.Addrs,
+ MaxRedirects: o.MaxRedirects,
+ RouteByLatency: o.RouteByLatency,
+ ReadOnly: o.ReadOnly,
+
+ MaxRetries: o.MaxRetries,
+ Password: o.Password,
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+ }
+}
+
+func (o *UniversalOptions) failover() *FailoverOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:26379"}
+ }
+
+ return &FailoverOptions{
+ SentinelAddrs: o.Addrs,
+ MasterName: o.MasterName,
+ DB: o.DB,
+
+ MaxRetries: o.MaxRetries,
+ Password: o.Password,
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+ }
+}
+
+func (o *UniversalOptions) simple() *Options {
+ addr := "127.0.0.1:6379"
+ if len(o.Addrs) > 0 {
+ addr = o.Addrs[0]
+ }
+
+ return &Options{
+ Addr: addr,
+ DB: o.DB,
+ ReadOnly: o.ReadOnly,
+
+ MaxRetries: o.MaxRetries,
+ Password: o.Password,
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+ }
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which - based on the provided options -
+// can connect to clusters, sentinel-backed failover instances, or simple
+// single-instance servers. This can be useful for testing cluster-specific
+// applications locally.
+type UniversalClient interface {
+ Cmdable
+ Process(cmd Cmder) error
+ Close() error
+}
+
+// NewUniversalClient returns a new multi client. The type of client returned depends
+// on the following three conditions:
+//
+// 1. if a MasterName is passed a sentinel-backed FailoverClient will be returned
+// 2. if the number of Addrs is two or more, a ClusterClient will be returned
+// 3. otherwise, a single-node redis Client will be returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+ if opts.MasterName != "" {
+ return NewFailoverClient(opts.failover())
+ } else if len(opts.Addrs) > 1 {
+ return NewClusterClient(opts.cluster())
+ }
+ return NewClient(opts.simple())
+}
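NewUniversalClient picks the concrete client from the options exactly as the comment above describes. A small sketch, with an illustrative address:

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// One address and no MasterName, so a single-node Client is returned.
	// Several Addrs would yield a ClusterClient; setting MasterName would
	// yield a sentinel-backed failover client.
	client := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"},
	})
	defer client.Close()

	fmt.Println(client.Ping().Result())
}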
diff --git a/vendor/github.com/go-redis/redis/universal_test.go b/vendor/github.com/go-redis/redis/universal_test.go
new file mode 100644
index 000000000..2a0850dea
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/universal_test.go
@@ -0,0 +1,41 @@
+package redis_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-redis/redis"
+)
+
+var _ = Describe("UniversalClient", func() {
+ var client redis.UniversalClient
+
+ AfterEach(func() {
+ if client != nil {
+ Expect(client.Close()).To(Succeed())
+ }
+ })
+
+ It("should connect to failover servers", func() {
+ client = redis.NewUniversalClient(&redis.UniversalOptions{
+ MasterName: sentinelName,
+ Addrs: []string{":" + sentinelPort},
+ })
+ Expect(client.Ping().Err()).NotTo(HaveOccurred())
+ })
+
+ It("should connect to simple servers", func() {
+ client = redis.NewUniversalClient(&redis.UniversalOptions{
+ Addrs: []string{redisAddr},
+ })
+ Expect(client.Ping().Err()).NotTo(HaveOccurred())
+ })
+
+ It("should connect to clusters", func() {
+ client = redis.NewUniversalClient(&redis.UniversalOptions{
+ Addrs: cluster.addrs(),
+ })
+ Expect(client.Ping().Err()).NotTo(HaveOccurred())
+ })
+
+})