Diffstat (limited to 'vendor/github.com/golang/groupcache')
 vendor/github.com/golang/groupcache/.gitignore                            |   1
 vendor/github.com/golang/groupcache/README.md                             |  73
 vendor/github.com/golang/groupcache/byteview.go                           | 160
 vendor/github.com/golang/groupcache/byteview_test.go                      | 142
 vendor/github.com/golang/groupcache/consistenthash/consistenthash.go      |  81
 vendor/github.com/golang/groupcache/consistenthash/consistenthash_test.go | 110
 vendor/github.com/golang/groupcache/groupcache.go                         | 489
 vendor/github.com/golang/groupcache/groupcache_test.go                    | 447
 vendor/github.com/golang/groupcache/groupcachepb/groupcache.pb.go         |  65
 vendor/github.com/golang/groupcache/groupcachepb/groupcache.proto         |  34
 vendor/github.com/golang/groupcache/http.go                               | 227
 vendor/github.com/golang/groupcache/http_test.go                          | 166
 vendor/github.com/golang/groupcache/lru/lru_test.go                       |  73
 vendor/github.com/golang/groupcache/peers.go                              |  71
 vendor/github.com/golang/groupcache/singleflight/singleflight.go          |  64
 vendor/github.com/golang/groupcache/singleflight/singleflight_test.go     |  85
 vendor/github.com/golang/groupcache/sinks.go                              | 322
 vendor/github.com/golang/groupcache/testpb/test.pb.go                     | 235
 vendor/github.com/golang/groupcache/testpb/test.proto                     |  63
19 files changed, 2908 insertions, 0 deletions
diff --git a/vendor/github.com/golang/groupcache/.gitignore b/vendor/github.com/golang/groupcache/.gitignore
new file mode 100644
index 000000000..b25c15b81
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/vendor/github.com/golang/groupcache/README.md b/vendor/github.com/golang/groupcache/README.md
new file mode 100644
index 000000000..70c29da16
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/README.md
@@ -0,0 +1,73 @@
+# groupcache
+
+## Summary
+
+groupcache is a caching and cache-filling library, intended as a
+replacement for memcached in many cases.
+
+For API docs and examples, see http://godoc.org/github.com/golang/groupcache
+
+## Comparison to memcached
+
+### **Like memcached**, groupcache:
+
+ * shards by key to select which peer is responsible for that key
+
+### **Unlike memcached**, groupcache:
+
+ * does not require running a separate set of servers, thus massively
+ reducing deployment/configuration pain. groupcache is a client
+ library as well as a server. It connects to its own peers.
+
+ * comes with a cache filling mechanism. Whereas memcached just says
+ "Sorry, cache miss", often resulting in a thundering herd of
+ database (or whatever) loads from an unbounded number of clients
+ (which has resulted in several fun outages), groupcache coordinates
+ cache fills such that only one load in one process of an entire
+ replicated set of processes populates the cache, then multiplexes
+ the loaded value to all callers.
+
+ * does not support versioned values. If key "foo" is value "bar",
+ key "foo" must always be "bar". There are neither cache expiration
+ times, nor explicit cache evictions. Thus there is also no CAS,
+ nor Increment/Decrement. This also means that groupcache....
+
+ * ... supports automatic mirroring of super-hot items to multiple
+ processes. This prevents memcached hot spotting where a machine's
+ CPU and/or NIC are overloaded by very popular keys/values.
+
+ * is currently only available for Go. It's very unlikely that I
+ (bradfitz@) will port the code to any other language.
+
+## Loading process
+
+In a nutshell, a groupcache lookup of **Get("foo")** looks like:
+
+(On machine #5 of a set of N machines running the same code)
+
+ 1. Is the value of "foo" in local memory because it's super hot? If so, use it.
+
+ 2. Is the value of "foo" in local memory because peer #5 (the current
+ peer) is the owner of it? If so, use it.
+
+ 3. Amongst all the peers in my set of N, am I the owner of the key
+ "foo"? (e.g. does it consistent hash to 5?) If so, load it. If
+ other callers come in, via the same process or via RPC requests
+ from peers, they block waiting for the load to finish and get the
+ same answer. If not, RPC to the peer that's the owner and get
+ the answer. If the RPC fails, just load it locally (still with
+ local dup suppression).
+
+## Users
+
+groupcache is in production use by dl.google.com (its original user),
+parts of Blogger, parts of Google Code, parts of Google Fiber, parts
+of Google production monitoring systems, etc.
+
+## Presentations
+
+See http://talks.golang.org/2013/oscon-dl.slide
+
+## Help
+
+Use the golang-nuts mailing list for any discussion or questions.
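
The loading process described above maps directly onto the API this change vendors. Below is a minimal caller sketch, assuming only what this diff introduces (NewGroup, GetterFunc, the Sink helpers); the group name, key, and loadFromBackend helper are hypothetical stand-ins for a real backend:

package main

import (
	"fmt"
	"log"

	"github.com/golang/groupcache"
)

// loadFromBackend is a hypothetical slow load (database, disk, RPC, ...);
// it plays the role of step 3 in the loading process above.
func loadFromBackend(key string) ([]byte, error) {
	return []byte("contents of " + key), nil
}

func main() {
	thumbnails := groupcache.NewGroup("thumbnails", 64<<20, groupcache.GetterFunc(
		func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
			// Runs only on a cache miss; groupcache coordinates misses so
			// concurrent callers across the peer set share a single load.
			data, err := loadFromBackend(key)
			if err != nil {
				return err
			}
			return dest.SetBytes(data)
		}))

	var b []byte
	if err := thumbnails.Get(nil, "big-file.jpg", groupcache.AllocatingByteSliceSink(&b)); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d bytes\n", len(b))
}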
diff --git a/vendor/github.com/golang/groupcache/byteview.go b/vendor/github.com/golang/groupcache/byteview.go
new file mode 100644
index 000000000..035a9ee44
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/byteview.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package groupcache
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strings"
+)
+
+// A ByteView holds an immutable view of bytes.
+// Internally it wraps either a []byte or a string,
+// but that detail is invisible to callers.
+//
+// A ByteView is meant to be used as a value type, not
+// a pointer (like a time.Time).
+type ByteView struct {
+ // If b is non-nil, b is used, else s is used.
+ b []byte
+ s string
+}
+
+// Len returns the view's length.
+func (v ByteView) Len() int {
+ if v.b != nil {
+ return len(v.b)
+ }
+ return len(v.s)
+}
+
+// ByteSlice returns a copy of the data as a byte slice.
+func (v ByteView) ByteSlice() []byte {
+ if v.b != nil {
+ return cloneBytes(v.b)
+ }
+ return []byte(v.s)
+}
+
+// String returns the data as a string, making a copy if necessary.
+func (v ByteView) String() string {
+ if v.b != nil {
+ return string(v.b)
+ }
+ return v.s
+}
+
+// At returns the byte at index i.
+func (v ByteView) At(i int) byte {
+ if v.b != nil {
+ return v.b[i]
+ }
+ return v.s[i]
+}
+
+// Slice slices the view between the provided from and to indices.
+func (v ByteView) Slice(from, to int) ByteView {
+ if v.b != nil {
+ return ByteView{b: v.b[from:to]}
+ }
+ return ByteView{s: v.s[from:to]}
+}
+
+// SliceFrom slices the view from the provided index until the end.
+func (v ByteView) SliceFrom(from int) ByteView {
+ if v.b != nil {
+ return ByteView{b: v.b[from:]}
+ }
+ return ByteView{s: v.s[from:]}
+}
+
+// Copy copies the view's bytes into dest and returns the number of bytes copied.
+func (v ByteView) Copy(dest []byte) int {
+ if v.b != nil {
+ return copy(dest, v.b)
+ }
+ return copy(dest, v.s)
+}
+
+// Equal returns whether the bytes in v are the same as the bytes in
+// b2.
+func (v ByteView) Equal(b2 ByteView) bool {
+ if b2.b == nil {
+ return v.EqualString(b2.s)
+ }
+ return v.EqualBytes(b2.b)
+}
+
+// EqualString returns whether the bytes in v are the same as the bytes
+// in s.
+func (v ByteView) EqualString(s string) bool {
+ if v.b == nil {
+ return v.s == s
+ }
+ l := v.Len()
+ if len(s) != l {
+ return false
+ }
+ for i, bi := range v.b {
+ if bi != s[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualBytes returns whether the bytes in v are the same as the bytes
+// in b2.
+func (v ByteView) EqualBytes(b2 []byte) bool {
+ if v.b != nil {
+ return bytes.Equal(v.b, b2)
+ }
+ l := v.Len()
+ if len(b2) != l {
+ return false
+ }
+ for i, bi := range b2 {
+ if bi != v.s[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Reader returns an io.ReadSeeker for the bytes in v.
+func (v ByteView) Reader() io.ReadSeeker {
+ if v.b != nil {
+ return bytes.NewReader(v.b)
+ }
+ return strings.NewReader(v.s)
+}
+
+// ReadAt implements io.ReaderAt on the bytes in v.
+func (v ByteView) ReadAt(p []byte, off int64) (n int, err error) {
+ if off < 0 {
+ return 0, errors.New("view: invalid offset")
+ }
+ if off >= int64(v.Len()) {
+ return 0, io.EOF
+ }
+ n = v.SliceFrom(int(off)).Copy(p)
+ if n < len(p) {
+ err = io.EOF
+ }
+ return
+}
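
A short sketch (not part of the vendored files) of how ByteView behaves as a value type; because the b/s fields are unexported it is written as an in-package test, and callers normally obtain ByteViews from Sinks rather than constructing them:

package groupcache

import (
	"io/ioutil"
	"testing"
)

// TestByteViewSketch illustrates one read-only API over either a string
// or a []byte, with Slice and Reader sharing rather than copying bytes.
func TestByteViewSketch(t *testing.T) {
	v := ByteView{s: "hello world"} // unexported field: only possible inside the package

	if v.Len() != 11 || v.At(0) != 'h' {
		t.Fatalf("Len/At = %d, %q", v.Len(), v.At(0))
	}
	if got := v.Slice(0, 5).String(); got != "hello" {
		t.Fatalf("Slice(0, 5) = %q; want %q", got, "hello")
	}
	b, err := ioutil.ReadAll(v.Reader()) // io.ReadSeeker over the same bytes
	if err != nil || string(b) != "hello world" {
		t.Fatalf("Reader = %q, %v", b, err)
	}
}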
diff --git a/vendor/github.com/golang/groupcache/byteview_test.go b/vendor/github.com/golang/groupcache/byteview_test.go
new file mode 100644
index 000000000..9ece00f45
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/byteview_test.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package groupcache
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestByteView(t *testing.T) {
+ for _, s := range []string{"", "x", "yy"} {
+ for _, v := range []ByteView{of([]byte(s)), of(s)} {
+ name := fmt.Sprintf("string %q, view %+v", s, v)
+ if v.Len() != len(s) {
+ t.Errorf("%s: Len = %d; want %d", name, v.Len(), len(s))
+ }
+ if v.String() != s {
+ t.Errorf("%s: String = %q; want %q", name, v.String(), s)
+ }
+ var longDest [3]byte
+ if n := v.Copy(longDest[:]); n != len(s) {
+ t.Errorf("%s: long Copy = %d; want %d", name, n, len(s))
+ }
+ var shortDest [1]byte
+ if n := v.Copy(shortDest[:]); n != min(len(s), 1) {
+ t.Errorf("%s: short Copy = %d; want %d", name, n, min(len(s), 1))
+ }
+ if got, err := ioutil.ReadAll(v.Reader()); err != nil || string(got) != s {
+ t.Errorf("%s: Reader = %q, %v; want %q", name, got, err, s)
+ }
+ if got, err := ioutil.ReadAll(io.NewSectionReader(v, 0, int64(len(s)))); err != nil || string(got) != s {
+ t.Errorf("%s: SectionReader of ReaderAt = %q, %v; want %q", name, got, err, s)
+ }
+ }
+ }
+}
+
+// of returns a byte view of the []byte or string in x.
+func of(x interface{}) ByteView {
+ if bytes, ok := x.([]byte); ok {
+ return ByteView{b: bytes}
+ }
+ return ByteView{s: x.(string)}
+}
+
+func TestByteViewEqual(t *testing.T) {
+ tests := []struct {
+ a interface{} // string or []byte
+ b interface{} // string or []byte
+ want bool
+ }{
+ {"x", "x", true},
+ {"x", "y", false},
+ {"x", "yy", false},
+ {[]byte("x"), []byte("x"), true},
+ {[]byte("x"), []byte("y"), false},
+ {[]byte("x"), []byte("yy"), false},
+ {[]byte("x"), "x", true},
+ {[]byte("x"), "y", false},
+ {[]byte("x"), "yy", false},
+ {"x", []byte("x"), true},
+ {"x", []byte("y"), false},
+ {"x", []byte("yy"), false},
+ }
+ for i, tt := range tests {
+ va := of(tt.a)
+ if bytes, ok := tt.b.([]byte); ok {
+ if got := va.EqualBytes(bytes); got != tt.want {
+ t.Errorf("%d. EqualBytes = %v; want %v", i, got, tt.want)
+ }
+ } else {
+ if got := va.EqualString(tt.b.(string)); got != tt.want {
+ t.Errorf("%d. EqualString = %v; want %v", i, got, tt.want)
+ }
+ }
+ if got := va.Equal(of(tt.b)); got != tt.want {
+ t.Errorf("%d. Equal = %v; want %v", i, got, tt.want)
+ }
+ }
+}
+
+func TestByteViewSlice(t *testing.T) {
+ tests := []struct {
+ in string
+ from int
+ to interface{} // nil to mean the end (SliceFrom); else int
+ want string
+ }{
+ {
+ in: "abc",
+ from: 1,
+ to: 2,
+ want: "b",
+ },
+ {
+ in: "abc",
+ from: 1,
+ want: "bc",
+ },
+ {
+ in: "abc",
+ to: 2,
+ want: "ab",
+ },
+ }
+ for i, tt := range tests {
+ for _, v := range []ByteView{of([]byte(tt.in)), of(tt.in)} {
+ name := fmt.Sprintf("test %d, view %+v", i, v)
+ if tt.to != nil {
+ v = v.Slice(tt.from, tt.to.(int))
+ } else {
+ v = v.SliceFrom(tt.from)
+ }
+ if v.String() != tt.want {
+ t.Errorf("%s: got %q; want %q", name, v.String(), tt.want)
+ }
+ }
+ }
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/golang/groupcache/consistenthash/consistenthash.go b/vendor/github.com/golang/groupcache/consistenthash/consistenthash.go
new file mode 100644
index 000000000..a9c56f076
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/consistenthash/consistenthash.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package consistenthash provides an implementation of a ring hash.
+package consistenthash
+
+import (
+ "hash/crc32"
+ "sort"
+ "strconv"
+)
+
+type Hash func(data []byte) uint32
+
+type Map struct {
+ hash Hash
+ replicas int
+ keys []int // Sorted
+ hashMap map[int]string
+}
+
+func New(replicas int, fn Hash) *Map {
+ m := &Map{
+ replicas: replicas,
+ hash: fn,
+ hashMap: make(map[int]string),
+ }
+ if m.hash == nil {
+ m.hash = crc32.ChecksumIEEE
+ }
+ return m
+}
+
+// IsEmpty reports whether the hash has no items.
+func (m *Map) IsEmpty() bool {
+ return len(m.keys) == 0
+}
+
+// Add adds some keys to the hash.
+func (m *Map) Add(keys ...string) {
+ for _, key := range keys {
+ for i := 0; i < m.replicas; i++ {
+ hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
+ m.keys = append(m.keys, hash)
+ m.hashMap[hash] = key
+ }
+ }
+ sort.Ints(m.keys)
+}
+
+// Get returns the item in the hash closest to the provided key.
+func (m *Map) Get(key string) string {
+ if m.IsEmpty() {
+ return ""
+ }
+
+ hash := int(m.hash([]byte(key)))
+
+ // Binary search for appropriate replica.
+ idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })
+
+ // Means we have cycled back to the first replica.
+ if idx == len(m.keys) {
+ idx = 0
+ }
+
+ return m.hashMap[m.keys[idx]]
+}
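
Before the tests, a brief usage sketch of the ring on its own; the peer addresses are hypothetical, and passing nil as the hash falls back to crc32.ChecksumIEEE as shown in New above:

package main

import (
	"fmt"

	"github.com/golang/groupcache/consistenthash"
)

func main() {
	// 50 replicas per key is the same default HTTPPool uses; more replicas
	// smooth the key distribution across peers.
	ring := consistenthash.New(50, nil)
	ring.Add("peer-a:8080", "peer-b:8080", "peer-c:8080")

	// Any process that Adds the same peer set maps a given key to the same
	// owner, which is what groupcache relies on when picking peers.
	for _, key := range []string{"user:42", "user:43", "thumb/cat.png"} {
		fmt.Printf("%-14s -> %s\n", key, ring.Get(key))
	}
}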
diff --git a/vendor/github.com/golang/groupcache/consistenthash/consistenthash_test.go b/vendor/github.com/golang/groupcache/consistenthash/consistenthash_test.go
new file mode 100644
index 000000000..1a37fd7ff
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/consistenthash/consistenthash_test.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package consistenthash
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func TestHashing(t *testing.T) {
+
+ // Override the hash function to return easier to reason about values. Assumes
+ // the keys can be converted to an integer.
+ hash := New(3, func(key []byte) uint32 {
+ i, err := strconv.Atoi(string(key))
+ if err != nil {
+ panic(err)
+ }
+ return uint32(i)
+ })
+
+ // Given the above hash function, this will give replicas with "hashes":
+ // 2, 4, 6, 12, 14, 16, 22, 24, 26
+ hash.Add("6", "4", "2")
+
+ testCases := map[string]string{
+ "2": "2",
+ "11": "2",
+ "23": "4",
+ "27": "2",
+ }
+
+ for k, v := range testCases {
+ if hash.Get(k) != v {
+ t.Errorf("Asking for %s, should have yielded %s", k, v)
+ }
+ }
+
+ // Adds 8, 18, 28
+ hash.Add("8")
+
+ // 27 should now map to 8.
+ testCases["27"] = "8"
+
+ for k, v := range testCases {
+ if hash.Get(k) != v {
+ t.Errorf("Asking for %s, should have yielded %s", k, v)
+ }
+ }
+
+}
+
+func TestConsistency(t *testing.T) {
+ hash1 := New(1, nil)
+ hash2 := New(1, nil)
+
+ hash1.Add("Bill", "Bob", "Bonny")
+ hash2.Add("Bob", "Bonny", "Bill")
+
+ if hash1.Get("Ben") != hash2.Get("Ben") {
+ t.Errorf("Fetching 'Ben' from both hashes should be the same")
+ }
+
+ hash2.Add("Becky", "Ben", "Bobby")
+
+ if hash1.Get("Ben") != hash2.Get("Ben") ||
+ hash1.Get("Bob") != hash2.Get("Bob") ||
+ hash1.Get("Bonny") != hash2.Get("Bonny") {
+ t.Errorf("Direct matches should always return the same entry")
+ }
+
+}
+
+func BenchmarkGet8(b *testing.B) { benchmarkGet(b, 8) }
+func BenchmarkGet32(b *testing.B) { benchmarkGet(b, 32) }
+func BenchmarkGet128(b *testing.B) { benchmarkGet(b, 128) }
+func BenchmarkGet512(b *testing.B) { benchmarkGet(b, 512) }
+
+func benchmarkGet(b *testing.B, shards int) {
+
+ hash := New(50, nil)
+
+ var buckets []string
+ for i := 0; i < shards; i++ {
+ buckets = append(buckets, fmt.Sprintf("shard-%d", i))
+ }
+
+ hash.Add(buckets...)
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ hash.Get(buckets[i&(shards-1)])
+ }
+}
diff --git a/vendor/github.com/golang/groupcache/groupcache.go b/vendor/github.com/golang/groupcache/groupcache.go
new file mode 100644
index 000000000..40410a0cc
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/groupcache.go
@@ -0,0 +1,489 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package groupcache provides a data loading mechanism with caching
+// and de-duplication that works across a set of peer processes.
+//
+// Each data Get first consults its local cache, otherwise delegates
+// to the requested key's canonical owner, which then checks its cache
+// or finally gets the data. In the common case, many concurrent
+// cache misses across a set of peers for the same key result in just
+// one cache fill.
+package groupcache
+
+import (
+ "errors"
+ "math/rand"
+ "strconv"
+ "sync"
+ "sync/atomic"
+
+ pb "github.com/golang/groupcache/groupcachepb"
+ "github.com/golang/groupcache/lru"
+ "github.com/golang/groupcache/singleflight"
+)
+
+// A Getter loads data for a key.
+type Getter interface {
+ // Get returns the value identified by key, populating dest.
+ //
+ // The returned data must be unversioned. That is, key must
+ // uniquely describe the loaded data, without an implicit
+ // current time, and without relying on cache expiration
+ // mechanisms.
+ Get(ctx Context, key string, dest Sink) error
+}
+
+// A GetterFunc implements Getter with a function.
+type GetterFunc func(ctx Context, key string, dest Sink) error
+
+func (f GetterFunc) Get(ctx Context, key string, dest Sink) error {
+ return f(ctx, key, dest)
+}
+
+var (
+ mu sync.RWMutex
+ groups = make(map[string]*Group)
+
+ initPeerServerOnce sync.Once
+ initPeerServer func()
+)
+
+// GetGroup returns the named group previously created with NewGroup, or
+// nil if there's no such group.
+func GetGroup(name string) *Group {
+ mu.RLock()
+ g := groups[name]
+ mu.RUnlock()
+ return g
+}
+
+// NewGroup creates a coordinated group-aware Getter from a Getter.
+//
+// The returned Getter tries (but does not guarantee) to run only one
+// Get call at once for a given key across an entire set of peer
+// processes. Concurrent callers both in the local process and in
+// other processes receive copies of the answer once the original Get
+// completes.
+//
+// The group name must be unique for each getter.
+func NewGroup(name string, cacheBytes int64, getter Getter) *Group {
+ return newGroup(name, cacheBytes, getter, nil)
+}
+
+// If peers is nil, the peerPicker is called via a sync.Once to initialize it.
+func newGroup(name string, cacheBytes int64, getter Getter, peers PeerPicker) *Group {
+ if getter == nil {
+ panic("nil Getter")
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ initPeerServerOnce.Do(callInitPeerServer)
+ if _, dup := groups[name]; dup {
+ panic("duplicate registration of group " + name)
+ }
+ g := &Group{
+ name: name,
+ getter: getter,
+ peers: peers,
+ cacheBytes: cacheBytes,
+ loadGroup: &singleflight.Group{},
+ }
+ if fn := newGroupHook; fn != nil {
+ fn(g)
+ }
+ groups[name] = g
+ return g
+}
+
+// newGroupHook, if non-nil, is called right after a new group is created.
+var newGroupHook func(*Group)
+
+// RegisterNewGroupHook registers a hook that is run each time
+// a group is created.
+func RegisterNewGroupHook(fn func(*Group)) {
+ if newGroupHook != nil {
+ panic("RegisterNewGroupHook called more than once")
+ }
+ newGroupHook = fn
+}
+
+// RegisterServerStart registers a hook that is run when the first
+// group is created.
+func RegisterServerStart(fn func()) {
+ if initPeerServer != nil {
+ panic("RegisterServerStart called more than once")
+ }
+ initPeerServer = fn
+}
+
+func callInitPeerServer() {
+ if initPeerServer != nil {
+ initPeerServer()
+ }
+}
+
+// A Group is a cache namespace and associated data loaded spread over
+// a group of 1 or more machines.
+type Group struct {
+ name string
+ getter Getter
+ peersOnce sync.Once
+ peers PeerPicker
+ cacheBytes int64 // limit for sum of mainCache and hotCache size
+
+ // mainCache is a cache of the keys for which this process
+ // (amongst its peers) is authoritative. That is, this cache
+ // contains keys which consistent hash on to this process's
+ // peer number.
+ mainCache cache
+
+ // hotCache contains keys/values for which this peer is not
+ // authoritative (otherwise they would be in mainCache), but
+ // are popular enough to warrant mirroring in this process to
+ // avoid going over the network to fetch from a peer. Having
+ // a hotCache avoids network hotspotting, where a peer's
+ // network card could become the bottleneck on a popular key.
+ // This cache is used sparingly to maximize the total number
+ // of key/value pairs that can be stored globally.
+ hotCache cache
+
+ // loadGroup ensures that each key is only fetched once
+ // (either locally or remotely), regardless of the number of
+ // concurrent callers.
+ loadGroup flightGroup
+
+ // Stats are statistics on the group.
+ Stats Stats
+}
+
+// flightGroup is defined as an interface which singleflight.Group
+// satisfies. We define this so that we may test with an alternate
+// implementation.
+type flightGroup interface {
+ // Done is called when Do is done.
+ Do(key string, fn func() (interface{}, error)) (interface{}, error)
+}
+
+// Stats are per-group statistics.
+type Stats struct {
+ Gets AtomicInt // any Get request, including from peers
+ CacheHits AtomicInt // either cache was good
+ PeerLoads AtomicInt // either remote load or remote cache hit (not an error)
+ PeerErrors AtomicInt
+ Loads AtomicInt // (gets - cacheHits)
+ LoadsDeduped AtomicInt // after singleflight
+ LocalLoads AtomicInt // total good local loads
+ LocalLoadErrs AtomicInt // total bad local loads
+ ServerRequests AtomicInt // gets that came over the network from peers
+}
+
+// Name returns the name of the group.
+func (g *Group) Name() string {
+ return g.name
+}
+
+func (g *Group) initPeers() {
+ if g.peers == nil {
+ g.peers = getPeers()
+ }
+}
+
+func (g *Group) Get(ctx Context, key string, dest Sink) error {
+ g.peersOnce.Do(g.initPeers)
+ g.Stats.Gets.Add(1)
+ if dest == nil {
+ return errors.New("groupcache: nil dest Sink")
+ }
+ value, cacheHit := g.lookupCache(key)
+
+ if cacheHit {
+ g.Stats.CacheHits.Add(1)
+ return setSinkView(dest, value)
+ }
+
+ // Optimization to avoid double unmarshalling or copying: keep
+ // track of whether the dest was already populated. One caller
+ // (if local) will set this; the losers will not. The common
+ // case will likely be one caller.
+ destPopulated := false
+ value, destPopulated, err := g.load(ctx, key, dest)
+ if err != nil {
+ return err
+ }
+ if destPopulated {
+ return nil
+ }
+ return setSinkView(dest, value)
+}
+
+// load loads key either by invoking the getter locally or by sending it to another machine.
+func (g *Group) load(ctx Context, key string, dest Sink) (value ByteView, destPopulated bool, err error) {
+ g.Stats.Loads.Add(1)
+ viewi, err := g.loadGroup.Do(key, func() (interface{}, error) {
+ // Check the cache again because singleflight can only dedup calls
+ // that overlap concurrently. It's possible for 2 concurrent
+ // requests to miss the cache, resulting in 2 load() calls. An
+ // unfortunate goroutine scheduling would result in this callback
+ // being run twice, serially. If we don't check the cache again,
+ // cache.nbytes would be incremented below even though there will
+ // be only one entry for this key.
+ //
+ // Consider the following serialized event ordering for two
+ // goroutines in which this callback gets called twice for the
+ // same key:
+ // 1: Get("key")
+ // 2: Get("key")
+ // 1: lookupCache("key")
+ // 2: lookupCache("key")
+ // 1: load("key")
+ // 2: load("key")
+ // 1: loadGroup.Do("key", fn)
+ // 1: fn()
+ // 2: loadGroup.Do("key", fn)
+ // 2: fn()
+ if value, cacheHit := g.lookupCache(key); cacheHit {
+ g.Stats.CacheHits.Add(1)
+ return value, nil
+ }
+ g.Stats.LoadsDeduped.Add(1)
+ var value ByteView
+ var err error
+ if peer, ok := g.peers.PickPeer(key); ok {
+ value, err = g.getFromPeer(ctx, peer, key)
+ if err == nil {
+ g.Stats.PeerLoads.Add(1)
+ return value, nil
+ }
+ g.Stats.PeerErrors.Add(1)
+ // TODO(bradfitz): log the peer's error? keep
+ // log of the past few for /groupcachez? It's
+ // probably boring (normal task movement), so not
+ // worth logging I imagine.
+ }
+ value, err = g.getLocally(ctx, key, dest)
+ if err != nil {
+ g.Stats.LocalLoadErrs.Add(1)
+ return nil, err
+ }
+ g.Stats.LocalLoads.Add(1)
+ destPopulated = true // only one caller of load gets this return value
+ g.populateCache(key, value, &g.mainCache)
+ return value, nil
+ })
+ if err == nil {
+ value = viewi.(ByteView)
+ }
+ return
+}
+
+func (g *Group) getLocally(ctx Context, key string, dest Sink) (ByteView, error) {
+ err := g.getter.Get(ctx, key, dest)
+ if err != nil {
+ return ByteView{}, err
+ }
+ return dest.view()
+}
+
+func (g *Group) getFromPeer(ctx Context, peer ProtoGetter, key string) (ByteView, error) {
+ req := &pb.GetRequest{
+ Group: &g.name,
+ Key: &key,
+ }
+ res := &pb.GetResponse{}
+ err := peer.Get(ctx, req, res)
+ if err != nil {
+ return ByteView{}, err
+ }
+ value := ByteView{b: res.Value}
+ // TODO(bradfitz): use res.MinuteQps or something smart to
+ // conditionally populate hotCache. For now just do it some
+ // percentage of the time.
+ if rand.Intn(10) == 0 {
+ g.populateCache(key, value, &g.hotCache)
+ }
+ return value, nil
+}
+
+func (g *Group) lookupCache(key string) (value ByteView, ok bool) {
+ if g.cacheBytes <= 0 {
+ return
+ }
+ value, ok = g.mainCache.get(key)
+ if ok {
+ return
+ }
+ value, ok = g.hotCache.get(key)
+ return
+}
+
+func (g *Group) populateCache(key string, value ByteView, cache *cache) {
+ if g.cacheBytes <= 0 {
+ return
+ }
+ cache.add(key, value)
+
+ // Evict items from cache(s) if necessary.
+ for {
+ mainBytes := g.mainCache.bytes()
+ hotBytes := g.hotCache.bytes()
+ if mainBytes+hotBytes <= g.cacheBytes {
+ return
+ }
+
+ // TODO(bradfitz): this is good-enough-for-now logic.
+ // It should be something based on measurements and/or
+ // respecting the costs of different resources.
+ victim := &g.mainCache
+ if hotBytes > mainBytes/8 {
+ victim = &g.hotCache
+ }
+ victim.removeOldest()
+ }
+}
+
+// CacheType represents a type of cache.
+type CacheType int
+
+const (
+ // The MainCache is the cache for items that this peer is the
+ // owner for.
+ MainCache CacheType = iota + 1
+
+ // The HotCache is the cache for items that seem popular
+ // enough to replicate to this node, even though it's not the
+ // owner.
+ HotCache
+)
+
+// CacheStats returns stats about the provided cache within the group.
+func (g *Group) CacheStats(which CacheType) CacheStats {
+ switch which {
+ case MainCache:
+ return g.mainCache.stats()
+ case HotCache:
+ return g.hotCache.stats()
+ default:
+ return CacheStats{}
+ }
+}
+
+// cache is a wrapper around an *lru.Cache that adds synchronization,
+// makes values always be ByteView, and counts the size of all keys and
+// values.
+type cache struct {
+ mu sync.RWMutex
+ nbytes int64 // of all keys and values
+ lru *lru.Cache
+ nhit, nget int64
+ nevict int64 // number of evictions
+}
+
+func (c *cache) stats() CacheStats {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return CacheStats{
+ Bytes: c.nbytes,
+ Items: c.itemsLocked(),
+ Gets: c.nget,
+ Hits: c.nhit,
+ Evictions: c.nevict,
+ }
+}
+
+func (c *cache) add(key string, value ByteView) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.lru == nil {
+ c.lru = &lru.Cache{
+ OnEvicted: func(key lru.Key, value interface{}) {
+ val := value.(ByteView)
+ c.nbytes -= int64(len(key.(string))) + int64(val.Len())
+ c.nevict++
+ },
+ }
+ }
+ c.lru.Add(key, value)
+ c.nbytes += int64(len(key)) + int64(value.Len())
+}
+
+func (c *cache) get(key string) (value ByteView, ok bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.nget++
+ if c.lru == nil {
+ return
+ }
+ vi, ok := c.lru.Get(key)
+ if !ok {
+ return
+ }
+ c.nhit++
+ return vi.(ByteView), true
+}
+
+func (c *cache) removeOldest() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.lru != nil {
+ c.lru.RemoveOldest()
+ }
+}
+
+func (c *cache) bytes() int64 {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.nbytes
+}
+
+func (c *cache) items() int64 {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.itemsLocked()
+}
+
+func (c *cache) itemsLocked() int64 {
+ if c.lru == nil {
+ return 0
+ }
+ return int64(c.lru.Len())
+}
+
+// An AtomicInt is an int64 to be accessed atomically.
+type AtomicInt int64
+
+// Add atomically adds n to i.
+func (i *AtomicInt) Add(n int64) {
+ atomic.AddInt64((*int64)(i), n)
+}
+
+// Get atomically gets the value of i.
+func (i *AtomicInt) Get() int64 {
+ return atomic.LoadInt64((*int64)(i))
+}
+
+func (i *AtomicInt) String() string {
+ return strconv.FormatInt(i.Get(), 10)
+}
+
+// CacheStats are returned by stats accessors on Group.
+type CacheStats struct {
+ Bytes int64
+ Items int64
+ Gets int64
+ Hits int64
+ Evictions int64
+}
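
As a small illustration of the Stats and CacheStats accessors defined above, a sketch that prints a group's counters; the group name is hypothetical and must match one registered elsewhere with NewGroup:

package main

import (
	"log"

	"github.com/golang/groupcache"
)

func main() {
	g := groupcache.GetGroup("thumbnails") // nil if the group was never registered
	if g == nil {
		log.Fatal("group not registered")
	}

	log.Printf("gets=%d hits=%d peerLoads=%d localLoads=%d",
		g.Stats.Gets.Get(), g.Stats.CacheHits.Get(),
		g.Stats.PeerLoads.Get(), g.Stats.LocalLoads.Get())

	mainStats := g.CacheStats(groupcache.MainCache) // keys this peer owns
	hotStats := g.CacheStats(groupcache.HotCache)   // mirrored popular keys
	log.Printf("mainCache: items=%d bytes=%d evictions=%d",
		mainStats.Items, mainStats.Bytes, mainStats.Evictions)
	log.Printf("hotCache:  items=%d bytes=%d evictions=%d",
		hotStats.Items, hotStats.Bytes, hotStats.Evictions)
}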
diff --git a/vendor/github.com/golang/groupcache/groupcache_test.go b/vendor/github.com/golang/groupcache/groupcache_test.go
new file mode 100644
index 000000000..3a4ecc2cc
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/groupcache_test.go
@@ -0,0 +1,447 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Tests for groupcache.
+
+package groupcache
+
+import (
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "math/rand"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "github.com/golang/groupcache/groupcachepb"
+ testpb "github.com/golang/groupcache/testpb"
+)
+
+var (
+ once sync.Once
+ stringGroup, protoGroup Getter
+
+ stringc = make(chan string)
+
+ dummyCtx Context
+
+ // cacheFills is the number of times stringGroup or
+ // protoGroup's Getter have been called. Read using the
+ // cacheFills function.
+ cacheFills AtomicInt
+)
+
+const (
+ stringGroupName = "string-group"
+ protoGroupName = "proto-group"
+ testMessageType = "google3/net/groupcache/go/test_proto.TestMessage"
+ fromChan = "from-chan"
+ cacheSize = 1 << 20
+)
+
+func testSetup() {
+ stringGroup = NewGroup(stringGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error {
+ if key == fromChan {
+ key = <-stringc
+ }
+ cacheFills.Add(1)
+ return dest.SetString("ECHO:" + key)
+ }))
+
+ protoGroup = NewGroup(protoGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error {
+ if key == fromChan {
+ key = <-stringc
+ }
+ cacheFills.Add(1)
+ return dest.SetProto(&testpb.TestMessage{
+ Name: proto.String("ECHO:" + key),
+ City: proto.String("SOME-CITY"),
+ })
+ }))
+}
+
+// tests that a Getter's Get method is only called once with two
+// outstanding callers. This is the string variant.
+func TestGetDupSuppressString(t *testing.T) {
+ once.Do(testSetup)
+ // Start two getters. The first should block (waiting to read
+ // from stringc) and the second should latch on to the first
+ // one.
+ resc := make(chan string, 2)
+ for i := 0; i < 2; i++ {
+ go func() {
+ var s string
+ if err := stringGroup.Get(dummyCtx, fromChan, StringSink(&s)); err != nil {
+ resc <- "ERROR:" + err.Error()
+ return
+ }
+ resc <- s
+ }()
+ }
+
+ // Wait a bit so both goroutines get merged together via
+ // singleflight.
+ // TODO(bradfitz): decide whether there are any non-offensive
+ // debug/test hooks that could be added to singleflight to
+ // make a sleep here unnecessary.
+ time.Sleep(250 * time.Millisecond)
+
+ // Unblock the first getter, which should unblock the second
+ // as well.
+ stringc <- "foo"
+
+ for i := 0; i < 2; i++ {
+ select {
+ case v := <-resc:
+ if v != "ECHO:foo" {
+ t.Errorf("got %q; want %q", v, "ECHO:foo")
+ }
+ case <-time.After(5 * time.Second):
+ t.Errorf("timeout waiting on getter #%d of 2", i+1)
+ }
+ }
+}
+
+// tests that a Getter's Get method is only called once with two
+// outstanding callers. This is the proto variant.
+func TestGetDupSuppressProto(t *testing.T) {
+ once.Do(testSetup)
+ // Start two getters. The first should block (waiting to read
+ // from stringc) and the second should latch on to the first
+ // one.
+ resc := make(chan *testpb.TestMessage, 2)
+ for i := 0; i < 2; i++ {
+ go func() {
+ tm := new(testpb.TestMessage)
+ if err := protoGroup.Get(dummyCtx, fromChan, ProtoSink(tm)); err != nil {
+ tm.Name = proto.String("ERROR:" + err.Error())
+ }
+ resc <- tm
+ }()
+ }
+
+ // Wait a bit so both goroutines get merged together via
+ // singleflight.
+ // TODO(bradfitz): decide whether there are any non-offensive
+ // debug/test hooks that could be added to singleflight to
+ // make a sleep here unnecessary.
+ time.Sleep(250 * time.Millisecond)
+
+ // Unblock the first getter, which should unblock the second
+ // as well.
+ stringc <- "Fluffy"
+ want := &testpb.TestMessage{
+ Name: proto.String("ECHO:Fluffy"),
+ City: proto.String("SOME-CITY"),
+ }
+ for i := 0; i < 2; i++ {
+ select {
+ case v := <-resc:
+ if !reflect.DeepEqual(v, want) {
+ t.Errorf(" Got: %v\nWant: %v", proto.CompactTextString(v), proto.CompactTextString(want))
+ }
+ case <-time.After(5 * time.Second):
+ t.Errorf("timeout waiting on getter #%d of 2", i+1)
+ }
+ }
+}
+
+func countFills(f func()) int64 {
+ fills0 := cacheFills.Get()
+ f()
+ return cacheFills.Get() - fills0
+}
+
+func TestCaching(t *testing.T) {
+ once.Do(testSetup)
+ fills := countFills(func() {
+ for i := 0; i < 10; i++ {
+ var s string
+ if err := stringGroup.Get(dummyCtx, "TestCaching-key", StringSink(&s)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ })
+ if fills != 1 {
+ t.Errorf("expected 1 cache fill; got %d", fills)
+ }
+}
+
+func TestCacheEviction(t *testing.T) {
+ once.Do(testSetup)
+ testKey := "TestCacheEviction-key"
+ getTestKey := func() {
+ var res string
+ for i := 0; i < 10; i++ {
+ if err := stringGroup.Get(dummyCtx, testKey, StringSink(&res)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ fills := countFills(getTestKey)
+ if fills != 1 {
+ t.Fatalf("expected 1 cache fill; got %d", fills)
+ }
+
+ g := stringGroup.(*Group)
+ evict0 := g.mainCache.nevict
+
+ // Trash the cache with other keys.
+ var bytesFlooded int64
+ // cacheSize/len(testKey) is approximate
+ for bytesFlooded < cacheSize+1024 {
+ var res string
+ key := fmt.Sprintf("dummy-key-%d", bytesFlooded)
+ stringGroup.Get(dummyCtx, key, StringSink(&res))
+ bytesFlooded += int64(len(key) + len(res))
+ }
+ evicts := g.mainCache.nevict - evict0
+ if evicts <= 0 {
+ t.Errorf("evicts = %v; want more than 0", evicts)
+ }
+
+ // Test that the key is gone.
+ fills = countFills(getTestKey)
+ if fills != 1 {
+ t.Fatalf("expected 1 cache fill after cache trashing; got %d", fills)
+ }
+}
+
+type fakePeer struct {
+ hits int
+ fail bool
+}
+
+func (p *fakePeer) Get(_ Context, in *pb.GetRequest, out *pb.GetResponse) error {
+ p.hits++
+ if p.fail {
+ return errors.New("simulated error from peer")
+ }
+ out.Value = []byte("got:" + in.GetKey())
+ return nil
+}
+
+type fakePeers []ProtoGetter
+
+func (p fakePeers) PickPeer(key string) (peer ProtoGetter, ok bool) {
+ if len(p) == 0 {
+ return
+ }
+ n := crc32.Checksum([]byte(key), crc32.IEEETable) % uint32(len(p))
+ return p[n], p[n] != nil
+}
+
+// tests that peers (virtual, in-process) are hit, and how much.
+func TestPeers(t *testing.T) {
+ once.Do(testSetup)
+ rand.Seed(123)
+ peer0 := &fakePeer{}
+ peer1 := &fakePeer{}
+ peer2 := &fakePeer{}
+ peerList := fakePeers([]ProtoGetter{peer0, peer1, peer2, nil})
+ const cacheSize = 0 // disabled
+ localHits := 0
+ getter := func(_ Context, key string, dest Sink) error {
+ localHits++
+ return dest.SetString("got:" + key)
+ }
+ testGroup := newGroup("TestPeers-group", cacheSize, GetterFunc(getter), peerList)
+ run := func(name string, n int, wantSummary string) {
+ // Reset counters
+ localHits = 0
+ for _, p := range []*fakePeer{peer0, peer1, peer2} {
+ p.hits = 0
+ }
+
+ for i := 0; i < n; i++ {
+ key := fmt.Sprintf("key-%d", i)
+ want := "got:" + key
+ var got string
+ err := testGroup.Get(dummyCtx, key, StringSink(&got))
+ if err != nil {
+ t.Errorf("%s: error on key %q: %v", name, key, err)
+ continue
+ }
+ if got != want {
+ t.Errorf("%s: for key %q, got %q; want %q", name, key, got, want)
+ }
+ }
+ summary := func() string {
+ return fmt.Sprintf("localHits = %d, peers = %d %d %d", localHits, peer0.hits, peer1.hits, peer2.hits)
+ }
+ if got := summary(); got != wantSummary {
+ t.Errorf("%s: got %q; want %q", name, got, wantSummary)
+ }
+ }
+ resetCacheSize := func(maxBytes int64) {
+ g := testGroup
+ g.cacheBytes = maxBytes
+ g.mainCache = cache{}
+ g.hotCache = cache{}
+ }
+
+ // Base case; peers all up, with no problems.
+ resetCacheSize(1 << 20)
+ run("base", 200, "localHits = 49, peers = 51 49 51")
+
+ // Verify cache was hit. All localHits are gone, and some of
+ // the peer hits (the ones randomly selected to be maybe hot)
+ run("cached_base", 200, "localHits = 0, peers = 49 47 48")
+ resetCacheSize(0)
+
+ // With one of the peers being down.
+ // TODO(bradfitz): on a peer number being unavailable, the
+ // consistent hashing should maybe keep trying others to
+ // spread the load out. Currently it fails back to local
+ // execution if the first consistent-hash slot is unavailable.
+ peerList[0] = nil
+ run("one_peer_down", 200, "localHits = 100, peers = 0 49 51")
+
+ // Failing peer
+ peerList[0] = peer0
+ peer0.fail = true
+ run("peer0_failing", 200, "localHits = 100, peers = 51 49 51")
+}
+
+func TestTruncatingByteSliceTarget(t *testing.T) {
+ var buf [100]byte
+ s := buf[:]
+ if err := stringGroup.Get(dummyCtx, "short", TruncatingByteSliceSink(&s)); err != nil {
+ t.Fatal(err)
+ }
+ if want := "ECHO:short"; string(s) != want {
+ t.Errorf("short key got %q; want %q", s, want)
+ }
+
+ s = buf[:6]
+ if err := stringGroup.Get(dummyCtx, "truncated", TruncatingByteSliceSink(&s)); err != nil {
+ t.Fatal(err)
+ }
+ if want := "ECHO:t"; string(s) != want {
+ t.Errorf("truncated key got %q; want %q", s, want)
+ }
+}
+
+func TestAllocatingByteSliceTarget(t *testing.T) {
+ var dst []byte
+ sink := AllocatingByteSliceSink(&dst)
+
+ inBytes := []byte("some bytes")
+ sink.SetBytes(inBytes)
+ if want := "some bytes"; string(dst) != want {
+ t.Errorf("SetBytes resulted in %q; want %q", dst, want)
+ }
+ v, err := sink.view()
+ if err != nil {
+ t.Fatalf("view after SetBytes failed: %v", err)
+ }
+ if &inBytes[0] == &dst[0] {
+ t.Error("inBytes and dst share memory")
+ }
+ if &inBytes[0] == &v.b[0] {
+ t.Error("inBytes and view share memory")
+ }
+ if &dst[0] == &v.b[0] {
+ t.Error("dst and view share memory")
+ }
+}
+
+// orderedFlightGroup allows the caller to force the schedule of when
+// orig.Do will be called. This is useful to serialize calls such
+// that singleflight cannot dedup them.
+type orderedFlightGroup struct {
+ mu sync.Mutex
+ stage1 chan bool
+ stage2 chan bool
+ orig flightGroup
+}
+
+func (g *orderedFlightGroup) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
+ <-g.stage1
+ <-g.stage2
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ return g.orig.Do(key, fn)
+}
+
+// TestNoDedup tests invariants on the cache size when singleflight is
+// unable to dedup calls.
+func TestNoDedup(t *testing.T) {
+ const testkey = "testkey"
+ const testval = "testval"
+ g := newGroup("testgroup", 1024, GetterFunc(func(_ Context, key string, dest Sink) error {
+ return dest.SetString(testval)
+ }), nil)
+
+ orderedGroup := &orderedFlightGroup{
+ stage1: make(chan bool),
+ stage2: make(chan bool),
+ orig: g.loadGroup,
+ }
+ // Replace loadGroup with our wrapper so we can control when
+ // loadGroup.Do is entered for each concurrent request.
+ g.loadGroup = orderedGroup
+
+ // Issue two identical requests concurrently. Since the cache is
+ // empty, both will miss and enter load(), but we will only
+ // allow one at a time to enter singleflight.Do, so the callback
+ // function will be called twice.
+ resc := make(chan string, 2)
+ for i := 0; i < 2; i++ {
+ go func() {
+ var s string
+ if err := g.Get(dummyCtx, testkey, StringSink(&s)); err != nil {
+ resc <- "ERROR:" + err.Error()
+ return
+ }
+ resc <- s
+ }()
+ }
+
+ // Ensure both goroutines have entered the Do routine. This implies
+ // both concurrent requests have checked the cache, found it empty,
+ // and called load().
+ orderedGroup.stage1 <- true
+ orderedGroup.stage1 <- true
+ orderedGroup.stage2 <- true
+ orderedGroup.stage2 <- true
+
+ for i := 0; i < 2; i++ {
+ if s := <-resc; s != testval {
+ t.Errorf("result is %s want %s", s, testval)
+ }
+ }
+
+ const wantItems = 1
+ if g.mainCache.items() != wantItems {
+ t.Errorf("mainCache has %d items, want %d", g.mainCache.items(), wantItems)
+ }
+
+ // If the singleflight callback doesn't double-check the cache again
+ // upon entry, we would increment nbytes twice but the entry would
+ // only be in the cache once.
+ const wantBytes = int64(len(testkey) + len(testval))
+ if g.mainCache.nbytes != wantBytes {
+ t.Errorf("cache has %d bytes, want %d", g.mainCache.nbytes, wantBytes)
+ }
+}
+
+// TODO(bradfitz): port the Google-internal full integration test into here,
+// using HTTP requests instead of our RPC system.
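
The dedup behaviour these tests exercise comes from the singleflight package (listed in the diffstat, though its body is not shown in this section). Below is a sketch relying only on the Do signature used by loadGroup above; the key and loadConfig helper are hypothetical:

package main

import (
	"fmt"
	"sync"

	"github.com/golang/groupcache/singleflight"
)

// loadConfig stands in for an expensive load; inside groupcache this is
// the role the load callback plays for a missed key.
func loadConfig() (interface{}, error) {
	fmt.Println("expensive load runs")
	return "loaded-value", nil
}

func main() {
	var flights singleflight.Group
	var wg sync.WaitGroup

	// Ten concurrent callers ask for the same key; overlapping calls are
	// collapsed so loadConfig typically runs once, and every caller
	// receives the same result.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err := flights.Do("config", loadConfig)
			fmt.Println(v, err)
		}()
	}
	wg.Wait()
}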
diff --git a/vendor/github.com/golang/groupcache/groupcachepb/groupcache.pb.go b/vendor/github.com/golang/groupcache/groupcachepb/groupcache.pb.go
new file mode 100644
index 000000000..520d1ee9a
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/groupcachepb/groupcache.pb.go
@@ -0,0 +1,65 @@
+// Code generated by protoc-gen-go.
+// source: groupcache.proto
+// DO NOT EDIT!
+
+package groupcachepb
+
+import proto "github.com/golang/protobuf/proto"
+import json "encoding/json"
+import math "math"
+
+// Reference proto, json, and math imports to suppress error if they are not otherwise used.
+var _ = proto.Marshal
+var _ = &json.SyntaxError{}
+var _ = math.Inf
+
+type GetRequest struct {
+ Group *string `protobuf:"bytes,1,req,name=group" json:"group,omitempty"`
+ Key *string `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+
+func (m *GetRequest) GetGroup() string {
+ if m != nil && m.Group != nil {
+ return *m.Group
+ }
+ return ""
+}
+
+func (m *GetRequest) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+type GetResponse struct {
+ Value []byte `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
+ MinuteQps *float64 `protobuf:"fixed64,2,opt,name=minute_qps" json:"minute_qps,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+
+func (m *GetResponse) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *GetResponse) GetMinuteQps() float64 {
+ if m != nil && m.MinuteQps != nil {
+ return *m.MinuteQps
+ }
+ return 0
+}
+
+func init() {
+}
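
A short sketch of working with the generated messages directly, using only the proto helpers already imported elsewhere in this diff (proto.String, proto.Marshal, proto.Unmarshal); the group and key values are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/golang/groupcache/groupcachepb"
	"github.com/golang/protobuf/proto"
)

func main() {
	// Fields are pointers because this is a proto2 message, so proto.String
	// is used to take the address of a literal.
	req := &groupcachepb.GetRequest{
		Group: proto.String("thumbnails"),
		Key:   proto.String("cat.png"),
	}
	data, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}

	var decoded groupcachepb.GetRequest
	if err := proto.Unmarshal(data, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetGroup(), decoded.GetKey())
}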
diff --git a/vendor/github.com/golang/groupcache/groupcachepb/groupcache.proto b/vendor/github.com/golang/groupcache/groupcachepb/groupcache.proto
new file mode 100644
index 000000000..b5bdff94f
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/groupcachepb/groupcache.proto
@@ -0,0 +1,34 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+syntax = "proto2";
+
+package groupcachepb;
+
+message GetRequest {
+ required string group = 1;
+ required string key = 2; // not actually required/guaranteed to be UTF-8
+}
+
+message GetResponse {
+ optional bytes value = 1;
+ optional double minute_qps = 2;
+}
+
+service GroupCache {
+ rpc Get(GetRequest) returns (GetResponse) {
+ };
+}
diff --git a/vendor/github.com/golang/groupcache/http.go b/vendor/github.com/golang/groupcache/http.go
new file mode 100644
index 000000000..14eb345a8
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/http.go
@@ -0,0 +1,227 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package groupcache
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+
+ "github.com/golang/groupcache/consistenthash"
+ pb "github.com/golang/groupcache/groupcachepb"
+ "github.com/golang/protobuf/proto"
+)
+
+const defaultBasePath = "/_groupcache/"
+
+const defaultReplicas = 50
+
+// HTTPPool implements PeerPicker for a pool of HTTP peers.
+type HTTPPool struct {
+ // Context optionally specifies a context for the server to use when it
+ // receives a request.
+ // If nil, the server uses a nil Context.
+ Context func(*http.Request) Context
+
+ // Transport optionally specifies an http.RoundTripper for the client
+ // to use when it makes a request.
+ // If nil, the client uses http.DefaultTransport.
+ Transport func(Context) http.RoundTripper
+
+ // this peer's base URL, e.g. "https://example.net:8000"
+ self string
+
+ // opts specifies the options.
+ opts HTTPPoolOptions
+
+ mu sync.Mutex // guards peers and httpGetters
+ peers *consistenthash.Map
+ httpGetters map[string]*httpGetter // keyed by e.g. "http://10.0.0.2:8008"
+}
+
+// HTTPPoolOptions are the configurations of a HTTPPool.
+type HTTPPoolOptions struct {
+ // BasePath specifies the HTTP path that will serve groupcache requests.
+ // If blank, it defaults to "/_groupcache/".
+ BasePath string
+
+ // Replicas specifies the number of key replicas on the consistent hash.
+ // If blank, it defaults to 50.
+ Replicas int
+
+ // HashFn specifies the hash function of the consistent hash.
+ // If blank, it defaults to crc32.ChecksumIEEE.
+ HashFn consistenthash.Hash
+}
+
+// NewHTTPPool initializes an HTTP pool of peers, and registers itself as a PeerPicker.
+// For convenience, it also registers itself as an http.Handler with http.DefaultServeMux.
+// The self argument should be a valid base URL that points to the current server,
+// for example "http://example.net:8000".
+func NewHTTPPool(self string) *HTTPPool {
+ p := NewHTTPPoolOpts(self, nil)
+ http.Handle(p.opts.BasePath, p)
+ return p
+}
+
+var httpPoolMade bool
+
+// NewHTTPPoolOpts initializes an HTTP pool of peers with the given options.
+// Unlike NewHTTPPool, this function does not register the created pool as an HTTP handler.
+// The returned *HTTPPool implements http.Handler and must be registered using http.Handle.
+func NewHTTPPoolOpts(self string, o *HTTPPoolOptions) *HTTPPool {
+ if httpPoolMade {
+ panic("groupcache: NewHTTPPool must be called only once")
+ }
+ httpPoolMade = true
+
+ p := &HTTPPool{
+ self: self,
+ httpGetters: make(map[string]*httpGetter),
+ }
+ if o != nil {
+ p.opts = *o
+ }
+ if p.opts.BasePath == "" {
+ p.opts.BasePath = defaultBasePath
+ }
+ if p.opts.Replicas == 0 {
+ p.opts.Replicas = defaultReplicas
+ }
+ p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn)
+
+ RegisterPeerPicker(func() PeerPicker { return p })
+ return p
+}
+
+// Set updates the pool's list of peers.
+// Each peer value should be a valid base URL,
+// for example "http://example.net:8000".
+func (p *HTTPPool) Set(peers ...string) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn)
+ p.peers.Add(peers...)
+ p.httpGetters = make(map[string]*httpGetter, len(peers))
+ for _, peer := range peers {
+ p.httpGetters[peer] = &httpGetter{transport: p.Transport, baseURL: peer + p.opts.BasePath}
+ }
+}
+
+func (p *HTTPPool) PickPeer(key string) (ProtoGetter, bool) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.peers.IsEmpty() {
+ return nil, false
+ }
+ if peer := p.peers.Get(key); peer != p.self {
+ return p.httpGetters[peer], true
+ }
+ return nil, false
+}
+
+func (p *HTTPPool) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Parse request.
+ if !strings.HasPrefix(r.URL.Path, p.opts.BasePath) {
+ panic("HTTPPool serving unexpected path: " + r.URL.Path)
+ }
+ parts := strings.SplitN(r.URL.Path[len(p.opts.BasePath):], "/", 2)
+ if len(parts) != 2 {
+ http.Error(w, "bad request", http.StatusBadRequest)
+ return
+ }
+ groupName := parts[0]
+ key := parts[1]
+
+ // Fetch the value for this group/key.
+ group := GetGroup(groupName)
+ if group == nil {
+ http.Error(w, "no such group: "+groupName, http.StatusNotFound)
+ return
+ }
+ var ctx Context
+ if p.Context != nil {
+ ctx = p.Context(r)
+ }
+
+ group.Stats.ServerRequests.Add(1)
+ var value []byte
+ err := group.Get(ctx, key, AllocatingByteSliceSink(&value))
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // Write the value to the response body as a proto message.
+ body, err := proto.Marshal(&pb.GetResponse{Value: value})
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/x-protobuf")
+ w.Write(body)
+}
+
+type httpGetter struct {
+ transport func(Context) http.RoundTripper
+ baseURL string
+}
+
+var bufferPool = sync.Pool{
+ New: func() interface{} { return new(bytes.Buffer) },
+}
+
+func (h *httpGetter) Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error {
+ u := fmt.Sprintf(
+ "%v%v/%v",
+ h.baseURL,
+ url.QueryEscape(in.GetGroup()),
+ url.QueryEscape(in.GetKey()),
+ )
+ req, err := http.NewRequest("GET", u, nil)
+ if err != nil {
+ return err
+ }
+ tr := http.DefaultTransport
+ if h.transport != nil {
+ tr = h.transport(context)
+ }
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return fmt.Errorf("server returned: %v", res.Status)
+ }
+ b := bufferPool.Get().(*bytes.Buffer)
+ b.Reset()
+ defer bufferPool.Put(b)
+ _, err = io.Copy(b, res.Body)
+ if err != nil {
+ return fmt.Errorf("reading response body: %v", err)
+ }
+ err = proto.Unmarshal(b.Bytes(), out)
+ if err != nil {
+ return fmt.Errorf("decoding response body: %v", err)
+ }
+ return nil
+}
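
Wiring the pool into a process is short; a sketch assuming three peers (addresses hypothetical), each of which runs the same code with its own self URL:

package main

import (
	"log"
	"net/http"

	"github.com/golang/groupcache"
)

func main() {
	// Each process lists the full peer set, including itself, using the
	// same base URLs everywhere.
	self := "http://10.0.0.1:8080"
	pool := groupcache.NewHTTPPool(self) // also registers on http.DefaultServeMux
	pool.Set("http://10.0.0.1:8080", "http://10.0.0.2:8080", "http://10.0.0.3:8080")

	// Groups created in this process are now served to peers under
	// /_groupcache/<group>/<key>, and remote keys are fetched the same way.
	log.Fatal(http.ListenAndServe(":8080", nil))
}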
diff --git a/vendor/github.com/golang/groupcache/http_test.go b/vendor/github.com/golang/groupcache/http_test.go
new file mode 100644
index 000000000..b42edd7f0
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/http_test.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package groupcache
+
+import (
+ "errors"
+ "flag"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+var (
+ peerAddrs = flag.String("test_peer_addrs", "", "Comma-separated list of peer addresses; used by TestHTTPPool")
+ peerIndex = flag.Int("test_peer_index", -1, "Index of which peer this child is; used by TestHTTPPool")
+ peerChild = flag.Bool("test_peer_child", false, "True if running as a child process; used by TestHTTPPool")
+)
+
+func TestHTTPPool(t *testing.T) {
+ if *peerChild {
+ beChildForTestHTTPPool()
+ os.Exit(0)
+ }
+
+ const (
+ nChild = 4
+ nGets = 100
+ )
+
+ var childAddr []string
+ for i := 0; i < nChild; i++ {
+ childAddr = append(childAddr, pickFreeAddr(t))
+ }
+
+ var cmds []*exec.Cmd
+ var wg sync.WaitGroup
+ for i := 0; i < nChild; i++ {
+ cmd := exec.Command(os.Args[0],
+ "--test.run=TestHTTPPool",
+ "--test_peer_child",
+ "--test_peer_addrs="+strings.Join(childAddr, ","),
+ "--test_peer_index="+strconv.Itoa(i),
+ )
+ cmds = append(cmds, cmd)
+ wg.Add(1)
+ if err := cmd.Start(); err != nil {
+ t.Fatal("failed to start child process: ", err)
+ }
+ go awaitAddrReady(t, childAddr[i], &wg)
+ }
+ defer func() {
+ for i := 0; i < nChild; i++ {
+ if cmds[i].Process != nil {
+ cmds[i].Process.Kill()
+ }
+ }
+ }()
+ wg.Wait()
+
+ // Use a dummy self address so that we don't handle gets in-process.
+ p := NewHTTPPool("should-be-ignored")
+ p.Set(addrToURL(childAddr)...)
+
+ // Dummy getter function. Gets should go to children only.
+ // The only time this process will handle a get is when the
+ // children can't be contacted for some reason.
+ getter := GetterFunc(func(ctx Context, key string, dest Sink) error {
+ return errors.New("parent getter called; something's wrong")
+ })
+ g := NewGroup("httpPoolTest", 1<<20, getter)
+
+ for _, key := range testKeys(nGets) {
+ var value string
+ if err := g.Get(nil, key, StringSink(&value)); err != nil {
+ t.Fatal(err)
+ }
+ if suffix := ":" + key; !strings.HasSuffix(value, suffix) {
+ t.Errorf("Get(%q) = %q, want value ending in %q", key, value, suffix)
+ }
+ t.Logf("Get key=%q, value=%q (peer:key)", key, value)
+ }
+}
+
+func testKeys(n int) (keys []string) {
+ keys = make([]string, n)
+ for i := range keys {
+ keys[i] = strconv.Itoa(i)
+ }
+ return
+}
+
+func beChildForTestHTTPPool() {
+ addrs := strings.Split(*peerAddrs, ",")
+
+ p := NewHTTPPool("http://" + addrs[*peerIndex])
+ p.Set(addrToURL(addrs)...)
+
+ getter := GetterFunc(func(ctx Context, key string, dest Sink) error {
+ dest.SetString(strconv.Itoa(*peerIndex) + ":" + key)
+ return nil
+ })
+ NewGroup("httpPoolTest", 1<<20, getter)
+
+ log.Fatal(http.ListenAndServe(addrs[*peerIndex], p))
+}
+
+// This is racy. Another process could swoop in and steal the port between the
+// call to this function and the next listen call. Should be okay though.
+// The proper way would be to pass l.File() as ExtraFiles to the child
+// process, and then close your copy once the child starts.
+func pickFreeAddr(t *testing.T) string {
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+ return l.Addr().String()
+}
+
+func addrToURL(addr []string) []string {
+ url := make([]string, len(addr))
+ for i := range addr {
+ url[i] = "http://" + addr[i]
+ }
+ return url
+}
+
+func awaitAddrReady(t *testing.T, addr string, wg *sync.WaitGroup) {
+ defer wg.Done()
+ const max = 1 * time.Second
+ tries := 0
+ for {
+ tries++
+ c, err := net.Dial("tcp", addr)
+ if err == nil {
+ c.Close()
+ return
+ }
+ delay := time.Duration(tries) * 25 * time.Millisecond
+ if delay > max {
+ delay = max
+ }
+ time.Sleep(delay)
+ }
+}
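
The child processes above are, in effect, complete groupcache peers. For orientation, a minimal standalone peer looks roughly like the sketch below; the addresses, group name, and getter body are placeholders, not part of the vendored code.

package main

import (
	"log"
	"net/http"
	"strings"

	"github.com/golang/groupcache"
)

func main() {
	self := "http://127.0.0.1:8001"
	peers := []string{"http://127.0.0.1:8001", "http://127.0.0.1:8002"}

	// NewHTTPPool registers this process as a peer; Set tells it about the others.
	pool := groupcache.NewHTTPPool(self)
	pool.Set(peers...)

	// The getter runs only when this peer owns the key and the value is not cached.
	getter := groupcache.GetterFunc(func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
		return dest.SetString(strings.ToUpper(key))
	})
	g := groupcache.NewGroup("example", 1<<20, getter)

	var v string
	if err := g.Get(nil, "hello", groupcache.StringSink(&v)); err != nil {
		log.Fatal(err)
	}
	log.Printf("hello -> %q", v)

	// The pool is also the http.Handler that serves peer-to-peer cache requests.
	log.Fatal(http.ListenAndServe("127.0.0.1:8001", pool))
}
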
diff --git a/vendor/github.com/golang/groupcache/lru/lru_test.go b/vendor/github.com/golang/groupcache/lru/lru_test.go
new file mode 100644
index 000000000..98a2656e8
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/lru/lru_test.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lru
+
+import (
+ "testing"
+)
+
+type simpleStruct struct {
+ int
+ string
+}
+
+type complexStruct struct {
+ int
+ simpleStruct
+}
+
+var getTests = []struct {
+ name string
+ keyToAdd interface{}
+ keyToGet interface{}
+ expectedOk bool
+}{
+ {"string_hit", "myKey", "myKey", true},
+ {"string_miss", "myKey", "nonsense", false},
+ {"simple_struct_hit", simpleStruct{1, "two"}, simpleStruct{1, "two"}, true},
+ {"simeple_struct_miss", simpleStruct{1, "two"}, simpleStruct{0, "noway"}, false},
+ {"complex_struct_hit", complexStruct{1, simpleStruct{2, "three"}},
+ complexStruct{1, simpleStruct{2, "three"}}, true},
+}
+
+func TestGet(t *testing.T) {
+ for _, tt := range getTests {
+ lru := New(0)
+ lru.Add(tt.keyToAdd, 1234)
+ val, ok := lru.Get(tt.keyToGet)
+ if ok != tt.expectedOk {
+ t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok)
+ } else if ok && val != 1234 {
+ t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val)
+ }
+ }
+}
+
+func TestRemove(t *testing.T) {
+ lru := New(0)
+ lru.Add("myKey", 1234)
+ if val, ok := lru.Get("myKey"); !ok {
+ t.Fatal("TestRemove returned no match")
+ } else if val != 1234 {
+ t.Fatalf("TestRemove failed. Expected %d, got %v", 1234, val)
+ }
+
+ lru.Remove("myKey")
+ if _, ok := lru.Get("myKey"); ok {
+ t.Fatal("TestRemove returned a removed entry")
+ }
+}
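
The tests above use New(0), i.e. an unbounded cache. A small sketch of bounded use with the eviction callback (keys and values are arbitrary):

package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	c := lru.New(2) // keep at most two entries
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // evicts "a", the least recently used entry

	if _, ok := c.Get("a"); !ok {
		fmt.Println(`"a" is gone; cache length:`, c.Len())
	}
}
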
diff --git a/vendor/github.com/golang/groupcache/peers.go b/vendor/github.com/golang/groupcache/peers.go
new file mode 100644
index 000000000..a74a79b8f
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/peers.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// peers.go defines how processes find and communicate with their peers.
+
+package groupcache
+
+import (
+ pb "github.com/golang/groupcache/groupcachepb"
+)
+
+// Context is an opaque value passed through calls to the
+// ProtoGetter. It may be nil if your ProtoGetter implementation does
+// not require a context.
+type Context interface{}
+
+// ProtoGetter is the interface that must be implemented by a peer.
+type ProtoGetter interface {
+ Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error
+}
+
+// PeerPicker is the interface that must be implemented to locate
+// the peer that owns a specific key.
+type PeerPicker interface {
+ // PickPeer returns the peer that owns the specific key
+ // and true to indicate that a remote peer was nominated.
+ // It returns nil, false if the key owner is the current peer.
+ PickPeer(key string) (peer ProtoGetter, ok bool)
+}
+
+// NoPeers is an implementation of PeerPicker that never finds a peer.
+type NoPeers struct{}
+
+func (NoPeers) PickPeer(key string) (peer ProtoGetter, ok bool) { return }
+
+var (
+ portPicker func() PeerPicker
+)
+
+// RegisterPeerPicker registers the peer initialization function.
+// It is called once, when the first group is created.
+func RegisterPeerPicker(fn func() PeerPicker) {
+ if portPicker != nil {
+ panic("RegisterPeerPicker called more than once")
+ }
+ portPicker = fn
+}
+
+func getPeers() PeerPicker {
+ if portPicker == nil {
+ return NoPeers{}
+ }
+ pk := portPicker()
+ if pk == nil {
+ pk = NoPeers{}
+ }
+ return pk
+}
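
HTTPPool (in http.go) is the stock PeerPicker, but the interfaces above can be satisfied by anything. Below is a toy, purely illustrative sketch of a custom picker, assuming the groupcachepb accessors; note that RegisterPeerPicker must run before the first group is created and may only be called once (NewHTTPPool already does this for you).

package main

import (
	"strings"

	"github.com/golang/groupcache"
	pb "github.com/golang/groupcache/groupcachepb"
)

// echoPeer is a fake remote peer that "loads" a value in-process.
type echoPeer struct{}

func (echoPeer) Get(ctx groupcache.Context, in *pb.GetRequest, out *pb.GetResponse) error {
	out.Value = []byte(in.GetGroup() + ":" + in.GetKey())
	return nil
}

// prefixPicker owns keys starting with "local:" and sends the rest to echoPeer.
type prefixPicker struct{}

func (prefixPicker) PickPeer(key string) (groupcache.ProtoGetter, bool) {
	if strings.HasPrefix(key, "local:") {
		return nil, false // this process owns the key
	}
	return echoPeer{}, true
}

func main() {
	groupcache.RegisterPeerPicker(func() groupcache.PeerPicker { return prefixPicker{} })
	// ... create groups and serve as usual.
}
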
diff --git a/vendor/github.com/golang/groupcache/singleflight/singleflight.go b/vendor/github.com/golang/groupcache/singleflight/singleflight.go
new file mode 100644
index 000000000..ff2c2ee4f
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/singleflight/singleflight.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package singleflight provides a duplicate function call suppression
+// mechanism.
+package singleflight
+
+import "sync"
+
+// call is an in-flight or completed Do call
+type call struct {
+ wg sync.WaitGroup
+ val interface{}
+ err error
+}
+
+// Group represents a class of work and forms a namespace in which
+// units of work can be executed with duplicate suppression.
+type Group struct {
+ mu sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ g.mu.Unlock()
+ c.wg.Wait()
+ return c.val, c.err
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ c.val, c.err = fn()
+ c.wg.Done()
+
+ g.mu.Lock()
+ delete(g.m, key)
+ g.mu.Unlock()
+
+ return c.val, c.err
+}
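
A brief usage sketch of the suppression behavior Do documents above; the slow loader body is a stand-in:

package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/golang/groupcache/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All five callers share a single execution of this function.
			v, _ := g.Do("config", func() (interface{}, error) {
				time.Sleep(50 * time.Millisecond) // simulate a slow load
				return "loaded", nil
			})
			fmt.Println(v)
		}()
	}
	wg.Wait()
}
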
diff --git a/vendor/github.com/golang/groupcache/singleflight/singleflight_test.go b/vendor/github.com/golang/groupcache/singleflight/singleflight_test.go
new file mode 100644
index 000000000..47b4d3dc0
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/singleflight/singleflight_test.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package singleflight
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func TestDo(t *testing.T) {
+ var g Group
+ v, err := g.Do("key", func() (interface{}, error) {
+ return "bar", nil
+ })
+ if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want {
+ t.Errorf("Do = %v; want %v", got, want)
+ }
+ if err != nil {
+ t.Errorf("Do error = %v", err)
+ }
+}
+
+func TestDoErr(t *testing.T) {
+ var g Group
+ someErr := errors.New("Some error")
+ v, err := g.Do("key", func() (interface{}, error) {
+ return nil, someErr
+ })
+ if err != someErr {
+ t.Errorf("Do error = %v; want someErr", err)
+ }
+ if v != nil {
+ t.Errorf("unexpected non-nil value %#v", v)
+ }
+}
+
+func TestDoDupSuppress(t *testing.T) {
+ var g Group
+ c := make(chan string)
+ var calls int32
+ fn := func() (interface{}, error) {
+ atomic.AddInt32(&calls, 1)
+ return <-c, nil
+ }
+
+ const n = 10
+ var wg sync.WaitGroup
+ for i := 0; i < n; i++ {
+ wg.Add(1)
+ go func() {
+ v, err := g.Do("key", fn)
+ if err != nil {
+ t.Errorf("Do error: %v", err)
+ }
+ if v.(string) != "bar" {
+ t.Errorf("got %q; want %q", v, "bar")
+ }
+ wg.Done()
+ }()
+ }
+ time.Sleep(100 * time.Millisecond) // let goroutines above block
+ c <- "bar"
+ wg.Wait()
+ if got := atomic.LoadInt32(&calls); got != 1 {
+ t.Errorf("number of calls = %d; want 1", got)
+ }
+}
diff --git a/vendor/github.com/golang/groupcache/sinks.go b/vendor/github.com/golang/groupcache/sinks.go
new file mode 100644
index 000000000..cb42b41b4
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/sinks.go
@@ -0,0 +1,322 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package groupcache
+
+import (
+ "errors"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// A Sink receives data from a Get call.
+//
+// Implementations of Getter must call exactly one of the Set methods
+// on success.
+type Sink interface {
+ // SetString sets the value to s.
+ SetString(s string) error
+
+ // SetBytes sets the value to the contents of v.
+ // The caller retains ownership of v.
+ SetBytes(v []byte) error
+
+ // SetProto sets the value to the encoded version of m.
+ // The caller retains ownership of m.
+ SetProto(m proto.Message) error
+
+ // view returns a frozen view of the bytes for caching.
+ view() (ByteView, error)
+}
+
+func cloneBytes(b []byte) []byte {
+ c := make([]byte, len(b))
+ copy(c, b)
+ return c
+}
+
+func setSinkView(s Sink, v ByteView) error {
+ // A viewSetter is a Sink that can also receive its value from
+ // a ByteView. This is a fast path to minimize copies when the
+ // item was already cached locally in memory (where it's
+ // cached as a ByteView).
+ type viewSetter interface {
+ setView(v ByteView) error
+ }
+ if vs, ok := s.(viewSetter); ok {
+ return vs.setView(v)
+ }
+ if v.b != nil {
+ return s.SetBytes(v.b)
+ }
+ return s.SetString(v.s)
+}
+
+// StringSink returns a Sink that populates the provided string pointer.
+func StringSink(sp *string) Sink {
+ return &stringSink{sp: sp}
+}
+
+type stringSink struct {
+ sp *string
+ v ByteView
+ // TODO(bradfitz): track whether any Sets were called.
+}
+
+func (s *stringSink) view() (ByteView, error) {
+ // TODO(bradfitz): return an error if no Set was called
+ return s.v, nil
+}
+
+func (s *stringSink) SetString(v string) error {
+ s.v.b = nil
+ s.v.s = v
+ *s.sp = v
+ return nil
+}
+
+func (s *stringSink) SetBytes(v []byte) error {
+ return s.SetString(string(v))
+}
+
+func (s *stringSink) SetProto(m proto.Message) error {
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return err
+ }
+ s.v.b = b
+ *s.sp = string(b)
+ return nil
+}
+
+// ByteViewSink returns a Sink that populates a ByteView.
+func ByteViewSink(dst *ByteView) Sink {
+ if dst == nil {
+ panic("nil dst")
+ }
+ return &byteViewSink{dst: dst}
+}
+
+type byteViewSink struct {
+ dst *ByteView
+
+ // If this code ever ends up tracking that at least one set*
+ // method was called, don't make it an error to call set
+ // methods multiple times. Lorry's payload.go does that, and
+ // it makes sense. The comment at the top of this file about
+ // "exactly one of the Set methods" is overly strict. We
+ // really care about at least once (in a handler), but if
+ // multiple handlers fail (or multiple functions in a program
+ // using a Sink), it's okay to re-use the same one.
+}
+
+func (s *byteViewSink) setView(v ByteView) error {
+ *s.dst = v
+ return nil
+}
+
+func (s *byteViewSink) view() (ByteView, error) {
+ return *s.dst, nil
+}
+
+func (s *byteViewSink) SetProto(m proto.Message) error {
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return err
+ }
+ *s.dst = ByteView{b: b}
+ return nil
+}
+
+func (s *byteViewSink) SetBytes(b []byte) error {
+ *s.dst = ByteView{b: cloneBytes(b)}
+ return nil
+}
+
+func (s *byteViewSink) SetString(v string) error {
+ *s.dst = ByteView{s: v}
+ return nil
+}
+
+// ProtoSink returns a sink that unmarshals binary proto values into m.
+func ProtoSink(m proto.Message) Sink {
+ return &protoSink{
+ dst: m,
+ }
+}
+
+type protoSink struct {
+ dst proto.Message // authoritative value
+ typ string
+
+ v ByteView // encoded
+}
+
+func (s *protoSink) view() (ByteView, error) {
+ return s.v, nil
+}
+
+func (s *protoSink) SetBytes(b []byte) error {
+ err := proto.Unmarshal(b, s.dst)
+ if err != nil {
+ return err
+ }
+ s.v.b = cloneBytes(b)
+ s.v.s = ""
+ return nil
+}
+
+func (s *protoSink) SetString(v string) error {
+ b := []byte(v)
+ err := proto.Unmarshal(b, s.dst)
+ if err != nil {
+ return err
+ }
+ s.v.b = b
+ s.v.s = ""
+ return nil
+}
+
+func (s *protoSink) SetProto(m proto.Message) error {
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return err
+ }
+ // TODO(bradfitz): optimize for the same-task case more and write
+ // right through? We would need to document ownership rules at
+ // the same time, but then we could just assign *dst = *m
+ // here. This works for now:
+ err = proto.Unmarshal(b, s.dst)
+ if err != nil {
+ return err
+ }
+ s.v.b = b
+ s.v.s = ""
+ return nil
+}
+
+// AllocatingByteSliceSink returns a Sink that allocates
+// a byte slice to hold the received value and assigns
+// it to *dst. The memory is not retained by groupcache.
+func AllocatingByteSliceSink(dst *[]byte) Sink {
+ return &allocBytesSink{dst: dst}
+}
+
+type allocBytesSink struct {
+ dst *[]byte
+ v ByteView
+}
+
+func (s *allocBytesSink) view() (ByteView, error) {
+ return s.v, nil
+}
+
+func (s *allocBytesSink) setView(v ByteView) error {
+ if v.b != nil {
+ *s.dst = cloneBytes(v.b)
+ } else {
+ *s.dst = []byte(v.s)
+ }
+ s.v = v
+ return nil
+}
+
+func (s *allocBytesSink) SetProto(m proto.Message) error {
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return err
+ }
+ return s.setBytesOwned(b)
+}
+
+func (s *allocBytesSink) SetBytes(b []byte) error {
+ return s.setBytesOwned(cloneBytes(b))
+}
+
+func (s *allocBytesSink) setBytesOwned(b []byte) error {
+ if s.dst == nil {
+ return errors.New("nil AllocatingByteSliceSink *[]byte dst")
+ }
+ *s.dst = cloneBytes(b) // another copy, protecting the read-only s.v.b view
+ s.v.b = b
+ s.v.s = ""
+ return nil
+}
+
+func (s *allocBytesSink) SetString(v string) error {
+ if s.dst == nil {
+ return errors.New("nil AllocatingByteSliceSink *[]byte dst")
+ }
+ *s.dst = []byte(v)
+ s.v.b = nil
+ s.v.s = v
+ return nil
+}
+
+// TruncatingByteSliceSink returns a Sink that writes up to len(*dst)
+// bytes to *dst. If more bytes are available, they're silently
+// truncated. If fewer bytes are available than len(*dst), *dst
+// is shrunk to fit the number of bytes available.
+func TruncatingByteSliceSink(dst *[]byte) Sink {
+ return &truncBytesSink{dst: dst}
+}
+
+type truncBytesSink struct {
+ dst *[]byte
+ v ByteView
+}
+
+func (s *truncBytesSink) view() (ByteView, error) {
+ return s.v, nil
+}
+
+func (s *truncBytesSink) SetProto(m proto.Message) error {
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return err
+ }
+ return s.setBytesOwned(b)
+}
+
+func (s *truncBytesSink) SetBytes(b []byte) error {
+ return s.setBytesOwned(cloneBytes(b))
+}
+
+func (s *truncBytesSink) setBytesOwned(b []byte) error {
+ if s.dst == nil {
+ return errors.New("nil TruncatingByteSliceSink *[]byte dst")
+ }
+ n := copy(*s.dst, b)
+ if n < len(*s.dst) {
+ *s.dst = (*s.dst)[:n]
+ }
+ s.v.b = b
+ s.v.s = ""
+ return nil
+}
+
+func (s *truncBytesSink) SetString(v string) error {
+ if s.dst == nil {
+ return errors.New("nil TruncatingByteSliceSink *[]byte dst")
+ }
+ n := copy(*s.dst, v)
+ if n < len(*s.dst) {
+ *s.dst = (*s.dst)[:n]
+ }
+ s.v.b = nil
+ s.v.s = v
+ return nil
+}
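
To make the trade-offs between the sink constructors above concrete, here is a hedged sketch of the three common ones used against a purely local group (group name, keys, and values are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/golang/groupcache"
)

func main() {
	getter := groupcache.GetterFunc(func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
		return dest.SetBytes([]byte("value-for-" + key))
	})
	g := groupcache.NewGroup("sinks-demo", 1<<20, getter)

	// StringSink: receive the value as a string.
	var s string
	if err := g.Get(nil, "k1", groupcache.StringSink(&s)); err != nil {
		log.Fatal(err)
	}

	// AllocatingByteSliceSink: groupcache allocates a fresh slice the caller owns.
	var b []byte
	if err := g.Get(nil, "k2", groupcache.AllocatingByteSliceSink(&b)); err != nil {
		log.Fatal(err)
	}

	// TruncatingByteSliceSink: copy into a caller-provided buffer; extra bytes are
	// dropped, and the slice is shrunk if the value is shorter than the buffer.
	buf := make([]byte, 8)
	if err := g.Get(nil, "k3", groupcache.TruncatingByteSliceSink(&buf)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s, string(b), string(buf))
}
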
diff --git a/vendor/github.com/golang/groupcache/testpb/test.pb.go b/vendor/github.com/golang/groupcache/testpb/test.pb.go
new file mode 100644
index 000000000..038040d15
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/testpb/test.pb.go
@@ -0,0 +1,235 @@
+// Code generated by protoc-gen-go.
+// source: test.proto
+// DO NOT EDIT!
+
+package testpb
+
+import proto "github.com/golang/protobuf/proto"
+import json "encoding/json"
+import math "math"
+
+// Reference proto, json, and math imports to suppress error if they are not otherwise used.
+var _ = proto.Marshal
+var _ = &json.SyntaxError{}
+var _ = math.Inf
+
+type TestMessage struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ City *string `protobuf:"bytes,2,opt,name=city" json:"city,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TestMessage) Reset() { *m = TestMessage{} }
+func (m *TestMessage) String() string { return proto.CompactTextString(m) }
+func (*TestMessage) ProtoMessage() {}
+
+func (m *TestMessage) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *TestMessage) GetCity() string {
+ if m != nil && m.City != nil {
+ return *m.City
+ }
+ return ""
+}
+
+type TestRequest struct {
+ Lower *string `protobuf:"bytes,1,req,name=lower" json:"lower,omitempty"`
+ RepeatCount *int32 `protobuf:"varint,2,opt,name=repeat_count,def=1" json:"repeat_count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TestRequest) Reset() { *m = TestRequest{} }
+func (m *TestRequest) String() string { return proto.CompactTextString(m) }
+func (*TestRequest) ProtoMessage() {}
+
+const Default_TestRequest_RepeatCount int32 = 1
+
+func (m *TestRequest) GetLower() string {
+ if m != nil && m.Lower != nil {
+ return *m.Lower
+ }
+ return ""
+}
+
+func (m *TestRequest) GetRepeatCount() int32 {
+ if m != nil && m.RepeatCount != nil {
+ return *m.RepeatCount
+ }
+ return Default_TestRequest_RepeatCount
+}
+
+type TestResponse struct {
+ Value *string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TestResponse) Reset() { *m = TestResponse{} }
+func (m *TestResponse) String() string { return proto.CompactTextString(m) }
+func (*TestResponse) ProtoMessage() {}
+
+func (m *TestResponse) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type CacheStats struct {
+ Items *int64 `protobuf:"varint,1,opt,name=items" json:"items,omitempty"`
+ Bytes *int64 `protobuf:"varint,2,opt,name=bytes" json:"bytes,omitempty"`
+ Gets *int64 `protobuf:"varint,3,opt,name=gets" json:"gets,omitempty"`
+ Hits *int64 `protobuf:"varint,4,opt,name=hits" json:"hits,omitempty"`
+ Evicts *int64 `protobuf:"varint,5,opt,name=evicts" json:"evicts,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CacheStats) Reset() { *m = CacheStats{} }
+func (m *CacheStats) String() string { return proto.CompactTextString(m) }
+func (*CacheStats) ProtoMessage() {}
+
+func (m *CacheStats) GetItems() int64 {
+ if m != nil && m.Items != nil {
+ return *m.Items
+ }
+ return 0
+}
+
+func (m *CacheStats) GetBytes() int64 {
+ if m != nil && m.Bytes != nil {
+ return *m.Bytes
+ }
+ return 0
+}
+
+func (m *CacheStats) GetGets() int64 {
+ if m != nil && m.Gets != nil {
+ return *m.Gets
+ }
+ return 0
+}
+
+func (m *CacheStats) GetHits() int64 {
+ if m != nil && m.Hits != nil {
+ return *m.Hits
+ }
+ return 0
+}
+
+func (m *CacheStats) GetEvicts() int64 {
+ if m != nil && m.Evicts != nil {
+ return *m.Evicts
+ }
+ return 0
+}
+
+type StatsResponse struct {
+ Gets *int64 `protobuf:"varint,1,opt,name=gets" json:"gets,omitempty"`
+ CacheHits *int64 `protobuf:"varint,12,opt,name=cache_hits" json:"cache_hits,omitempty"`
+ Fills *int64 `protobuf:"varint,2,opt,name=fills" json:"fills,omitempty"`
+ TotalAlloc *uint64 `protobuf:"varint,3,opt,name=total_alloc" json:"total_alloc,omitempty"`
+ MainCache *CacheStats `protobuf:"bytes,4,opt,name=main_cache" json:"main_cache,omitempty"`
+ HotCache *CacheStats `protobuf:"bytes,5,opt,name=hot_cache" json:"hot_cache,omitempty"`
+ ServerIn *int64 `protobuf:"varint,6,opt,name=server_in" json:"server_in,omitempty"`
+ Loads *int64 `protobuf:"varint,8,opt,name=loads" json:"loads,omitempty"`
+ PeerLoads *int64 `protobuf:"varint,9,opt,name=peer_loads" json:"peer_loads,omitempty"`
+ PeerErrors *int64 `protobuf:"varint,10,opt,name=peer_errors" json:"peer_errors,omitempty"`
+ LocalLoads *int64 `protobuf:"varint,11,opt,name=local_loads" json:"local_loads,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StatsResponse) Reset() { *m = StatsResponse{} }
+func (m *StatsResponse) String() string { return proto.CompactTextString(m) }
+func (*StatsResponse) ProtoMessage() {}
+
+func (m *StatsResponse) GetGets() int64 {
+ if m != nil && m.Gets != nil {
+ return *m.Gets
+ }
+ return 0
+}
+
+func (m *StatsResponse) GetCacheHits() int64 {
+ if m != nil && m.CacheHits != nil {
+ return *m.CacheHits
+ }
+ return 0
+}
+
+func (m *StatsResponse) GetFills() int64 {
+ if m != nil && m.Fills != nil {
+ return *m.Fills
+ }
+ return 0
+}
+
+func (m *StatsResponse) GetTotalAlloc() uint64 {
+ if m != nil && m.TotalAlloc != nil {
+ return *m.TotalAlloc
+ }
+ return 0
+}
+
+func (m *StatsResponse) GetMainCache() *CacheStats {
+ if m != nil {
+ return m.MainCache
+ }
+ return nil
+}
+
+func (m *StatsResponse) GetHotCache() *CacheStats {
+ if m != nil {
+ return m.HotCache
+ }
+ return nil
+}
+
+func (m *StatsResponse) GetServerIn() int64 {
+ if m != nil && m.ServerIn != nil {
+ return *m.ServerIn
+ }
+ return 0
+}
+
+func (m *StatsResponse) GetLoads() int64 {
+ if m != nil && m.Loads != nil {
+ return *m.Loads
+ }
+ return 0
+}
+
+func (m *StatsResponse) GetPeerLoads() int64 {
+ if m != nil && m.PeerLoads != nil {
+ return *m.PeerLoads
+ }
+ return 0
+}
+
+func (m *StatsResponse) GetPeerErrors() int64 {
+ if m != nil && m.PeerErrors != nil {
+ return *m.PeerErrors
+ }
+ return 0
+}
+
+func (m *StatsResponse) GetLocalLoads() int64 {
+ if m != nil && m.LocalLoads != nil {
+ return *m.LocalLoads
+ }
+ return 0
+}
+
+type Empty struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Empty) Reset() { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage() {}
+
+func init() {
+}
diff --git a/vendor/github.com/golang/groupcache/testpb/test.proto b/vendor/github.com/golang/groupcache/testpb/test.proto
new file mode 100644
index 000000000..b9dc6c9a0
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/testpb/test.proto
@@ -0,0 +1,63 @@
+/*
+Copyright 2012 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+syntax = "proto2";
+
+package testpb;
+
+message TestMessage {
+ optional string name = 1;
+ optional string city = 2;
+}
+
+message TestRequest {
+ required string lower = 1; // to be returned upper case
+ optional int32 repeat_count = 2 [default = 1]; // .. this many times
+}
+
+message TestResponse {
+ optional string value = 1;
+}
+
+message CacheStats {
+ optional int64 items = 1;
+ optional int64 bytes = 2;
+ optional int64 gets = 3;
+ optional int64 hits = 4;
+ optional int64 evicts = 5;
+}
+
+message StatsResponse {
+ optional int64 gets = 1;
+ optional int64 cache_hits = 12;
+ optional int64 fills = 2;
+ optional uint64 total_alloc = 3;
+ optional CacheStats main_cache = 4;
+ optional CacheStats hot_cache = 5;
+ optional int64 server_in = 6;
+ optional int64 loads = 8;
+ optional int64 peer_loads = 9;
+ optional int64 peer_errors = 10;
+ optional int64 local_loads = 11;
+}
+
+message Empty {}
+
+service GroupCacheTest {
+ rpc InitPeers(Empty) returns (Empty) {};
+ rpc Get(TestRequest) returns (TestResponse) {};
+ rpc GetStats(Empty) returns (StatsResponse) {};
+}
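
As a quick illustration of the generated accessors and the proto2 default declared for repeat_count, a standalone sketch (not part of the vendored code):

package main

import (
	"fmt"
	"log"

	"github.com/golang/groupcache/testpb"
	"github.com/golang/protobuf/proto"
)

func main() {
	req := &testpb.TestRequest{
		Lower: proto.String("hello"),
		// RepeatCount left unset: GetRepeatCount() falls back to the
		// proto2 default of 1 declared in test.proto.
	}
	fmt.Println(req.GetLower(), req.GetRepeatCount())

	b, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	var got testpb.TestRequest
	if err := proto.Unmarshal(b, &got); err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.GetLower(), got.GetRepeatCount())
}
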