Diffstat (limited to 'vendor/github.com/hashicorp/memberlist/memberlist.go')
-rw-r--r--  vendor/github.com/hashicorp/memberlist/memberlist.go  49
1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/vendor/github.com/hashicorp/memberlist/memberlist.go b/vendor/github.com/hashicorp/memberlist/memberlist.go
index 9ea195cfc..d5c175e5b 100644
--- a/vendor/github.com/hashicorp/memberlist/memberlist.go
+++ b/vendor/github.com/hashicorp/memberlist/memberlist.go
@@ -22,9 +22,10 @@ import (
"strconv"
"strings"
"sync"
+ "sync/atomic"
"time"
- "github.com/hashicorp/go-multierror"
+ multierror "github.com/hashicorp/go-multierror"
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/miekg/dns"
)
@@ -35,11 +36,14 @@ type Memberlist struct {
numNodes uint32 // Number of known nodes (estimate)
config *Config
- shutdown bool
+ shutdown int32 // Used as an atomic boolean value
shutdownCh chan struct{}
- leave bool
+ leave int32 // Used as an atomic boolean value
leaveBroadcast chan struct{}
+ shutdownLock sync.Mutex // Serializes calls to Shutdown
+ leaveLock sync.Mutex // Serializes calls to Leave
+
transport Transport
handoff chan msgHandoff
@@ -116,19 +120,19 @@ func newMemberlist(conf *Config) (*Memberlist, error) {
// See comment below for details about the retry in here.
makeNetRetry := func(limit int) (*NetTransport, error) {
+ var err error
for try := 0; try < limit; try++ {
- nt, err := NewNetTransport(nc)
- if err == nil {
+ var nt *NetTransport
+ if nt, err = NewNetTransport(nc); err == nil {
return nt, nil
}
-
if strings.Contains(err.Error(), "address already in use") {
logger.Printf("[DEBUG] Got bind error: %v", err)
continue
}
}
- return nil, fmt.Errorf("ran out of tries to obtain an address")
+ return nil, fmt.Errorf("failed to obtain an address: %v", err)
}
// The dynamic bind port operation is inherently racy because
@@ -554,18 +558,17 @@ func (m *Memberlist) NumMembers() (alive int) {
// This method is safe to call multiple times, but must not be called
// after the cluster is already shut down.
func (m *Memberlist) Leave(timeout time.Duration) error {
- m.nodeLock.Lock()
- // We can't defer m.nodeLock.Unlock() because m.deadNode will also try to
- // acquire a lock so we need to Unlock before that.
+ m.leaveLock.Lock()
+ defer m.leaveLock.Unlock()
- if m.shutdown {
- m.nodeLock.Unlock()
+ if m.hasShutdown() {
panic("leave after shutdown")
}
- if !m.leave {
- m.leave = true
+ if !m.hasLeft() {
+ atomic.StoreInt32(&m.leave, 1)
+ m.nodeLock.Lock()
state, ok := m.nodeMap[m.config.Name]
m.nodeLock.Unlock()
if !ok {
@@ -591,8 +594,6 @@ func (m *Memberlist) Leave(timeout time.Duration) error {
return fmt.Errorf("timeout waiting for leave broadcast")
}
}
- } else {
- m.nodeLock.Unlock()
}
return nil
@@ -634,10 +635,10 @@ func (m *Memberlist) ProtocolVersion() uint8 {
//
// This method is safe to call multiple times.
func (m *Memberlist) Shutdown() error {
- m.nodeLock.Lock()
- defer m.nodeLock.Unlock()
+ m.shutdownLock.Lock()
+ defer m.shutdownLock.Unlock()
- if m.shutdown {
+ if m.hasShutdown() {
return nil
}
@@ -647,8 +648,16 @@ func (m *Memberlist) Shutdown() error {
m.transport.Shutdown()
// Now tear down everything else.
- m.shutdown = true
+ atomic.StoreInt32(&m.shutdown, 1)
close(m.shutdownCh)
m.deschedule()
return nil
}
+
+func (m *Memberlist) hasShutdown() bool {
+ return atomic.LoadInt32(&m.shutdown) == 1
+}
+
+func (m *Memberlist) hasLeft() bool {
+ return atomic.LoadInt32(&m.leave) == 1
+}
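
For context, the core pattern this diff adopts is an int32 used as an atomic boolean (read lock-free via hasShutdown/hasLeft) plus a dedicated mutex that only serializes the state-changing call. Below is a minimal standalone sketch of that pattern, not the memberlist code itself; the service type and Close method are hypothetical names.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type service struct {
	shutdown     int32      // used as an atomic boolean value
	shutdownLock sync.Mutex // serializes calls to Close
}

// hasShutdown can be called from any goroutine without taking a lock.
func (s *service) hasShutdown() bool {
	return atomic.LoadInt32(&s.shutdown) == 1
}

// Close is safe to call multiple times; only the first call tears down state.
func (s *service) Close() error {
	s.shutdownLock.Lock()
	defer s.shutdownLock.Unlock()

	if s.hasShutdown() {
		return nil
	}
	atomic.StoreInt32(&s.shutdown, 1)
	// ... tear down resources here ...
	return nil
}

func main() {
	s := &service{}
	fmt.Println(s.hasShutdown()) // false
	s.Close()
	fmt.Println(s.hasShutdown()) // true
}
```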
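The makeNetRetry hunk follows a second small pattern: the error variable is declared outside the retry loop so that, when every attempt fails, the returned error includes the last underlying cause instead of the generic "ran out of tries" message. A minimal sketch under assumed names (tryBind stands in for NewNetTransport and is not part of the memberlist API):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// tryBind is a hypothetical stand-in that always fails, to exercise the retry path.
func tryBind() (string, error) {
	return "", errors.New("address already in use")
}

func bindWithRetry(limit int) (string, error) {
	var err error
	for try := 0; try < limit; try++ {
		var addr string
		if addr, err = tryBind(); err == nil {
			return addr, nil
		}
		if strings.Contains(err.Error(), "address already in use") {
			continue // transient bind races are worth retrying
		}
	}
	// err still holds the last failure, so the caller sees the real cause.
	return "", fmt.Errorf("failed to obtain an address: %v", err)
}

func main() {
	if _, err := bindWithRetry(3); err != nil {
		fmt.Println(err)
	}
}
```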