From bac3376278bfd8125879ca86e8eb26df85858d4c Mon Sep 17 00:00:00 2001 From: Christopher Speller Date: Sun, 22 Jul 2018 20:14:05 -0700 Subject: Updating dependencies (#9139) --- vendor/github.com/go-ini/ini/.travis.yml | 1 - vendor/github.com/go-ini/ini/README.md | 2 +- vendor/github.com/go-ini/ini/ini.go | 8 +- vendor/github.com/go-ini/ini/parser.go | 48 +- vendor/github.com/go-ini/ini/section.go | 1 + vendor/github.com/go-sql-driver/mysql/AUTHORS | 1 + vendor/github.com/go-sql-driver/mysql/README.md | 2 +- vendor/github.com/go-sql-driver/mysql/auth.go | 2 +- vendor/github.com/go-sql-driver/mysql/driver.go | 3 + vendor/github.com/go-sql-driver/mysql/packets.go | 9 +- vendor/github.com/hashicorp/errwrap/README.md | 2 +- .../github.com/hashicorp/go-multierror/format.go | 6 +- vendor/github.com/hashicorp/go-multierror/sort.go | 16 + vendor/github.com/mailru/easyjson/jlexer/lexer.go | 4 +- vendor/github.com/minio/minio-go/api.go | 2 +- .../github.com/minio/minio-go/functional_tests.go | 19 +- .../mitchellh/mapstructure/mapstructure.go | 3 + .../prometheus/client_model/go/metrics.pb.go | 381 ++++++++-- vendor/github.com/prometheus/procfs/mountstats.go | 51 +- vendor/github.com/prometheus/procfs/xfrm.go | 2 +- vendor/go.uber.org/zap/.readme.tmpl | 2 +- vendor/go.uber.org/zap/.travis.yml | 4 +- vendor/go.uber.org/zap/CHANGELOG.md | 12 + vendor/go.uber.org/zap/FAQ.md | 1 + vendor/go.uber.org/zap/README.md | 2 +- vendor/go.uber.org/zap/buffer/buffer.go | 9 + vendor/go.uber.org/zap/config.go | 6 +- vendor/go.uber.org/zap/doc.go | 2 +- vendor/go.uber.org/zap/http_handler.go | 4 +- vendor/go.uber.org/zap/sink.go | 161 +++++ vendor/go.uber.org/zap/writer.go | 51 +- vendor/go.uber.org/zap/zapcore/json_encoder.go | 30 +- vendor/golang.org/x/image/bmp/writer.go | 122 +++- vendor/golang.org/x/net/html/parse.go | 15 +- vendor/golang.org/x/net/http2/client_conn_pool.go | 28 +- .../golang.org/x/net/http2/configure_transport.go | 8 +- vendor/golang.org/x/net/http2/flow.go | 10 +- vendor/golang.org/x/net/http2/frame.go | 63 +- vendor/golang.org/x/net/http2/go111.go | 26 + vendor/golang.org/x/net/http2/go17.go | 15 + vendor/golang.org/x/net/http2/not_go111.go | 17 + vendor/golang.org/x/net/http2/not_go17.go | 8 + vendor/golang.org/x/net/http2/server.go | 32 + vendor/golang.org/x/net/http2/transport.go | 207 +++++- vendor/golang.org/x/net/ipv4/batch.go | 9 +- vendor/golang.org/x/net/ipv4/dgramopt.go | 31 +- vendor/golang.org/x/net/ipv4/doc.go | 2 +- vendor/golang.org/x/net/ipv4/endpoint.go | 21 +- vendor/golang.org/x/net/ipv4/genericopt.go | 10 +- vendor/golang.org/x/net/ipv4/header.go | 3 +- vendor/golang.org/x/net/ipv4/helper.go | 1 + vendor/golang.org/x/net/ipv4/packet.go | 5 +- vendor/golang.org/x/net/ipv4/payload_cmsg.go | 11 +- vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go | 2 +- vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go | 2 +- vendor/golang.org/x/net/ipv4/payload_nocmsg.go | 11 +- vendor/golang.org/x/net/ipv6/batch.go | 5 +- vendor/golang.org/x/net/ipv6/dgramopt.go | 35 +- vendor/golang.org/x/net/ipv6/doc.go | 2 +- vendor/golang.org/x/net/ipv6/endpoint.go | 13 +- vendor/golang.org/x/net/ipv6/genericopt.go | 10 +- vendor/golang.org/x/net/ipv6/helper.go | 1 + vendor/golang.org/x/net/ipv6/payload_cmsg.go | 11 +- vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go | 2 +- vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go | 2 +- vendor/golang.org/x/net/ipv6/payload_nocmsg.go | 11 +- vendor/golang.org/x/sys/unix/fcntl.go | 6 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 8 +- 
vendor/golang.org/x/sys/unix/syscall_dragonfly.go | 2 +- vendor/golang.org/x/sys/unix/syscall_freebsd.go | 2 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 75 +- vendor/golang.org/x/sys/unix/syscall_solaris.go | 14 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 4 +- .../golang.org/x/sys/unix/zsyscall_linux_ppc64.go | 24 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 24 +- vendor/golang.org/x/sys/unix/ztypes_linux_386.go | 8 + vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go | 8 + vendor/golang.org/x/sys/unix/ztypes_linux_arm.go | 8 + vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go | 8 + vendor/golang.org/x/sys/unix/ztypes_linux_mips.go | 8 + .../golang.org/x/sys/unix/ztypes_linux_mips64.go | 8 + .../golang.org/x/sys/unix/ztypes_linux_mips64le.go | 8 + .../golang.org/x/sys/unix/ztypes_linux_mipsle.go | 8 + vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go | 8 + .../golang.org/x/sys/unix/ztypes_linux_ppc64le.go | 8 + vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go | 8 + vendor/golang.org/x/sys/windows/service.go | 18 + vendor/golang.org/x/sys/windows/types_windows.go | 33 +- .../genproto/googleapis/rpc/status/status.pb.go | 6 +- vendor/google.golang.org/grpc/backoff.go | 66 +- vendor/google.golang.org/grpc/balancer/balancer.go | 42 ++ .../google.golang.org/grpc/balancer_v1_wrapper.go | 56 +- vendor/google.golang.org/grpc/channelz/funcs.go | 573 --------------- vendor/google.golang.org/grpc/channelz/types.go | 418 ----------- vendor/google.golang.org/grpc/clientconn.go | 33 +- vendor/google.golang.org/grpc/codes/codes.go | 13 + vendor/google.golang.org/grpc/grpclb.go | 341 --------- .../grpc/grpclb/grpc_lb_v1/messages/messages.pb.go | 799 --------------------- .../grpc/grpclb/grpc_lb_v1/messages/messages.proto | 155 ---- vendor/google.golang.org/grpc/grpclb_picker.go | 159 ---- .../grpc/grpclb_remote_balancer.go | 266 ------- vendor/google.golang.org/grpc/grpclb_util.go | 214 ------ .../grpc/health/grpc_health_v1/health.pb.go | 62 +- .../grpc/health/grpc_health_v1/health.proto | 44 -- vendor/google.golang.org/grpc/health/health.go | 2 +- vendor/google.golang.org/grpc/health/regenerate.sh | 33 + .../grpc/internal/backoff/backoff.go | 78 ++ .../grpc/internal/channelz/funcs.go | 573 +++++++++++++++ .../grpc/internal/channelz/types.go | 418 +++++++++++ .../grpc/internal/grpcrand/grpcrand.go | 56 ++ vendor/google.golang.org/grpc/internal/internal.go | 25 +- vendor/google.golang.org/grpc/picker_wrapper.go | 21 +- .../grpc/resolver/dns/dns_resolver.go | 8 +- .../grpc/resolver_conn_wrapper.go | 6 +- vendor/google.golang.org/grpc/rpc_util.go | 124 ++-- vendor/google.golang.org/grpc/server.go | 28 +- vendor/google.golang.org/grpc/status/go16.go | 42 ++ vendor/google.golang.org/grpc/status/go17.go | 44 ++ .../google.golang.org/grpc/stickiness_linkedmap.go | 97 +++ vendor/google.golang.org/grpc/stream.go | 66 +- .../google.golang.org/grpc/transport/controlbuf.go | 101 ++- .../grpc/transport/flowcontrol.go | 10 +- .../grpc/transport/http2_client.go | 36 +- .../grpc/transport/http2_server.go | 98 +-- .../google.golang.org/grpc/transport/http_util.go | 54 +- .../google.golang.org/grpc/transport/transport.go | 37 +- vendor/google.golang.org/grpc/version.go | 22 + vendor/google.golang.org/grpc/vet.sh | 3 +- vendor/gopkg.in/olivere/elastic.v5/.travis.yml | 1 + vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS | 2 + vendor/gopkg.in/olivere/elastic.v5/client.go | 4 +- .../elastic.v5/search_queries_has_parent.go | 11 + 132 files changed, 3385 insertions(+), 3758 deletions(-) create mode 100644 
vendor/github.com/hashicorp/go-multierror/sort.go create mode 100644 vendor/go.uber.org/zap/sink.go create mode 100644 vendor/golang.org/x/net/http2/go111.go create mode 100644 vendor/golang.org/x/net/http2/not_go111.go delete mode 100644 vendor/google.golang.org/grpc/channelz/funcs.go delete mode 100644 vendor/google.golang.org/grpc/channelz/types.go delete mode 100644 vendor/google.golang.org/grpc/grpclb.go delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto delete mode 100644 vendor/google.golang.org/grpc/grpclb_picker.go delete mode 100644 vendor/google.golang.org/grpc/grpclb_remote_balancer.go delete mode 100644 vendor/google.golang.org/grpc/grpclb_util.go delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto create mode 100755 vendor/google.golang.org/grpc/health/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/status/go16.go create mode 100644 vendor/google.golang.org/grpc/status/go17.go create mode 100644 vendor/google.golang.org/grpc/stickiness_linkedmap.go create mode 100644 vendor/google.golang.org/grpc/version.go (limited to 'vendor') diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml index 3cb77dc9a..4a237eafa 100644 --- a/vendor/github.com/go-ini/ini/.travis.yml +++ b/vendor/github.com/go-ini/ini/.travis.yml @@ -1,7 +1,6 @@ sudo: false language: go go: - - 1.5.x - 1.6.x - 1.7.x - 1.8.x diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md index 95d920d6b..988dceaba 100644 --- a/vendor/github.com/go-ini/ini/README.md +++ b/vendor/github.com/go-ini/ini/README.md @@ -1,4 +1,4 @@ -INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini) === ![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 15ebc8f72..595f6002f 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -1,3 +1,5 @@ +// +build go1.6 + // Copyright 2014 Unknwon // // Licensed under the Apache License, Version 2.0 (the "License"): you may @@ -32,7 +34,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.37.0" + _VERSION = "1.38.1" ) // Version returns current package version literal. @@ -132,6 +134,8 @@ type LoadOptions struct { IgnoreContinuation bool // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. IgnoreInlineComment bool + // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. 
+ SkipUnrecognizableLines bool // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. // This type of keys are mostly used in my.cnf. AllowBooleanKeys bool @@ -157,7 +161,7 @@ type LoadOptions struct { // when value is NOT surrounded by any quotes. // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. UnescapeValueCommentSymbols bool - // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise // conform to key/value pairs. Specify the names of those blocks here. UnparseableSections []string } diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go index d5aa2db60..3daf54c38 100644 --- a/vendor/github.com/go-ini/ini/parser.go +++ b/vendor/github.com/go-ini/ini/parser.go @@ -339,8 +339,7 @@ func (f *File) parse(reader io.Reader) (err error) { // NOTE: Iterate and increase `currentPeekSize` until // the size of the parser buffer is found. - // TODO: When Golang 1.10 is the lowest version supported, - // replace with `parserBufferSize := p.buf.Size()`. + // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. parserBufferSize := 0 // NOTE: Peek 1kb at a time. currentPeekSize := 1024 @@ -390,8 +389,7 @@ func (f *File) parse(reader io.Reader) (err error) { // Section if line[0] == '[' { // Read to the next ']' (TODO: support quoted strings) - // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 - closeIdx := bytes.LastIndex(line, []byte("]")) + closeIdx := bytes.LastIndexByte(line, ']') if closeIdx == -1 { return fmt.Errorf("unclosed section: %s", line) } @@ -433,25 +431,31 @@ func (f *File) parse(reader io.Reader) (err error) { kname, offset, err := readKeyName(line) if err != nil { // Treat as boolean key when desired, and whole line is key name. 
- if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { - kname, err := p.readValue(line, - parserBufferSize, - f.options.IgnoreContinuation, - f.options.IgnoreInlineComment, - f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols, - f.options.AllowPythonMultilineValues, - f.options.SpaceBeforeInlineComment) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err + if IsErrDelimiterNotFound(err) { + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, + parserBufferSize, + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes, + f.options.UnescapeValueCommentSymbols, + f.options.AllowPythonMultilineValues, + f.options.SpaceBeforeInlineComment) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue } return err } diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go index d8a402619..340a1efad 100644 --- a/vendor/github.com/go-ini/ini/section.go +++ b/vendor/github.com/go-ini/ini/section.go @@ -82,6 +82,7 @@ func (s *Section) NewKey(name, val string) (*Key, error) { } } else { s.keys[name].value = val + s.keysHash[name] = val } return s.keys[name], nil } diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS index 73ff68fbc..fbe4ec442 100644 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -72,6 +72,7 @@ Shuode Li Soroush Pour Stan Putrya Stanley Gunawan +Thomas Wodarek Xiangyu Hu Xiaobing Jiang Xiuming Chen diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md index 2e9b07eeb..7e7df1a3d 100644 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -40,7 +40,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac * Optional placeholder interpolation ## Requirements - * Go 1.7 or higher. We aim to support the 3 latest versions of Go. + * Go 1.8 or higher. We aim to support the 3 latest versions of Go. 
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) --------------------------------------- diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go index 0b59f52ee..2f61ecd4f 100644 --- a/vendor/github.com/go-sql-driver/mysql/auth.go +++ b/vendor/github.com/go-sql-driver/mysql/auth.go @@ -241,7 +241,7 @@ func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, bool, error) switch plugin { case "caching_sha2_password": authResp := scrambleSHA256Password(authData, mc.cfg.Passwd) - return authResp, (authResp == nil), nil + return authResp, false, nil case "mysql_old_password": if !mc.cfg.AllowOldPasswords { diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go index 8c35de73c..ba1297825 100644 --- a/vendor/github.com/go-sql-driver/mysql/driver.go +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -105,6 +105,9 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { mc.cleanup() return nil, err } + if plugin == "" { + plugin = defaultAuthPlugin + } // Send Client Authentication Packet authResp, addNUL, err := mc.auth(authData, plugin) diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go index f99934e73..170aaa02b 100644 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -154,15 +154,15 @@ func (mc *mysqlConn) writePacket(data []byte) error { // Handshake Initialization Packet // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake -func (mc *mysqlConn) readHandshakePacket() ([]byte, string, error) { - data, err := mc.readPacket() +func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) { + data, err = mc.readPacket() if err != nil { // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since // in connection initialization we don't risk retrying non-idempotent actions. if err == ErrInvalidConn { return nil, "", driver.ErrBadConn } - return nil, "", err + return } if data[0] == iERR { @@ -198,7 +198,6 @@ func (mc *mysqlConn) readHandshakePacket() ([]byte, string, error) { } pos += 2 - plugin := "" if len(data) > pos { // character set [1 byte] // status flags [2 bytes] @@ -236,8 +235,6 @@ func (mc *mysqlConn) readHandshakePacket() ([]byte, string, error) { return b[:], plugin, nil } - plugin = defaultAuthPlugin - // make a memory safe copy of the cipher slice var b [8]byte copy(b[:], authData) diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md index 1c95f5978..444df08f8 100644 --- a/vendor/github.com/hashicorp/errwrap/README.md +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -48,7 +48,7 @@ func main() { // We can use the Contains helpers to check if an error contains // another error. It is safe to do this with a nil error, or with // an error that doesn't even use the errwrap package. 
- if errwrap.Contains(err, ErrNotExist) { + if errwrap.Contains(err, "does not exist") { // Do something } if errwrap.ContainsType(err, new(os.PathError)) { diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go index 6c7a3cc91..47f13c49a 100644 --- a/vendor/github.com/hashicorp/go-multierror/format.go +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -13,7 +13,7 @@ type ErrorFormatFunc func([]error) string // that occurred along with a bullet point list of the errors. func ListFormatFunc(es []error) string { if len(es) == 1 { - return fmt.Sprintf("1 error occurred:\n\n* %s", es[0]) + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) } points := make([]string, len(es)) @@ -22,6 +22,6 @@ func ListFormatFunc(es []error) string { } return fmt.Sprintf( - "%d errors occurred:\n\n%s", - len(es), strings.Join(points, "\n")) + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) } diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 000000000..fecb14e81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index 0fd9b122f..90525e665 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -649,7 +649,7 @@ func (r *Lexer) Bytes() []byte { return nil } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) - len, err := base64.StdEncoding.Decode(ret, r.token.byteValue) + n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { r.fatalError = &LexerError{ Reason: err.Error(), @@ -658,7 +658,7 @@ func (r *Lexer) Bytes() []byte { } r.consume() - return ret[:len] + return ret[:n] } // Bool reads a true or false boolean keyword. diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index 237ddbcae..5856ed2b1 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -99,7 +99,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v6.0.3" + libraryVersion = "v6.0.5" ) // User Agent should always following the below style. 
diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go index e4ea9b647..207cbd03c 100644 --- a/vendor/github.com/minio/minio-go/functional_tests.go +++ b/vendor/github.com/minio/minio-go/functional_tests.go @@ -569,7 +569,7 @@ func testPutObjectReadAt() { logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) return } - if st.ContentType != objectContentType { + if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { logError(testName, function, args, startTime, "", "Content types don't match", err) return } @@ -683,7 +683,7 @@ func testPutObjectWithMetadata() { logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) return } - if st.ContentType != customContentType { + if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) return } @@ -751,7 +751,7 @@ func testPutObjectWithContentLanguage() { data := bytes.Repeat([]byte("a"), int(0)) n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ - ContentLanguage: "en-US", + ContentLanguage: "en", }) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -769,8 +769,8 @@ func testPutObjectWithContentLanguage() { return } - if objInfo.Metadata.Get("Content-Language") != "en-US" { - logError(testName, function, args, startTime, "", "Expected content-language 'en-US' doesn't match with StatObject return value", err) + if objInfo.Metadata.Get("Content-Language") != "en" { + logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) return } @@ -1359,7 +1359,7 @@ func testFPutObjectMultipart() { logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) return } - if objInfo.ContentType != objectContentType { + if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { logError(testName, function, args, startTime, "", "ContentType doesn't match", err) return } @@ -1499,6 +1499,7 @@ func testFPutObject() { // Perform FPutObject with no contentType provided (Expecting application/x-gtar) args["objectName"] = objectName + "-GTar" + args["opts"] = minio.PutObjectOptions{} n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "FPutObject failed", err) @@ -1541,8 +1542,8 @@ func testFPutObject() { logError(testName, function, args, startTime, "", "StatObject failed", err) return } - if rGTar.ContentType != "application/x-gtar" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rGTar.ContentType, err) + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err) return } @@ -4535,7 +4536,7 @@ func testFPutObjectV2() { 
logError(testName, function, args, startTime, "", "StatObject failed", err) return } - if rGTar.ContentType != "application/x-gtar" { + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) return } diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 13cc5e3d6..d3222b8f4 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -762,6 +762,9 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) val.Set(reflect.MakeSlice(sliceType, 0, 0)) return nil } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: return d.decodeSlice(name, []byte(dataVal.String()), val) // All other types we try to convert to the slice type diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index b065f8683..9805432c2 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,34 +1,23 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: metrics.proto -// DO NOT EDIT! - -/* -Package io_prometheus_client is a generated protocol buffer package. - -It is generated from these files: - metrics.proto - -It has these top-level messages: - LabelPair - Gauge - Counter - Quantile - Summary - Untyped - Histogram - Bucket - Metric - MetricFamily -*/ -package io_prometheus_client + +package io_prometheus_client // import "github.com/prometheus/client_model/go" import proto "github.com/golang/protobuf/proto" +import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type MetricType int32 const ( @@ -70,16 +59,41 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { *x = MetricType(value) return nil } +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (dst *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(dst, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo func (m *LabelPair) GetName() string { if m != nil && m.Name != nil { @@ -96,13 +110,35 @@ func (m *LabelPair) GetValue() string { } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (dst *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(dst, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo func (m *Gauge) GetValue() float64 { if m != nil && m.Value != nil { @@ -112,13 +148,35 @@ func (m *Gauge) GetValue() float64 { } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} +func 
(*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (dst *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(dst, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo func (m *Counter) GetValue() float64 { if m != nil && m.Value != nil { @@ -128,14 +186,36 @@ func (m *Counter) GetValue() float64 { } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (dst *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(dst, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo func (m *Quantile) GetQuantile() float64 { if m != nil && m.Quantile != nil { @@ -152,15 +232,37 @@ func (m *Quantile) GetValue() float64 { } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_unrecognized []byte `json:"-"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, 
deterministic) +} +func (dst *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(dst, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo func (m *Summary) GetSampleCount() uint64 { if m != nil && m.SampleCount != nil { @@ -184,13 +286,35 @@ func (m *Summary) GetQuantile() []*Quantile { } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (dst *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(dst, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo func (m *Untyped) GetValue() float64 { if m != nil && m.Value != nil { @@ -200,15 +324,37 @@ func (m *Untyped) GetValue() float64 { } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_unrecognized []byte `json:"-"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (dst *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(dst, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo func (m *Histogram) GetSampleCount() uint64 { if m != nil && m.SampleCount != nil { @@ -232,14 +378,36 @@ func (m *Histogram) GetBucket() []*Bucket { } type 
Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` - XXX_unrecognized []byte `json:"-"` + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (dst *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(dst, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo func (m *Bucket) GetCumulativeCount() uint64 { if m != nil && m.CumulativeCount != nil { @@ -256,19 +424,41 @@ func (m *Bucket) GetUpperBound() float64 { } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` - XXX_unrecognized []byte `json:"-"` + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (dst *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(dst, src) +} +func (m *Metric) 
XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo func (m *Metric) GetLabel() []*LabelPair { if m != nil { @@ -320,16 +510,38 @@ func (m *Metric) GetTimestampMs() int64 { } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (dst *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(dst, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo func (m *MetricFamily) GetName() string { if m != nil && m.Name != nil { @@ -360,5 +572,58 @@ func (m *MetricFamily) GetMetric() []*Metric { } func init() { + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) } + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } + +var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ + // 591 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, + 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, + 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, + 0x5e, 
0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, + 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, + 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, + 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, + 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, + 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, + 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, + 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, + 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, + 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, + 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, + 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, + 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, + 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, + 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, + 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, + 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, + 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, + 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, + 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, + 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, + 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, + 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, + 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, + 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, + 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, + 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, + 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, + 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, + 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, + 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, + 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, + 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, + 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index e95ddbc67..7a8a1e099 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ 
b/vendor/github.com/prometheus/procfs/mountstats.go @@ -39,8 +39,11 @@ const ( statVersion10 = "1.0" statVersion11 = "1.1" - fieldTransport10Len = 10 - fieldTransport11Len = 13 + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -186,6 +189,8 @@ type NFSOperationStats struct { // A NFSTransportStats contains statistics for the NFS mount RPC requests and // responses. type NFSTransportStats struct { + // The transport protocol used for the NFS mount. + Protocol string // The local port used for the NFS mount. Port uint64 // Number of times the client has had to establish a connection from scratch @@ -360,7 +365,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) } - tstats, err := parseNFSTransportStats(ss[2:], statVersion) + tstats, err := parseNFSTransportStats(ss[1:], statVersion) if err != nil { return nil, err } @@ -522,13 +527,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { // parseNFSTransportStats parses a NFSTransportStats line using an input set of // integer fields matched to a specific stats version. func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. It is the only string value in the line + protocol := ss[0] + ss = ss[1:] + switch statVersion { case statVersion10: - if len(ss) != fieldTransport10Len { + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport10TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport10UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) } case statVersion11: - if len(ss) != fieldTransport11Len { + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport11TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport11UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) } default: @@ -536,12 +561,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. + // in a v1.0 response. Since the stat length is bigger for TCP stats, we use + // the TCP length here. // // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11Len) + ns := make([]uint64, fieldTransport11TCPLen) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -551,7 +577,18 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats ns[i] = n } + // The fields differ depending on the transport protocol (TCP or UDP) + // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt + // + // For the udp RPC transport there is no connection count, connect idle time, + // or idle time (fields #3, #4, and #5); all other fields are the same. So + // we set them to 0 here. 
+ if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } + return &NFSTransportStats{ + Protocol: protocol, Port: ns[0], Bind: ns[1], Connect: ns[2], diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go index ffe9df50d..8f1508f0f 100644 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -113,7 +113,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) { if len(fields) != 2 { return XfrmStat{}, fmt.Errorf( - "couldnt parse %s line %s", file.Name(), s.Text()) + "couldn't parse %s line %s", file.Name(), s.Text()) } name := fields[0] diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index 550dcda12..c6440db8e 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -11,7 +11,7 @@ Note that zap only supports the two most recent minor versions of Go. ## Quick Start In contexts where performance is nice, but not critical, use the -`SugaredLogger`. It's 4-10x faster than than other structured logging +`SugaredLogger`. It's 4-10x faster than other structured logging packages and includes both structured and `printf`-style APIs. ```go diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml index da1100b7d..a3321fa2d 100644 --- a/vendor/go.uber.org/zap/.travis.yml +++ b/vendor/go.uber.org/zap/.travis.yml @@ -1,8 +1,8 @@ language: go sudo: false go: - - "1.9" - - "1.10" + - 1.9.x + - 1.10.x go_import_path: go.uber.org/zap env: global: diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index be28291d5..e60e14629 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## v1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @phillippgille, @etsangsplk, and +@dimroc for their contributions to this release. + ## v1.8.0 (13 Apr 2018) Enhancements: @@ -284,3 +293,6 @@ upgrade to the upcoming stable release. [#518]: https://github.com/uber-go/zap/pull/518 [#577]: https://github.com/uber-go/zap/pull/577 [#574]: https://github.com/uber-go/zap/pull/574 +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md index 90ddfe17a..4256d35c7 100644 --- a/vendor/go.uber.org/zap/FAQ.md +++ b/vendor/go.uber.org/zap/FAQ.md @@ -148,6 +148,7 @@ We're aware of the following extensions, but haven't used them ourselves: | --- | --- | | `github.com/tchap/zapext` | Sentry, syslog | | `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | [go-proverbs]: https://go-proverbs.github.io/ [import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index 4b2bb9d8b..f4fd1cb44 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -11,7 +11,7 @@ Note that zap only supports the two most recent minor versions of Go. ## Quick Start In contexts where performance is nice, but not critical, use the -`SugaredLogger`. It's 4-10x faster than than other structured logging +`SugaredLogger`. 
It's 4-10x faster than other structured logging packages and includes both structured and `printf`-style APIs. ```go diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index d15f7fdb3..7592e8c63 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -98,6 +98,15 @@ func (b *Buffer) Write(bs []byte) (int, error) { return len(bs), nil } +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + // Free returns the Buffer to its Pool. // // Callers must not retain references to the Buffer after calling Free. diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index dae130303..6fe17d9e0 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -74,10 +74,10 @@ type Config struct { // EncoderConfig sets options for the chosen encoder. See // zapcore.EncoderConfig for details. EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` - // OutputPaths is a list of paths to write logging output to. See Open for - // details. + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` - // ErrorOutputPaths is a list of paths to write internal logger errors to. + // ErrorOutputPaths is a list of URLs to write internal logger errors to. // The default is standard error. // // Note that this setting only affects internal errors; for sample code that diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go index 3f16a8d45..8638dd1b9 100644 --- a/vendor/go.uber.org/zap/doc.go +++ b/vendor/go.uber.org/zap/doc.go @@ -48,7 +48,7 @@ // "attempt", 3, // "backoff", time.Second, // ) -// sugar.Printf("failed to fetch URL: %s", "http://example.com") +// sugar.Infof("failed to fetch URL: %s", "http://example.com") // // By default, loggers are unbuffered. However, since zap's low-level APIs // allow buffering, calling Sync before letting your process exit is a good diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index f171c3847..1b0ecaca9 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -48,11 +48,11 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.Method { - case "GET": + case http.MethodGet: current := lvl.Level() enc.Encode(payload{Level: ¤t}) - case "PUT": + case http.MethodPut: var req payload if errmess := func() string { diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go new file mode 100644 index 000000000..ff0becfe5 --- /dev/null +++ b/vendor/go.uber.org/zap/sink.go @@ -0,0 +1,161 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "strings" + "sync" + + "go.uber.org/zap/zapcore" +) + +const schemeFile = "file" + +var ( + _sinkMutex sync.RWMutex + _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme +) + +func init() { + resetSinkRegistry() +} + +func resetSinkRegistry() { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + _sinkFactories = map[string]func(*url.URL) (Sink, error){ + schemeFile: newFileSink, + } +} + +// Sink defines the interface to write to and close logger destinations. +type Sink interface { + zapcore.WriteSyncer + io.Closer +} + +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type errSinkNotFound struct { + scheme string +} + +func (e *errSinkNotFound) Error() string { + return fmt.Sprintf("no sink found for scheme %q", e.scheme) +} + +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 3.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. +func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + if scheme == "" { + return errors.New("can't register a sink factory for empty string") + } + normalized, err := normalizeScheme(scheme) + if err != nil { + return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) + } + if _, ok := _sinkFactories[normalized]; ok { + return fmt.Errorf("sink factory already registered for scheme %q", normalized) + } + _sinkFactories[normalized] = factory + return nil +} + +func newSink(rawURL string) (Sink, error) { + u, err := url.Parse(rawURL) + if err != nil { + return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) + } + if u.Scheme == "" { + u.Scheme = schemeFile + } + + _sinkMutex.RLock() + factory, ok := _sinkFactories[u.Scheme] + _sinkMutex.RUnlock() + if !ok { + return nil, &errSinkNotFound{u.Scheme} + } + return factory(u) +} + +func newFileSink(u *url.URL) (Sink, error) { + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) + } + if u.RawQuery != "" { + return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. 
+ if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) + } + switch u.Path { + case "stdout": + return nopCloserSink{os.Stdout}, nil + case "stderr": + return nopCloserSink{os.Stderr}, nil + } + return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) +} + +func normalizeScheme(s string) (string, error) { + // https://tools.ietf.org/html/rfc3986#section-3.1 + s = strings.ToLower(s) + if first := s[0]; 'a' > first || 'z' < first { + return "", errors.New("must start with a letter") + } + for i := 1; i < len(s); i++ { // iterate over bytes, not runes + c := s[i] + switch { + case 'a' <= c && c <= 'z': + continue + case '0' <= c && c <= '9': + continue + case c == '.' || c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go index 16f55ce48..86a709ab0 100644 --- a/vendor/go.uber.org/zap/writer.go +++ b/vendor/go.uber.org/zap/writer.go @@ -21,21 +21,33 @@ package zap import ( + "fmt" + "io" "io/ioutil" - "os" "go.uber.org/zap/zapcore" "go.uber.org/multierr" ) -// Open is a high-level wrapper that takes a variadic number of paths, opens or -// creates each of the specified files, and combines them into a locked +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked // WriteSyncer. It also returns any error encountered and a function to close // any opened files. // -// Passing no paths returns a no-op WriteSyncer. The special paths "stdout" and -// "stderr" are interpreted as os.Stdout and os.Stderr, respectively. +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { writers, close, err := open(paths) if err != nil { @@ -47,33 +59,24 @@ func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { } func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { - var openErr error writers := make([]zapcore.WriteSyncer, 0, len(paths)) - files := make([]*os.File, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) close := func() { - for _, f := range files { - f.Close() + for _, c := range closers { + c.Close() } } + + var openErr error for _, path := range paths { - switch path { - case "stdout": - writers = append(writers, os.Stdout) - // Don't close standard out. - continue - case "stderr": - writers = append(writers, os.Stderr) - // Don't close standard error. 
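The new sink.go above exposes RegisterSink and routes output paths through scheme-keyed factories, which is what the rewritten Open documentation refers to. A minimal sketch of using it, assuming a made-up "memory" scheme and an illustrative MemorySink type (neither is part of zap):

```go
package main

import (
	"bytes"
	"fmt"
	"net/url"

	"go.uber.org/zap"
)

// MemorySink collects log output in memory; Sync and Close are no-ops, so it
// satisfies zap.Sink (zapcore.WriteSyncer plus io.Closer).
type MemorySink struct{ bytes.Buffer }

func (*MemorySink) Sync() error  { return nil }
func (*MemorySink) Close() error { return nil }

func main() {
	sink := &MemorySink{}

	// Factories are keyed by URL scheme; zap registers "file" itself.
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"memory://"}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	logger.Info("routed through a custom sink")
	_ = logger.Sync()

	fmt.Print(sink.String()) // the JSON-encoded entry captured in memory
}
```

Paths without a scheme, as well as the special "stdout" and "stderr" values, keep their previous behavior through the built-in file sink.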
+ sink, err := newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) continue } - f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) - openErr = multierr.Append(openErr, err) - if err == nil { - writers = append(writers, f) - files = append(files, f) - } + writers = append(writers, sink) + closers = append(closers, sink) } - if openErr != nil { close() return writers, nil, openErr diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 1006ba2b1..2dc67d81e 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -44,10 +44,15 @@ func getJSONEncoder() *jsonEncoder { } func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } enc.EncoderConfig = nil enc.buf = nil enc.spaced = false enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil _jsonPool.Put(enc) } @@ -56,6 +61,10 @@ type jsonEncoder struct { buf *buffer.Buffer spaced bool // include spaces after colons and commas openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc *json.Encoder } // NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder @@ -124,13 +133,24 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) { enc.AppendInt64(val) } +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { - marshaled, err := json.Marshal(obj) + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(obj) if err != nil { return err } + enc.reflectBuf.TrimNewline() enc.addKey(key) - _, err = enc.buf.Write(marshaled) + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) return err } @@ -213,12 +233,14 @@ func (enc *jsonEncoder) AppendInt64(val int64) { } func (enc *jsonEncoder) AppendReflected(val interface{}) error { - marshaled, err := json.Marshal(val) + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(val) if err != nil { return err } + enc.reflectBuf.TrimNewline() enc.addElementSeparator() - _, err = enc.buf.Write(marshaled) + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) return err } diff --git a/vendor/golang.org/x/image/bmp/writer.go b/vendor/golang.org/x/image/bmp/writer.go index 6947968a4..f07b39dba 100644 --- a/vendor/golang.org/x/image/bmp/writer.go +++ b/vendor/golang.org/x/image/bmp/writer.go @@ -49,20 +49,91 @@ func encodePaletted(w io.Writer, pix []uint8, dx, dy, stride, step int) error { return nil } -func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int) error { +func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error { buf := make([]byte, step) - for y := dy - 1; y >= 0; y-- { - min := y*stride + 0 - max := y*stride + dx*4 - off := 0 - for i := min; i < max; i += 4 { - buf[off+2] = pix[i+0] - buf[off+1] = pix[i+1] - buf[off+0] = pix[i+2] - off += 3 + if opaque { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + off += 3 + } + if _, err := w.Write(buf); err != nil { + return err + } } - if _, err := w.Write(buf); err != nil { - return err + } else { + for y := dy - 1; y >= 0; y-- { + min := y*stride 
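The json_encoder.go hunk above replaces json.Marshal with a pooled buffer and json.Encoder for reflected values. Any field built with zap.Reflect exercises that path; a short sketch, not part of the patch:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	// zap.Reflect falls back to reflection-based JSON encoding, which now
	// reuses the encoder's reflectBuf/reflectEnc pair instead of allocating
	// through json.Marshal on every call.
	logger.Info("reflected payload",
		zap.Reflect("payload", map[string]interface{}{"a": 1, "b": []int{2, 3}}),
	)
}
```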
+ 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + a := uint32(pix[i+3]) + if a == 0 { + buf[off+2] = 0 + buf[off+1] = 0 + buf[off+0] = 0 + buf[off+3] = 0 + off += 4 + continue + } else if a == 0xff { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + buf[off+3] = 0xff + off += 4 + continue + } + buf[off+2] = uint8(((uint32(pix[i+0]) * 0xffff) / a) >> 8) + buf[off+1] = uint8(((uint32(pix[i+1]) * 0xffff) / a) >> 8) + buf[off+0] = uint8(((uint32(pix[i+2]) * 0xffff) / a) >> 8) + buf[off+3] = uint8(a) + off += 4 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } + return nil +} + +func encodeNRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error { + buf := make([]byte, step) + if opaque { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + off += 3 + } + if _, err := w.Write(buf); err != nil { + return err + } + } + } else { + for y := dy - 1; y >= 0; y-- { + min := y*stride + 0 + max := y*stride + dx*4 + off := 0 + for i := min; i < max; i += 4 { + buf[off+2] = pix[i+0] + buf[off+1] = pix[i+1] + buf[off+0] = pix[i+2] + buf[off+3] = pix[i+3] + off += 4 + } + if _, err := w.Write(buf); err != nil { + return err + } } } return nil @@ -105,6 +176,7 @@ func Encode(w io.Writer, m image.Image) error { var step int var palette []byte + var opaque bool switch m := m.(type) { case *image.Gray: step = (d.X + 3) &^ 3 @@ -134,6 +206,28 @@ func Encode(w io.Writer, m image.Image) error { h.fileSize += uint32(len(palette)) + h.imageSize h.pixOffset += uint32(len(palette)) h.bpp = 8 + case *image.RGBA: + opaque = m.Opaque() + if opaque { + step = (3*d.X + 3) &^ 3 + h.bpp = 24 + } else { + step = 4 * d.X + h.bpp = 32 + } + h.imageSize = uint32(d.Y * step) + h.fileSize += h.imageSize + case *image.NRGBA: + opaque = m.Opaque() + if opaque { + step = (3*d.X + 3) &^ 3 + h.bpp = 24 + } else { + step = 4 * d.X + h.bpp = 32 + } + h.imageSize = uint32(d.Y * step) + h.fileSize += h.imageSize default: step = (3*d.X + 3) &^ 3 h.imageSize = uint32(d.Y * step) @@ -160,7 +254,9 @@ func Encode(w io.Writer, m image.Image) error { case *image.Paletted: return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step) case *image.RGBA: - return encodeRGBA(w, m.Pix, d.X, d.Y, m.Stride, step) + return encodeRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque) + case *image.NRGBA: + return encodeNRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque) } return encode(w, m, step) } diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index d23e05e06..7e539b123 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -860,9 +860,13 @@ func inBodyIM(p *parser) bool { // The newline, if any, will be dealt with by the TextToken case. p.framesetOK = false case a.Form: - if p.oe.contains(a.Template) || p.form == nil { - p.popUntil(buttonScope, a.P) - p.addElement() + if p.form != nil && !p.oe.contains(a.Template) { + // Ignore the token + return true + } + p.popUntil(buttonScope, a.P) + p.addElement() + if !p.oe.contains(a.Template) { p.form = p.top() } case a.Li: @@ -1098,12 +1102,13 @@ func inBodyIM(p *parser) bool { p.popUntil(defaultScope, p.tok.DataAtom) case a.Form: if p.oe.contains(a.Template) { - if !p.oe.contains(a.Form) { + i := p.indexOfElementInScope(defaultScope, a.Form) + if i == -1 { // Ignore the token. 
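The bmp writer changes above add dedicated *image.RGBA and *image.NRGBA paths, choosing 24-bit output for opaque images and 32-bit output otherwise. A small sketch of exercising the new path; the output filename is a placeholder:

```go
package main

import (
	"image"
	"image/color"
	"os"

	"golang.org/x/image/bmp"
)

func main() {
	// One semi-transparent pixel makes Opaque() false, so with this patch the
	// image is written as a 32-bit BMP rather than through the generic encoder.
	img := image.NewNRGBA(image.Rect(0, 0, 2, 2))
	img.Set(0, 0, color.NRGBA{R: 255, A: 255})
	img.Set(1, 1, color.NRGBA{G: 255, A: 128})

	f, err := os.Create("out.bmp")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := bmp.Encode(f, img); err != nil {
		panic(err)
	}
}
```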
return true } p.generateImpliedEndTags() - if p.tok.DataAtom == a.Form { + if p.oe[i].DataAtom != a.Form { // Ignore the token. return true } diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index bdf5652b0..f4d9b5ece 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -52,9 +52,31 @@ const ( noDialOnMiss = false ) +// shouldTraceGetConn reports whether getClientConn should call any +// ClientTrace.GetConn hook associated with the http.Request. +// +// This complexity is needed to avoid double calls of the GetConn hook +// during the back-and-forth between net/http and x/net/http2 (when the +// net/http.Transport is upgraded to also speak http2), as well as support +// the case where x/net/http2 is being used directly. +func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { + // If our Transport wasn't made via ConfigureTransport, always + // trace the GetConn hook if provided, because that means the + // http2 package is being used directly and it's the one + // dialing, as opposed to net/http. + if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { + return true + } + // Otherwise, only use the GetConn hook if this connection has + // been used previously for other requests. For fresh + // connections, the net/http package does the dialing. + return !st.freshConn +} + func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { if isConnectionCloseRequest(req) && dialOnMiss { // It gets its own connection. + traceGetConn(req, addr) const singleUse = true cc, err := p.t.dialClientConn(addr, singleUse) if err != nil { @@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis } p.mu.Lock() for _, cc := range p.conns[addr] { - if cc.CanTakeNewRequest() { + if st := cc.idleState(); st.canTakeNewRequest { + if p.shouldTraceGetConn(st) { + traceGetConn(req, addr) + } p.mu.Unlock() return cc, nil } @@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis p.mu.Unlock() return nil, ErrNoCachedConn } + traceGetConn(req, addr) call := p.getStartDialLocked(addr) p.mu.Unlock() <-call.done diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go index 088d6e2bd..6356b3287 100644 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ b/vendor/golang.org/x/net/http2/configure_transport.go @@ -57,7 +57,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) { // registerHTTPSProtocol calls Transport.RegisterProtocol but // converting panics into errors. -func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { +func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("%v", e) @@ -69,10 +69,12 @@ func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) // noDialH2RoundTripper is a RoundTripper which only tries to complete the request // if there's already has a cached connection to the host. 
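The client_conn_pool.go change above wires the httptrace GetConn hook into the HTTP/2 connection pool while avoiding double invocation when net/http hands off to x/net/http2. A sketch of observing it, assuming an HTTP/2-capable endpoint (the URL is a placeholder):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http.Transport{}
	// Upgrade the net/http transport in place. After this, shouldTraceGetConn
	// only fires the hook for reused HTTP/2 connections, because net/http owns
	// the dial (and the GetConn call) for fresh ones.
	if err := http2.ConfigureTransport(tr); err != nil {
		panic(err)
	}

	trace := &httptrace.ClientTrace{
		GetConn: func(hostPort string) { fmt.Println("GetConn:", hostPort) },
		GotConn: func(info httptrace.GotConnInfo) { fmt.Println("GotConn, reused:", info.Reused) },
	}

	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	res, err := (&http.Client{Transport: tr}).Do(req)
	if err != nil {
		panic(err)
	}
	res.Body.Close()
}
```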
-type noDialH2RoundTripper struct{ t *Transport } +// (The field is exported so it can be accessed via reflect from net/http; tested +// by TestNoDialH2RoundTripperType) +type noDialH2RoundTripper struct{ *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.t.RoundTrip(req) + res, err := rt.Transport.RoundTrip(req) if isNoCachedConnError(err) { return nil, http.ErrSkipAltProtocol } diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index 957de2542..cea601fcd 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -41,10 +41,10 @@ func (f *flow) take(n int32) { // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. func (f *flow) add(n int32) bool { - remain := (1<<31 - 1) - f.n - if n > remain { - return false + sum := f.n + n + if (sum > n) == (f.n > 0) { + f.n = sum + return true } - f.n += n - return true + return false } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index e32500779..c85e31f26 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -733,32 +733,67 @@ func (f *SettingsFrame) IsAck() bool { return f.FrameHeader.Flags.Has(FlagSettingsAck) } -func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { +func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) { f.checkValid() - buf := f.p - for len(buf) > 0 { - settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) - if settingID == s { - return binary.BigEndian.Uint32(buf[2:6]), true + for i := 0; i < f.NumSettings(); i++ { + if s := f.Setting(i); s.ID == id { + return s.Val, true } - buf = buf[6:] } return 0, false } +// Setting returns the setting from the frame at the given 0-based index. +// The index must be >= 0 and less than f.NumSettings(). +func (f *SettingsFrame) Setting(i int) Setting { + buf := f.p + return Setting{ + ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])), + Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]), + } +} + +func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 } + +// HasDuplicates reports whether f contains any duplicate setting IDs. +func (f *SettingsFrame) HasDuplicates() bool { + num := f.NumSettings() + if num == 0 { + return false + } + // If it's small enough (the common case), just do the n^2 + // thing and avoid a map allocation. + if num < 10 { + for i := 0; i < num; i++ { + idi := f.Setting(i).ID + for j := i + 1; j < num; j++ { + idj := f.Setting(j).ID + if idi == idj { + return true + } + } + } + return false + } + seen := map[SettingID]bool{} + for i := 0; i < num; i++ { + id := f.Setting(i).ID + if seen[id] { + return true + } + seen[id] = true + } + return false +} + // ForeachSetting runs fn for each setting. // It stops and returns the first error. 
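The rewritten flow.add above swaps the old headroom check for a sign-comparison overflow test: a + b overflows int32 exactly when (sum > b) disagrees with (a > 0). A standalone sketch of the same test, with illustrative names:

```go
package main

import (
	"fmt"
	"math"
)

// add32 mirrors the check in flow.add: it returns the sum and true when no
// signed overflow occurred, and the original value and false otherwise.
func add32(a, b int32) (int32, bool) {
	sum := a + b
	if (sum > b) == (a > 0) {
		return sum, true
	}
	return a, false
}

func main() {
	fmt.Println(add32(1000, 2000))        // 3000 true
	fmt.Println(add32(math.MaxInt32, 1))  // overflow: math.MaxInt32 false
	fmt.Println(add32(math.MinInt32, -1)) // underflow: math.MinInt32 false
}
```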
func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { f.checkValid() - buf := f.p - for len(buf) > 0 { - if err := fn(Setting{ - SettingID(binary.BigEndian.Uint16(buf[:2])), - binary.BigEndian.Uint32(buf[2:6]), - }); err != nil { + for i := 0; i < f.NumSettings(); i++ { + if err := fn(f.Setting(i)); err != nil { return err } - buf = buf[6:] } return nil } diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 000000000..9749dc0be --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package http2 + +import "net/textproto" + +func traceHasWroteHeaderField(trace *clientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *clientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go index 47b7fae08..d957b7bc5 100644 --- a/vendor/golang.org/x/net/http2/go17.go +++ b/vendor/golang.org/x/net/http2/go17.go @@ -18,6 +18,8 @@ type contextContext interface { context.Context } +var errCanceled = context.Canceled + func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { ctx, cancel = context.WithCancel(context.Background()) ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) @@ -48,6 +50,14 @@ func (t *Transport) idleConnTimeout() time.Duration { func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } +func traceGetConn(req *http.Request, hostPort string) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GetConn == nil { + return + } + trace.GetConn(hostPort) +} + func traceGotConn(req *http.Request, cc *ClientConn) { trace := httptrace.ContextClientTrace(req.Context()) if trace == nil || trace.GotConn == nil { @@ -104,3 +114,8 @@ func requestTrace(req *http.Request) *clientTrace { func (cc *ClientConn) Ping(ctx context.Context) error { return cc.ping(ctx) } + +// Shutdown gracefully closes the client connection, waiting for running streams to complete. +func (cc *ClientConn) Shutdown(ctx context.Context) error { + return cc.shutdown(ctx) +} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go new file mode 100644 index 000000000..0df34e6d9 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
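The new SettingsFrame helpers above (Setting, NumSettings, HasDuplicates) are used later in this patch by the server to reject SETTINGS frames with more than 100 entries or duplicate IDs. A quick sketch that builds such a frame with the exported Framer and inspects it:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)

	// A SETTINGS frame carrying the same ID twice; servers with this patch
	// treat it (like frames with more than 100 settings) as a protocol error.
	if err := fr.WriteSettings(
		http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14},
		http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 20},
	); err != nil {
		panic(err)
	}

	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	sf := f.(*http2.SettingsFrame)
	fmt.Println(sf.NumSettings(), sf.HasDuplicates()) // 2 true
}
```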
+ +// +build !go1.11 + +package http2 + +import "net/textproto" + +func traceHasWroteHeaderField(trace *clientTrace) bool { return false } + +func traceWroteHeaderField(trace *clientTrace, k, v string) {} + +func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error { + return nil +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go index 140434a79..7ffb25046 100644 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -8,6 +8,7 @@ package http2 import ( "crypto/tls" + "errors" "net" "net/http" "time" @@ -18,6 +19,8 @@ type contextContext interface { Err() error } +var errCanceled = errors.New("canceled") + type fakeContext struct{} func (fakeContext) Done() <-chan struct{} { return nil } @@ -34,6 +37,7 @@ func setResponseUncompressed(res *http.Response) { type clientTrace struct{} func requestTrace(*http.Request) *clientTrace { return nil } +func traceGetConn(*http.Request, string) {} func traceGotConn(*http.Request, *ClientConn) {} func traceFirstResponseByte(*clientTrace) {} func traceWroteHeaders(*clientTrace) {} @@ -84,4 +88,8 @@ func (cc *ClientConn) Ping(ctx contextContext) error { return cc.ping(ctx) } +func (cc *ClientConn) Shutdown(ctx contextContext) error { + return cc.shutdown(ctx) +} + func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 769c0fe5e..22cb18e06 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -1487,6 +1487,12 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error { } return nil } + if f.NumSettings() > 100 || f.HasDuplicates() { + // This isn't actually in the spec, but hang up on + // suspiciously large settings frames or those with + // duplicate entries. + return ConnectionError(ErrCodeProtocol) + } if err := f.ForeachSetting(sc.processSetting); err != nil { return err } @@ -1575,6 +1581,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // type PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } + // RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in + // "open" or "half-closed (local)" state, the recipient MUST respond with a + // stream error (Section 5.4.2) of type STREAM_CLOSED. + if state == stateClosed { + return streamError(id, ErrCodeStreamClosed) + } if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that @@ -1721,6 +1733,13 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // processing this frame. return nil } + // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than + // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in + // this state, it MUST respond with a stream error (Section 5.4.2) of + // type STREAM_CLOSED. + if st.state == stateHalfClosedRemote { + return streamError(id, ErrCodeStreamClosed) + } return st.processTrailerHeaders(f) } @@ -2347,6 +2366,19 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { foreachHeaderElement(v, rws.declareTrailer) } + // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), + // but respect "Connection" == "close" to mean sending a GOAWAY and tearing + // down the TCP connection when idle, like we do for HTTP/1. 
+ // TODO: remove more Connection-specific header fields here, in addition + // to "Connection". + if _, ok := rws.snapHeader["Connection"]; ok { + v := rws.snapHeader.Get("Connection") + delete(rws.snapHeader, "Connection") + if v == "close" { + rws.conn.startGracefulShutdown() + } + } + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index d23a22625..9d1f2fadd 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -21,6 +21,7 @@ import ( mathrand "math/rand" "net" "net/http" + "net/textproto" "sort" "strconv" "strings" @@ -159,6 +160,7 @@ type ClientConn struct { cond *sync.Cond // hold mu; broadcast on flow/closed changes flow flow // our conn-level flow control quota (cs.flow is per stream) inflow flow // peer's conn-level flow control + closing bool closed bool wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received @@ -211,9 +213,10 @@ type clientStream struct { done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + num1xx uint8 // number of 1xx responses seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -237,6 +240,17 @@ func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { } } +var got1xxFuncForTests func(int, textproto.MIMEHeader) error + +// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, +// if any. It returns nil if not set or if the Go version is too old. +func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { + if fn := got1xxFuncForTests; fn != nil { + return fn + } + return traceGot1xxResponseFunc(cs.trace) +} + // awaitRequestCancel waits for the user to cancel a request, its context to // expire, or for the request to be done (any way it might be removed from the // cc.streams map: peer reset, successful completion, TCP connection breakage, @@ -423,27 +437,36 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt if !canRetryError(err) { return nil, err } - if !afterBodyWrite { - return req, nil - } // If the Body is nil (or http.NoBody), it's safe to reuse // this request and its Body. if req.Body == nil || reqBodyIsNoBody(req.Body) { return req, nil } - // Otherwise we depend on the Request having its GetBody - // func defined. + + // If the request body can be reset back to its original + // state via the optional req.GetBody, do that. getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody == nil { - return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) + if getBody != nil { + // TODO: consider a req.Body.Close here? or audit that all caller paths do? 
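The writeChunk change above strips the HTTP/1-only "Connection" header and, when its value is "close", starts a graceful GOAWAY instead. A sketch of a handler that triggers it, assuming an HTTP/2-enabled TLS server and placeholder certificate files:

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Illegal in HTTP/2 responses; with this patch the header is removed
		// and the serving connection is shut down gracefully via GOAWAY,
		// mirroring the HTTP/1 "Connection: close" behavior.
		w.Header().Set("Connection", "close")
		w.Write([]byte("goodbye\n"))
	})

	// cert.pem and key.pem are placeholders; HTTP/2 is negotiated over TLS.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}
```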
+ body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil } - body, err := getBody() - if err != nil { - return nil, err + + // The Request.Body can't reset back to the beginning, but we + // don't seem to have started to read from it yet, so reuse + // the request directly. The "afterBodyWrite" means the + // bodyWrite process has started, which becomes true before + // the first Read. + if !afterBodyWrite { + return req, nil } - newReq := *req - newReq.Body = body - return &newReq, nil + + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) } func canRetryError(err error) bool { @@ -630,12 +653,32 @@ func (cc *ClientConn) CanTakeNewRequest() bool { return cc.canTakeNewRequestLocked() } -func (cc *ClientConn) canTakeNewRequestLocked() bool { +// clientConnIdleState describes the suitability of a client +// connection to initiate a new RoundTrip request. +type clientConnIdleState struct { + canTakeNewRequest bool + freshConn bool // whether it's unused by any previous request +} + +func (cc *ClientConn) idleState() clientConnIdleState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.idleStateLocked() +} + +func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { if cc.singleUse && cc.nextStreamID > 1 { - return false + return } - return cc.goAway == nil && !cc.closed && + st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 + st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest + return +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + st := cc.idleStateLocked() + return st.canTakeNewRequest } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -665,6 +708,88 @@ func (cc *ClientConn) closeIfIdle() { cc.tconn.Close() } +var shutdownEnterWaitStateHook = func() {} + +// Shutdown gracefully close the client connection, waiting for running streams to complete. +// Public implementation is in go17.go and not_go17.go +func (cc *ClientConn) shutdown(ctx contextContext) error { + if err := cc.sendGoAway(); err != nil { + return err + } + // Wait for all in-flight streams to complete or connection to close + done := make(chan error, 1) + cancelled := false // guarded by cc.mu + go func() { + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if len(cc.streams) == 0 || cc.closed { + cc.closed = true + done <- cc.tconn.Close() + break + } + if cancelled { + break + } + cc.cond.Wait() + } + }() + shutdownEnterWaitStateHook() + select { + case err := <-done: + return err + case <-ctx.Done(): + cc.mu.Lock() + // Free the goroutine above + cancelled = true + cc.cond.Broadcast() + cc.mu.Unlock() + return ctx.Err() + } +} + +func (cc *ClientConn) sendGoAway() error { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.wmu.Lock() + defer cc.wmu.Unlock() + if cc.closing { + // GOAWAY sent already + return nil + } + // Send a graceful shutdown frame to server + maxStreamID := cc.nextStreamID + if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { + return err + } + if err := cc.bw.Flush(); err != nil { + return err + } + // Prevent new requests + cc.closing = true + return nil +} + +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. 
+func (cc *ClientConn) Close() error { + cc.mu.Lock() + defer cc.cond.Broadcast() + defer cc.mu.Unlock() + err := errors.New("http2: client connection force closed via ClientConn.Close") + for id, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + cs.bufPipe.CloseWithError(err) + delete(cc.streams, id) + } + cc.closed = true + return cc.tconn.Close() +} + const maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. @@ -747,7 +872,7 @@ func checkConnHeaders(req *http.Request) error { if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { return fmt.Errorf("http2: invalid Connection request header: %q", vv) } return nil @@ -1291,9 +1416,16 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail return nil, errRequestHeaderListSize } + trace := requestTrace(req) + traceHeaders := traceHasWroteHeaderField(trace) + // Header list size is ok. Write the headers. enumerateHeaders(func(name, value string) { - cc.writeHeader(strings.ToLower(name), value) + name = strings.ToLower(name) + cc.writeHeader(name, value) + if traceHeaders { + traceWroteHeaderField(trace, name, value) + } }) return cc.hbuf.Bytes(), nil @@ -1615,8 +1747,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { // is the detail. // // As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. +// frame (currently only used for 1xx responses). 
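transport.go above adds ClientConn.Shutdown (plus sendGoAway and the closing flag) so a connection can be drained rather than force-closed. A sketch of calling it on a hand-dialed connection; the host is a placeholder:

```go
package main

import (
	"context"
	"crypto/tls"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{NextProtos: []string{"h2"}})
	if err != nil {
		panic(err)
	}

	t := &http2.Transport{}
	cc, err := t.NewClientConn(conn)
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	res, err := cc.RoundTrip(req)
	if err != nil {
		panic(err)
	}
	res.Body.Close()

	// New in this patch: send GOAWAY, stop accepting new streams, and wait
	// (bounded by ctx) for in-flight streams to finish. Close, by contrast,
	// interrupts them immediately.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cc.Shutdown(ctx); err != nil {
		panic(err)
	}
}
```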
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { if f.Truncated { return nil, errResponseHeaderListSize @@ -1631,15 +1762,6 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") } - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil - } - header := make(http.Header) res := &http.Response{ Proto: "HTTP/2.0", @@ -1664,6 +1786,27 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra } } + if statusCode >= 100 && statusCode <= 199 { + cs.num1xx++ + const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http + if cs.num1xx > max1xxResponses { + return nil, errors.New("http2: too many 1xx informational responses") + } + if fn := cs.get1xxTraceFunc(); fn != nil { + if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { + return nil, err + } + } + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + } + cs.pastHeaders = false // do it all again + return nil, nil + } + streamEnded := f.StreamEnded() isHead := cs.req.Method == "HEAD" if !streamEnded || isHead { diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go index b44549928..5ce9b3583 100644 --- a/vendor/golang.org/x/net/ipv4/batch.go +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -9,7 +9,6 @@ package ipv4 import ( "net" "runtime" - "syscall" "golang.org/x/net/internal/socket" ) @@ -76,7 +75,7 @@ type Message = socket.Message // headers. func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": @@ -107,7 +106,7 @@ func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { // On other platforms, this method will write only a single message. func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": @@ -139,7 +138,7 @@ func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { // On other platforms, this method will read only a single message. func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": @@ -170,7 +169,7 @@ func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { // On other platforms, this method will write only a single message. func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go index 54d77d5fe..36764492d 100644 --- a/vendor/golang.org/x/net/ipv4/dgramopt.go +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -6,7 +6,6 @@ package ipv4 import ( "net" - "syscall" "golang.org/x/net/bpf" ) @@ -15,7 +14,7 @@ import ( // multicast packets. 
func (c *dgramOpt) MulticastTTL() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoMulticastTTL] if !ok { @@ -28,7 +27,7 @@ func (c *dgramOpt) MulticastTTL() (int, error) { // outgoing multicast packets. func (c *dgramOpt) SetMulticastTTL(ttl int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastTTL] if !ok { @@ -41,7 +40,7 @@ func (c *dgramOpt) SetMulticastTTL(ttl int) error { // packet transmissions. func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { if !c.ok() { - return nil, syscall.EINVAL + return nil, errInvalidConn } so, ok := sockOpts[ssoMulticastInterface] if !ok { @@ -54,7 +53,7 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { // multicast packet transmissions. func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastInterface] if !ok { @@ -67,7 +66,7 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { // should be copied and send back to the originator. func (c *dgramOpt) MulticastLoopback() (bool, error) { if !c.ok() { - return false, syscall.EINVAL + return false, errInvalidConn } so, ok := sockOpts[ssoMulticastLoopback] if !ok { @@ -84,7 +83,7 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { // should be copied and send back to the originator. func (c *dgramOpt) SetMulticastLoopback(on bool) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastLoopback] if !ok { @@ -104,7 +103,7 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { // configuration. func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoJoinGroup] if !ok { @@ -122,7 +121,7 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { // source-specific group. func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoLeaveGroup] if !ok { @@ -143,7 +142,7 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { // routing configuration. func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoJoinSourceGroup] if !ok { @@ -164,7 +163,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net // interface ifi. func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoLeaveSourceGroup] if !ok { @@ -186,7 +185,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne // ifi. func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoBlockSourceGroup] if !ok { @@ -207,7 +206,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source // group by ExcludeSourceSpecificGroup again on the interface ifi. 
func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoUnblockSourceGroup] if !ok { @@ -228,7 +227,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source // Currently only Linux supports this. func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { if !c.ok() { - return nil, syscall.EINVAL + return nil, errInvalidConn } so, ok := sockOpts[ssoICMPFilter] if !ok { @@ -241,7 +240,7 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { // Currently only Linux supports this. func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoICMPFilter] if !ok { @@ -255,7 +254,7 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { // Only supported on Linux. func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoAttachFilter] if !ok { diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go index b43935a5a..d12455548 100644 --- a/vendor/golang.org/x/net/ipv4/doc.go +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -241,4 +241,4 @@ // IncludeSourceSpecificGroup may return an error. package ipv4 // import "golang.org/x/net/ipv4" -// BUG(mikio): This package is not implemented on NaCl and Plan 9. +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go index 2ab877363..500946378 100644 --- a/vendor/golang.org/x/net/ipv4/endpoint.go +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -6,7 +6,6 @@ package ipv4 import ( "net" - "syscall" "time" "golang.org/x/net/internal/socket" @@ -58,7 +57,7 @@ func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } // SetControlMessage sets the per packet IP-level socket options. func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) } @@ -67,7 +66,7 @@ func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { // endpoint. func (c *PacketConn) SetDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.PacketConn.SetDeadline(t) } @@ -76,7 +75,7 @@ func (c *PacketConn) SetDeadline(t time.Time) error { // endpoint. func (c *PacketConn) SetReadDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.PacketConn.SetReadDeadline(t) } @@ -85,7 +84,7 @@ func (c *PacketConn) SetReadDeadline(t time.Time) error { // endpoint. func (c *PacketConn) SetWriteDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.PacketConn.SetWriteDeadline(t) } @@ -93,7 +92,7 @@ func (c *PacketConn) SetWriteDeadline(t time.Time) error { // Close closes the endpoint. func (c *PacketConn) Close() error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.PacketConn.Close() } @@ -124,7 +123,7 @@ type RawConn struct { // SetControlMessage sets the per packet IP-level socket options. 
func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) } @@ -133,7 +132,7 @@ func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { // endpoint. func (c *RawConn) SetDeadline(t time.Time) error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.packetHandler.IPConn.SetDeadline(t) } @@ -142,7 +141,7 @@ func (c *RawConn) SetDeadline(t time.Time) error { // endpoint. func (c *RawConn) SetReadDeadline(t time.Time) error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.packetHandler.IPConn.SetReadDeadline(t) } @@ -151,7 +150,7 @@ func (c *RawConn) SetReadDeadline(t time.Time) error { // endpoint. func (c *RawConn) SetWriteDeadline(t time.Time) error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.packetHandler.IPConn.SetWriteDeadline(t) } @@ -159,7 +158,7 @@ func (c *RawConn) SetWriteDeadline(t time.Time) error { // Close closes the endpoint. func (c *RawConn) Close() error { if !c.packetHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.packetHandler.IPConn.Close() } diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go index 119bf841b..587ae4a19 100644 --- a/vendor/golang.org/x/net/ipv4/genericopt.go +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -4,12 +4,10 @@ package ipv4 -import "syscall" - // TOS returns the type-of-service field value for outgoing packets. func (c *genericOpt) TOS() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoTOS] if !ok { @@ -22,7 +20,7 @@ func (c *genericOpt) TOS() (int, error) { // packets. func (c *genericOpt) SetTOS(tos int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoTOS] if !ok { @@ -34,7 +32,7 @@ func (c *genericOpt) SetTOS(tos int) error { // TTL returns the time-to-live field value for outgoing packets. func (c *genericOpt) TTL() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoTTL] if !ok { @@ -47,7 +45,7 @@ func (c *genericOpt) TTL() (int, error) { // packets. func (c *genericOpt) SetTTL(ttl int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoTTL] if !ok { diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go index c8822a6ab..358afe2f1 100644 --- a/vendor/golang.org/x/net/ipv4/header.go +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -9,7 +9,6 @@ import ( "fmt" "net" "runtime" - "syscall" "golang.org/x/net/internal/socket" ) @@ -54,7 +53,7 @@ func (h *Header) String() string { // Marshal returns the binary encoding of h. 
func (h *Header) Marshal() ([]byte, error) { if h == nil { - return nil, syscall.EINVAL + return nil, errInvalidConn } if h.Len < HeaderLen { return nil, errHeaderTooShort diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go index a5052e324..8d8ff98e9 100644 --- a/vendor/golang.org/x/net/ipv4/helper.go +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -10,6 +10,7 @@ import ( ) var ( + errInvalidConn = errors.New("invalid connection") errMissingAddress = errors.New("missing address") errMissingHeader = errors.New("missing header") errHeaderTooShort = errors.New("header too short") diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go index f00f5b052..966bb776f 100644 --- a/vendor/golang.org/x/net/ipv4/packet.go +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -6,7 +6,6 @@ package ipv4 import ( "net" - "syscall" "golang.org/x/net/internal/socket" ) @@ -28,7 +27,7 @@ func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn // header h, the payload p and the control message cm. func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { if !c.ok() { - return nil, nil, nil, syscall.EINVAL + return nil, nil, nil, errInvalidConn } return c.readFrom(b) } @@ -63,7 +62,7 @@ func slicePacket(b []byte) (h, p []byte, err error) { // Options = optional func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } return c.writeTo(h, p, cm) } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go index 3f06d7606..204a49fea 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -2,14 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv4 -import ( - "net" - "syscall" -) +import "net" // ReadFrom reads a payload of the received IPv4 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -17,7 +14,7 @@ import ( // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { - return 0, nil, nil, syscall.EINVAL + return 0, nil, nil, errInvalidConn } return c.readFrom(b) } @@ -30,7 +27,7 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. // control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } return c.writeTo(b, cm, dst) } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go index d26ccd90c..8d45599f4 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. 
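The ipv4 hunks above (and the ipv6 ones below) replace syscall.EINVAL with a package-level errInvalidConn for methods invoked on an improperly initialized endpoint. Code that matched err == syscall.EINVAL will no longer see that errno; a contrived sketch of the new behavior:

```go
package main

import (
	"fmt"

	"golang.org/x/net/ipv4"
)

func main() {
	// A zero PacketConn (not built with ipv4.NewPacketConn) fails the internal
	// ok() checks; with this patch its methods return the package's
	// "invalid connection" error instead of syscall.EINVAL.
	c := &ipv4.PacketConn{}
	if err := c.SetTTL(1); err != nil {
		fmt.Println(err) // invalid connection
	}
}
```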
// +build !go1.9 -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go index 2f1931183..4081aad8b 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build go1.9 -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index 3926de70b..1d434c61a 100644 --- a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -2,14 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 windows +// +build js nacl plan9 windows package ipv4 -import ( - "net" - "syscall" -) +import "net" // ReadFrom reads a payload of the received IPv4 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -17,7 +14,7 @@ import ( // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { - return 0, nil, nil, syscall.EINVAL + return 0, nil, nil, errInvalidConn } if n, src, err = c.PacketConn.ReadFrom(b); err != nil { return 0, nil, nil, err @@ -33,7 +30,7 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. // control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } if dst == nil { return 0, errMissingAddress diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go index 4f5fe683d..10d649245 100644 --- a/vendor/golang.org/x/net/ipv6/batch.go +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -9,7 +9,6 @@ package ipv6 import ( "net" "runtime" - "syscall" "golang.org/x/net/internal/socket" ) @@ -67,7 +66,7 @@ type Message = socket.Message // On other platforms, this method will read only a single message. func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": @@ -98,7 +97,7 @@ func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { // On other platforms, this method will write only a single message. func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } switch runtime.GOOS { case "linux": diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go index 703dafe84..eea4fde25 100644 --- a/vendor/golang.org/x/net/ipv6/dgramopt.go +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -6,7 +6,6 @@ package ipv6 import ( "net" - "syscall" "golang.org/x/net/bpf" ) @@ -15,7 +14,7 @@ import ( // multicast packets. func (c *dgramOpt) MulticastHopLimit() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoMulticastHopLimit] if !ok { @@ -28,7 +27,7 @@ func (c *dgramOpt) MulticastHopLimit() (int, error) { // outgoing multicast packets. 
func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastHopLimit] if !ok { @@ -41,7 +40,7 @@ func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { // packet transmissions. func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { if !c.ok() { - return nil, syscall.EINVAL + return nil, errInvalidConn } so, ok := sockOpts[ssoMulticastInterface] if !ok { @@ -54,7 +53,7 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { // multicast packet transmissions. func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastInterface] if !ok { @@ -67,7 +66,7 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { // should be copied and send back to the originator. func (c *dgramOpt) MulticastLoopback() (bool, error) { if !c.ok() { - return false, syscall.EINVAL + return false, errInvalidConn } so, ok := sockOpts[ssoMulticastLoopback] if !ok { @@ -84,7 +83,7 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { // should be copied and send back to the originator. func (c *dgramOpt) SetMulticastLoopback(on bool) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoMulticastLoopback] if !ok { @@ -104,7 +103,7 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { // configuration. func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoJoinGroup] if !ok { @@ -122,7 +121,7 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { // source-specific group. func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoLeaveGroup] if !ok { @@ -143,7 +142,7 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { // routing configuration. func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoJoinSourceGroup] if !ok { @@ -164,7 +163,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net // interface ifi. func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoLeaveSourceGroup] if !ok { @@ -186,7 +185,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne // ifi. func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoBlockSourceGroup] if !ok { @@ -207,7 +206,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source // group by ExcludeSourceSpecificGroup again on the interface ifi. func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoUnblockSourceGroup] if !ok { @@ -230,7 +229,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source // field is located. 
func (c *dgramOpt) Checksum() (on bool, offset int, err error) { if !c.ok() { - return false, 0, syscall.EINVAL + return false, 0, errInvalidConn } so, ok := sockOpts[ssoChecksum] if !ok { @@ -251,7 +250,7 @@ func (c *dgramOpt) Checksum() (on bool, offset int, err error) { // checksum field is located. func (c *dgramOpt) SetChecksum(on bool, offset int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoChecksum] if !ok { @@ -266,7 +265,7 @@ func (c *dgramOpt) SetChecksum(on bool, offset int) error { // ICMPFilter returns an ICMP filter. func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { if !c.ok() { - return nil, syscall.EINVAL + return nil, errInvalidConn } so, ok := sockOpts[ssoICMPFilter] if !ok { @@ -278,7 +277,7 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { // SetICMPFilter deploys the ICMP filter. func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoICMPFilter] if !ok { @@ -292,7 +291,7 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { // Only supported on Linux. func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoAttachFilter] if !ok { diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go index 664a97dea..cb4000623 100644 --- a/vendor/golang.org/x/net/ipv6/doc.go +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -240,4 +240,4 @@ // IncludeSourceSpecificGroup may return an error. package ipv6 // import "golang.org/x/net/ipv6" -// BUG(mikio): This package is not implemented on NaCl and Plan 9. +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go index 0624c1740..932575639 100644 --- a/vendor/golang.org/x/net/ipv6/endpoint.go +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -6,7 +6,6 @@ package ipv6 import ( "net" - "syscall" "time" "golang.org/x/net/internal/socket" @@ -34,7 +33,7 @@ func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } // with the endpoint. func (c *Conn) PathMTU() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoPathMTU] if !ok { @@ -76,7 +75,7 @@ func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } // socket options. func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) } @@ -85,7 +84,7 @@ func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { // endpoint. func (c *PacketConn) SetDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.SetDeadline(t) } @@ -94,7 +93,7 @@ func (c *PacketConn) SetDeadline(t time.Time) error { // endpoint. func (c *PacketConn) SetReadDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.SetReadDeadline(t) } @@ -103,7 +102,7 @@ func (c *PacketConn) SetReadDeadline(t time.Time) error { // endpoint. 
func (c *PacketConn) SetWriteDeadline(t time.Time) error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.SetWriteDeadline(t) } @@ -111,7 +110,7 @@ func (c *PacketConn) SetWriteDeadline(t time.Time) error { // Close closes the endpoint. func (c *PacketConn) Close() error { if !c.payloadHandler.ok() { - return syscall.EINVAL + return errInvalidConn } return c.payloadHandler.Close() } diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go index e9dbc2e18..1a18f7549 100644 --- a/vendor/golang.org/x/net/ipv6/genericopt.go +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -4,13 +4,11 @@ package ipv6 -import "syscall" - // TrafficClass returns the traffic class field value for outgoing // packets. func (c *genericOpt) TrafficClass() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoTrafficClass] if !ok { @@ -23,7 +21,7 @@ func (c *genericOpt) TrafficClass() (int, error) { // outgoing packets. func (c *genericOpt) SetTrafficClass(tclass int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoTrafficClass] if !ok { @@ -35,7 +33,7 @@ func (c *genericOpt) SetTrafficClass(tclass int) error { // HopLimit returns the hop limit field value for outgoing packets. func (c *genericOpt) HopLimit() (int, error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } so, ok := sockOpts[ssoHopLimit] if !ok { @@ -48,7 +46,7 @@ func (c *genericOpt) HopLimit() (int, error) { // packets. func (c *genericOpt) SetHopLimit(hoplim int) error { if !c.ok() { - return syscall.EINVAL + return errInvalidConn } so, ok := sockOpts[ssoHopLimit] if !ok { diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go index 259740132..7ac535229 100644 --- a/vendor/golang.org/x/net/ipv6/helper.go +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -10,6 +10,7 @@ import ( ) var ( + errInvalidConn = errors.New("invalid connection") errMissingAddress = errors.New("missing address") errHeaderTooShort = errors.New("header too short") errInvalidConnType = errors.New("invalid conn type") diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go index 4ee4b062c..3f23b5d21 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -2,14 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv6 -import ( - "net" - "syscall" -) +import "net" // ReadFrom reads a payload of the received IPv6 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -17,7 +14,7 @@ import ( // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { - return 0, nil, nil, syscall.EINVAL + return 0, nil, nil, errInvalidConn } return c.readFrom(b) } @@ -29,7 +26,7 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. // cm may be nil if control of the outgoing datagram is not required. 
func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } return c.writeTo(b, cm, dst) } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go index fdc6c3994..bc4209db7 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build !go1.9 -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go index 8f6d02e2f..7dd650480 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build go1.9 -// +build !nacl,!plan9,!windows +// +build !js,!nacl,!plan9,!windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index 99a43542b..459142d26 100644 --- a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -2,14 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl plan9 windows +// +build js nacl plan9 windows package ipv6 -import ( - "net" - "syscall" -) +import "net" // ReadFrom reads a payload of the received IPv6 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -17,7 +14,7 @@ import ( // src of the received datagram. func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { if !c.ok() { - return 0, nil, nil, syscall.EINVAL + return 0, nil, nil, errInvalidConn } if n, src, err = c.PacketConn.ReadFrom(b); err != nil { return 0, nil, nil, err @@ -32,7 +29,7 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. // cm may be nil if control of the outgoing datagram is not required. func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { if !c.ok() { - return 0, syscall.EINVAL + return 0, errInvalidConn } if dst == nil { return 0, errMissingAddress diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 0c58c7e1e..9379ba9ce 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -14,7 +14,11 @@ var fcntl64Syscall uintptr = SYS_FCNTL // FcntlInt performs a fcntl syscall on fd with the provided command and argument. 
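The fcntl.go hunk that continues just below is more than a rename: Syscall hands back a syscall.Errno, and storing a zero Errno directly in the returned error interface makes every successful call look like a failure, because an interface holding a concrete zero value is still non-nil. The patch therefore checks the errno before converting it. A small self-contained illustration of the pitfall and of the guarded conversion:

    package main

    import (
        "fmt"
        "syscall"
    )

    // toErr converts a raw errno the way the patched FcntlInt does: only a
    // non-zero errno becomes an error value.
    func toErr(errno syscall.Errno) error {
        if errno != 0 {
            return errno
        }
        return nil
    }

    func main() {
        var errno syscall.Errno // zero value, i.e. success

        var unguarded error = errno
        fmt.Println(unguarded != nil) // true: a zero Errno in an interface is not nil

        fmt.Println(toErr(errno) != nil) // false: the guarded conversion reports success
    }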
func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - valptr, _, err := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) + valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) + var err error + if errno != 0 { + err = errno + } return int(valptr), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 53fb85182..33c8b5f0d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -206,7 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil } -func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_LINK: pp := (*RawSockaddrDatalink)(unsafe.Pointer(rsa)) @@ -286,7 +286,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { Close(nfd) return 0, nil, ECONNABORTED } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -306,7 +306,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { rsa.Addr.Family = AF_UNIX rsa.Addr.Len = SizeofSockaddrUnix } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) @@ -356,7 +356,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from recvflags = int(msg.Flags) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index b5072de28..e34abe29d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -87,7 +87,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { if len > SizeofSockaddrAny { panic("RawSockaddrAny too small") } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index ba9df4ac1..5561a3eb7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -89,7 +89,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { if len > SizeofSockaddrAny { panic("RawSockaddrAny too small") } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 9908030cb..690c2c87f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -489,6 +489,47 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil } +// SockaddrRFCOMM implements the Sockaddr interface for AF_BLUETOOTH type sockets +// using the RFCOMM protocol. 
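The anyToSockaddr changes above all thread the socket descriptor into the conversion for the same reason: for AF_BLUETOOTH the address layout depends on the protocol, so the Linux hunk further below asks the kernel via getsockopt(SO_PROTOCOL) and then decodes either an L2CAP or an RFCOMM address, the latter using the SockaddrRFCOMM type whose documentation continues directly below. A hedged sketch of what this means for a caller of Accept on Linux; the function name and the printing are illustrative only:

    package btexample

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    // describePeer assumes fd is a listening AF_BLUETOOTH socket; Accept now
    // returns a concrete address type chosen from the socket's protocol.
    func describePeer(fd int) error {
        nfd, sa, err := unix.Accept(fd)
        if err != nil {
            return err
        }
        defer unix.Close(nfd)

        switch a := sa.(type) {
        case *unix.SockaddrRFCOMM:
            fmt.Printf("rfcomm peer %x on channel %d\n", a.Addr, a.Channel)
        case *unix.SockaddrL2:
            fmt.Printf("l2cap peer %x on psm %d\n", a.Addr, a.PSM)
        default:
            fmt.Printf("peer of type %T\n", sa)
        }
        return nil
    }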
+// +// Server example: +// +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 +// }) +// _ = Listen(fd, 1) +// nfd, sa, _ := Accept(fd) +// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) +// Read(nfd, buf) +// +// Client example: +// +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = Connect(fd, &SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 +// }) +// Write(fd, []byte(`hello`)) +type SockaddrRFCOMM struct { + // Addr represents a bluetooth address, byte ordering is little-endian. + Addr [6]uint8 + + // Channel is a designated bluetooth channel, only 1-30 are available for use. + // Since Linux 2.6.7 and further zero value is the first available channel. + Channel uint8 + + raw RawSockaddrRFCOMM +} + +func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_BLUETOOTH + sa.raw.Channel = sa.Channel + sa.raw.Bdaddr = sa.Addr + return unsafe.Pointer(&sa.raw), SizeofSockaddrRFCOMM, nil +} + // SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets. // The RxID and TxID fields are used for transport protocol addressing in // (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with @@ -651,7 +692,7 @@ func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil } -func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa)) @@ -728,6 +769,30 @@ func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { Port: pp.Port, } return sa, nil + case AF_BLUETOOTH: + proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + if err != nil { + return nil, err + } + // only BTPROTO_L2CAP and BTPROTO_RFCOMM can accept connections + switch proto { + case BTPROTO_L2CAP: + pp := (*RawSockaddrL2)(unsafe.Pointer(rsa)) + sa := &SockaddrL2{ + PSM: pp.Psm, + CID: pp.Cid, + Addr: pp.Bdaddr, + AddrType: pp.Bdaddr_type, + } + return sa, nil + case BTPROTO_RFCOMM: + pp := (*RawSockaddrRFCOMM)(unsafe.Pointer(rsa)) + sa := &SockaddrRFCOMM{ + Channel: pp.Channel, + Addr: pp.Bdaddr, + } + return sa, nil + } } return nil, EAFNOSUPPORT } @@ -739,7 +804,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { if err != nil { return } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -757,7 +822,7 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { if len > SizeofSockaddrAny { panic("RawSockaddrAny too small") } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -771,7 +836,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { if err = getsockname(fd, &rsa, &len); err != nil { return } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { @@ -960,7 +1025,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from recvflags = int(msg.Flags) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } diff --git 
a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 4d7efbc27..a05337d54 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -112,7 +112,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { if err = getsockname(fd, &rsa, &len); err != nil { return } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } // GetsockoptString returns the string value of the socket option opt for the @@ -314,7 +314,11 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { // FcntlInt performs a fcntl syscall on fd with the provided command and argument. func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - valptr, _, err := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + valptr, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + var err error + if errno != 0 { + err = errno + } return int(valptr), err } @@ -356,7 +360,7 @@ func Futimes(fd int, tv []Timeval) error { return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_UNIX: pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) @@ -407,7 +411,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { if nfd == -1 { return } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -444,7 +448,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from oobn = int(msg.Accrightslen) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index b835bad0f..95b2180ae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -219,7 +219,7 @@ func Getpeername(fd int) (sa Sockaddr, err error) { if err = getpeername(fd, &rsa, &len); err != nil { return } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } func GetsockoptByte(fd, level, opt int) (value byte, err error) { @@ -291,7 +291,7 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 0ae2aa842..7cc1bfd12 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1474,8 +1474,13 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } @@ -1484,9 +1489,8 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,13 +1499,9 @@ func EpollCreate(size int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index fa16c165e..c3dcb3818 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1474,8 +1474,13 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } @@ -1484,9 +1489,8 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) +func Dup2(oldfd int, newfd int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,13 +1499,9 @@ func EpollCreate(size int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index e89bc6b36..4c250033f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -248,6 +248,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -401,6 +408,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index d95372bae..2e4d709b4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -250,6 +250,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -405,6 +412,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 77875ba01..bf38e5e2c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -404,6 +411,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 5a9df694a..972c1b872 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -406,6 +413,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index dcb239de8..783e70e87 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -249,6 +249,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -402,6 +409,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 9cf85f721..5c6ea719d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -406,6 +413,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 6fd66e751..93effc8ec 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type 
RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -406,6 +413,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index faa5b3ef1..cc5ca242e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -249,6 +249,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -402,6 +409,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index ad4c45246..712f64029 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -252,6 +252,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -407,6 +414,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 1fdb2f216..1be45320a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -252,6 +252,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -407,6 +414,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index d32079d1a..932b655fe 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -250,6 +250,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -405,6 +412,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 24aa90bbb..62fc31b40 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -43,6 +43,11 @@ const ( SC_STATUS_PROCESS_INFO = 0 + SC_ACTION_NONE = 0 + SC_ACTION_RESTART = 1 + SC_ACTION_REBOOT = 2 + SC_ACTION_RUN_COMMAND = 3 + SERVICE_STOPPED = 1 SERVICE_START_PENDING = 2 
SERVICE_STOP_PENDING = 3 @@ -148,6 +153,19 @@ type ENUM_SERVICE_STATUS_PROCESS struct { ServiceStatusProcess SERVICE_STATUS_PROCESS } +type SERVICE_FAILURE_ACTIONS struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions *SC_ACTION +} + +type SC_ACTION struct { + Type uint32 + Delay uint32 +} + //sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle //sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW //sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index b4e424788..7d2a6794d 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -94,16 +94,29 @@ const ( FILE_APPEND_DATA = 0x00000004 FILE_WRITE_ATTRIBUTES = 0x00000100 - FILE_SHARE_READ = 0x00000001 - FILE_SHARE_WRITE = 0x00000002 - FILE_SHARE_DELETE = 0x00000004 - FILE_ATTRIBUTE_READONLY = 0x00000001 - FILE_ATTRIBUTE_HIDDEN = 0x00000002 - FILE_ATTRIBUTE_SYSTEM = 0x00000004 - FILE_ATTRIBUTE_DIRECTORY = 0x00000010 - FILE_ATTRIBUTE_ARCHIVE = 0x00000020 - FILE_ATTRIBUTE_NORMAL = 0x00000080 - FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_SHARE_READ = 0x00000001 + FILE_SHARE_WRITE = 0x00000002 + FILE_SHARE_DELETE = 0x00000004 + + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_DEVICE = 0x00000040 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_TEMPORARY = 0x00000100 + FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_ATTRIBUTE_COMPRESSED = 0x00000800 + FILE_ATTRIBUTE_OFFLINE = 0x00001000 + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 + FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 + FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 + FILE_ATTRIBUTE_VIRTUAL = 0x00010000 + FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 + FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000 + FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000 INVALID_FILE_ATTRIBUTES = 0xffffffff diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index fb104e6df..3b07a25b7 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -73,14 +73,14 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // be used directly after any stripping needed for security/privacy reasons. type Status struct { // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. - Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. 
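The windows/service.go hunk above introduces SC_ACTION and SERVICE_FAILURE_ACTIONS but no helper around them, so wiring them up is left to the caller. A sketch of how the two types compose for a change-service-config call; the serviceConfigFailureActions constant carries the documented Win32 value rather than assuming an exported one, and the ChangeServiceConfig2 call is assumed to be the (Handle, infoLevel, *byte) wrapper the package provides for other info levels:

    package svcexample

    import (
        "unsafe"

        "golang.org/x/sys/windows"
    )

    // serviceConfigFailureActions is the Win32 SERVICE_CONFIG_FAILURE_ACTIONS
    // info level, declared locally for this sketch.
    const serviceConfigFailureActions = 2

    // setRestartOnFailure restarts the service after each of its first two
    // failures, waiting delayMillis between attempts, and resets the failure
    // count after a day. svc is an open service handle with change rights.
    func setRestartOnFailure(svc windows.Handle, delayMillis uint32) error {
        actions := []windows.SC_ACTION{
            {Type: windows.SC_ACTION_RESTART, Delay: delayMillis},
            {Type: windows.SC_ACTION_RESTART, Delay: delayMillis},
            {Type: windows.SC_ACTION_NONE, Delay: 0},
        }
        fa := windows.SERVICE_FAILURE_ACTIONS{
            ResetPeriod:  86400, // seconds before the failure count resets
            ActionsCount: uint32(len(actions)),
            Actions:      &actions[0],
        }
        // ChangeServiceConfig2 is assumed to take (handle, infoLevel, *byte),
        // matching its use for service descriptions elsewhere in the package.
        return windows.ChangeServiceConfig2(svc, serviceConfigFailureActions,
            (*byte)(unsafe.Pointer(&fa)))
    }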
- Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. - Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` + Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index c40facce5..fa31565fd 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -16,81 +16,23 @@ * */ +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatility. + package grpc import ( - "math/rand" "time" ) // DefaultBackoffConfig uses values specified for backoff in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. var DefaultBackoffConfig = BackoffConfig{ - MaxDelay: 120 * time.Second, - baseDelay: 1.0 * time.Second, - factor: 1.6, - jitter: 0.2, -} - -// backoffStrategy defines the methodology for backing off after a grpc -// connection failure. -// -// This is unexported until the gRPC project decides whether or not to allow -// alternative backoff strategies. Once a decision is made, this type and its -// method may be exported. -type backoffStrategy interface { - // backoff returns the amount of time to wait before the next retry given - // the number of consecutive failures. - backoff(retries int) time.Duration + MaxDelay: 120 * time.Second, } // BackoffConfig defines the parameters for the default gRPC backoff strategy. type BackoffConfig struct { // MaxDelay is the upper bound of backoff delay. MaxDelay time.Duration - - // TODO(stevvooe): The following fields are not exported, as allowing - // changes would violate the current gRPC specification for backoff. If - // gRPC decides to allow more interesting backoff strategies, these fields - // may be opened up in the future. - - // baseDelay is the amount of time to wait before retrying after the first - // failure. - baseDelay time.Duration - - // factor is applied to the backoff after each retry. - factor float64 - - // jitter provides a range to randomize backoff delays. - jitter float64 -} - -func setDefaults(bc *BackoffConfig) { - md := bc.MaxDelay - *bc = DefaultBackoffConfig - - if md > 0 { - bc.MaxDelay = md - } -} - -func (bc BackoffConfig) backoff(retries int) time.Duration { - if retries == 0 { - return bc.baseDelay - } - backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) - for backoff < max && retries > 0 { - backoff *= bc.factor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + bc.jitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 63b8d7137..f9d83c2f3 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -226,3 +226,45 @@ type Balancer interface { // ClientConn.RemoveSubConn for its existing SubConns. 
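As the replacement comment in the backoff.go hunk above says, the backoff implementation now lives in an internal package and only the exported types stay behind, with MaxDelay as the single tunable left in BackoffConfig. For callers, the surviving surface is the dial option, roughly as in this sketch (the target address and delay are illustrative):

    package main

    import (
        "log"
        "time"

        "google.golang.org/grpc"
    )

    func main() {
        // Only MaxDelay remains configurable; base delay, factor and jitter
        // are now fixed inside the library.
        conn, err := grpc.Dial("localhost:50051",
            grpc.WithInsecure(),
            grpc.WithBackoffConfig(grpc.BackoffConfig{MaxDelay: 30 * time.Second}),
        )
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }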
Close() } + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go index b7abc6b74..e0ce32cfb 100644 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -55,7 +55,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B startCh: make(chan struct{}), conns: make(map[resolver.Address]balancer.SubConn), connSt: make(map[balancer.SubConn]*scState), - csEvltr: &connectivityStateEvaluator{}, + csEvltr: &balancer.ConnectivityStateEvaluator{}, state: connectivity.Idle, } cc.UpdateBalancerState(connectivity.Idle, bw) @@ -80,10 +80,6 @@ type balancerWrapper struct { cc balancer.ClientConn targetAddr string // Target without the scheme. - // To aggregate the connectivity state. - csEvltr *connectivityStateEvaluator - state connectivity.State - mu sync.Mutex conns map[resolver.Address]balancer.SubConn connSt map[balancer.SubConn]*scState @@ -92,6 +88,10 @@ type balancerWrapper struct { // - NewSubConn is created, cc wants to notify balancer of state changes; // - Build hasn't return, cc doesn't have access to balancer. startCh chan struct{} + + // To aggregate the connectivity state. + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State } // lbWatcher watches the Notify channel of the balancer and manages @@ -248,7 +248,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne scSt.down(errConnClosing) } } - sa := bw.csEvltr.recordTransition(oldS, s) + sa := bw.csEvltr.RecordTransition(oldS, s) if bw.state != sa { bw.state = sa } @@ -326,47 +326,3 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) return sc, done, nil } - -// connectivityStateEvaluator gets updated by addrConns when their -// states transition, based on which it evaluates the state of -// ClientConn. -type connectivityStateEvaluator struct { - mu sync.Mutex - numReady uint64 // Number of addrConns in ready state. 
- numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transientFailure. -} - -// recordTransition records state change happening in every subConn and based on -// that it evaluates what aggregated state should be. -// It can only transition between Ready, Connecting and TransientFailure. Other states, -// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection -// before any subConn is created ClientConn is in idle state. In the end when ClientConn -// closes it is in Shutdown state. -// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. -func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { - cse.mu.Lock() - defer cse.mu.Unlock() - - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure -} diff --git a/vendor/google.golang.org/grpc/channelz/funcs.go b/vendor/google.golang.org/grpc/channelz/funcs.go deleted file mode 100644 index 586a0336b..000000000 --- a/vendor/google.golang.org/grpc/channelz/funcs.go +++ /dev/null @@ -1,573 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package channelz defines APIs for enabling channelz service, entry -// registration/deletion, and accessing channelz data. It also defines channelz -// metric struct formats. -// -// All APIs in this package are experimental. -package channelz - -import ( - "sort" - "sync" - "sync/atomic" - - "google.golang.org/grpc/grpclog" -) - -var ( - db dbWrapper - idGen idGenerator - // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = 50 - curState int32 -) - -// TurnOn turns on channelz data collection. -func TurnOn() { - if !IsOn() { - NewChannelzStorage() - atomic.StoreInt32(&curState, 1) - } -} - -// IsOn returns whether channelz data collection is on. -func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) -} - -// dbWarpper wraps around a reference to internal channelz data storage, and -// provide synchronized functionality to set and get the reference. -type dbWrapper struct { - mu sync.RWMutex - DB *channelMap -} - -func (d *dbWrapper) set(db *channelMap) { - d.mu.Lock() - d.DB = db - d.mu.Unlock() -} - -func (d *dbWrapper) get() *channelMap { - d.mu.RLock() - defer d.mu.RUnlock() - return d.DB -} - -// NewChannelzStorage initializes channelz data storage and id generator. 
-// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) - idGen.reset() -} - -// GetTopChannels returns a slice of top channel's ChannelMetric, along with a -// boolean indicating whether there's more top channels to be queried for. -// -// The arg id specifies that only top channel with id at or above it will be included -// in the result. The returned slice is up to a length of EntryPerPage, and is -// sorted in ascending id order. -func GetTopChannels(id int64) ([]*ChannelMetric, bool) { - return db.get().GetTopChannels(id) -} - -// GetServers returns a slice of server's ServerMetric, along with a -// boolean indicating whether there's more servers to be queried for. -// -// The arg id specifies that only server with id at or above it will be included -// in the result. The returned slice is up to a length of EntryPerPage, and is -// sorted in ascending id order. -func GetServers(id int64) ([]*ServerMetric, bool) { - return db.get().GetServers(id) -} - -// GetServerSockets returns a slice of server's (identified by id) normal socket's -// SocketMetric, along with a boolean indicating whether there's more sockets to -// be queried for. -// -// The arg startID specifies that only sockets with id at or above it will be -// included in the result. The returned slice is up to a length of EntryPerPage, -// and is sorted in ascending id order. -func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { - return db.get().GetServerSockets(id, startID) -} - -// GetChannel returns the ChannelMetric for the channel (identified by id). -func GetChannel(id int64) *ChannelMetric { - return db.get().GetChannel(id) -} - -// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). -func GetSubChannel(id int64) *SubChannelMetric { - return db.get().GetSubChannel(id) -} - -// GetSocket returns the SocketInternalMetric for the socket (identified by id). -func GetSocket(id int64) *SocketMetric { - return db.get().GetSocket(id) -} - -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { - id := idGen.genID() - cn := &channel{ - refName: ref, - c: c, - subChans: make(map[int64]string), - nestedChans: make(map[int64]string), - id: id, - pid: pid, - } - if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) - } else { - db.get().addChannel(id, cn, false, pid, ref) - } - return id -} - -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. 
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - grpclog.Error("a SubChannel's parent id cannot be 0") - return 0 - } - id := idGen.genID() - sc := &subChannel{ - refName: ref, - c: c, - sockets: make(map[int64]string), - id: id, - pid: pid, - } - db.get().addSubChannel(id, sc, pid, ref) - return id -} - -// RegisterServer registers the given server s in channelz database. It returns -// the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { - id := idGen.genID() - svr := &server{ - refName: ref, - s: s, - sockets: make(map[int64]string), - listenSockets: make(map[int64]string), - id: id, - } - db.get().addServer(id, svr) - return id -} - -// RegisterListenSocket registers the given listen socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - grpclog.Error("a ListenSocket's parent id cannot be 0") - return 0 - } - id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) - return id -} - -// RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - grpclog.Error("a NormalSocket's parent id cannot be 0") - return 0 - } - id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) - return id -} - -// RemoveEntry removes an entry with unique channelz trakcing id to be id from -// channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) -} - -// channelMap is the storage data structure for channelz. -// Methods of channelMap can be divided in two two categories with respect to locking. -// 1. Methods acquire the global lock. -// 2. Methods that can only be called when global lock is held. -// A second type of method need always to be called inside a first type of method. 
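The comment block above spells out channelMap's two-tier locking discipline: some methods take the global lock themselves, the rest may only run with the lock already held, and the second kind is always reached through the first. A minimal standalone sketch of that convention, with hypothetical names:

    package lockdemo

    import "sync"

    // registry follows the same split as channelMap: exported methods acquire
    // the lock, lower-case helpers require the caller to hold it.
    type registry struct {
        mu    sync.RWMutex
        items map[int64]string
    }

    // Get is a first-kind method: it takes the lock and delegates.
    func (r *registry) Get(id int64) (string, bool) {
        r.mu.RLock()
        defer r.mu.RUnlock()
        return r.get(id)
    }

    // get is a second-kind method: r.mu must be held by the caller.
    func (r *registry) get(id int64) (string, bool) {
        v, ok := r.items[id]
        return v, ok
    }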
-type channelMap struct { - mu sync.RWMutex - topLevelChannels map[int64]struct{} - servers map[int64]*server - channels map[int64]*channel - subChannels map[int64]*subChannel - listenSockets map[int64]*listenSocket - normalSockets map[int64]*normalSocket -} - -func (c *channelMap) addServer(id int64, s *server) { - c.mu.Lock() - s.cm = c - c.servers[id] = s - c.mu.Unlock() -} - -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { - c.mu.Lock() - cn.cm = c - c.channels[id] = cn - if isTopChannel { - c.topLevelChannels[id] = struct{}{} - } else { - c.findEntry(pid).addChild(id, cn) - } - c.mu.Unlock() -} - -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { - c.mu.Lock() - sc.cm = c - c.subChannels[id] = sc - c.findEntry(pid).addChild(id, sc) - c.mu.Unlock() -} - -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { - c.mu.Lock() - ls.cm = c - c.listenSockets[id] = ls - c.findEntry(pid).addChild(id, ls) - c.mu.Unlock() -} - -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { - c.mu.Lock() - ns.cm = c - c.normalSockets[id] = ns - c.findEntry(pid).addChild(id, ns) - c.mu.Unlock() -} - -// removeEntry triggers the removal of an entry, which may not indeed delete the -// entry, if it has to wait on the deletion of its children, or may lead to a chain -// of entry deletion. For example, deleting the last socket of a gracefully shutting -// down server will lead to the server being also deleted. -func (c *channelMap) removeEntry(id int64) { - c.mu.Lock() - c.findEntry(id).triggerDelete() - c.mu.Unlock() -} - -// c.mu must be held by the caller. -func (c *channelMap) findEntry(id int64) entry { - var v entry - var ok bool - if v, ok = c.channels[id]; ok { - return v - } - if v, ok = c.subChannels[id]; ok { - return v - } - if v, ok = c.servers[id]; ok { - return v - } - if v, ok = c.listenSockets[id]; ok { - return v - } - if v, ok = c.normalSockets[id]; ok { - return v - } - return &dummyEntry{idNotFound: id} -} - -// c.mu must be held by the caller -// deleteEntry simply deletes an entry from the channelMap. Before calling this -// method, caller must check this entry is ready to be deleted, i.e removeEntry() -// has been called on it, and no children still exist. -// Conditionals are ordered by the expected frequency of deletion of each entity -// type, in order to optimize performance. 
-func (c *channelMap) deleteEntry(id int64) { - var ok bool - if _, ok = c.normalSockets[id]; ok { - delete(c.normalSockets, id) - return - } - if _, ok = c.subChannels[id]; ok { - delete(c.subChannels, id) - return - } - if _, ok = c.channels[id]; ok { - delete(c.channels, id) - delete(c.topLevelChannels, id) - return - } - if _, ok = c.listenSockets[id]; ok { - delete(c.listenSockets, id) - return - } - if _, ok = c.servers[id]; ok { - delete(c.servers, id) - return - } -} - -type int64Slice []int64 - -func (s int64Slice) Len() int { return len(s) } -func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } - -func copyMap(m map[int64]string) map[int64]string { - n := make(map[int64]string) - for k, v := range m { - n[k] = v - } - return n -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) { - c.mu.RLock() - l := len(c.topLevelChannels) - ids := make([]int64, 0, l) - cns := make([]*channel, 0, min(l, EntryPerPage)) - - for k := range c.topLevelChannels { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := 0 - var end bool - var t []*ChannelMetric - for i, v := range ids[idx:] { - if count == EntryPerPage { - break - } - if cn, ok := c.channels[v]; ok { - cns = append(cns, cn) - t = append(t, &ChannelMetric{ - NestedChans: copyMap(cn.nestedChans), - SubChans: copyMap(cn.subChans), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, cn := range cns { - t[i].ChannelData = cn.c.ChannelzMetric() - t[i].ID = cn.id - t[i].RefName = cn.refName - } - return t, end -} - -func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) { - c.mu.RLock() - l := len(c.servers) - ids := make([]int64, 0, l) - ss := make([]*server, 0, min(l, EntryPerPage)) - for k := range c.servers { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := 0 - var end bool - var s []*ServerMetric - for i, v := range ids[idx:] { - if count == EntryPerPage { - break - } - if svr, ok := c.servers[v]; ok { - ss = append(ss, svr) - s = append(s, &ServerMetric{ - ListenSockets: copyMap(svr.listenSockets), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, svr := range ss { - s[i].ServerData = svr.s.ChannelzMetric() - s[i].ID = svr.id - s[i].RefName = svr.refName - } - return s, end -} - -func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - // server with id doesn't exist. 
- c.mu.RUnlock() - return nil, true - } - svrskts := svr.sockets - l := len(svrskts) - ids := make([]int64, 0, l) - sks := make([]*normalSocket, 0, min(l, EntryPerPage)) - for k := range svrskts { - ids = append(ids, k) - } - sort.Sort((int64Slice(ids))) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := 0 - var end bool - for i, v := range ids[idx:] { - if count == EntryPerPage { - break - } - if ns, ok := c.normalSockets[v]; ok { - sks = append(sks, ns) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - var s []*SocketMetric - for _, ns := range sks { - sm := &SocketMetric{} - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - s = append(s, sm) - } - return s, end -} - -func (c *channelMap) GetChannel(id int64) *ChannelMetric { - cm := &ChannelMetric{} - var cn *channel - var ok bool - c.mu.RLock() - if cn, ok = c.channels[id]; !ok { - // channel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.NestedChans = copyMap(cn.nestedChans) - cm.SubChans = copyMap(cn.subChans) - c.mu.RUnlock() - cm.ChannelData = cn.c.ChannelzMetric() - cm.ID = cn.id - cm.RefName = cn.refName - return cm -} - -func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { - cm := &SubChannelMetric{} - var sc *subChannel - var ok bool - c.mu.RLock() - if sc, ok = c.subChannels[id]; !ok { - // subchannel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.Sockets = copyMap(sc.sockets) - c.mu.RUnlock() - cm.ChannelData = sc.c.ChannelzMetric() - cm.ID = sc.id - cm.RefName = sc.refName - return cm -} - -func (c *channelMap) GetSocket(id int64) *SocketMetric { - sm := &SocketMetric{} - c.mu.RLock() - if ls, ok := c.listenSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ls.s.ChannelzMetric() - sm.ID = ls.id - sm.RefName = ls.refName - return sm - } - if ns, ok := c.normalSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - return sm - } - c.mu.RUnlock() - return nil -} - -type idGenerator struct { - id int64 -} - -func (i *idGenerator) reset() { - atomic.StoreInt64(&i.id, 0) -} - -func (i *idGenerator) genID() int64 { - return atomic.AddInt64(&i.id, 1) -} diff --git a/vendor/google.golang.org/grpc/channelz/types.go b/vendor/google.golang.org/grpc/channelz/types.go deleted file mode 100644 index 153d75340..000000000 --- a/vendor/google.golang.org/grpc/channelz/types.go +++ /dev/null @@ -1,418 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "net" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" -) - -// entry represents a node in the channelz database. 
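The entry interface described by the comment above, and spelled out just below, captures a deferred-deletion scheme: triggerDelete only marks an entry, and the entry actually leaves the database once its last child is removed. A compact sketch of that scheme with hypothetical names, leaving out the locking that the real code layers on top:

    package treedemo

    // node mimics a channelz entry: asking it to delete itself only marks it,
    // and the real removal waits until the child list is empty.
    type node struct {
        id       int64
        parent   *node
        children map[int64]*node
        store    map[int64]*node
        deleting bool
    }

    func (n *node) addChild(c *node) {
        c.parent = n
        n.children[c.id] = c
    }

    func (n *node) triggerDelete() {
        n.deleting = true
        n.deleteSelfIfReady()
    }

    func (n *node) deleteChild(id int64) {
        delete(n.children, id)
        n.deleteSelfIfReady()
    }

    func (n *node) deleteSelfIfReady() {
        if !n.deleting || len(n.children) > 0 {
            return
        }
        delete(n.store, n.id)
        if n.parent != nil {
            // Removing the last child may in turn complete the parent's own
            // pending deletion, cascading up the tree.
            n.parent.deleteChild(n.id)
        }
    }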
-type entry interface { - // addChild adds a child e, whose channelz id is id to child list - addChild(id int64, e entry) - // deleteChild deletes a child with channelz id to be id from child list - deleteChild(id int64) - // triggerDelete tries to delete self from channelz database. However, if child - // list is not empty, then deletion from the database is on hold until the last - // child is deleted from database. - triggerDelete() - // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child - // list is now empty. If both conditions are met, then delete self from database. - deleteSelfIfReady() -} - -// dummyEntry is a fake entry to handle entry not found case. -type dummyEntry struct { - idNotFound int64 -} - -func (d *dummyEntry) addChild(id int64, e entry) { - // Note: It is possible for a normal program to reach here under race condition. - // For example, there could be a race between ClientConn.Close() info being propagated - // to addrConn and http2Client. ClientConn.Close() cancel the context and result - // in http2Client to error. The error info is then caught by transport monitor - // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, - // the addrConn will create a new transport. And when registering the new transport in - // channelz, its parent addrConn could have already been torn down and deleted - // from channelz tracking, and thus reach the code here. - grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) -} - -func (d *dummyEntry) deleteChild(id int64) { - // It is possible for a normal program to reach here under race condition. - // Refer to the example described in addChild(). - grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) -} - -func (d *dummyEntry) triggerDelete() { - grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) -} - -func (*dummyEntry) deleteSelfIfReady() { - // code should not reach here. deleteSelfIfReady is always called on an existing entry. -} - -// ChannelMetric defines the info channelz provides for a specific Channel, which -// includes ChannelInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ChannelMetric struct { - // ID is the channelz id of this channel. - ID int64 - // RefName is the human readable reference string of this channel. - RefName string - // ChannelData contains channel internal metric reported by the channel through - // ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this channel in the format of - // a map from nested channel channelz id to corresponding reference string. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this channel in the format of a - // map from subchannel channelz id to corresponding reference string. - SubChans map[int64]string - // Sockets tracks the socket type children of this channel in the format of a map - // from socket channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow channel having sockets directly, - // therefore, this is field is unused. 
- Sockets map[int64]string -} - -// SubChannelMetric defines the info channelz provides for a specific SubChannel, -// which includes ChannelInternalMetric and channelz-specific data, such as -// channelz id, child list, etc. -type SubChannelMetric struct { - // ID is the channelz id of this subchannel. - ID int64 - // RefName is the human readable reference string of this subchannel. - RefName string - // ChannelData contains subchannel internal metric reported by the subchannel - // through ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this subchannel in the format of - // a map from nested channel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have nested channels - // as children, therefore, this field is unused. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this subchannel in the format of a - // map from subchannel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have subchannels - // as children, therefore, this field is unused. - SubChans map[int64]string - // Sockets tracks the socket type children of this subchannel in the format of a map - // from socket channelz id to corresponding reference string. - Sockets map[int64]string -} - -// ChannelInternalMetric defines the struct that the implementor of Channel interface -// should return from ChannelzMetric(). -type ChannelInternalMetric struct { - // current connectivity state of the channel. - State connectivity.State - // The target this channel originally tried to connect to. May be absent - Target string - // The number of calls started on the channel. - CallsStarted int64 - // The number of calls that have completed with an OK status. - CallsSucceeded int64 - // The number of calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the channel. - LastCallStartedTimestamp time.Time - //TODO: trace -} - -// Channel is the interface that should be satisfied in order to be tracked by -// channelz as Channel or SubChannel. 
-type Channel interface { - ChannelzMetric() *ChannelInternalMetric -} - -type channel struct { - refName string - c Channel - closeCalled bool - nestedChans map[int64]string - subChans map[int64]string - id int64 - pid int64 - cm *channelMap -} - -func (c *channel) addChild(id int64, e entry) { - switch v := e.(type) { - case *subChannel: - c.subChans[id] = v.refName - case *channel: - c.nestedChans[id] = v.refName - default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) - } -} - -func (c *channel) deleteChild(id int64) { - delete(c.subChans, id) - delete(c.nestedChans, id) - c.deleteSelfIfReady() -} - -func (c *channel) triggerDelete() { - c.closeCalled = true - c.deleteSelfIfReady() -} - -func (c *channel) deleteSelfIfReady() { - if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return - } - c.cm.deleteEntry(c.id) - // not top channel - if c.pid != 0 { - c.cm.findEntry(c.pid).deleteChild(c.id) - } -} - -type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap -} - -func (sc *subChannel) addChild(id int64, e entry) { - if v, ok := e.(*normalSocket); ok { - sc.sockets[id] = v.refName - } else { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) - } -} - -func (sc *subChannel) deleteChild(id int64) { - delete(sc.sockets, id) - sc.deleteSelfIfReady() -} - -func (sc *subChannel) triggerDelete() { - sc.closeCalled = true - sc.deleteSelfIfReady() -} - -func (sc *subChannel) deleteSelfIfReady() { - if !sc.closeCalled || len(sc.sockets) != 0 { - return - } - sc.cm.deleteEntry(sc.id) - sc.cm.findEntry(sc.pid).deleteChild(sc.id) -} - -// SocketMetric defines the info channelz provides for a specific Socket, which -// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. -type SocketMetric struct { - // ID is the channelz id of this socket. - ID int64 - // RefName is the human readable reference string of this socket. - RefName string - // SocketData contains socket internal metric reported by the socket through - // ChannelzMetric(). - SocketData *SocketInternalMetric -} - -// SocketInternalMetric defines the struct that the implementor of Socket interface -// should return from ChannelzMetric(). -type SocketInternalMetric struct { - // The number of streams that have been started. - StreamsStarted int64 - // The number of streams that have ended successfully: - // On client side, receiving frame with eos bit set. - // On server side, sending frame with eos bit set. - StreamsSucceeded int64 - // The number of streams that have ended unsuccessfully: - // On client side, termination without receiving frame with eos bit set. - // On server side, termination without sending frame with eos bit set. - StreamsFailed int64 - // The number of messages successfully sent on this socket. - MessagesSent int64 - MessagesReceived int64 - // The number of keep alives sent. This is typically implemented with HTTP/2 - // ping messages. - KeepAlivesSent int64 - // The last time a stream was created by this endpoint. Usually unset for - // servers. - LastLocalStreamCreatedTimestamp time.Time - // The last time a stream was created by the remote endpoint. Usually unset - // for clients. - LastRemoteStreamCreatedTimestamp time.Time - // The last time a message was sent by this endpoint. - LastMessageSentTimestamp time.Time - // The last time a message was received by this endpoint. 
- LastMessageReceivedTimestamp time.Time - // The amount of window, granted to the local endpoint by the remote endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - LocalFlowControlWindow int64 - // The amount of window, granted to the remote endpoint by the local endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - RemoteFlowControlWindow int64 - // The locally bound address. - LocalAddr net.Addr - // The remote bound address. May be absent. - RemoteAddr net.Addr - // Optional, represents the name of the remote endpoint, if different than - // the original target name. - RemoteName string - //TODO: socket options - //TODO: Security -} - -// Socket is the interface that should be satisfied in order to be tracked by -// channelz as Socket. -type Socket interface { - ChannelzMetric() *SocketInternalMetric -} - -type listenSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ls *listenSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) -} - -func (ls *listenSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) -} - -func (ls *listenSocket) triggerDelete() { - ls.cm.deleteEntry(ls.id) - ls.cm.findEntry(ls.pid).deleteChild(ls.id) -} - -func (ls *listenSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") -} - -type normalSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ns *normalSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) -} - -func (ns *normalSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) -} - -func (ns *normalSocket) triggerDelete() { - ns.cm.deleteEntry(ns.id) - ns.cm.findEntry(ns.pid).deleteChild(ns.id) -} - -func (ns *normalSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") -} - -// ServerMetric defines the info channelz provides for a specific Server, which -// includes ServerInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ServerMetric struct { - // ID is the channelz id of this server. - ID int64 - // RefName is the human readable reference string of this server. - RefName string - // ServerData contains server internal metric reported by the server through - // ChannelzMetric(). - ServerData *ServerInternalMetric - // ListenSockets tracks the listener socket type children of this server in the - // format of a map from socket channelz id to corresponding reference string. - ListenSockets map[int64]string -} - -// ServerInternalMetric defines the struct that the implementor of Server interface -// should return from ChannelzMetric(). -type ServerInternalMetric struct { - // The number of incoming calls started on the server. - CallsStarted int64 - // The number of incoming calls that have completed with an OK status. - CallsSucceeded int64 - // The number of incoming calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the server. 
- LastCallStartedTimestamp time.Time - //TODO: trace -} - -// Server is the interface to be satisfied in order to be tracked by channelz as -// Server. -type Server interface { - ChannelzMetric() *ServerInternalMetric -} - -type server struct { - refName string - s Server - closeCalled bool - sockets map[int64]string - listenSockets map[int64]string - id int64 - cm *channelMap -} - -func (s *server) addChild(id int64, e entry) { - switch v := e.(type) { - case *normalSocket: - s.sockets[id] = v.refName - case *listenSocket: - s.listenSockets[id] = v.refName - default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) - } -} - -func (s *server) deleteChild(id int64) { - delete(s.sockets, id) - delete(s.listenSockets, id) - s.deleteSelfIfReady() -} - -func (s *server) triggerDelete() { - s.closeCalled = true - s.deleteSelfIfReady() -} - -func (s *server) deleteSelfIfReady() { - if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { - return - } - s.cm.deleteEntry(s.id) -} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index e8d95b43b..84ba9e5ad 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -32,11 +32,13 @@ import ( "golang.org/x/net/trace" "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - "google.golang.org/grpc/channelz" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. @@ -49,6 +51,8 @@ import ( const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" ) var ( @@ -97,7 +101,7 @@ type dialOptions struct { streamInt StreamClientInterceptor cp Compressor dc Decompressor - bs backoffStrategy + bs backoff.Strategy block bool insecure bool timeout time.Duration @@ -275,17 +279,17 @@ func WithBackoffMaxDelay(md time.Duration) DialOption { // Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up // for use. func WithBackoffConfig(b BackoffConfig) DialOption { - // Set defaults to ensure that provided BackoffConfig is valid and - // unexported fields get default values. - setDefaults(&b) - return withBackoff(b) + + return withBackoff(backoff.Exponential{ + MaxDelay: b.MaxDelay, + }) } // withBackoff sets the backoff strategy used for connectRetryNum after a // failed connection attempt. // // This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs backoffStrategy) DialOption { +func withBackoff(bs backoff.Strategy) DialOption { return func(o *dialOptions) { o.bs = bs } @@ -340,6 +344,11 @@ func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp } } +func init() { + internal.WithContextDialer = withContextDialer + internal.WithResolverBuilder = withResolverBuilder +} + // WithDialer returns a DialOption that specifies a function to use for dialing network addresses. // If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's // Temporary() method to decide if it should try to reconnect to the network address. 
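The clientconn.go hunks above swap the package-private backoffStrategy for the shared backoff.Strategy, so WithBackoffConfig now only carries MaxDelay into backoff.Exponential. A minimal dial sketch against that public surface; the target address and delay value are illustrative:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Only MaxDelay is honored here; per the hunk above it is translated into
	// backoff.Exponential{MaxDelay: b.MaxDelay} internally.
	conn, err := grpc.Dial(
		"example.local:50051", // illustrative target
		grpc.WithInsecure(),
		grpc.WithBackoffConfig(grpc.BackoffConfig{MaxDelay: 30 * time.Second}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}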
@@ -532,7 +541,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } } if cc.dopts.bs == nil { - cc.dopts.bs = DefaultBackoffConfig + cc.dopts.bs = backoff.Exponential{ + MaxDelay: DefaultBackoffConfig.MaxDelay, + } } if cc.dopts.resolverBuilder == nil { // Only try to parse target when resolver builder is not already set. @@ -1052,9 +1063,9 @@ func (cc *ClientConn) handleServiceConfig(js string) error { } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { - cc.mu.Lock() + cc.mu.RLock() r := cc.resolverWrapper - cc.mu.Unlock() + cc.mu.RUnlock() if r == nil { return } @@ -1203,7 +1214,7 @@ func (ac *addrConn) resetTransport() error { // This means either a successful HTTP2 connection was established // or this is the first time this addrConn is trying to establish a // connection. - backoffFor := ac.dopts.bs.backoff(connectRetryNum) // time.Duration. + backoffFor := ac.dopts.bs.Backoff(connectRetryNum) // time.Duration. // This will be the duration that dial gets to finish. dialDuration := getMinConnectTimeout() if backoffFor > dialDuration { diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index a8280ae66..d9b9d5782 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -22,6 +22,7 @@ package codes // import "google.golang.org/grpc/codes" import ( "fmt" + "strconv" ) // A Code is an unsigned 32-bit error code as defined in the gRPC spec. @@ -143,6 +144,8 @@ const ( // Unauthenticated indicates the request does not have valid // authentication credentials for the operation. Unauthenticated Code = 16 + + _maxCode = 17 ) var strToCode = map[string]Code{ @@ -176,6 +179,16 @@ func (c *Code) UnmarshalJSON(b []byte) error { if c == nil { return fmt.Errorf("nil receiver passed to UnmarshalJSON") } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + if jc, ok := strToCode[string(b)]; ok { *c = jc return nil diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go deleted file mode 100644 index bc2b44525..000000000 --- a/vendor/google.golang.org/grpc/grpclb.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -const ( - lbTokeyKey = "lb-token" - defaultFallbackTimeout = 10 * time.Second - grpclbName = "grpclb" -) - -func convertDuration(d *lbpb.Duration) time.Duration { - if d == nil { - return 0 - } - return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond -} - -// Client API for LoadBalancer service. -// Mostly copied from generated pb.go file. -// To avoid circular dependency. -type loadBalancerClient struct { - cc *ClientConn -} - -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) { - desc := &StreamDesc{ - StreamName: "BalanceLoad", - ServerStreams: true, - ClientStreams: true, - } - stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) - if err != nil { - return nil, err - } - x := &balanceLoadClientStream{stream} - return x, nil -} - -type balanceLoadClientStream struct { - ClientStream -} - -func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { - m := new(lbpb.LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func init() { - balancer.Register(newLBBuilder()) -} - -// newLBBuilder creates a builder for grpclb. -func newLBBuilder() balancer.Builder { - return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout) -} - -// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given -// fallbackTimeout. If no response is received from the remote balancer within -// fallbackTimeout, the backend addresses from the resolved address list will be -// used. -// -// Only call this function when a non-default fallback timeout is needed. -func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { - return &lbBuilder{ - fallbackTimeout: fallbackTimeout, - } -} - -type lbBuilder struct { - fallbackTimeout time.Duration -} - -func (b *lbBuilder) Name() string { - return grpclbName -} - -func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - // This generates a manual resolver builder with a random scheme. This - // scheme will be used to dial to remote LB, so we can send filtered address - // updates to remote LB ClientConn using this manual resolver. 
- scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36) - r := &lbManualResolver{scheme: scheme, ccb: cc} - - var target string - targetSplitted := strings.Split(cc.Target(), ":///") - if len(targetSplitted) < 2 { - target = cc.Target() - } else { - target = targetSplitted[1] - } - - lb := &lbBalancer{ - cc: newLBCacheClientConn(cc), - target: target, - opt: opt, - fallbackTimeout: b.fallbackTimeout, - doneCh: make(chan struct{}), - - manualResolver: r, - csEvltr: &connectivityStateEvaluator{}, - subConns: make(map[resolver.Address]balancer.SubConn), - scStates: make(map[balancer.SubConn]connectivity.State), - picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, - clientStats: &rpcStats{}, - } - - return lb -} - -type lbBalancer struct { - cc *lbCacheClientConn - target string - opt balancer.BuildOptions - fallbackTimeout time.Duration - doneCh chan struct{} - - // manualResolver is used in the remote LB ClientConn inside grpclb. When - // resolved address updates are received by grpclb, filtered updates will be - // send to remote LB ClientConn through this resolver. - manualResolver *lbManualResolver - // The ClientConn to talk to the remote balancer. - ccRemoteLB *ClientConn - - // Support client side load reporting. Each picker gets a reference to this, - // and will update its content. - clientStats *rpcStats - - mu sync.Mutex // guards everything following. - // The full server list including drops, used to check if the newly received - // serverList contains anything new. Each generate picker will also have - // reference to this list to do the first layer pick. - fullServerList []*lbpb.Server - // All backends addresses, with metadata set to nil. This list contains all - // backend addresses in the same order and with the same duplicates as in - // serverlist. When generating picker, a SubConn slice with the same order - // but with only READY SCs will be gerenated. - backendAddrs []resolver.Address - // Roundrobin functionalities. - csEvltr *connectivityStateEvaluator - state connectivity.State - subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. - scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. - picker balancer.Picker - // Support fallback to resolved backend addresses if there's no response - // from remote balancer within fallbackTimeout. - fallbackTimerExpired bool - serverListReceived bool - // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set - // when resolved address updates are received, and read in the goroutine - // handling fallback. - resolvedBackendAddrs []resolver.Address -} - -// regeneratePicker takes a snapshot of the balancer, and generates a picker from -// it. The picker -// - always returns ErrTransientFailure if the balancer is in TransientFailure, -// - does two layer roundrobin pick otherwise. -// Caller must hold lb.mu. 
-func (lb *lbBalancer) regeneratePicker() { - if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: balancer.ErrTransientFailure} - return - } - var readySCs []balancer.SubConn - for _, a := range lb.backendAddrs { - if sc, ok := lb.subConns[a]; ok { - if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready { - readySCs = append(readySCs, sc) - } - } - } - - if len(lb.fullServerList) <= 0 { - if len(readySCs) <= 0 { - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} - return - } - lb.picker = &rrPicker{subConns: readySCs} - return - } - lb.picker = &lbPicker{ - serverList: lb.fullServerList, - subConns: readySCs, - stats: lb.clientStats, - } -} - -func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) - lb.mu.Lock() - defer lb.mu.Unlock() - - oldS, ok := lb.scStates[sc] - if !ok { - grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) - return - } - lb.scStates[sc] = s - switch s { - case connectivity.Idle: - sc.Connect() - case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. - delete(lb.scStates, sc) - } - - oldAggrState := lb.state - lb.state = lb.csEvltr.recordTransition(oldS, s) - - // Regenerate picker when one of the following happens: - // - this sc became ready from not-ready - // - this sc became not-ready from ready - // - the aggregated state of balancer became TransientFailure from non-TransientFailure - // - the aggregated state of balancer became non-TransientFailure from TransientFailure - if (oldS == connectivity.Ready) != (s == connectivity.Ready) || - (lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { - lb.regeneratePicker() - } - - lb.cc.UpdateBalancerState(lb.state, lb.picker) -} - -// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use -// resolved backends (backends received from resolver, not from remote balancer) -// if no connection to remote balancers was successful. -func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { - timer := time.NewTimer(fallbackTimeout) - defer timer.Stop() - select { - case <-timer.C: - case <-lb.doneCh: - return - } - lb.mu.Lock() - if lb.serverListReceived { - lb.mu.Unlock() - return - } - lb.fallbackTimerExpired = true - lb.refreshSubConns(lb.resolvedBackendAddrs) - lb.mu.Unlock() -} - -// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB -// clientConn. The remoteLB clientConn will handle creating/removing remoteLB -// connections. -func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs) - if len(addrs) <= 0 { - return - } - - var remoteBalancerAddrs, backendAddrs []resolver.Address - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - remoteBalancerAddrs = append(remoteBalancerAddrs, a) - } else { - backendAddrs = append(backendAddrs, a) - } - } - - if lb.ccRemoteLB == nil { - if len(remoteBalancerAddrs) <= 0 { - grpclog.Errorf("grpclb: no remote balancer address is available, should never happen") - return - } - // First time receiving resolved addresses, create a cc to remote - // balancers. - lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName) - // Start the fallback goroutine. 
- go lb.fallbackToBackendsAfter(lb.fallbackTimeout) - } - - // cc to remote balancers uses lb.manualResolver. Send the updated remote - // balancer addresses to it through manualResolver. - lb.manualResolver.NewAddress(remoteBalancerAddrs) - - lb.mu.Lock() - lb.resolvedBackendAddrs = backendAddrs - // If serverListReceived is true, connection to remote balancer was - // successful and there's no need to do fallback anymore. - // If fallbackTimerExpired is false, fallback hasn't happened yet. - if !lb.serverListReceived && lb.fallbackTimerExpired { - // This means we received a new list of resolved backends, and we are - // still in fallback mode. Need to update the list of backends we are - // using to the new list of backends. - lb.refreshSubConns(lb.resolvedBackendAddrs) - } - lb.mu.Unlock() -} - -func (lb *lbBalancer) Close() { - select { - case <-lb.doneCh: - return - default: - } - close(lb.doneCh) - if lb.ccRemoteLB != nil { - lb.ccRemoteLB.Close() - } - lb.cc.close() -} diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go deleted file mode 100644 index b3b32b48e..000000000 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go +++ /dev/null @@ -1,799 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_lb_v1/messages/messages.proto - -package messages // import "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
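The grpclb fallback path deleted above waits fallbackTimeout for a server list from the remote balancer and only then switches to the resolver-provided backends. The shape of that wait as a standalone sketch; fallbackAfter and its parameters are illustrative stand-ins for the lbBalancer state shown above.

package main

import (
	"fmt"
	"sync"
	"time"
)

// fallbackAfter mirrors the shape of lbBalancer.fallbackToBackendsAfter: wait for
// the timeout unless doneCh closes first, then fall back only if no server list
// has arrived in the meantime.
func fallbackAfter(timeout time.Duration, doneCh <-chan struct{}, mu *sync.Mutex, serverListReceived *bool, fallback func()) {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-timer.C:
	case <-doneCh:
		return
	}
	mu.Lock()
	defer mu.Unlock()
	if *serverListReceived {
		return
	}
	fallback()
}

func main() {
	var mu sync.Mutex
	received := false
	done := make(chan struct{})
	go fallbackAfter(10*time.Millisecond, done, &mu, &received, func() {
		fmt.Println("falling back to resolved backends")
	})
	time.Sleep(50 * time.Millisecond)
	close(done)
}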
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_messages_b81c731f0e83edbd, []int{0} -} -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (dst *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(dst, src) -} -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) -} - -var xxx_messageInfo_Duration proto.InternalMessageInfo - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_messages_b81c731f0e83edbd, []int{1} -} -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (dst *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(dst, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -type LoadBalanceRequest struct { - // Types that are valid to be assigned to LoadBalanceRequestType: - // *LoadBalanceRequest_InitialRequest - // *LoadBalanceRequest_ClientStats - LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } -func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } -func 
(*LoadBalanceRequest) ProtoMessage() {} -func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_messages_b81c731f0e83edbd, []int{2} -} -func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b) -} -func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic) -} -func (dst *LoadBalanceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LoadBalanceRequest.Merge(dst, src) -} -func (m *LoadBalanceRequest) XXX_Size() int { - return xxx_messageInfo_LoadBalanceRequest.Size(m) -} -func (m *LoadBalanceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo - -type isLoadBalanceRequest_LoadBalanceRequestType interface { - isLoadBalanceRequest_LoadBalanceRequestType() -} - -type LoadBalanceRequest_InitialRequest struct { - InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"` -} -type LoadBalanceRequest_ClientStats struct { - ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"` -} - -func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} -func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} - -func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { - if m != nil { - return m.LoadBalanceRequestType - } - return nil -} - -func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { - if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { - return x.InitialRequest - } - return nil -} - -func (m *LoadBalanceRequest) GetClientStats() *ClientStats { - if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { - return x.ClientStats - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{ - (*LoadBalanceRequest_InitialRequest)(nil), - (*LoadBalanceRequest_ClientStats)(nil), - } -} - -func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*LoadBalanceRequest) - // load_balance_request_type - switch x := m.LoadBalanceRequestType.(type) { - case *LoadBalanceRequest_InitialRequest: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.InitialRequest); err != nil { - return err - } - case *LoadBalanceRequest_ClientStats: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClientStats); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x) - } - return nil -} - -func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*LoadBalanceRequest) - switch tag { - case 1: // load_balance_request_type.initial_request - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(InitialLoadBalanceRequest) - err := b.DecodeMessage(msg) - m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg} - return true, err - case 2: // load_balance_request_type.client_stats - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientStats) - err := b.DecodeMessage(msg) - m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg} - return true, err - default: - return false, nil - } -} - -func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) { - m := msg.(*LoadBalanceRequest) - // load_balance_request_type - switch x := m.LoadBalanceRequestType.(type) { - case *LoadBalanceRequest_InitialRequest: - s := proto.Size(x.InitialRequest) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *LoadBalanceRequest_ClientStats: - s := proto.Size(x.ClientStats) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type InitialLoadBalanceRequest struct { - // Name of load balanced service (IE, balancer.service.com) - // length should be less than 256 bytes. 
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } -func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } -func (*InitialLoadBalanceRequest) ProtoMessage() {} -func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_messages_b81c731f0e83edbd, []int{3} -} -func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b) -} -func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic) -} -func (dst *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InitialLoadBalanceRequest.Merge(dst, src) -} -func (m *InitialLoadBalanceRequest) XXX_Size() int { - return xxx_messageInfo_InitialLoadBalanceRequest.Size(m) -} -func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo - -func (m *InitialLoadBalanceRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// Contains client level statistics that are useful to load balancing. Each -// count except the timestamp should be reset to zero after reporting the stats. -type ClientStats struct { - // The timestamp of generating the report. - Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` - // The total number of RPCs that started. - NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"` - // The total number of RPCs that finished. - NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"` - // The total number of RPCs that were dropped by the client because of rate - // limiting. - NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"` - // The total number of RPCs that were dropped by the client because of load - // balancing. - NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"` - // The total number of RPCs that failed to reach a server except dropped RPCs. - NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"` - // The total number of RPCs that finished and are known to have been received - // by a server. 
- NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClientStats) Reset() { *m = ClientStats{} } -func (m *ClientStats) String() string { return proto.CompactTextString(m) } -func (*ClientStats) ProtoMessage() {} -func (*ClientStats) Descriptor() ([]byte, []int) { - return fileDescriptor_messages_b81c731f0e83edbd, []int{4} -} -func (m *ClientStats) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClientStats.Unmarshal(m, b) -} -func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic) -} -func (dst *ClientStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientStats.Merge(dst, src) -} -func (m *ClientStats) XXX_Size() int { - return xxx_messageInfo_ClientStats.Size(m) -} -func (m *ClientStats) XXX_DiscardUnknown() { - xxx_messageInfo_ClientStats.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientStats proto.InternalMessageInfo - -func (m *ClientStats) GetTimestamp() *Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -func (m *ClientStats) GetNumCallsStarted() int64 { - if m != nil { - return m.NumCallsStarted - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinished() int64 { - if m != nil { - return m.NumCallsFinished - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 { - if m != nil { - return m.NumCallsFinishedWithDropForRateLimiting - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 { - if m != nil { - return m.NumCallsFinishedWithDropForLoadBalancing - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { - if m != nil { - return m.NumCallsFinishedWithClientFailedToSend - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { - if m != nil { - return m.NumCallsFinishedKnownReceived - } - return 0 -} - -type LoadBalanceResponse struct { - // Types that are valid to be assigned to LoadBalanceResponseType: - // *LoadBalanceResponse_InitialResponse - // *LoadBalanceResponse_ServerList - LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } -func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } -func (*LoadBalanceResponse) ProtoMessage() {} -func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_messages_b81c731f0e83edbd, []int{5} -} -func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b) -} -func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic) -} -func (dst *LoadBalanceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LoadBalanceResponse.Merge(dst, src) -} -func (m *LoadBalanceResponse) XXX_Size() int { - return xxx_messageInfo_LoadBalanceResponse.Size(m) -} -func (m *LoadBalanceResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo - -type isLoadBalanceResponse_LoadBalanceResponseType interface { - isLoadBalanceResponse_LoadBalanceResponseType() -} - -type LoadBalanceResponse_InitialResponse struct { - InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"` -} -type LoadBalanceResponse_ServerList struct { - ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"` -} - -func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} -func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} - -func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { - if m != nil { - return m.LoadBalanceResponseType - } - return nil -} - -func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { - if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { - return x.InitialResponse - } - return nil -} - -func (m *LoadBalanceResponse) GetServerList() *ServerList { - if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { - return x.ServerList - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{ - (*LoadBalanceResponse_InitialResponse)(nil), - (*LoadBalanceResponse_ServerList)(nil), - } -} - -func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*LoadBalanceResponse) - // load_balance_response_type - switch x := m.LoadBalanceResponseType.(type) { - case *LoadBalanceResponse_InitialResponse: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.InitialResponse); err != nil { - return err - } - case *LoadBalanceResponse_ServerList: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ServerList); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x) - } - return nil -} - -func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*LoadBalanceResponse) - switch tag { - case 1: // load_balance_response_type.initial_response - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(InitialLoadBalanceResponse) - err := b.DecodeMessage(msg) - m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg} - return true, err - case 2: // load_balance_response_type.server_list - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerList) - err := b.DecodeMessage(msg) - m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg} - return true, err - default: - return false, nil - } -} - -func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) { - m := msg.(*LoadBalanceResponse) - // load_balance_response_type - switch x := m.LoadBalanceResponseType.(type) { - case *LoadBalanceResponse_InitialResponse: - s := proto.Size(x.InitialResponse) - n += 1 
// tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *LoadBalanceResponse_ServerList: - s := proto.Size(x.ServerList) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type InitialLoadBalanceResponse struct { - // This is an application layer redirect that indicates the client should use - // the specified server for load balancing. When this field is non-empty in - // the response, the client should open a separate connection to the - // load_balancer_delegate and call the BalanceLoad method. Its length should - // be less than 64 bytes. - LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"` - // This interval defines how often the client should send the client stats - // to the load balancer. Stats should only be reported when the duration is - // positive. - ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } -func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } -func (*InitialLoadBalanceResponse) ProtoMessage() {} -func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_messages_b81c731f0e83edbd, []int{6} -} -func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b) -} -func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic) -} -func (dst *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_InitialLoadBalanceResponse.Merge(dst, src) -} -func (m *InitialLoadBalanceResponse) XXX_Size() int { - return xxx_messageInfo_InitialLoadBalanceResponse.Size(m) -} -func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo - -func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { - if m != nil { - return m.LoadBalancerDelegate - } - return "" -} - -func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration { - if m != nil { - return m.ClientStatsReportInterval - } - return nil -} - -type ServerList struct { - // Contains a list of servers selected by the load balancer. The list will - // be updated when server resolutions change or as needed to balance load - // across more servers. The client should consume the server list in order - // unless instructed otherwise via the client_config. 
- Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServerList) Reset() { *m = ServerList{} } -func (m *ServerList) String() string { return proto.CompactTextString(m) } -func (*ServerList) ProtoMessage() {} -func (*ServerList) Descriptor() ([]byte, []int) { - return fileDescriptor_messages_b81c731f0e83edbd, []int{7} -} -func (m *ServerList) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServerList.Unmarshal(m, b) -} -func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServerList.Marshal(b, m, deterministic) -} -func (dst *ServerList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerList.Merge(dst, src) -} -func (m *ServerList) XXX_Size() int { - return xxx_messageInfo_ServerList.Size(m) -} -func (m *ServerList) XXX_DiscardUnknown() { - xxx_messageInfo_ServerList.DiscardUnknown(m) -} - -var xxx_messageInfo_ServerList proto.InternalMessageInfo - -func (m *ServerList) GetServers() []*Server { - if m != nil { - return m.Servers - } - return nil -} - -// Contains server information. When none of the [drop_for_*] fields are true, -// use the other fields. When drop_for_rate_limiting is true, ignore all other -// fields. Use drop_for_load_balancing only when it is true and -// drop_for_rate_limiting is false. -type Server struct { - // A resolved address for the server, serialized in network-byte-order. It may - // either be an IPv4 or IPv6 address. - IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` - // A resolved port number for the server. - Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` - // An opaque but printable token given to the frontend for each pick. All - // frontend requests for that pick must include the token in its initial - // metadata. The token is used by the backend to verify the request and to - // allow the backend to report load to the gRPC LB system. - // - // Its length is variable but less than 50 bytes. - LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"` - // Indicates whether this particular request should be dropped by the client - // for rate limiting. - DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"` - // Indicates whether this particular request should be dropped by the client - // for load balancing. 
- DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Server) Reset() { *m = Server{} } -func (m *Server) String() string { return proto.CompactTextString(m) } -func (*Server) ProtoMessage() {} -func (*Server) Descriptor() ([]byte, []int) { - return fileDescriptor_messages_b81c731f0e83edbd, []int{8} -} -func (m *Server) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Server.Unmarshal(m, b) -} -func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Server.Marshal(b, m, deterministic) -} -func (dst *Server) XXX_Merge(src proto.Message) { - xxx_messageInfo_Server.Merge(dst, src) -} -func (m *Server) XXX_Size() int { - return xxx_messageInfo_Server.Size(m) -} -func (m *Server) XXX_DiscardUnknown() { - xxx_messageInfo_Server.DiscardUnknown(m) -} - -var xxx_messageInfo_Server proto.InternalMessageInfo - -func (m *Server) GetIpAddress() []byte { - if m != nil { - return m.IpAddress - } - return nil -} - -func (m *Server) GetPort() int32 { - if m != nil { - return m.Port - } - return 0 -} - -func (m *Server) GetLoadBalanceToken() string { - if m != nil { - return m.LoadBalanceToken - } - return "" -} - -func (m *Server) GetDropForRateLimiting() bool { - if m != nil { - return m.DropForRateLimiting - } - return false -} - -func (m *Server) GetDropForLoadBalancing() bool { - if m != nil { - return m.DropForLoadBalancing - } - return false -} - -func init() { - proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration") - proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp") - proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") - proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") - proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") - proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") - proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") - proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") - proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") -} - -func init() { - proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor_messages_b81c731f0e83edbd) -} - -var fileDescriptor_messages_b81c731f0e83edbd = []byte{ - // 731 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39, - 0x14, 0x26, 0x9b, 0x00, 0xc9, 0x09, 0x5a, 0xb2, 0x26, 0x0b, 0x81, 0x05, 0x89, 0x1d, 0x69, 0xd9, - 0x68, 0xc5, 0x4e, 0x04, 0xd9, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, - 0x55, 0xa9, 0x52, 0x65, 0x39, 0x19, 0x33, 0x58, 0x38, 0xf6, 0xd4, 0x76, 0x82, 0xfa, 0x08, 0x7d, - 0x94, 0x3e, 0x46, 0xd5, 0x67, 0xe8, 0xfb, 0x54, 0xe3, 0x99, 0xc9, 0x0c, 0x10, 0x40, 0xbd, 0x89, - 0xec, 0xe3, 0xef, 0x7c, 0xdf, 0xf1, 0x89, 0xbf, 0x33, 0xe0, 0x85, 0x3a, 0x1a, 0x11, 0x31, 0x24, - 0xd3, 0x83, 0xce, 0x98, 0x19, 0x43, 0x43, 0x66, 0x66, 0x0b, 0x3f, 0xd2, 0xca, 0x2a, 0x04, 0x31, - 0xc6, 0x17, 0x43, 0x7f, 0x7a, 0xe0, 0x3d, 0x85, 0xea, 0xf1, 0x44, 0x53, 0xcb, 0x95, 0x44, 0x2d, - 0x58, 0x36, 0x6c, 0xa4, 0x64, 0x60, 0x5a, 0xa5, 0xdd, 0x52, 0xbb, 0x8c, 0xb3, 0x2d, 0x6a, 0xc2, - 0xa2, 0xa4, 0x52, 0x99, 0xd6, 0x2f, 0xbb, 0xa5, 0xf6, 0x22, 0x4e, 0x36, 0xde, 0x33, 0xa8, 0x9d, - 0xf3, 0x31, 
0x33, 0x96, 0x8e, 0xa3, 0x9f, 0x4e, 0xfe, 0x5a, 0x02, 0x74, 0xa6, 0x68, 0xd0, 0xa3, - 0x82, 0xca, 0x11, 0xc3, 0xec, 0xe3, 0x84, 0x19, 0x8b, 0xde, 0xc0, 0x2a, 0x97, 0xdc, 0x72, 0x2a, - 0x88, 0x4e, 0x42, 0x8e, 0xae, 0x7e, 0xf8, 0x97, 0x9f, 0x57, 0xed, 0x9f, 0x26, 0x90, 0xbb, 0xf9, - 0xfd, 0x05, 0xfc, 0x6b, 0x9a, 0x9f, 0x31, 0x3e, 0x87, 0x95, 0x91, 0xe0, 0x4c, 0x5a, 0x62, 0x2c, - 0xb5, 0x49, 0x15, 0xf5, 0xc3, 0x8d, 0x22, 0xdd, 0x91, 0x3b, 0x1f, 0xc4, 0xc7, 0xfd, 0x05, 0x5c, - 0x1f, 0xe5, 0xdb, 0xde, 0x1f, 0xb0, 0x29, 0x14, 0x0d, 0xc8, 0x30, 0x91, 0xc9, 0x8a, 0x22, 0xf6, - 0x53, 0xc4, 0xbc, 0x0e, 0x6c, 0xde, 0x5b, 0x09, 0x42, 0x50, 0x91, 0x74, 0xcc, 0x5c, 0xf9, 0x35, - 0xec, 0xd6, 0xde, 0xe7, 0x0a, 0xd4, 0x0b, 0x62, 0xa8, 0x0b, 0x35, 0x9b, 0x75, 0x30, 0xbd, 0xe7, - 0xef, 0xc5, 0xc2, 0x66, 0xed, 0xc5, 0x39, 0x0e, 0xfd, 0x03, 0xbf, 0xc9, 0xc9, 0x98, 0x8c, 0xa8, - 0x10, 0x26, 0xbe, 0x93, 0xb6, 0x2c, 0x70, 0xb7, 0x2a, 0xe3, 0x55, 0x39, 0x19, 0x1f, 0xc5, 0xf1, - 0x41, 0x12, 0x46, 0xfb, 0x80, 0x72, 0xec, 0x05, 0x97, 0xdc, 0x5c, 0xb2, 0xa0, 0x55, 0x76, 0xe0, - 0x46, 0x06, 0x3e, 0x49, 0xe3, 0x88, 0x80, 0x7f, 0x17, 0x4d, 0xae, 0xb9, 0xbd, 0x24, 0x81, 0x56, - 0x11, 0xb9, 0x50, 0x9a, 0x68, 0x6a, 0x19, 0x11, 0x7c, 0xcc, 0x2d, 0x97, 0x61, 0xab, 0xe2, 0x98, - 0xfe, 0xbe, 0xcd, 0xf4, 0x8e, 0xdb, 0xcb, 0x63, 0xad, 0xa2, 0x13, 0xa5, 0x31, 0xb5, 0xec, 0x2c, - 0x85, 0x23, 0x0a, 0x9d, 0x47, 0x05, 0x0a, 0xed, 0x8e, 0x15, 0x16, 0x9d, 0x42, 0xfb, 0x01, 0x85, - 0xbc, 0xf7, 0xb1, 0xc4, 0x07, 0xf8, 0xf7, 0x3e, 0x89, 0xf4, 0x19, 0x5c, 0x50, 0x2e, 0x58, 0x40, - 0xac, 0x22, 0x86, 0xc9, 0xa0, 0xb5, 0xe4, 0x04, 0xf6, 0xe6, 0x09, 0x24, 0x7f, 0xd5, 0x89, 0xc3, - 0x9f, 0xab, 0x01, 0x93, 0x01, 0xea, 0xc3, 0x9f, 0x73, 0xe8, 0xaf, 0xa4, 0xba, 0x96, 0x44, 0xb3, - 0x11, 0xe3, 0x53, 0x16, 0xb4, 0x96, 0x1d, 0xe5, 0xce, 0x6d, 0xca, 0xd7, 0x31, 0x0a, 0xa7, 0x20, - 0xef, 0x5b, 0x09, 0xd6, 0x6e, 0x3c, 0x1b, 0x13, 0x29, 0x69, 0x18, 0x1a, 0x40, 0x23, 0x77, 0x40, - 0x12, 0x4b, 0x9f, 0xc6, 0xde, 0x63, 0x16, 0x48, 0xd0, 0xfd, 0x05, 0xbc, 0x3a, 0xf3, 0x40, 0x4a, - 0xfa, 0x04, 0xea, 0x86, 0xe9, 0x29, 0xd3, 0x44, 0x70, 0x63, 0x53, 0x0f, 0xac, 0x17, 0xf9, 0x06, - 0xee, 0xf8, 0x8c, 0x3b, 0x0f, 0x81, 0x99, 0xed, 0x7a, 0xdb, 0xb0, 0x75, 0xcb, 0x01, 0x09, 0x67, - 0x62, 0x81, 0x2f, 0x25, 0xd8, 0xba, 0xbf, 0x14, 0xf4, 0x1f, 0xac, 0x17, 0x93, 0x35, 0x09, 0x98, - 0x60, 0x21, 0xb5, 0x99, 0x2d, 0x9a, 0x22, 0x4f, 0xd2, 0xc7, 0xe9, 0x19, 0x7a, 0x0b, 0xdb, 0x45, - 0xcb, 0x12, 0xcd, 0x22, 0xa5, 0x2d, 0xe1, 0xd2, 0x32, 0x3d, 0xa5, 0x22, 0x2d, 0xbf, 0x59, 0x2c, - 0x3f, 0x1b, 0x62, 0x78, 0xb3, 0xe0, 0x5e, 0xec, 0xf2, 0x4e, 0xd3, 0x34, 0xef, 0x05, 0x40, 0x7e, - 0x4b, 0xb4, 0x1f, 0x0f, 0xac, 0x78, 0x17, 0x0f, 0xac, 0x72, 0xbb, 0x7e, 0x88, 0xee, 0xb6, 0x03, - 0x67, 0x90, 0x57, 0x95, 0x6a, 0xb9, 0x51, 0xf1, 0xbe, 0x97, 0x60, 0x29, 0x39, 0x41, 0x3b, 0x00, - 0x3c, 0x22, 0x34, 0x08, 0x34, 0x33, 0xc9, 0xc8, 0x5b, 0xc1, 0x35, 0x1e, 0xbd, 0x4c, 0x02, 0xb1, - 0xfb, 0x63, 0xed, 0x74, 0xe6, 0xb9, 0x75, 0x6c, 0xc6, 0x1b, 0x9d, 0xb4, 0xea, 0x8a, 0x49, 0x67, - 0xc6, 0x1a, 0x6e, 0x14, 0x1a, 0x71, 0x1e, 0xc7, 0x51, 0x17, 0xd6, 0x1f, 0x30, 0x5d, 0x15, 0xaf, - 0x05, 0x73, 0x0c, 0xf6, 0x3f, 0x6c, 0x3c, 0x64, 0xa4, 0x2a, 0x6e, 0x06, 0x73, 0x4c, 0xd3, 0xeb, - 0xbe, 0x3f, 0x08, 0x95, 0x0a, 0x05, 0xf3, 0x43, 0x25, 0xa8, 0x0c, 0x7d, 0xa5, 0xc3, 0x4e, 0xdc, - 0x0d, 0xf7, 0x23, 0x86, 0x9d, 0x39, 0x5f, 0x95, 0xe1, 0x92, 0xfb, 0x9a, 0x74, 0x7f, 0x04, 0x00, - 0x00, 0xff, 0xff, 0x8e, 0xd0, 0x70, 0xb7, 0x73, 0x06, 0x00, 0x00, -} diff --git 
a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto deleted file mode 100644 index 42d99c109..000000000 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package grpc.lb.v1; -option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"; - -message Duration { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} - -message Timestamp { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} - -message LoadBalanceRequest { - oneof load_balance_request_type { - // This message should be sent on the first request to the load balancer. - InitialLoadBalanceRequest initial_request = 1; - - // The client stats should be periodically reported to the load balancer - // based on the duration defined in the InitialLoadBalanceResponse. - ClientStats client_stats = 2; - } -} - -message InitialLoadBalanceRequest { - // Name of load balanced service (IE, balancer.service.com) - // length should be less than 256 bytes. - string name = 1; -} - -// Contains client level statistics that are useful to load balancing. Each -// count except the timestamp should be reset to zero after reporting the stats. -message ClientStats { - // The timestamp of generating the report. - Timestamp timestamp = 1; - - // The total number of RPCs that started. - int64 num_calls_started = 2; - - // The total number of RPCs that finished. - int64 num_calls_finished = 3; - - // The total number of RPCs that were dropped by the client because of rate - // limiting. - int64 num_calls_finished_with_drop_for_rate_limiting = 4; - - // The total number of RPCs that were dropped by the client because of load - // balancing. - int64 num_calls_finished_with_drop_for_load_balancing = 5; - - // The total number of RPCs that failed to reach a server except dropped RPCs. 
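The deleted messages.proto above carries its own Duration and Timestamp messages (a seconds/nanos pair) instead of the well-known google.protobuf types. A minimal illustrative sketch of turning such a pair into the standard library types; the helper names are hypothetical (grpclb has its own equivalent, convertDuration):

```go
package main

import (
	"fmt"
	"time"
)

// toDuration converts a grpc.lb.v1 Duration-style (seconds, nanos) pair into
// Go's time.Duration. Hypothetical helper, for illustration only.
func toDuration(seconds int64, nanos int32) time.Duration {
	return time.Duration(seconds)*time.Second + time.Duration(nanos)*time.Nanosecond
}

// toTime converts a grpc.lb.v1 Timestamp-style pair into time.Time.
func toTime(seconds int64, nanos int32) time.Time {
	return time.Unix(seconds, int64(nanos))
}

func main() {
	fmt.Println(toDuration(10, 500000000)) // 10.5s
	fmt.Println(toTime(0, 0).UTC())        // 1970-01-01 00:00:00 +0000 UTC
}
```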
- int64 num_calls_finished_with_client_failed_to_send = 6; - - // The total number of RPCs that finished and are known to have been received - // by a server. - int64 num_calls_finished_known_received = 7; -} - -message LoadBalanceResponse { - oneof load_balance_response_type { - // This message should be sent on the first response to the client. - InitialLoadBalanceResponse initial_response = 1; - - // Contains the list of servers selected by the load balancer. The client - // should send requests to these servers in the specified order. - ServerList server_list = 2; - } -} - -message InitialLoadBalanceResponse { - // This is an application layer redirect that indicates the client should use - // the specified server for load balancing. When this field is non-empty in - // the response, the client should open a separate connection to the - // load_balancer_delegate and call the BalanceLoad method. Its length should - // be less than 64 bytes. - string load_balancer_delegate = 1; - - // This interval defines how often the client should send the client stats - // to the load balancer. Stats should only be reported when the duration is - // positive. - Duration client_stats_report_interval = 2; -} - -message ServerList { - // Contains a list of servers selected by the load balancer. The list will - // be updated when server resolutions change or as needed to balance load - // across more servers. The client should consume the server list in order - // unless instructed otherwise via the client_config. - repeated Server servers = 1; - - // Was google.protobuf.Duration expiration_interval. - reserved 3; -} - -// Contains server information. When none of the [drop_for_*] fields are true, -// use the other fields. When drop_for_rate_limiting is true, ignore all other -// fields. Use drop_for_load_balancing only when it is true and -// drop_for_rate_limiting is false. -message Server { - // A resolved address for the server, serialized in network-byte-order. It may - // either be an IPv4 or IPv6 address. - bytes ip_address = 1; - - // A resolved port number for the server. - int32 port = 2; - - // An opaque but printable token given to the frontend for each pick. All - // frontend requests for that pick must include the token in its initial - // metadata. The token is used by the backend to verify the request and to - // allow the backend to report load to the gRPC LB system. - // - // Its length is variable but less than 50 bytes. - string load_balance_token = 3; - - // Indicates whether this particular request should be dropped by the client - // for rate limiting. - bool drop_for_rate_limiting = 4; - - // Indicates whether this particular request should be dropped by the client - // for load balancing. - bool drop_for_load_balancing = 5; -} diff --git a/vendor/google.golang.org/grpc/grpclb_picker.go b/vendor/google.golang.org/grpc/grpclb_picker.go deleted file mode 100644 index 872c7ccea..000000000 --- a/vendor/google.golang.org/grpc/grpclb_picker.go +++ /dev/null @@ -1,159 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "sync" - "sync/atomic" - - "golang.org/x/net/context" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" - "google.golang.org/grpc/status" -) - -type rpcStats struct { - NumCallsStarted int64 - NumCallsFinished int64 - NumCallsFinishedWithDropForRateLimiting int64 - NumCallsFinishedWithDropForLoadBalancing int64 - NumCallsFinishedWithClientFailedToSend int64 - NumCallsFinishedKnownReceived int64 -} - -// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats. -func (s *rpcStats) toClientStats() *lbpb.ClientStats { - stats := &lbpb.ClientStats{ - NumCallsStarted: atomic.SwapInt64(&s.NumCallsStarted, 0), - NumCallsFinished: atomic.SwapInt64(&s.NumCallsFinished, 0), - NumCallsFinishedWithDropForRateLimiting: atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0), - NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0), - NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0), - NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0), - } - return stats -} - -func (s *rpcStats) dropForRateLimiting() { - atomic.AddInt64(&s.NumCallsStarted, 1) - atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1) - atomic.AddInt64(&s.NumCallsFinished, 1) -} - -func (s *rpcStats) dropForLoadBalancing() { - atomic.AddInt64(&s.NumCallsStarted, 1) - atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1) - atomic.AddInt64(&s.NumCallsFinished, 1) -} - -func (s *rpcStats) failedToSend() { - atomic.AddInt64(&s.NumCallsStarted, 1) - atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1) - atomic.AddInt64(&s.NumCallsFinished, 1) -} - -func (s *rpcStats) knownReceived() { - atomic.AddInt64(&s.NumCallsStarted, 1) - atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1) - atomic.AddInt64(&s.NumCallsFinished, 1) -} - -type errPicker struct { - // Pick always returns this err. - err error -} - -func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - return nil, nil, p.err -} - -// rrPicker does roundrobin on subConns. It's typically used when there's no -// response from remote balancer, and grpclb falls back to the resolved -// backends. -// -// It guaranteed that len(subConns) > 0. -type rrPicker struct { - mu sync.Mutex - subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. - subConnsNext int -} - -func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - p.mu.Lock() - defer p.mu.Unlock() - sc := p.subConns[p.subConnsNext] - p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) - return sc, nil, nil -} - -// lbPicker does two layers of picks: -// -// First layer: roundrobin on all servers in serverList, including drops and backends. -// - If it picks a drop, the RPC will fail as being dropped. -// - If it picks a backend, do a second layer pick to pick the real backend. -// -// Second layer: roundrobin on all READY backends. -// -// It's guaranteed that len(serverList) > 0. -type lbPicker struct { - mu sync.Mutex - serverList []*lbpb.Server - serverListNext int - subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. 
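The deleted rpcStats above reports load with a read-and-clear snapshot: each counter is swapped to zero atomically, so increments racing with a report are carried into the next interval instead of being lost. A stand-alone sketch of that pattern (the type and counter names are illustrative, not the grpclb ones):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// counters mimics the snapshot-and-reset pattern used by the deleted rpcStats.
type counters struct {
	started  int64
	finished int64
}

func (c *counters) rpcStarted()  { atomic.AddInt64(&c.started, 1) }
func (c *counters) rpcFinished() { atomic.AddInt64(&c.finished, 1) }

// snapshot returns the current totals and resets each counter to zero in a
// single atomic step, so concurrent increments are never dropped.
func (c *counters) snapshot() (started, finished int64) {
	return atomic.SwapInt64(&c.started, 0), atomic.SwapInt64(&c.finished, 0)
}

func main() {
	var c counters
	c.rpcStarted()
	c.rpcStarted()
	c.rpcFinished()
	s, f := c.snapshot()
	fmt.Println(s, f) // 2 1
	s, f = c.snapshot()
	fmt.Println(s, f) // 0 0
}
```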
- subConnsNext int - - stats *rpcStats -} - -func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - p.mu.Lock() - defer p.mu.Unlock() - - // Layer one roundrobin on serverList. - s := p.serverList[p.serverListNext] - p.serverListNext = (p.serverListNext + 1) % len(p.serverList) - - // If it's a drop, return an error and fail the RPC. - if s.DropForRateLimiting { - p.stats.dropForRateLimiting() - return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb") - } - if s.DropForLoadBalancing { - p.stats.dropForLoadBalancing() - return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb") - } - - // If not a drop but there's no ready subConns. - if len(p.subConns) <= 0 { - return nil, nil, balancer.ErrNoSubConnAvailable - } - - // Return the next ready subConn in the list, also collect rpc stats. - sc := p.subConns[p.subConnsNext] - p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) - done := func(info balancer.DoneInfo) { - if !info.BytesSent { - p.stats.failedToSend() - } else if info.BytesReceived { - p.stats.knownReceived() - } - } - return sc, done, nil -} diff --git a/vendor/google.golang.org/grpc/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/grpclb_remote_balancer.go deleted file mode 100644 index b8dd4f18c..000000000 --- a/vendor/google.golang.org/grpc/grpclb_remote_balancer.go +++ /dev/null @@ -1,266 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "net" - "reflect" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/channelz" - - "google.golang.org/grpc/connectivity" - lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" -) - -// processServerList updates balaner's internal state, create/remove SubConns -// and regenerates picker using the received serverList. -func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { - grpclog.Infof("lbBalancer: processing server list: %+v", l) - lb.mu.Lock() - defer lb.mu.Unlock() - - // Set serverListReceived to true so fallback will not take effect if it has - // not hit timeout. - lb.serverListReceived = true - - // If the new server list == old server list, do nothing. - if reflect.DeepEqual(lb.fullServerList, l.Servers) { - grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring") - return - } - lb.fullServerList = l.Servers - - var backendAddrs []resolver.Address - for _, s := range l.Servers { - if s.DropForLoadBalancing || s.DropForRateLimiting { - continue - } - - md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken) - ip := net.IP(s.IpAddress) - ipStr := ip.String() - if ip.To4() == nil { - // Add square brackets to ipv6 addresses, otherwise net.Dial() and - // net.SplitHostPort() will return too many colons error. 
- ipStr = fmt.Sprintf("[%s]", ipStr) - } - addr := resolver.Address{ - Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), - Metadata: &md, - } - - backendAddrs = append(backendAddrs, addr) - } - - // Call refreshSubConns to create/remove SubConns. - lb.refreshSubConns(backendAddrs) - // Regenerate and update picker no matter if there's update on backends (if - // any SubConn will be newed/removed). Because since the full serverList was - // different, there might be updates in drops or pick weights(different - // number of duplicates). We need to update picker with the fulllist. - // - // Now with cache, even if SubConn was newed/removed, there might be no - // state changes. - lb.regeneratePicker() - lb.cc.UpdateBalancerState(lb.state, lb.picker) -} - -// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool -// indicating whether the backendAddrs are different from the cached -// backendAddrs (whether any SubConn was newed/removed). -// Caller must hold lb.mu. -func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool { - lb.backendAddrs = nil - var backendsUpdated bool - // addrsSet is the set converted from backendAddrs, it's used to quick - // lookup for an address. - addrsSet := make(map[resolver.Address]struct{}) - // Create new SubConns. - for _, addr := range backendAddrs { - addrWithoutMD := addr - addrWithoutMD.Metadata = nil - addrsSet[addrWithoutMD] = struct{}{} - lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD) - - if _, ok := lb.subConns[addrWithoutMD]; !ok { - backendsUpdated = true - - // Use addrWithMD to create the SubConn. - sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err) - continue - } - lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map. - if _, ok := lb.scStates[sc]; !ok { - // Only set state of new sc to IDLE. The state could already be - // READY for cached SubConns. - lb.scStates[sc] = connectivity.Idle - } - sc.Connect() - } - } - - for a, sc := range lb.subConns { - // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - backendsUpdated = true - - lb.cc.RemoveSubConn(sc) - delete(lb.subConns, a) - // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in HandleSubConnStateChange. 
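As the comment in processServerList above notes, a bare IPv6 literal has to be wrapped in square brackets before it is joined with a port, or net.Dial and net.SplitHostPort report a "too many colons" error. The standard library's net.JoinHostPort applies the same rule; a quick illustration:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// IPv4: no brackets needed.
	fmt.Println(net.JoinHostPort("10.0.0.1", "443")) // 10.0.0.1:443

	// IPv6: JoinHostPort adds the brackets that net.SplitHostPort expects.
	addr := net.JoinHostPort("2001:db8::1", "443")
	fmt.Println(addr) // [2001:db8::1]:443

	host, port, err := net.SplitHostPort(addr)
	fmt.Println(host, port, err) // 2001:db8::1 443 <nil>
}
```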
- } - } - - return backendsUpdated -} - -func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error { - for { - reply, err := s.Recv() - if err != nil { - return fmt.Errorf("grpclb: failed to recv server list: %v", err) - } - if serverList := reply.GetServerList(); serverList != nil { - lb.processServerList(serverList) - } - } -} - -func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - case <-s.Context().Done(): - return - } - stats := lb.clientStats.toClientStats() - t := time.Now() - stats.Timestamp = &lbpb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := s.Send(&lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ - ClientStats: stats, - }, - }); err != nil { - return - } - } -} - -func (lb *lbBalancer) callRemoteBalancer() error { - lbClient := &loadBalancerClient{cc: lb.ccRemoteLB} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := lbClient.BalanceLoad(ctx, FailFast(false)) - if err != nil { - return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) - } - - // grpclb handshake on the stream. - initReq := &lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ - InitialRequest: &lbpb.InitialLoadBalanceRequest{ - Name: lb.target, - }, - }, - } - if err := stream.Send(initReq); err != nil { - return fmt.Errorf("grpclb: failed to send init request: %v", err) - } - reply, err := stream.Recv() - if err != nil { - return fmt.Errorf("grpclb: failed to recv init response: %v", err) - } - initResp := reply.GetInitialResponse() - if initResp == nil { - return fmt.Errorf("grpclb: reply from remote balancer did not include initial response") - } - if initResp.LoadBalancerDelegate != "" { - return fmt.Errorf("grpclb: Delegation is not supported") - } - - go func() { - if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { - lb.sendLoadReport(stream, d) - } - }() - return lb.readServerList(stream) -} - -func (lb *lbBalancer) watchRemoteBalancer() { - for { - err := lb.callRemoteBalancer() - select { - case <-lb.doneCh: - return - default: - if err != nil { - grpclog.Error(err) - } - } - - } -} - -func (lb *lbBalancer) dialRemoteLB(remoteLBName string) { - var dopts []DialOption - if creds := lb.opt.DialCreds; creds != nil { - if err := creds.OverrideServerName(remoteLBName); err == nil { - dopts = append(dopts, WithTransportCredentials(creds)) - } else { - grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err) - dopts = append(dopts, WithInsecure()) - } - } else { - dopts = append(dopts, WithInsecure()) - } - if lb.opt.Dialer != nil { - // WithDialer takes a different type of function, so we instead use a - // special DialOption here. - dopts = append(dopts, withContextDialer(lb.opt.Dialer)) - } - // Explicitly set pickfirst as the balancer. - dopts = append(dopts, WithBalancerName(PickFirstBalancerName)) - dopts = append(dopts, withResolverBuilder(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, WithChannelzParentID(lb.opt.ChannelzParentID)) - } - - // DialContext using manualResolver.Scheme, which is a random scheme generated - // when init grpclb. The target name is not important. - cc, err := DialContext(context.Background(), "grpclb:///grpclb.server", dopts...) 
- if err != nil { - grpclog.Fatalf("failed to dial: %v", err) - } - lb.ccRemoteLB = cc - go lb.watchRemoteBalancer() -} diff --git a/vendor/google.golang.org/grpc/grpclb_util.go b/vendor/google.golang.org/grpc/grpclb_util.go deleted file mode 100644 index 063ba9d85..000000000 --- a/vendor/google.golang.org/grpc/grpclb_util.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/resolver" -) - -// The parent ClientConn should re-resolve when grpclb loses connection to the -// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, -// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's -// ResolveNow, and eventually results in re-resolve happening in parent -// ClientConn's resolver (DNS for example). -// -// parent -// ClientConn -// +-----------------------------------------------------------------+ -// | parent +---------------------------------+ | -// | DNS ClientConn | grpclb | | -// | resolver balancerWrapper | | | -// | + + | grpclb grpclb | | -// | | | | ManualResolver ClientConn | | -// | | | | + + | | -// | | | | | | Transient | | -// | | | | | | Failure | | -// | | | | | <--------- | | | -// | | | <--------------- | ResolveNow | | | -// | | <--------- | ResolveNow | | | | | -// | | ResolveNow | | | | | | -// | | | | | | | | -// | + + | + + | | -// | +---------------------------------+ | -// +-----------------------------------------------------------------+ - -// lbManualResolver is used by the ClientConn inside grpclb. It's a manual -// resolver with a special ResolveNow() function. -// -// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, -// so when grpclb client lose contact with remote balancers, the parent -// ClientConn's resolver will re-resolve. -type lbManualResolver struct { - scheme string - ccr resolver.ClientConn - - ccb balancer.ClientConn -} - -func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) { - r.ccr = cc - return r, nil -} - -func (r *lbManualResolver) Scheme() string { - return r.scheme -} - -// ResolveNow calls resolveNow on the parent ClientConn. -func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) { - r.ccb.ResolveNow(o) -} - -// Close is a noop for Resolver. -func (*lbManualResolver) Close() {} - -// NewAddress calls cc.NewAddress. -func (r *lbManualResolver) NewAddress(addrs []resolver.Address) { - r.ccr.NewAddress(addrs) -} - -// NewServiceConfig calls cc.NewServiceConfig. -func (r *lbManualResolver) NewServiceConfig(sc string) { - r.ccr.NewServiceConfig(sc) -} - -const subConnCacheTime = time.Second * 10 - -// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. -// SubConns will be kept in cache for subConnCacheTime before being removed. 
-// -// Its new and remove methods are updated to do cache first. -type lbCacheClientConn struct { - cc balancer.ClientConn - timeout time.Duration - - mu sync.Mutex - // subConnCache only keeps subConns that are being deleted. - subConnCache map[resolver.Address]*subConnCacheEntry - subConnToAddr map[balancer.SubConn]resolver.Address -} - -type subConnCacheEntry struct { - sc balancer.SubConn - - cancel func() - abortDeleting bool -} - -func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { - return &lbCacheClientConn{ - cc: cc, - timeout: subConnCacheTime, - subConnCache: make(map[resolver.Address]*subConnCacheEntry), - subConnToAddr: make(map[balancer.SubConn]resolver.Address), - } -} - -func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) != 1 { - return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) - } - addrWithoutMD := addrs[0] - addrWithoutMD.Metadata = nil - - ccc.mu.Lock() - defer ccc.mu.Unlock() - if entry, ok := ccc.subConnCache[addrWithoutMD]; ok { - // If entry is in subConnCache, the SubConn was being deleted. - // cancel function will never be nil. - entry.cancel() - delete(ccc.subConnCache, addrWithoutMD) - return entry.sc, nil - } - - scNew, err := ccc.cc.NewSubConn(addrs, opts) - if err != nil { - return nil, err - } - - ccc.subConnToAddr[scNew] = addrWithoutMD - return scNew, nil -} - -func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { - ccc.mu.Lock() - defer ccc.mu.Unlock() - addr, ok := ccc.subConnToAddr[sc] - if !ok { - return - } - - if entry, ok := ccc.subConnCache[addr]; ok { - if entry.sc != sc { - // This could happen if NewSubConn was called multiple times for the - // same address, and those SubConns are all removed. We remove sc - // immediately here. - delete(ccc.subConnToAddr, sc) - ccc.cc.RemoveSubConn(sc) - } - return - } - - entry := &subConnCacheEntry{ - sc: sc, - } - ccc.subConnCache[addr] = entry - - timer := time.AfterFunc(ccc.timeout, func() { - ccc.mu.Lock() - if entry.abortDeleting { - return - } - ccc.cc.RemoveSubConn(sc) - delete(ccc.subConnToAddr, sc) - delete(ccc.subConnCache, addr) - ccc.mu.Unlock() - }) - entry.cancel = func() { - if !timer.Stop() { - // If stop was not successful, the timer has fired (this can only - // happen in a race). But the deleting function is blocked on ccc.mu - // because the mutex was held by the caller of this function. - // - // Set abortDeleting to true to abort the deleting function. When - // the lock is released, the deleting function will acquire the - // lock, check the value of abortDeleting and return. - entry.abortDeleting = true - } - } -} - -func (ccc *lbCacheClientConn) UpdateBalancerState(s connectivity.State, p balancer.Picker) { - ccc.cc.UpdateBalancerState(s, p) -} - -func (ccc *lbCacheClientConn) close() { - ccc.mu.Lock() - // Only cancel all existing timers. There's no need to remove SubConns. - for _, entry := range ccc.subConnCache { - entry.cancel() - } - ccc.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index e5906de7d..a1fda2801 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
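The deleted lbCacheClientConn above keeps a removed SubConn alive for subConnCacheTime by scheduling the real removal with time.AfterFunc and cancelling the timer if the same address is re-added; its abortDeleting flag additionally covers the race where the timer has already fired but is blocked on the mutex. A stripped-down sketch of the delayed-delete-with-cancel idea, using plain strings instead of SubConns and omitting the abort flag:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// delayedSet keeps removed keys for ttl before really dropping them, and
// revives a key if it is re-added in the meantime. Illustrative only; the
// real lbCacheClientConn also handles the timer-already-fired race.
type delayedSet struct {
	mu      sync.Mutex
	ttl     time.Duration
	pending map[string]*time.Timer
	live    map[string]bool
}

func newDelayedSet(ttl time.Duration) *delayedSet {
	return &delayedSet{ttl: ttl, pending: map[string]*time.Timer{}, live: map[string]bool{}}
}

func (s *delayedSet) Add(k string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if t, ok := s.pending[k]; ok {
		t.Stop() // cancel the scheduled removal and reuse the entry
		delete(s.pending, k)
	}
	s.live[k] = true
}

func (s *delayedSet) Remove(k string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.live[k] || s.pending[k] != nil {
		return
	}
	s.pending[k] = time.AfterFunc(s.ttl, func() {
		s.mu.Lock()
		defer s.mu.Unlock()
		delete(s.live, k)
		delete(s.pending, k)
	})
}

func main() {
	s := newDelayedSet(50 * time.Millisecond)
	s.Add("a")
	s.Remove("a")
	s.Add("a") // re-added before the timer fires: removal is cancelled
	time.Sleep(100 * time.Millisecond)
	fmt.Println(s.live["a"]) // true
}
```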
-// source: grpc_health_v1/health.proto +// source: grpc/health/v1/health.proto package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1" @@ -46,11 +46,11 @@ func (x HealthCheckResponse_ServingStatus) String() string { return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) } func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_health_8e5b8a3074428511, []int{1, 0} + return fileDescriptor_health_85731b6c49265086, []int{1, 0} } type HealthCheckRequest struct { - Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -60,7 +60,7 @@ func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } func (*HealthCheckRequest) ProtoMessage() {} func (*HealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_health_8e5b8a3074428511, []int{0} + return fileDescriptor_health_85731b6c49265086, []int{0} } func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b) @@ -88,7 +88,7 @@ func (m *HealthCheckRequest) GetService() string { } type HealthCheckResponse struct { - Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -98,7 +98,7 @@ func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } func (*HealthCheckResponse) ProtoMessage() {} func (*HealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_health_8e5b8a3074428511, []int{1} + return fileDescriptor_health_85731b6c49265086, []int{1} } func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b) @@ -139,8 +139,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for Health service - +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type HealthClient interface { Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) } @@ -155,15 +156,14 @@ func NewHealthClient(cc *grpc.ClientConn) HealthClient { func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { out := new(HealthCheckResponse) - err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for Health service - +// HealthServer is the server API for Health service. 
type HealthServer interface { Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) } @@ -200,28 +200,28 @@ var _Health_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "grpc_health_v1/health.proto", + Metadata: "grpc/health/v1/health.proto", } -func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor_health_8e5b8a3074428511) } +func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_85731b6c49265086) } -var fileDescriptor_health_8e5b8a3074428511 = []byte{ - // 269 bytes of a gzipped FileDescriptorProto +var fileDescriptor_health_85731b6c49265086 = []byte{ + // 271 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, - 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a, - 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, - 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, - 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, - 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, - 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, - 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, - 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, - 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, - 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, - 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, - 0xb8, 0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28, - 0x70, 0x03, 0x18, 0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12, - 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x41, 0x1a, 0xa0, 0x71, 0xa0, 0x8f, 0x1a, 0x33, 0xab, - 0x98, 0xf8, 0xdc, 0x41, 0xa6, 0x41, 0x8c, 0xd0, 0x0b, 0x33, 0x4c, 0x62, 0x03, 0x47, 0x92, 0x31, - 0x20, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x70, 0xc4, 0xa7, 0xc3, 0x01, 0x00, 0x00, + 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2, + 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f, + 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82, + 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, + 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, + 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5, + 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d, + 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f, + 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8, + 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, 0x84, + 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, 0xb8, + 0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28, 0x70, + 0x03, 0x18, 
0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12, 0xf3, + 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x91, 0x63, 0x03, 0xc4, 0x8e, 0x87, 0xb0, 0xe3, 0xcb, 0x0c, + 0x57, 0x31, 0xf1, 0xb9, 0x83, 0x4c, 0x83, 0x18, 0xa1, 0x17, 0x66, 0x98, 0xc4, 0x06, 0x8e, 0x24, + 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0x66, 0x81, 0xcb, 0xc3, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto deleted file mode 100644 index bcc02f8ac..000000000 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2015, gRPC Authors -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto - -syntax = "proto3"; - -package grpc.health.v1; - -option csharp_namespace = "Grpc.Health.V1"; -option go_package = "google.golang.org/grpc/health/grpc_health_v1"; -option java_multiple_files = true; -option java_outer_classname = "HealthProto"; -option java_package = "io.grpc.health.v1"; - -message HealthCheckRequest { - string service = 1; -} - -message HealthCheckResponse { - enum ServingStatus { - UNKNOWN = 0; - SERVING = 1; - NOT_SERVING = 2; - } - ServingStatus status = 1; -} - -service Health { - rpc Check(HealthCheckRequest) returns (HealthCheckResponse); -} diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go index de7f9ba79..c2588867e 100644 --- a/vendor/google.golang.org/grpc/health/health.go +++ b/vendor/google.golang.org/grpc/health/health.go @@ -16,7 +16,7 @@ * */ -//go:generate protoc --go_out=plugins=grpc,paths=source_relative:. grpc_health_v1/health.proto +//go:generate ./regenerate.sh // Package health provides some utility functions to health-check a server. The implementation // is based on protobuf. Users need to write their own implementations if other IDLs are used. diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh new file mode 100755 index 000000000..b11eccb29 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/regenerate.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
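The health.proto deleted above (now fetched from the canonical grpc-proto repository by the regenerate.sh script being added here) defines a single Check RPC. A hedged usage sketch of the regenerated grpc_health_v1 client; the server address is a placeholder:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// "localhost:50051" is a hypothetical address for illustration.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// An empty Service name asks about the overall server health.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("health check: %v", err)
	}
	log.Printf("status: %s", resp.Status)
}
```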
+ +set -eux -o pipefail + +TMP=$(mktemp -d) + +function finish { + rm -rf "$TMP" +} +trap finish EXIT + +pushd "$TMP" +mkdir -p grpc/health/v1 +curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto + +protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto +popd +rm -f grpc_health_v1/*.pb.go +cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/ + diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go new file mode 100644 index 000000000..1bd0cce5a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -0,0 +1,78 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implement the backoff strategy for gRPC. +// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "time" + + "google.golang.org/grpc/internal/grpcrand" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +// +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +const ( + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay = 1.0 * time.Second + // factor is applied to the backoff after each retry. + factor = 1.6 + // jitter provides a range to randomize backoff delays. + jitter = 0.2 +) + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return baseDelay + } + backoff, max := float64(baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 000000000..586a0336b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,573 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
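The backoff package added above lives under internal/, so code outside google.golang.org/grpc cannot import it. A self-contained sketch that mirrors the same computation (constants taken from the patch: 1s base delay, 1.6 growth factor, ±20% jitter, capped at MaxDelay):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// expBackoff mirrors the Exponential.Backoff computation shown in the patch.
func expBackoff(retries int, maxDelay time.Duration) time.Duration {
	const (
		baseDelay = 1.0 * float64(time.Second)
		factor    = 1.6
		jitter    = 0.2
	)
	if retries == 0 {
		return time.Duration(baseDelay)
	}
	backoff, max := baseDelay, float64(maxDelay)
	for backoff < max && retries > 0 {
		backoff *= factor // grow the delay per consecutive failure
		retries--
	}
	if backoff > max {
		backoff = max
	}
	// Spread the delay by +/-20% so retrying clients don't move in lockstep.
	backoff *= 1 + jitter*(rand.Float64()*2-1)
	if backoff < 0 {
		return 0
	}
	return time.Duration(backoff)
}

func main() {
	for r := 0; r < 5; r++ {
		fmt.Printf("retry %d: %v\n", r, expBackoff(r, 120*time.Second))
	}
}
```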
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +// +// All APIs in this package are experimental. +package channelz + +import ( + "sort" + "sync" + "sync/atomic" + + "google.golang.org/grpc/grpclog" +) + +var ( + db dbWrapper + idGen idGenerator + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = 50 + curState int32 +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + if !IsOn() { + NewChannelzStorage() + atomic.StoreInt32(&curState, 1) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.CompareAndSwapInt32(&curState, 1, 1) +} + +// dbWarpper wraps around a reference to internal channelz data storage, and +// provide synchronized functionality to set and get the reference. +type dbWrapper struct { + mu sync.RWMutex + DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { + d.mu.Lock() + d.DB = db + d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { + d.mu.RLock() + defer d.mu.RUnlock() + return d.DB +} + +// NewChannelzStorage initializes channelz data storage and id generator. +// +// Note: This function is exported for testing purpose only. User should not call +// it in most cases. +func NewChannelzStorage() { + db.set(&channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + }) + idGen.reset() +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of EntryPerPage, and is +// sorted in ascending id order. +func GetTopChannels(id int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of EntryPerPage, and is +// sorted in ascending id order. +func GetServers(id int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of EntryPerPage, +// and is sorted in ascending id order. 
+func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// RegisterChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). pid = 0 means no parent. It returns the unique channelz tracking id +// assigned to this channel. +func RegisterChannel(c Channel, pid int64, ref string) int64 { + id := idGen.genID() + cn := &channel{ + refName: ref, + c: c, + subChans: make(map[int64]string), + nestedChans: make(map[int64]string), + id: id, + pid: pid, + } + if pid == 0 { + db.get().addChannel(id, cn, true, pid, ref) + } else { + db.get().addChannel(id, cn, false, pid, ref) + } + return id +} + +// RegisterSubChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). It returns the unique channelz tracking id assigned to this subchannel. +func RegisterSubChannel(c Channel, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a SubChannel's parent id cannot be 0") + return 0 + } + id := idGen.genID() + sc := &subChannel{ + refName: ref, + c: c, + sockets: make(map[int64]string), + id: id, + pid: pid, + } + db.get().addSubChannel(id, sc, pid, ref) + return id +} + +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +func RegisterServer(s Server, ref string) int64 { + id := idGen.genID() + svr := &server{ + refName: ref, + s: s, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + id: id, + } + db.get().addServer(id, svr) + return id +} + +// RegisterListenSocket registers the given listen socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this listen socket. +func RegisterListenSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a ListenSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addListenSocket(id, ls, pid, ref) + return id +} + +// RegisterNormalSocket registers the given normal socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this normal socket. +func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a NormalSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addNormalSocket(id, ns, pid, ref) + return id +} + +// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// channelz database. 
+func RemoveEntry(id int64) { + db.get().removeEntry(id) +} + +// channelMap is the storage data structure for channelz. +// Methods of channelMap can be divided in two two categories with respect to locking. +// 1. Methods acquire the global lock. +// 2. Methods that can only be called when global lock is held. +// A second type of method need always to be called inside a first type of method. +type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + servers map[int64]*server + channels map[int64]*channel + subChannels map[int64]*subChannel + listenSockets map[int64]*listenSocket + normalSockets map[int64]*normalSocket +} + +func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c + c.servers[id] = s + c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { + c.mu.Lock() + cn.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else { + c.findEntry(pid).addChild(id, cn) + } + c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { + c.mu.Lock() + sc.cm = c + c.subChannels[id] = sc + c.findEntry(pid).addChild(id, sc) + c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls + c.findEntry(pid).addChild(id, ls) + c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns + c.findEntry(pid).addChild(id, ns) + c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the +// entry, if it has to wait on the deletion of its children, or may lead to a chain +// of entry deletion. For example, deleting the last socket of a gracefully shutting +// down server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. 
+func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) { + c.mu.RLock() + l := len(c.topLevelChannels) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, EntryPerPage)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := 0 + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == EntryPerPage { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + } + return t, end +} + +func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) { + c.mu.RLock() + l := len(c.servers) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, EntryPerPage)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := 0 + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == EntryPerPage { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. 
+ c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := len(svrskts) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, EntryPerPage)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort((int64Slice(ids))) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := 0 + var end bool + for i, v := range ids[idx:] { + if count == EntryPerPage { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + var s []*SocketMetric + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + c.mu.RUnlock() + cm.ChannelData = cn.c.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + c.mu.RUnlock() + cm.ChannelData = sc.c.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 000000000..153d75340 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,418 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "net" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" +) + +// entry represents a node in the channelz database. 
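Editor's note: GetTopChannels, GetServers and GetServerSockets above all page through results the same way: collect the ids, sort them, binary-search for the first id at or after the requested start, and stop after EntryPerPage entries. A standalone sketch of that pattern follows; the page size and data are made up for illustration only.

package main

import (
	"fmt"
	"sort"
)

const entryPerPage = 50 // stand-in for channelz's EntryPerPage

// pageIDs returns up to entryPerPage ids starting at startID, plus a flag
// reporting whether the end of the id space was reached.
func pageIDs(m map[int64]string, startID int64) ([]int64, bool) {
	ids := make([]int64, 0, len(m))
	for id := range m {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	// First index whose id is >= startID, mirroring the sort.Search call above.
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
	end := idx+entryPerPage >= len(ids)
	upper := idx + entryPerPage
	if upper > len(ids) {
		upper = len(ids)
	}
	return ids[idx:upper], end
}

func main() {
	m := map[int64]string{1: "a", 2: "b", 5: "c", 9: "d"}
	page, end := pageIDs(m, 2)
	fmt.Println(page, end) // [2 5 9] true
}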
+type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child + // list is now empty. If both conditions are met, then delete self from database. + deleteSelfIfReady() +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. + // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancel the context and result + // in http2Client to error. The error info is then caught by transport monitor + // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, + // the addrConn will create a new transport. And when registering the new transport in + // channelz, its parent addrConn could have already been torn down and deleted + // from channelz tracking, and thus reach the code here. + grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. 
+ Sockets map[int64]string +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. + CallsStarted int64 + // The number of calls that have completed with an OK status. + CallsSucceeded int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp time.Time + //TODO: trace +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. 
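Editor's note: ChannelInternalMetric is the snapshot a channel implementation hands back to channelz on demand via ChannelzMetric() (the Channel interface is declared just below). The sketch that follows shows one plausible shape for such an implementor, keeping counters with sync/atomic and snapshotting them on request. The countingChannel type and the local metricSnapshot struct are invented; real code inside the grpc module would return *channelz.ChannelInternalMetric instead.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// metricSnapshot mirrors a subset of the ChannelInternalMetric fields above.
type metricSnapshot struct {
	Target                   string
	CallsStarted             int64
	CallsSucceeded           int64
	CallsFailed              int64
	LastCallStartedTimestamp time.Time
}

// countingChannel is a toy stand-in for a grpc channel that satisfies the
// ChannelzMetric() contract by snapshotting atomically updated counters.
type countingChannel struct {
	target        string
	started       int64 // updated with atomic ops on the call path
	succeeded     int64
	failed        int64
	lastStartNano int64
}

func (c *countingChannel) startCall() {
	atomic.AddInt64(&c.started, 1)
	atomic.StoreInt64(&c.lastStartNano, time.Now().UnixNano())
}

func (c *countingChannel) ChannelzMetric() *metricSnapshot {
	return &metricSnapshot{
		Target:                   c.target,
		CallsStarted:             atomic.LoadInt64(&c.started),
		CallsSucceeded:           atomic.LoadInt64(&c.succeeded),
		CallsFailed:              atomic.LoadInt64(&c.failed),
		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&c.lastStartNano)),
	}
}

func main() {
	c := &countingChannel{target: "dns:///example:50051"}
	c.startCall()
	fmt.Printf("%+v\n", *c.ChannelzMetric())
}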
+type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) deleteSelfIfReady() { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return + } + c.cm.deleteEntry(c.id) + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) deleteSelfIfReady() { + if !sc.closeCalled || len(sc.sockets) != 0 { + return + } + sc.cm.deleteEntry(sc.id) + sc.cm.findEntry(sc.pid).deleteChild(sc.id) +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. 
+ LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + //TODO: socket options + //TODO: Security +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. +type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). + ServerData *ServerInternalMetric + // ListenSockets tracks the listener socket type children of this server in the + // format of a map from socket channelz id to corresponding reference string. + ListenSockets map[int64]string +} + +// ServerInternalMetric defines the struct that the implementor of Server interface +// should return from ChannelzMetric(). +type ServerInternalMetric struct { + // The number of incoming calls started on the server. + CallsStarted int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded int64 + // The number of incoming calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the server. 
+ LastCallStartedTimestamp time.Time + //TODO: trace +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. +type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 000000000..200b115ca --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + res := r.Int63n(n) + mu.Unlock() + return res +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + res := r.Intn(n) + mu.Unlock() + return res +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + res := r.Float64() + mu.Unlock() + return res +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 53f177520..cd34267f7 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -15,13 +15,22 @@ * */ -// Package internal contains gRPC-internal code for testing, to avoid polluting -// the godoc of the top-level grpc package. +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package. It must not import any grpc +// symbols to avoid circular dependencies. package internal -// TestingUseHandlerImpl enables the http.Handler-based server implementation. -// It must be called before Serve and requires TLS credentials. -// -// The provided grpcServer must be of type *grpc.Server. 
It is untyped -// for circular dependency reasons. -var TestingUseHandlerImpl func(grpcServer interface{}) +var ( + + // TestingUseHandlerImpl enables the http.Handler-based server implementation. + // It must be called before Serve and requires TLS credentials. + // + // The provided grpcServer must be of type *grpc.Server. It is untyped + // for circular dependency reasons. + TestingUseHandlerImpl func(grpcServer interface{}) + + // WithContextDialer is exported by clientconn.go + WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption + // WithResolverBuilder is exported by clientconn.go + WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption +) diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 0a984e6c8..019e65800 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -25,9 +25,9 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/channelz" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/status" @@ -233,6 +233,8 @@ func (bp *pickerWrapper) close() { close(bp.blockingCh) } +const stickinessKeyCountLimit = 1000 + type stickyStoreEntry struct { acw *acBalancerWrapper addr resolver.Address @@ -243,12 +245,12 @@ type stickyStore struct { // curMDKey is check before every get/put to avoid races. The operation will // abort immediately when the given mdKey is different from the curMDKey. curMDKey string - store map[string]*stickyStoreEntry + store *linkedMap } func newStickyStore() *stickyStore { return &stickyStore{ - store: make(map[string]*stickyStoreEntry), + store: newLinkedMap(), } } @@ -256,7 +258,7 @@ func newStickyStore() *stickyStore { func (ss *stickyStore) reset(newMDKey string) { ss.mu.Lock() ss.curMDKey = newMDKey - ss.store = make(map[string]*stickyStoreEntry) + ss.store.clear() ss.mu.Unlock() } @@ -269,9 +271,12 @@ func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) { return } // TODO(stickiness): limit the total number of entries. 
- ss.store[stickyKey] = &stickyStoreEntry{ + ss.store.put(stickyKey, &stickyStoreEntry{ acw: acw, addr: acw.getAddrConn().getCurAddr(), + }) + if ss.store.len() > stickinessKeyCountLimit { + ss.store.removeOldest() } } @@ -283,18 +288,18 @@ func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, if mdKey != ss.curMDKey { return nil, false } - entry, ok := ss.store[stickyKey] + entry, ok := ss.store.get(stickyKey) if !ok { return nil, false } ac := entry.acw.getAddrConn() if ac.getCurAddr() != entry.addr { - delete(ss.store, stickyKey) + ss.store.remove(stickyKey) return nil, false } t, ok := ac.getReadyTransport() if !ok { - delete(ss.store, stickyKey) + ss.store.remove(stickyKey) return nil, false } return t, true diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index c1cabfc99..048fde67d 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -24,7 +24,6 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" "net" "os" "strconv" @@ -34,6 +33,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" ) @@ -52,7 +52,6 @@ const ( var ( errMissingAddr = errors.New("missing address") - randomGen = rand.New(rand.NewSource(time.Now().UnixNano())) ) // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. @@ -67,6 +66,9 @@ type dnsBuilder struct { // Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + if target.Authority != "" { + return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server") + } host, port, err := parseTarget(target.Endpoint) if err != nil { return nil, err @@ -346,7 +348,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return randomGen.Intn(100)+1 <= *a + return grpcrand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 1b493db2e..494d6931e 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -65,8 +65,8 @@ func parseTarget(target string) (ret resolver.Target) { } // newCCResolverWrapper parses cc.target for scheme and gets the resolver -// builder for this scheme. It then builds the resolver and starts the -// monitoring goroutine for it. +// builder for this scheme and builds the resolver. The monitoring goroutine +// for it is not started yet and can be created by calling start(). // // If withResolverBuilder dial option is set, the specified resolver will be // used instead. @@ -148,7 +148,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { } // NewServiceConfig is called by the resolver implemenetion to send service -// configs to gPRC. +// configs to gRPC. 
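Editor's note: chosenByPercentage above now draws from grpcrand instead of a resolver-local rand.Rand. grpcrand is an internal package and cannot be imported from outside the grpc module, so the sketch below re-creates its mutex-guarded source to show the percentage gate in isolation; the pointer-to-int convention matches the service-config field used above.

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

// A mutex-guarded source, as grpcrand does internally.
var (
	mu  sync.Mutex
	rng = rand.New(rand.NewSource(time.Now().UnixNano()))
)

func intn(n int) int {
	mu.Lock()
	defer mu.Unlock()
	return rng.Intn(n)
}

// chosenByPercentage mirrors the logic above: a nil percentage means "always";
// otherwise the config is picked with probability *a percent.
func chosenByPercentage(a *int) bool {
	if a == nil {
		return true
	}
	return intn(100)+1 <= *a
}

func main() {
	p := 30
	picks := 0
	for i := 0; i < 10000; i++ {
		if chosenByPercentage(&p) {
			picks++
		}
	}
	fmt.Printf("picked roughly %d%% of the time\n", picks/100)
}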
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { select { case <-ccr.scCh: diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 69ef3c0b5..033801f34 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -419,8 +419,8 @@ func (o CustomCodecCallOption) after(c *callInfo) {} type payloadFormat uint8 const ( - compressionNone payloadFormat = iota // no compression - compressionMade + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed ) // parser reads complete gRPC messages from the underlying reader. @@ -477,65 +477,82 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt return pf, msg, nil } -// encode serializes msg and returns a buffer of message header and a buffer of msg. -// If msg is nil, it generates the message header and an empty msg buffer. -// TODO(ddyihai): eliminate extra Compressor parameter. -func encode(c baseCodec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) { - var ( - b []byte - cbuf *bytes.Buffer - ) - const ( - payloadLen = 1 - sizeLen = 4 - ) - if msg != nil { - var err error - b, err = c.Marshal(msg) - if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, _ := compressor.Compress(cbuf) + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) } - if outPayload != nil { - outPayload.Payload = msg - // TODO truncate large payload. - outPayload.Data = b - outPayload.Length = len(b) + if err := z.Close(); err != nil { + return nil, wrapErr(err) } - if compressor != nil || cp != nil { - cbuf = new(bytes.Buffer) - // Has compressor, check Compressor is set by UseCompressor first. 
- if compressor != nil { - z, _ := compressor.Compress(cbuf) - if _, err := z.Write(b); err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) - } - z.Close() - } else { - // If Compressor is not set by UseCompressor, use default Compressor - if err := cp.Do(cbuf, b); err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) - } - } - b = cbuf.Bytes() + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) } } - if uint(len(b)) > math.MaxUint32 { - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) - } + return cbuf.Bytes(), nil +} - bufHeader := make([]byte, payloadLen+sizeLen) - if compressor != nil || cp != nil { - bufHeader[0] = byte(compressionMade) +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData } else { - bufHeader[0] = byte(compressionNone) + hdr[0] = byte(compressionNone) } - // Write length of b into buf - binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b))) - if outPayload != nil { - outPayload.WireLength = payloadLen + sizeLen + len(b) + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, } - return bufHeader, b, nil } func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { @@ -721,7 +738,4 @@ const ( SupportPackageIsVersion5 = true ) -// Version is the current grpc version. 
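Editor's note: the refactor above splits the old monolithic encode into three steps: marshal (encode), optionally compress (compress), then prepend the 5-byte gRPC message header (msgHeader): one flag byte (0 uncompressed, 1 compressed) followed by a big-endian uint32 payload length. A standalone sketch of that framing follows, using gzip as a stand-in compressor; it illustrates the wire layout only and is not the patched code itself.

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/binary"
	"fmt"
)

const headerLen = 5 // 1 flag byte + 4-byte big-endian length, as above

// frame mirrors the encode/compress/msgHeader split: data is the marshaled
// message, compData is nil when no compressor is configured.
func frame(data, compData []byte) (hdr, payload []byte) {
	hdr = make([]byte, headerLen)
	payload = data
	if compData != nil {
		hdr[0] = 1 // compressionMade
		payload = compData
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return hdr, payload
}

func gzipCompress(in []byte) ([]byte, error) {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(in); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	data := []byte("pretend this is a marshaled protobuf message")
	compData, err := gzipCompress(data)
	if err != nil {
		panic(err)
	}
	hdr, payload := frame(data, compData)
	fmt.Printf("flag=%d length=%d wire bytes=%d\n",
		hdr[0], binary.BigEndian.Uint32(hdr[1:]), len(hdr)+len(payload))
}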
-const Version = "1.12.0" - const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 4969331cb..014c72b3f 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -38,13 +38,13 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/trace" - "google.golang.org/grpc/channelz" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" @@ -827,24 +827,24 @@ func (s *Server) incrCallsFailed() { } func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { - var ( - outPayload *stats.OutPayload - ) - if s.opts.statsHandler != nil { - outPayload = &stats.OutPayload{} - } - hdr, data, err := encode(s.getCodec(stream.ContentSubtype()), msg, cp, outPayload, comp) + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { grpclog.Errorln("grpc: server failed to encode response: ", err) return err } - if len(data) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), s.opts.maxSendMessageSize) + compData, err := compress(data, cp, comp) + if err != nil { + grpclog.Errorln("grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) } - err = t.Write(stream, hdr, data, opts) - if err == nil && outPayload != nil { - outPayload.SentTime = time.Now() - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload) + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) } return err } diff --git a/vendor/google.golang.org/grpc/status/go16.go b/vendor/google.golang.org/grpc/status/go16.go new file mode 100644 index 000000000..e59b53e82 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/go16.go @@ -0,0 +1,42 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package status + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/codes" +) + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. 
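Editor's note: FromContextError, added below for both the go1.6 and go1.7 build tags, gives callers a uniform way to turn a context cancellation or deadline into a *status.Status. A short usage sketch against the public status and codes packages; the RPC that would normally consume the context is elided.

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	<-ctx.Done() // pretend an RPC ran past its deadline here

	st := status.FromContextError(ctx.Err())
	if st.Code() == codes.DeadlineExceeded {
		fmt.Println("deadline exceeded:", st.Message())
	}
}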
+func FromContextError(err error) *Status { + switch err { + case nil: + return New(codes.OK, "") + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/google.golang.org/grpc/status/go17.go b/vendor/google.golang.org/grpc/status/go17.go new file mode 100644 index 000000000..090215149 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/go17.go @@ -0,0 +1,44 @@ +// +build go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package status + +import ( + "context" + + netctx "golang.org/x/net/context" + "google.golang.org/grpc/codes" +) + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return New(codes.OK, "") + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/google.golang.org/grpc/stickiness_linkedmap.go b/vendor/google.golang.org/grpc/stickiness_linkedmap.go new file mode 100644 index 000000000..1c726af16 --- /dev/null +++ b/vendor/google.golang.org/grpc/stickiness_linkedmap.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "container/list" +) + +type linkedMapKVPair struct { + key string + value *stickyStoreEntry +} + +// linkedMap is an implementation of a map that supports removing the oldest +// entry. +// +// linkedMap is NOT thread safe. +// +// It's for use of stickiness only! +type linkedMap struct { + m map[string]*list.Element + l *list.List // Head of the list is the oldest element. +} + +// newLinkedMap returns a new LinkedMap. +func newLinkedMap() *linkedMap { + return &linkedMap{ + m: make(map[string]*list.Element), + l: list.New(), + } +} + +// put adds entry (key, value) to the map. Existing key will be overridden. +func (m *linkedMap) put(key string, value *stickyStoreEntry) { + if oldE, ok := m.m[key]; ok { + // Remove existing entry. 
+ m.l.Remove(oldE) + } + e := m.l.PushBack(&linkedMapKVPair{key: key, value: value}) + m.m[key] = e +} + +// get returns the value of the given key. +func (m *linkedMap) get(key string) (*stickyStoreEntry, bool) { + e, ok := m.m[key] + if !ok { + return nil, false + } + m.l.MoveToBack(e) + return e.Value.(*linkedMapKVPair).value, true +} + +// remove removes key from the map, and returns the value. The map is not +// modified if key is not in the map. +func (m *linkedMap) remove(key string) (*stickyStoreEntry, bool) { + e, ok := m.m[key] + if !ok { + return nil, false + } + delete(m.m, key) + m.l.Remove(e) + return e.Value.(*linkedMapKVPair).value, true +} + +// len returns the len of the map. +func (m *linkedMap) len() int { + return len(m.m) +} + +// clear removes all elements from the map. +func (m *linkedMap) clear() { + m.m = make(map[string]*list.Element) + m.l = list.New() +} + +// removeOldest removes the oldest key from the map. +func (m *linkedMap) removeOldest() { + e := m.l.Front() + m.l.Remove(e) + delete(m.m, e.Value.(*linkedMapKVPair).key) +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 82921a15a..152d9eccd 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -27,9 +27,9 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/channelz" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -101,7 +101,21 @@ type ClientStream interface { } // NewStream creates a new Stream for the client side. This is typically -// called by generated code. +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options @@ -113,8 +127,7 @@ func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method st return newClientStream(ctx, desc, cc, method, opts...) } -// NewClientStream creates a new Stream for the client side. This is typically -// called by generated code. +// NewClientStream is a wrapper for ClientConn.NewStream. // // DEPRECATED: Use ClientConn.NewStream instead. 
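Editor's note: linkedMap above is the minimal structure needed to cap the stickiness store: a map for O(1) lookup plus a container/list whose front is always the oldest entry, so removeOldest can evict in O(1). It is unexported, so the sketch below re-creates the same shape with plain string values to show the eviction order; the names are invented for illustration.

package main

import (
	"container/list"
	"fmt"
)

type kv struct {
	key string
	val string
}

// oldestMap keeps insertion/access order in l; Front() is the oldest entry.
type oldestMap struct {
	m map[string]*list.Element
	l *list.List
}

func newOldestMap() *oldestMap {
	return &oldestMap{m: make(map[string]*list.Element), l: list.New()}
}

func (o *oldestMap) put(key, val string) {
	if e, ok := o.m[key]; ok {
		o.l.Remove(e) // overriding an existing key drops the old position
	}
	o.m[key] = o.l.PushBack(&kv{key: key, val: val})
}

func (o *oldestMap) get(key string) (string, bool) {
	e, ok := o.m[key]
	if !ok {
		return "", false
	}
	o.l.MoveToBack(e) // a hit makes the entry "young" again
	return e.Value.(*kv).val, true
}

func (o *oldestMap) removeOldest() {
	e := o.l.Front()
	if e == nil {
		return
	}
	o.l.Remove(e)
	delete(o.m, e.Value.(*kv).key)
}

func main() {
	m := newOldestMap()
	m.put("a", "1")
	m.put("b", "2")
	m.get("a") // refresh "a"; "b" is now the oldest
	m.removeOldest()
	_, ok := m.get("b")
	fmt.Println(ok) // false: "b" was evicted
}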
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { @@ -466,27 +479,27 @@ func (a *csAttempt) sendMsg(m interface{}) (err error) { } a.mu.Unlock() } - var outPayload *stats.OutPayload - if a.statsHandler != nil { - outPayload = &stats.OutPayload{ - Client: true, - } + data, err := encode(cs.codec, m) + if err != nil { + return err } - hdr, data, err := encode(cs.codec, m, cs.cp, outPayload, cs.comp) + compData, err := compress(data, cs.cp, cs.comp) if err != nil { return err } - if len(data) > *cs.c.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize) + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.c.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.c.maxSendMessageSize) } + if !cs.desc.ClientStreams { cs.sentLast = true } - err = a.t.Write(a.s, hdr, data, &transport.Options{Last: !cs.desc.ClientStreams}) + err = a.t.Write(a.s, hdr, payload, &transport.Options{Last: !cs.desc.ClientStreams}) if err == nil { - if outPayload != nil { - outPayload.SentTime = time.Now() - a.statsHandler.HandleRPC(a.ctx, outPayload) + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payload, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -696,23 +709,24 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { ss.t.IncrMsgSent() } }() - var outPayload *stats.OutPayload - if ss.statsHandler != nil { - outPayload = &stats.OutPayload{} + data, err := encode(ss.codec, m) + if err != nil { + return err } - hdr, data, err := encode(ss.codec, m, ss.cp, outPayload, ss.comp) + compData, err := compress(data, ss.cp, ss.comp) if err != nil { return err } - if len(data) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize) + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil { + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if outPayload != nil { - outPayload.SentTime = time.Now() - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload) + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) } return nil } diff --git a/vendor/google.golang.org/grpc/transport/controlbuf.go b/vendor/google.golang.org/grpc/transport/controlbuf.go index e147cd51b..5c5891a11 100644 --- a/vendor/google.golang.org/grpc/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/transport/controlbuf.go @@ -28,6 +28,10 @@ import ( "golang.org/x/net/http2/hpack" ) +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + type itemNode struct { it interface{} next *itemNode @@ -80,6 +84,13 @@ func (il *itemList) isEmpty() bool { // the control buffer of transport. 
They represent different aspects of // control tasks, e.g., flow control, settings, streaming resetting, etc. +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +// headerFrame is also used to register stream on the client-side. type headerFrame struct { streamID uint32 hf []hpack.HeaderField @@ -361,44 +372,47 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato const minBatchSize = 1000 // run should be run in a separate goroutine. -func (l *loopyWriter) run() { - var ( - it interface{} - err error - isEmpty bool - ) +func (l *loopyWriter) run() (err error) { defer func() { - errorf("transport: loopyWriter.run returning. Err: %v", err) + if err == ErrConnClosing { + // Don't log ErrConnClosing as error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + infof("transport: loopyWriter.run returning. %v", err) + err = nil + } }() for { - it, err = l.cbuf.get(true) + it, err := l.cbuf.get(true) if err != nil { - return + return err } if err = l.handle(it); err != nil { - return + return err } if _, err = l.processData(); err != nil { - return + return err } gosched := true hasdata: for { - it, err = l.cbuf.get(false) + it, err := l.cbuf.get(false) if err != nil { - return + return err } if it != nil { if err = l.handle(it); err != nil { - return + return err } if _, err = l.processData(); err != nil { - return + return err } continue hasdata } - if isEmpty, err = l.processData(); err != nil { - return + isEmpty, err := l.processData() + if err != nil { + return err } if !isEmpty { continue hasdata @@ -450,30 +464,39 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { return l.framer.fr.WriteSettingsAck() } +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { - if h.endStream { // Case 1.A: Server wants to close stream. - // Make sure it's not a trailers only response. - if str, ok := l.estdStreams[h.streamID]; ok { - if str.state != empty { // either active or waiting on stream quota. - // add it str's list of items. - str.itl.enqueue(h) - return nil - } - } - if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { - return err - } - return l.cleanupStreamHandler(h.cleanup) + str, ok := l.estdStreams[h.streamID] + if !ok { + warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) } - // Case 1.B: Server is responding back with headers. - str := &outStream{ - state: empty, - itl: &itemList{}, - wq: h.wq, + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err } - l.estdStreams[h.streamID] = str - return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + return l.cleanupStreamHandler(h.cleanup) } // Case 2: Client wants to originate stream. 
str := &outStream{ @@ -632,6 +655,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.outgoingSettingsHandler(i) case *headerFrame: return l.headerHandler(i) + case *registerStream: + return l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *incomingGoAway: @@ -664,6 +689,8 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error { } } } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) } } return nil diff --git a/vendor/google.golang.org/grpc/transport/flowcontrol.go b/vendor/google.golang.org/grpc/transport/flowcontrol.go index 378f5c450..bbf98b6f5 100644 --- a/vendor/google.golang.org/grpc/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/transport/flowcontrol.go @@ -58,14 +58,20 @@ type writeQuota struct { ch chan struct{} // done is triggered in error case. done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. + // It is implemented as a field so that it can be updated + // by tests. + replenish func(n int) } func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { - return &writeQuota{ + w := &writeQuota{ quota: sz, ch: make(chan struct{}, 1), done: done, } + w.replenish = w.realReplenish + return w } func (w *writeQuota) get(sz int32) error { @@ -83,7 +89,7 @@ func (w *writeQuota) get(sz int32) error { } } -func (w *writeQuota) replenish(n int) { +func (w *writeQuota) realReplenish(n int) { sz := int32(n) a := atomic.AddInt32(&w.quota, sz) b := a - sz diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 1fdabd954..eaf007eb0 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -31,9 +31,9 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/channelz" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -76,8 +76,9 @@ type http2Client struct { // Boolean to keep track of reading activity on transport. // 1 is true and 0 is false. - activity uint32 // Accessed atomically. - kp keepalive.ClientParameters + activity uint32 // Accessed atomically. + kp keepalive.ClientParameters + keepaliveEnabled bool statsHandler stats.Handler @@ -259,6 +260,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne if channelz.IsOn() { t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "") } + if t.kp.Time != infinity { + t.keepaliveEnabled = true + go t.keepalive() + } // Start the reader goroutine for incoming message. Each transport has // a dedicated goroutine which reads HTTP2 frame from network. Then it // dispatches the frame to the corresponding stream entity. @@ -295,13 +300,17 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.framer.writer.Flush() go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - t.loopy.run() - t.conn.Close() + err := t.loopy.run() + if err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + // If it's a connection error, let reader goroutine handle it + // since there might be data in the buffers. 
+ if _, ok := err.(net.Error); !ok { + t.conn.Close() + } close(t.writerDone) }() - if t.kp.Time != infinity { - go t.keepalive() - } return t, nil } @@ -537,7 +546,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea var sendPing bool // If the number of active streams change from 0 to 1, then check if keepalive // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 { + if len(t.activeStreams) == 1 && t.keepaliveEnabled { select { case t.awakenKeepalive <- struct{}{}: sendPing = true @@ -735,6 +744,7 @@ func (t *http2Client) GracefulClose() error { if active == 0 { return t.Close() } + t.controlBuf.put(&incomingGoAway{}) return nil } @@ -1109,7 +1119,9 @@ func (t *http2Client) reader() { t.Close() return } - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + if t.keepaliveEnabled { + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + } sf, ok := frame.(*http2.SettingsFrame) if !ok { t.Close() @@ -1121,7 +1133,9 @@ func (t *http2Client) reader() { // loop to keep reading incoming messages on this transport. for { frame, err := t.framer.fr.ReadFrame() - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + if t.keepaliveEnabled { + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + } if err != nil { // Abort an active stream if the http2.Framer returns a // http2.StreamError. This can happen only if the server's response diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index 8b93e222e..19acedb2b 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -24,7 +24,6 @@ import ( "fmt" "io" "math" - "math/rand" "net" "strconv" "sync" @@ -36,9 +35,11 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/channelz" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -273,7 +274,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - t.loopy.run() + if err := t.loopy.run(); err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } t.conn.Close() close(t.writerDone) }() @@ -413,6 +416,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.updateWindow(s, uint32(n)) }, } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) handle(s) return } @@ -682,12 +690,25 @@ func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { }) } +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + // WriteHeader sends the header metedata md back to the client. 
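Editor's note: appendHeaderFieldsFromMD above is the helper WriteHeader and WriteStatus now share: walk the metadata.MD map, skip reserved keys, and emit one hpack.HeaderField per value. The hedged standalone sketch below shows the same shape; isReserved is a simplified stand-in for grpc's real isReservedHeader check, and the binary-header (-bin) encoding step performed by encodeMetadataHeader is omitted.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/http2/hpack"
	"google.golang.org/grpc/metadata"
)

// isReserved is a simplified stand-in: real code also filters keys such as
// "content-type", "te" and other grpc-internal headers.
func isReserved(k string) bool {
	return strings.HasPrefix(k, ":") || strings.HasPrefix(k, "grpc-")
}

func headerFieldsFromMD(md metadata.MD) []hpack.HeaderField {
	fields := make([]hpack.HeaderField, 0, md.Len())
	for k, vv := range md {
		if isReserved(k) {
			continue // clients reject reserved headers mixed in with user ones
		}
		for _, v := range vv {
			fields = append(fields, hpack.HeaderField{Name: k, Value: v})
		}
	}
	return fields
}

func main() {
	md := metadata.Pairs("user-key", "v1", "user-key", "v2", "grpc-status", "0")
	for _, f := range headerFieldsFromMD(md) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	}
}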
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.headerOk || s.getState() == streamDone { + if s.updateHeaderSent() || s.getState() == streamDone { return ErrIllegalHeaderWrite } - s.headerOk = true + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -695,7 +716,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { s.header = md } } - md = s.header + t.writeHeaderLocked(s) + s.hdrMu.Unlock() + return nil +} + +func (t *http2Server) writeHeaderLocked(s *Stream) { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -704,15 +730,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { if s.sendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } - for k, vv := range md { - if isReservedHeader(k) { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) t.controlBuf.put(&headerFrame{ streamID: s.id, hf: headerFields, @@ -720,7 +738,6 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { onWrite: func() { atomic.StoreUint32(&t.resetPingStrikes, 1) }, - wq: s.wq, }) if t.stats != nil { // Note: WireLength is not set in outHeader. @@ -728,7 +745,6 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { outHeader := &stats.OutHeader{} t.stats.HandleRPC(s.Context(), outHeader) } - return nil } // WriteStatus sends stream status to the client and terminates the stream. @@ -736,21 +752,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { - if !s.headerOk && s.header.Len() > 0 { - if err := t.WriteHeader(s, nil); err != nil { - return err - } - } else { - if s.getState() == streamDone { - return nil - } + if s.getState() == streamDone { + return nil } + s.hdrMu.Lock() // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. - if !s.headerOk { - headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + t.writeHeaderLocked(s) + } else { // Send a trailer only response. 
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + } } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) @@ -759,23 +774,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - panic(err) + grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + } else { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } - - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } // Attach the trailer metadata. - for k, vv := range s.trailer { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } - trailer := &headerFrame{ + headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ streamID: s.id, hf: headerFields, endStream: true, @@ -783,7 +790,8 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { atomic.StoreUint32(&t.resetPingStrikes, 1) }, } - t.closeStream(s, false, 0, trailer, true) + s.hdrMu.Unlock() + t.closeStream(s, false, 0, trailingHeader, true) if t.stats != nil { t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } @@ -793,7 +801,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into an HTTP2 data frame and sends it out. Non-nil error // is returned if it fails (e.g., framing error, transport error). func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - if !s.headerOk { // Headers haven't been written yet. + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { // TODO(mmukhi, dfawley): Make sure this is the right code to return. return streamErrorf(codes.Internal, "transport: %v", err) @@ -1123,14 +1131,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) - func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := rgen.Int63n(2*r) - r + j := grpcrand.Int63n(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index a35586608..7d15c7d74 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -28,6 +28,7 @@ import ( "strconv" "strings" "time" + "unicode/utf8" "github.com/golang/protobuf/proto" "golang.org/x/net/http2" @@ -437,16 +438,17 @@ func decodeTimeout(s string) (time.Duration, error) { const ( spaceByte = ' ' - tildaByte = '~' + tildeByte = '~' percentByte = '%' ) // encodeGrpcMessage is used to encode status code in header field -// "grpc-message".
-// It checks to see if each individual byte in msg is an -// allowable byte, and then either percent encoding or passing it through. -// When percent encoding, the byte is converted into hexadecimal notation -// with a '%' prepended. +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. func encodeGrpcMessage(msg string) string { if msg == "" { return "" @@ -454,7 +456,7 @@ func encodeGrpcMessage(msg string) string { lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] - if !(c >= spaceByte && c < tildaByte && c != percentByte) { + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { return encodeGrpcMessageUnchecked(msg) } } @@ -463,14 +465,26 @@ func encodeGrpcMessage(msg string) string { func encodeGrpcMessageUnchecked(msg string) string { var buf bytes.Buffer - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if c >= spaceByte && c < tildaByte && c != percentByte { - buf.WriteByte(c) - } else { - buf.WriteString(fmt.Sprintf("%%%02X", c)) + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } } + msg = msg[size:] } return buf.String() } @@ -531,10 +545,14 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { if w.err != nil { return 0, w.err } - n = copy(w.buf[w.offset:], b) - w.offset += n - if w.offset >= w.batchSize { - err = w.Flush() + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } } return n, err } diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index 2f643a3d0..f51f87888 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -185,13 +185,20 @@ type Stream struct { headerChan chan struct{} // closed to indicate the end of header metadata. headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - header metadata.MD // the received header metadata. - trailer metadata.MD // the key-value map of trailer metadata. - headerOk bool // becomes true from the first header is about to send - state streamState + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + header metadata.MD // the received header metadata. + trailer metadata.MD // the key-value map of trailer metadata. - status *status.Status // the status error received from the server + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. 
+ status *status.Status bytesReceived uint32 // indicates whether any bytes have been received on this stream unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream @@ -201,6 +208,17 @@ type Stream struct { contentSubtype string } +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. It is valid only on server-side. +func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + func (s *Stream) swapState(st streamState) streamState { return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) } @@ -313,10 +331,12 @@ func (s *Stream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } - if s.headerOk || atomic.LoadUint32((*uint32)(&s.state)) == uint32(streamDone) { + if s.isHeaderSent() || s.getState() == streamDone { return ErrIllegalHeaderWrite } + s.hdrMu.Lock() s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() return nil } @@ -335,7 +355,12 @@ func (s *Stream) SetTrailer(md metadata.MD) error { if md.Len() == 0 { return nil } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() return nil } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 000000000..7f124fbd5 --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.13.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 6126ab64b..079bc2896 100755 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -54,7 +54,8 @@ if git status --porcelain | read; then fi git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) -git ls-files "*.go" | xargs grep -l "\"unsafe\"" 2>&1 | (! grep -v '_test.go') | tee /dev/stderr | (! read) +git ls-files "*.go" | xargs grep -l '"unsafe"' 2>&1 | (! grep -v '_test.go') | tee /dev/stderr | (! read) +git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') | tee /dev/stderr | (! read) gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) goimports -l . 2>&1 | tee /dev/stderr | (! read) golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (!
read) diff --git a/vendor/gopkg.in/olivere/elastic.v5/.travis.yml b/vendor/gopkg.in/olivere/elastic.v5/.travis.yml index 345d1bf86..d88c9c642 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/.travis.yml +++ b/vendor/gopkg.in/olivere/elastic.v5/.travis.yml @@ -10,6 +10,7 @@ matrix: services: - docker before_install: + - go get github.com/fortytw2/leaktest - sudo sysctl -w vm.max_map_count=262144 - docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:5.6.9 elasticsearch -Expack.security.enabled=false -Escript.inline=true -Escript.stored=true -Escript.file=true -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ >& /dev/null & - sleep 30 diff --git a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS index 94fd0ecd2..55fe6d95a 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS +++ b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS @@ -86,6 +86,7 @@ Kenta SUZUKI [@suzuken](https://github.com/suzuken) Kevin Mulvey [@kmulvey](https://github.com/kmulvey) Kyle Brandt [@kylebrandt](https://github.com/kylebrandt) Leandro Piccilli [@lpic10](https://github.com/lpic10) +Lee [@leezhm](https://github.com/leezhm) M. Zulfa Achsani [@misterciput](https://github.com/misterciput) Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh) Mara Kim [@autochthe](https://github.com/autochthe) @@ -109,6 +110,7 @@ Nicolae Vartolomei [@nvartolomei](https://github.com/nvartolomei) Orne Brocaar [@brocaar](https://github.com/brocaar) Paul [@eyeamera](https://github.com/eyeamera) Pete C [@peteclark-ft](https://github.com/peteclark-ft) +Paolo [@ppiccolo](https://github.com/ppiccolo) Radoslaw Wesolowski [r--w](https://github.com/r--w) rchicoli [@rchicoli](https://github.com/rchicoli) Roman Colohanin [@zuzmic](https://github.com/zuzmic) diff --git a/vendor/gopkg.in/olivere/elastic.v5/client.go b/vendor/gopkg.in/olivere/elastic.v5/client.go index 3d7478422..2e190a579 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/client.go +++ b/vendor/gopkg.in/olivere/elastic.v5/client.go @@ -26,7 +26,7 @@ import ( const ( // Version is the current version of Elastic. - Version = "5.0.69" + Version = "5.0.70" // DefaultURL is the default endpoint of Elasticsearch on the local machine. // It is used e.g. when initializing a new Client without a specific URL. @@ -1307,7 +1307,7 @@ func (c *Client) PerformRequestWithOptions(ctx context.Context, opt PerformReque } if ue, ok := err.(*url.Error); ok { // This happens e.g. on redirect errors, see https://golang.org/src/net/http/client_test.go#L329 - if ue.Err == context.Canceled || ue.Err == context.DeadlineExceeded { + if ue.Err == context.Canceled || ue.Err == context.DeadlineExceeded || ue.Temporary() { // Proceed, but don't mark the node as dead return nil, err } diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go index 7df110cd2..aa46b34c9 100644 --- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go @@ -19,6 +19,7 @@ type HasParentQuery struct { score *bool queryName string innerHit *InnerHit + ignoreUnmapped *bool } // NewHasParentQuery creates and initializes a new has_parent query. 
@@ -55,6 +56,13 @@ func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery { return q } +// IgnoreUnmapped specifies whether unmapped types should be ignored. +// If set to false, the query fails when an unmapped type is found. +func (q *HasParentQuery) IgnoreUnmapped(ignore bool) *HasParentQuery { + q.ignoreUnmapped = &ignore + return q +} + // Source returns JSON for the has_parent query. func (q *HasParentQuery) Source() (interface{}, error) { // { @@ -93,5 +101,8 @@ func (q *HasParentQuery) Source() (interface{}, error) { } query["inner_hits"] = src } + if q.ignoreUnmapped != nil { + query["ignore_unmapped"] = *q.ignoreUnmapped + } return source, nil } -- cgit v1.2.3-1-g7c22
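The http_util.go hunk above rewrites encodeGrpcMessageUnchecked so that runes are decoded before escaping: invalid UTF-8 collapses to the Unicode replacement character, '~' becomes an allowable byte, and everything outside printable ASCII (or '%') is percent encoded byte by byte. The following is a minimal standalone sketch of that scheme, not the vendored code; percentEncodeMessage and the sample message are invented for illustration.

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// percentEncodeMessage mirrors the encoding described above: printable ASCII
// other than '%' passes through, while every other byte, including each byte
// of a multi-byte rune and of the U+FFFD replacement for invalid UTF-8, is
// written as %XX.
func percentEncodeMessage(msg string) string {
	var b strings.Builder
	for len(msg) > 0 {
		r, size := utf8.DecodeRuneInString(msg) // invalid UTF-8 yields utf8.RuneError
		for _, c := range []byte(string(r)) {
			if size == 1 && c >= ' ' && c <= '~' && c != '%' {
				b.WriteByte(c)
			} else {
				fmt.Fprintf(&b, "%%%02X", c)
			}
		}
		msg = msg[size:]
	}
	return b.String()
}

func main() {
	// ASCII passes through, the two-byte 'é' is escaped, and the stray 0xFF
	// is replaced by U+FFFD before escaping.
	fmt.Println(percentEncodeMessage("café\xff")) // caf%C3%A9%EF%BF%BD
}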
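The elastic.v5 change above adds an ignore_unmapped option to has_parent queries. Below is a hedged usage sketch; the parent type "question" and the term field "tag" are invented for illustration, and only IgnoreUnmapped and Source come from the patch itself.

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Build a has_parent query that tolerates indices where the parent type
	// is not mapped instead of failing the whole search.
	q := elastic.NewHasParentQuery("question", elastic.NewTermQuery("tag", "go")).
		IgnoreUnmapped(true)

	src, err := q.Source() // the JSON-serializable query body
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out)) // contains "ignore_unmapped":true
}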